metadata (dict) | text (string, 60 to 3.49M chars)
---|---
{
"source": "JordLecourt/Financial-ML",
"score": 3
} |
#### File: Twin Delayed DDPG/RL/agent.py
```python
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from network import Actor, Critic
import base64
from io import BytesIO
import json
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class TD3(object):
"""
Agent class that handles the training of the networks and provides outputs as actions
:param state_dim (int): state size
:param action_dim (int): action size
:param max_action (float): highest action to take
:param device (device): cuda or cpu to process tensors
:param env (env): gym environment to use
"""
def __init__(self, algorithm, symbol, state_dim, action_dim, max_action, seed, h1_units=400, h2_units=300, warmup=1000):
self.is_trained = False
self.algorithm = algorithm
self.symbol = symbol
self.actor = Actor(state_dim, action_dim, max_action, seed, h1_units, h2_units).to(device)
self.actor_target = Actor(state_dim, action_dim, max_action, seed, h1_units, h2_units).to(device)
self.actor_target.load_state_dict(self.actor.state_dict())
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=1e-3)
self.critic = Critic(state_dim, action_dim, seed, h1_units, h2_units).to(device)
self.critic_target = Critic(state_dim, action_dim, seed, h1_units, h2_units).to(device)
self.critic_target.load_state_dict(self.critic.state_dict())
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=1e-3)
self.max_action = max_action
self.warmup = warmup
self.time_step = 0
def select_action(self, state, noise=0.1):
"""
Select an appropriate action from the agent policy
:param state (array): current state of environment
        :param noise (float): how much noise to add to actions
:return action (float): action clipped within action range
"""
state = torch.FloatTensor(state.reshape(1, -1)).to(device)
action = self.actor(state).cpu().data.numpy().flatten()
if noise != 0:
action = (action + np.random.normal(0, noise, size=1))
self.time_step += 1
return action.clip(-self.max_action, self.max_action)
def train(self, replay_buffer, iterations, batch_size=100, discount=0.99, tau=0.005, policy_noise=0.2, noise_clip=0.5, policy_freq=2):
"""
Train and update actor and critic networks
:param replay_buffer (ReplayBuffer): buffer for experience replay
:param iterations (int): how many times to run training
:param batch_size(int): batch size to sample from replay buffer
:param discount (float): discount factor
:param tau (float): soft update for main networks to target networks
:return actor_loss (float): loss from actor network
:return critic_loss (float): loss from critic network
"""
if replay_buffer.cntr < batch_size:
return
for it in range(iterations):
# Sample replay buffer
_state, _next_state, _action, _reward, _done = replay_buffer.sample(batch_size)
state = torch.FloatTensor(_state.reshape(batch_size, -1)).to(device)
next_state = torch.FloatTensor(_next_state.reshape(batch_size, -1)).to(device)
action = torch.FloatTensor(_action).to(device)
reward = torch.FloatTensor(_reward).to(device)
done = torch.FloatTensor(1 - _done).to(device)
# Select action according to policy and add clipped noise
next_action = self.actor_target(next_state)
            noise = torch.clamp(torch.randn_like(next_action) * policy_noise, -noise_clip, noise_clip)
            next_action = next_action + noise
next_action = torch.clamp(next_action, -self.max_action, self.max_action)
# Compute the target Q value
target_Q1, target_Q2 = self.critic_target(next_state, next_action)
target_Q = torch.min(target_Q1, target_Q2)
target_Q = reward + (done * discount * target_Q).detach()
# Get current Q estimates
current_Q1, current_Q2 = self.critic(state, action)
# Compute critic loss
critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)
# Optimize the critic
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
# Delayed policy updates
if it % policy_freq == 0:
# Compute actor loss
actor_loss = -self.critic.Q1(state, self.actor(state)).mean()
# Optimize the actor
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
# Update the frozen target models
for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)
for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)
def save(self, filename):
torch.save(self.actor.state_dict(), '%s_%s_actor.pth' % (self.symbol, filename))
torch.save(self.critic.state_dict(), '%s_%s_critic.pth' % (self.symbol, filename))
def load(self, filename):
self.actor.load_state_dict(torch.load('%s_%s_actor.pth' % (self.symbol, filename)))
self.critic.load_state_dict(torch.load('%s_%s_critic.pth' % (self.symbol, filename)))
```
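A minimal sketch of how this agent might be driven. The `ReplayBuffer` interface (`cntr`, `sample(batch_size)`) is inferred from `TD3.train` above; the `add()` call, the environment loop, and the hyperparameters here are illustrative assumptions, not code from the repository.

```python
# Hypothetical training loop for the TD3 agent above.
# Assumes a ReplayBuffer exposing cntr, sample(batch_size) and an add(...) method.
import numpy as np

def run_episode(env, agent, replay_buffer, train_iterations=10):
    state = env.reset()
    done = False
    while not done:
        if agent.time_step < agent.warmup:
            # random exploration until the warm-up period is over
            action = np.random.uniform(-agent.max_action, agent.max_action, size=1)
            agent.time_step += 1  # select_action() normally increments this counter
        else:
            action = agent.select_action(np.array(state), noise=0.1)
        next_state, reward, done, info = env.step(action)
        replay_buffer.add(state, next_state, action, reward, float(done))  # assumed signature
        state = next_state
    agent.train(replay_buffer, iterations=train_iterations)
    return info
```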
#### File: Twin Delayed DDPG/RL/environment.py
```python
import gym
from gym import spaces
import numpy as np
import pandas as pd
import random
from enum import Enum
from scipy.stats import linregress
class Actions(Enum):
    """ enum for the three actions the agent can take
    Null = 0
    Sell = 1
    Buy = 2
    """
    Null = 0
    Sell = 1
    Buy = 2
class Positions(Enum):
    """ enum for the three positions the agent can be in
    Null = 0
    Short = 1
    Long = 2
    """
    Null = 0
    Short = 1
    Long = 2
class TradingEnv(gym.Env):
def __init__(self, symbol_data=None, window_size=10, start_tick=0, end_tick=None):
self.window_size = window_size
self.symbol_data = symbol_data
self.isReady = False
self.action_space = spaces.Box(-1, +1, (1,), dtype=np.float32)
# transaction fee (in %)
self.trade_fee_bid_percent = 0.005
self.trade_fee_ask_percent = 0.005
# episode
self._start_tick = start_tick + self.window_size
self._done = None
self._current_tick = None
self._last_trade_tick = None
self._position = None
self._position_history = None
self._total_reward = None
self._total_profit = None
self.history = None
if end_tick is None:
self._end_tick = symbol_data.length() - 1
else:
self._end_tick = end_tick - 1
def reset(self, randomIndex=False):
self._done = False
self._current_tick = self._start_tick
self._last_trade_tick = -1
self._position = Positions.Null
self._position_history = (self.window_size * [None]) + [self._position]
self._total_reward = 0.
self._total_profit = 1.
self.history = {}
observations = self._get_observation()
self.observation_space = spaces.Box(-np.inf, np.inf, shape=(len(observations),), dtype=np.float32)
return observations
def _get_observation(self):
return self.symbol_data.get_observation(self.window_size, self._current_tick)
def _is_closing_trade(self, action):
if ((action == Actions.Buy and self._position == Positions.Short) or
(action == Actions.Sell and self._position == Positions.Long)):
return True
else:
return False
def _get_reward(self, action):
step_reward = 0
# If we are closing a position, get the profit/loss
if self._is_closing_trade(action):
close_index = self.symbol_data.signal_features.columns.get_loc('close')
current_price = self.symbol_data.signal_features.iat[self._current_tick, close_index]
last_trade_price = self.symbol_data.signal_features.iat[self._last_trade_tick, close_index]
price_diff = current_price - last_trade_price
if self._position == Positions.Long:
step_reward += price_diff
else:
step_reward -= price_diff
return step_reward
def _update_profit(self, action):
# If we are closing a position, update the total profit
if self._is_closing_trade(action) or self._done:
close_index = self.symbol_data.signal_features.columns.get_loc('close')
current_price = self.symbol_data.signal_features.iat[self._current_tick, close_index]
last_trade_price = self.symbol_data.signal_features.iat[self._last_trade_tick, close_index]
if self._position == Positions.Long:
shares = (self._total_profit * (1 - self.trade_fee_ask_percent)) / last_trade_price
self._total_profit = (shares * (1 - self.trade_fee_bid_percent)) * current_price
else:
shares = (self._total_profit * (1 - self.trade_fee_ask_percent)) / current_price
self._total_profit = (shares * (1 - self.trade_fee_bid_percent)) * last_trade_price
def step(self, action):
# Get discrete action from float
action_float = float(action)
if action_float >= 0.1: action = Actions.Buy
elif action_float <= -0.1: action = Actions.Sell
else: action = Actions.Null
self._done = False
self._current_tick += 1
if self._current_tick == self._end_tick:
self._done = True
step_reward = self._get_reward(action)
        self._total_reward += step_reward
self._update_profit(action)
        # If the action closes a trade, set the position to null
if self._is_closing_trade(action):
self._position = Positions.Null
self._last_trade_tick = -1
# If the position is null and the action is not null, open a trade
elif self._position == Positions.Null and action != Actions.Null:
self._last_trade_tick = self._current_tick
            if action == Actions.Buy: self._position = Positions.Long
else: self._position = Positions.Short
self._position_history.append(self._position)
observation = self._get_observation()
info = dict(
total_reward = self._total_reward,
total_profit = self._total_profit,
position = self._position.value
)
self._update_history(info)
return observation, step_reward, self._done, info
def _update_history(self, info):
if not self.history:
self.history = {key: [] for key in info.keys()}
for key, value in info.items():
self.history[key].append(value)
```
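A minimal sketch of driving `TradingEnv` with random continuous actions. The `SymbolData` stand-in mirrors the interface the environment expects (`length()`, `get_observation(window_size, tick)`, and a `signal_features` DataFrame with a `'close'` column); its exact implementation in the original project is an assumption.

```python
# Hypothetical driver for TradingEnv; SymbolData is a stand-in data object.
import numpy as np
import pandas as pd

class SymbolData:
    def __init__(self, prices):
        self.signal_features = pd.DataFrame({'close': prices})

    def length(self):
        return len(self.signal_features)

    def get_observation(self, window_size, current_tick):
        # last `window_size` closing prices up to (but excluding) the current tick
        window = self.signal_features['close'].iloc[current_tick - window_size:current_tick]
        return window.to_numpy(dtype=np.float32)

prices = np.cumsum(np.random.randn(200)) + 100.0   # synthetic price series
env = TradingEnv(symbol_data=SymbolData(prices), window_size=10)

obs = env.reset()
done = False
while not done:
    action = np.random.uniform(-1, 1)               # continuous action in [-1, 1]
    obs, reward, done, info = env.step(action)
print(info['total_profit'], info['total_reward'])
```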
#### File: Twin Delayed DDPG/RL/network.py
```python
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
def hidden_unit(layer):
fan_in = layer.weight.data.size()[0]
lim = 1. / np.sqrt(fan_in)
return (-lim, lim)
class Actor(nn.Module):
"""
Initialize parameters and build model.
:param state_dim (int): Dimension of each state
:param action_dim (int): Dimension of each action
:param max_action (float): highest action to take
:param seed (int): Random seed
:param h1_units (int): Number of nodes in first hidden layer
:param h2_units (int): Number of nodes in second hidden layer
:return x: action output of network with tanh activation
"""
def __init__(self, state_dim, action_dim, max_action, seed, h1_units=400, h2_units=300):
super(Actor, self).__init__()
self.seed = torch.manual_seed(seed)
self.state_dim = state_dim
self.action_dim = action_dim
self.max_action = max_action
self.l1 = nn.Linear(state_dim, h1_units)
self.l2 = nn.Linear(h1_units, h2_units)
self.l3 = nn.Linear(h2_units, action_dim)
def forward(self, x):
x = F.relu(self.l1(x))
x = F.relu(self.l2(x))
x = self.max_action * torch.tanh(self.l3(x))
return x
class Critic(nn.Module):
"""
Initialize parameters and build model.
:param state_dim (int): Dimension of each state
:param action_dim (int): Dimension of each action
:param max_action (float): highest action to take
:param seed (int): Random seed
:param h1_units (int): Number of nodes in first hidden layer
:param h2_units (int): Number of nodes in second hidden layer
:return x: value output of network
"""
def __init__(self, state_dim, action_dim, seed, h1_units=400, h2_units=300):
super(Critic, self).__init__()
self.seed = torch.manual_seed(seed)
self.state_dim = state_dim
self.action_dim = action_dim
# Q1 architecture
self.l1 = nn.Linear(state_dim + action_dim, h1_units)
self.l2 = nn.Linear(h1_units, h2_units)
self.l3 = nn.Linear(h2_units, 1)
# Q2 architecture
self.l4 = nn.Linear(state_dim + action_dim, h1_units)
self.l5 = nn.Linear(h1_units, h2_units)
self.l6 = nn.Linear(h2_units, 1)
def forward(self, x, u):
xu = torch.cat([x, u], dim=1)
x1 = F.relu(self.l1(xu))
x1 = F.relu(self.l2(x1))
x1 = self.l3(x1)
x2 = F.relu(self.l4(xu))
x2 = F.relu(self.l5(x2))
x2 = self.l6(x2)
return x1, x2
def Q1(self, x, u):
xu = torch.cat([x, u], dim=1)
x1 = F.relu(self.l1(xu))
x1 = F.relu(self.l2(x1))
x1 = self.l3(x1)
return x1
``` |
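As a quick shape check on the networks above: the twin critic returns two Q estimates for the same (state, action) batch, while `Q1` returns only the first head (used for the actor loss). The dimensions below are illustrative.

```python
# Hypothetical smoke test for the Actor/Critic modules defined above.
import torch

state_dim, action_dim, max_action, seed = 8, 1, 1.0, 0
actor = Actor(state_dim, action_dim, max_action, seed)
critic = Critic(state_dim, action_dim, seed)

state = torch.randn(32, state_dim)
action = actor(state)                 # (32, 1), bounded by tanh * max_action
q1, q2 = critic(state, action)        # two independent Q heads, each (32, 1)
q1_only = critic.Q1(state, action)    # first head only
print(action.shape, q1.shape, q2.shape, q1_only.shape)
```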
{
"source": "JordLecourt/PyIGA",
"score": 4
} |
#### File: JordLecourt/PyIGA/pyiga.py
```python
import numpy as np
from random import choices, randint, choice
import math
import collections
from enum import Enum
IndividualType = Enum('IndividualType', 'Binary Float')
ReproductionType = Enum('ReproductionType', 'Crossover Average')
class IslandGeneticAlgorithm:
def __init__(
self,
n_epochs,
individual_type,
array_size,
n_islands,
n_individuals,
n_parents,
fitness_functions,
mutation_rate,
step,
reproduction_type = ReproductionType.Crossover
):
"""
        The constructor accepts multiple parameters that configure the algorithm
        :param n_epochs: number of epochs to train the IslandGeneticAlgorithm
        :param individual_type: IndividualType.Binary if the problem is a binary integer programming
                                problem, otherwise IndividualType.Float
:param array_size: size of the solution array
:param n_islands: number of parallel genetic algorithm to train
:param n_individuals: number of individuals in the population of each island
:param n_parents: number of parents to take into account
        :param fitness_functions: functions that determine the fittest individuals. There can be multiple
                                  fitness functions. Format: [{'function': func1, 'parameters': params1}, ...]
:param mutation_rate: the probability of an element to be randomly mutated
:param step: the size of the mutation if individual_type == IndividualType.Float
        :param reproduction_type: the crossover method, either ReproductionType.Crossover or ReproductionType.Average.
                                  If individual_type == IndividualType.Binary, reproduction_type must be
                                  ReproductionType.Crossover
"""
self.n_epochs = n_epochs
self.individual_type = individual_type
self.reproduction_type = reproduction_type
if individual_type == IndividualType.Binary:
assert reproduction_type == ReproductionType.Crossover
self.array_size = array_size
self.n_islands = n_islands
self.n_individuals = n_individuals
self.n_parents = n_parents
assert n_parents < n_individuals
self.n_offsprings = n_individuals - n_parents
self.mutation_rate = mutation_rate
self.step = step
self.fitness_functions = fitness_functions
self.islands = []
for _ in range(self.n_islands):
self.islands.append(np.zeros((self.n_individuals, self.array_size)))
self.parent_weights = []
for i in range(self.n_parents):
p = 0.1
self.parent_weights.append(p * (1 - p)**i + ((1 - p)**self.n_parents) / self.n_parents)
self.best_solutions = []
self.best_solution = []
def evaluate_individuals(self):
"""
Evaluate each individual of each island for each fitness function
        :return: A list of the fitness scores for every individual
"""
all_islands_results = []
for island in self.islands:
island_results = collections.defaultdict(list)
for i in range(len(island)):
for function in (self.fitness_functions):
island_results[i].append(function['function'](island[i], function['parameters']))
all_islands_results.append(island_results)
return all_islands_results
def pareto_front(self, costs):
"""
        Find the Pareto front of the given costs: the set of individuals that are not dominated
        by any other individual when maximizing every fitness function.
:param costs: the fitness scores for each individual of an island
:return: A mask of the pareto front
"""
is_efficient = np.ones(costs.shape[0], dtype = bool)
for i, c in enumerate(costs):
if is_efficient[i]:
is_efficient[is_efficient] = np.any(costs[is_efficient]>c, axis=1)
is_efficient[i] = True
is_efficient_mask = np.zeros(costs.shape[0], dtype = bool)
is_efficient_mask[is_efficient] = True
return is_efficient_mask
def select_best_individuals(self, n_individuals):
"""
        Select the n_individuals best individuals using the Pareto front
        :param n_individuals: the number of individuals to select
        :return: The best individuals for each island
"""
islands_best_individuals = []
all_islands_results = self.evaluate_individuals()
for i in range(self.n_islands):
best_individuals = np.array([]).reshape(0, self.array_size)
cost = np.array(list(all_islands_results[i].values()))
current_individuals = self.islands[i].copy()
while (len(best_individuals) < n_individuals):
mask = self.pareto_front(cost)
if(best_individuals.shape[0] + sum(mask) > n_individuals):
potential_individuals = current_individuals[mask]
new_individual = potential_individuals[:(n_individuals - best_individuals.shape[0])]
best_individuals = np.concatenate((best_individuals, new_individual))
else:
best_individuals = np.concatenate((best_individuals, current_individuals[mask]))
current_individuals = current_individuals[~mask]
cost = cost[~mask]
islands_best_individuals.append(best_individuals)
return islands_best_individuals
def add_mutation(self):
"""
        Add a random mutation to individual elements with a probability of self.mutation_rate.
        If individual_type == IndividualType.Binary, the mutation toggles an element.
        If individual_type == IndividualType.Float, the mutation is a change of size self.step.
"""
for island in self.islands:
for _ in range(math.floor(self.n_individuals * self.array_size * self.mutation_rate)):
random_individual = randint(0, island.shape[0] - 1)
random_project = randint(0, self.array_size - 1)
if self.individual_type == IndividualType.Binary:
island[random_individual][random_project] = abs(island[random_individual][random_project] - 1)
elif self.individual_type == IndividualType.Float:
step = choice([-1, 1]) * self.step
if (step < 0) and (island[random_individual][random_project] + step < 0):
step *= -1
island[random_individual][random_project] += step
def update_generation(self):
"""
        Create self.n_offsprings new individuals using crossover or averaging, depending on self.reproduction_type.
        It then adds mutations and selects the best self.n_individuals.
"""
best_individuals = self.select_best_individuals(self.n_parents)
for i in range(self.n_islands):
offsprings = []
if self.reproduction_type == ReproductionType.Crossover:
for _ in range(self.n_offsprings):
n_crossovers = 2
crossover_positions = sorted(choices(range(self.array_size), k = n_crossovers))
crossover_positions.insert(0, 0)
crossover_positions.append(self.array_size)
random_individuals = choices(range(self.n_parents), weights = self.parent_weights, k = n_crossovers + 1)
offspring = []
for j in range(n_crossovers + 1):
projects = best_individuals[i][random_individuals[j]][crossover_positions[j] : crossover_positions[j+1]].copy()
offspring.extend(projects)
offsprings.append(offspring)
elif self.reproduction_type == ReproductionType.Average:
for _ in range(self.n_offsprings):
random_individuals = choices(range(self.n_parents), weights = self.parent_weights, k = 2)
offspring = (best_individuals[i][random_individuals[0]] + best_individuals[i][random_individuals[1]]) / 2
offsprings.append(offspring)
offsprings = np.array(offsprings)
self.islands[i] = np.concatenate((self.islands[i], offsprings))
self.add_mutation()
self.islands = self.select_best_individuals(self.n_individuals)
def train(self):
"""
        Execute self.n_epochs generations; every 100 epochs, the best individual of island i is
        added to island i+1
"""
epoch_number = 0
best_result = 0
while(epoch_number < self.n_epochs):
self.update_generation()
results = []
function = self.fitness_functions[0]
for i in range(self.n_islands):
island_result = function['function'](self.islands[i][0], function['parameters'])
results.append(round(island_result, 4))
if (island_result > best_result):
best_result = island_result
self.best_solutions.append(self.islands[i][0])
print('Best solution updated')
print('Epoch : ', epoch_number, ', Fitness: ', results)
epoch_number += 1
if (epoch_number % 100 == 0):
print('Migration')
for i in range(self.n_islands):
self.islands[(i + 1) % self.n_islands][-1] = self.islands[i][0].copy()
def evaluate(self):
"""
Get the best solution of all according to the first fitness function.
"""
results = {}
function = self.fitness_functions[0]
for i in range(len(self.best_solutions)):
results[i] = function['function'](self.best_solutions[i], function['parameters'])
sorted_results = sorted(results, key=lambda x: results[x], reverse=True)
self.best_solution = self.best_solutions[sorted_results[0]]
``` |
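A toy usage sketch, assuming the fitness-function format documented in the constructor (`[{'function': func, 'parameters': params}, ...]`). The knapsack-style fitness, sizes and hyperparameters are purely illustrative.

```python
# Hypothetical knapsack-style run of IslandGeneticAlgorithm.
import numpy as np

def knapsack_fitness(individual, parameters):
    values, weights, capacity = parameters
    total_weight = np.dot(individual, weights)
    if total_weight > capacity:
        return 0.0                      # infeasible solutions score zero
    return float(np.dot(individual, values))

values = np.random.randint(1, 20, size=30)
weights = np.random.randint(1, 10, size=30)

ga = IslandGeneticAlgorithm(
    n_epochs=200,
    individual_type=IndividualType.Binary,
    array_size=30,
    n_islands=4,
    n_individuals=40,
    n_parents=10,
    fitness_functions=[{'function': knapsack_fitness, 'parameters': (values, weights, 60)}],
    mutation_rate=0.01,
    step=0.1,
)
ga.train()
ga.evaluate()
print(ga.best_solution)
```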
{
"source": "jordn/bingscraper",
"score": 2
} |
#### File: jordn/bingscraper/bingscraper.py
```python
import argparse
import hashlib
import imghdr
import logging
import os
import posixpath
import re
import signal
import socket
import threading
import time
import urllib.parse
import urllib.request
logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
# CONFIG
LOG_DIR = './.bingscraper'
DEFAULT_OUTPUT_DIR = './images/' # default output dir
socket.setdefaulttimeout(2)
_REQUEST_HEADER = {
'User-Agent': 'Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:60.0) '
'Gecko/20100101 Firefox/60.0'
}
class DownloadTracker:
"""Keeps track of URLs tried and images downloaded to prevent duplication."""
def __init__(self, log_dir: str = LOG_DIR):
self._log_dir = log_dir
self.tried_urls = set()
self.image_md5s = dict()
self.count = 0
def _get_tried_urls(self):
try:
with open(os.path.join(self._log_dir, 'tried_urls.txt'), 'r') as url_file:
tried_urls = set([line.strip() for line in url_file])
except FileNotFoundError:
tried_urls = set()
return tried_urls
def _get_image_md5s(self):
try:
image_md5s = {}
with open(os.path.join(self._log_dir, 'image_md5s.tsv'), 'r') as tsv:
for line in tsv:
md5, name = line.strip().split('\t')
image_md5s[md5] = name
except FileNotFoundError:
image_md5s = {}
return image_md5s
    @classmethod
    def load(cls, log_dir: str = LOG_DIR):
        """Load previously tried URLs and downloaded image hashes from the log directory."""
        tracker = cls(log_dir)
        tracker.tried_urls = tracker._get_tried_urls()
        tracker.image_md5s = tracker._get_image_md5s()
        return tracker
def log(self):
"""Log the URLs that have been downloaded."""
with open(os.path.join(self._log_dir, 'tried_urls.txt'), 'w') as url_file:
url_file.write("\n".join(sorted(self.tried_urls)))
with open(os.path.join(self._log_dir, 'image_md5s.tsv'), 'w') as image_md5s_tsv:
for hash, file in self.image_md5s.items():
image_md5s_tsv.write(f"{hash}\t{file}\n")
def download_image(url: str,
dest: str,
thread_pool: threading.Semaphore = None,
tracker: DownloadTracker = None):
"""Threaded download image of an image URL."""
if not tracker:
tracker = DownloadTracker()
if url in tracker.tried_urls:
return
# Get the file name of the image.
url_path = urllib.parse.urlsplit(url).path
filename = posixpath.basename(url_path).split('?')[0]
name, ext = os.path.splitext(filename)
if thread_pool:
thread_pool.acquire()
tracker.tried_urls.add(url)
try:
request = urllib.request.Request(url, None, _REQUEST_HEADER)
image = urllib.request.urlopen(request).read()
if not imghdr.what(None, image):
logging.info(f"FAIL: Invalid image {filename} (not saving).")
return
md5_hash = hashlib.md5(image).hexdigest()
if md5_hash in tracker.image_md5s:
logging.info(f"SKIP: Image {filename} is a duplicate of "
f"{image_md5s[md5_hash]} (not saving)")
return
tracker.image_md5s[md5_hash] = filename
saved_filename = md5_hash + ext.lower()
with open(os.path.join(dest, saved_filename), 'wb') as image_file:
image_file.write(image)
logging.info(f"OK: Image {saved_filename} from {filename}")
except (urllib.error.HTTPError, urllib.error.URLError) as err:
print(f"FAIL: {filename}, {err.code}")
finally:
if thread_pool:
thread_pool.release()
def query_url(query: str, image_index: int = 0, adult_filter: bool = True,
filters: str = None):
"""Create the Bing search query."""
return ("https://www.bing.com/images/async?"
"q={query}&"
"first={page}&"
"count=35&"
"adlt={adult_filter}&"
"qft={filters}"
"".format(query=urllib.parse.quote_plus(query),
page=image_index,
adult_filter='' if adult_filter else 'off',
filters=filters))
def get_image_urls(query: str,
filters: str = '',
adult_filter: bool = True,
image_index: int = 0):
"""Extract image urls from the Bing results page."""
request_url = query_url(query=query,
image_index=image_index,
adult_filter=adult_filter,
filters=filters)
logging.info(f'Requesting {request_url}')
request = urllib.request.Request(request_url, headers=_REQUEST_HEADER)
response = urllib.request.urlopen(request)
html = response.read().decode('utf8')
uris = re.findall('murl":"(.*?)"', html)
return uris
def fetch_images(query: str,
output_dir: str,
limit: int = 50,
filters: str = '',
adult_filter: bool = True,
threads: int = 20):
"""Fetch images and place the output in output_dir."""
thread_pool = threading.BoundedSemaphore(threads)
image_index = 0
tracker = DownloadTracker()
    def interrupt_handler(*args):
        tracker.log()
        if args:
            exit(0)
    signal.signal(signal.SIGINT, interrupt_handler)
dest = os.path.join(output_dir, query.replace(' ', '_'))
os.makedirs(dest, exist_ok=True)
while image_index < limit:
image_urls = get_image_urls(
query=query, filters=filters, adult_filter=adult_filter,
image_index=image_index)
for i, url in enumerate(image_urls):
t = threading.Thread(
target=download_image,
kwargs=dict(
thread_pool=thread_pool,
url=url,
dest=dest,
tracker=tracker))
t.start()
image_index += i
time.sleep(0.1)
tracker.log()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Bulk image downloader')
parser.add_argument("-q", "--query", required=True,
help="Search query for Bing Image API.")
parser.add_argument("-o", "--output-dir", default=DEFAULT_OUTPUT_DIR,
help="Path to output directory of images")
parser.add_argument('--disable-adult-filter',
help='Disable the adult content filter.',
action='store_true',
required=False)
parser.add_argument('--filters',
help='Any query based filters you want to append when '
'searching for images, e.g. +filterui:license-L1',
required=False)
parser.add_argument('--limit',
help='Max number of images.',
type=int,
default=100)
parser.add_argument('--threads',
help='Number of threads',
type=int,
default=20)
args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
os.makedirs(LOG_DIR, exist_ok=True)
fetch_images(query=args.query,
output_dir=args.output_dir,
limit=args.limit,
filters=args.filters,
adult_filter=not args.disable_adult_filter)
``` |
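A hedged sketch of calling `fetch_images()` directly instead of the CLI entry point; the query, directories, and filter string are illustrative (the filter example comes from the script's own `--filters` help text).

```python
# Hypothetical direct use of fetch_images(); the equivalent command line would be
# roughly: python bingscraper.py -q "golden retriever" -o ./images --limit 50
import os

os.makedirs('./images', exist_ok=True)
os.makedirs(LOG_DIR, exist_ok=True)

fetch_images(query='golden retriever',
             output_dir='./images',
             limit=50,
             filters='+filterui:license-L1',
             adult_filter=True,
             threads=8)
```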
{
"source": "jord-nijhuis/SVN-Ignore",
"score": 3
} |
#### File: SVN-Ignore/test/test_svn_ignore.py
```python
import unittest
import os
import subprocess
import shutil
import logging
import sys
import six
from src.svn_ignore import SVNIgnore
class TestSVNIgnore(unittest.TestCase):
def setUp(self):
"""Checkout the SVN repository"""
logging.basicConfig(stream=sys.stdout, format='%(levelname)s: %(message)s', level=logging.WARNING)
self.repository_path = os.path.abspath(os.path.dirname(__file__) + '/resources/repository')
self.checkout_path = os.path.abspath(os.path.dirname(__file__) + '/resources/checkout')
# Clear the checkout
if os.path.isdir(self.checkout_path):
shutil.rmtree(self.checkout_path)
# Checkout SVN
process = subprocess.Popen(
[
'svn',
'checkout',
'file://{}'.format(self.repository_path),
self.checkout_path,
'--non-interactive'
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
).communicate()
if process[1]:
raise Exception('Error while doing checkout: {}'.format(process[1]))
self.svn_ignore = SVNIgnore(
directory=self.checkout_path
)
def tearDown(self):
"""Clear the checkout directory"""
if os.path.isdir(self.checkout_path):
shutil.rmtree(self.checkout_path)
def test_get_ignores_from_file(self):
"""Check ignores from file"""
ignores = self.svn_ignore.get_ignores_from_file(self.checkout_path)
six.assertCountEqual(self, [
'VALUE1'
], ignores)
def test_get_ignores_from_file_with_comments(self):
"""Check retrieving ignores from file without removal of comments"""
ignores = self.svn_ignore.get_ignores_from_file(self.checkout_path, remove_comments=False)
six.assertCountEqual(self, [
'VALUE1',
'#comment'
], ignores)
def test_add_exceptions(self):
"""Check if exceptions (The lines starting with !) also work"""
path = os.path.join(self.checkout_path, 'directory_exception')
open(os.path.join(path, 'exception.txt'), 'w+').close()
self.svn_ignore.add_exceptions(path, [
'*.txt',
'!exception.txt'
])
output = subprocess.check_output([
'svn',
'status'
], cwd=path)
# Check if the file was added
self.assertEqual('A exception.txt\n', output.decode())
def test_add_exceptions_recursive(self):
""" Make sure we add the exceptions recursively as well"""
path = os.path.join(self.checkout_path, 'directory_exception')
# Create the child directory
os.mkdir(os.path.join(path, 'child'))
subprocess.check_output([
'svn',
'add',
os.path.join(path, 'child')
])
open(os.path.join(path, 'child', 'exception.txt'), 'w+').close()
self.svn_ignore.add_exceptions(path, [
'*.txt',
'!**/exception.txt'
])
output = subprocess.check_output([
'svn',
'status'
], cwd=path)
self.assertEqual('A child\nA child/exception.txt\n', output.decode())
def test_add_exception_recursive_without_parent(self):
""" Make sure we don't add exceptions for directories that have not been added yet"""
path = os.path.join(self.checkout_path, 'directory_exception')
# Create the child directory
os.mkdir(os.path.join(path, 'child'))
open(os.path.join(path, 'child', 'exception.txt'), 'w+').close()
self.svn_ignore.add_exceptions(path, [
'*.txt',
'!**/exception.txt'
])
output = subprocess.check_output([
'svn',
'status'
], cwd=path)
self.assertEqual('? child\n', output.decode())
def test_add_exception_on_already_added_file(self):
"""Make sure that when an exception is already added it does not raise an error"""
path = os.path.join(self.checkout_path, 'directory_exception')
open(os.path.join(path, 'exception.txt'), 'w+').close()
self.svn_ignore.add_exceptions(path, [
'*.txt',
'!exception.txt'
])
self.svn_ignore.add_exceptions(path, [
'*.txt',
'!exception.txt'
])
def test_get_existing_ignores(self):
"""Test getting ignores from the properties"""
ignores = self.svn_ignore.get_existing_ignores(os.path.join(self.checkout_path, 'directory_props'))
six.assertCountEqual(self, [
'EXISTING_VALUE'
], ignores)
def test_get_existing_ignores_empty(self):
"""Test getting ignores from properties when the property is empty"""
ignores = self.svn_ignore.get_existing_ignores(os.path.join(self.checkout_path, 'directory'))
six.assertCountEqual(self, [], ignores)
def test_set_ignore(self):
""" Test setting the ignore property"""
self.svn_ignore.set_ignores(self.checkout_path, ['VALUE1'])
ignores = self.svn_ignore.get_existing_ignores(self.checkout_path)
six.assertCountEqual(self, ['VALUE1'], ignores)
def test_apply(self):
"""Test the apply with default values"""
self.svn_ignore.apply()
ignores = self.svn_ignore.get_existing_ignores(self.checkout_path)
six.assertCountEqual(self, ['VALUE1'], ignores)
ignores = self.svn_ignore.get_existing_ignores(os.path.join(self.checkout_path, 'directory'))
six.assertCountEqual(self, ['VALUE2', 'VALUE1'], ignores)
ignores = self.svn_ignore.get_existing_ignores(os.path.join(self.checkout_path, 'directory_props'))
six.assertCountEqual(self, ['EXISTING_VALUE', 'VALUE1'], ignores)
def test_apply_exception(self):
"""Check if exceptions are applied"""
open(os.path.join(self.checkout_path, 'directory_exception', 'exception.txt'), 'w+').close()
self.svn_ignore.apply()
ignores = self.svn_ignore.get_existing_ignores(os.path.join(self.checkout_path, 'directory_exception'))
six.assertCountEqual(self, ['VALUE1', '*.txt'], ignores)
output = subprocess.check_output([
'svn',
'status'
], cwd=self.checkout_path)
# Check if the file was added
self.assertIn('A directory_exception/exception.txt', output.decode().splitlines())
def test_apply_overwrite(self):
"""Test the apply with overwrite enabled"""
self.svn_ignore.overwrite = True
self.svn_ignore.apply()
ignores = self.svn_ignore.get_existing_ignores(self.checkout_path)
six.assertCountEqual(self, ['VALUE1'], ignores)
ignores = self.svn_ignore.get_existing_ignores(os.path.join(self.checkout_path, 'directory'))
six.assertCountEqual(self, ['VALUE2', 'VALUE1'], ignores)
ignores = self.svn_ignore.get_existing_ignores(os.path.join(self.checkout_path, 'directory_props'))
six.assertCountEqual(self, ['VALUE1'], ignores)
def test_apply_no_recursive(self):
"""Test apply with recursive disabled"""
self.svn_ignore.recursive = False
self.svn_ignore.apply()
ignores = self.svn_ignore.get_existing_ignores(self.checkout_path)
six.assertCountEqual(self, ['VALUE1'], ignores)
ignores = self.svn_ignore.get_existing_ignores(os.path.join(self.checkout_path, 'directory'))
six.assertCountEqual(self, ['VALUE2'], ignores)
ignores = self.svn_ignore.get_existing_ignores(os.path.join(self.checkout_path, 'directory_props'))
six.assertCountEqual(self, ['EXISTING_VALUE'], ignores)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jordnkr/Resolute_QA",
"score": 2
} |
#### File: Resolute_QA/resoluteqa/views.py
```python
from django.core import serializers
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import get_object_or_404, render, redirect
from django.views.decorators.csrf import csrf_exempt
import xml.etree.ElementTree as ET
from django.utils.dateparse import parse_datetime
from django.utils.timezone import is_aware, make_aware
from .models import Environment, Project, Suite, ProjectEnvironment, SuiteRun, Bug, TestResult, Error, Test
def index(request):
projects = Project.objects.all()
project_list = []
for project in projects:
project_list.append({'project':project, 'environments':project.projectenvironment_set.all().order_by('environment')})
context = {'project_list': project_list}
return render(request, 'resoluteqa/index.html', context)
def uploadresults(request, projenv_id):
projectenvironment = get_object_or_404(ProjectEnvironment, pk=projenv_id)
suite_list = Suite.objects.filter(project_environment_id=projenv_id).order_by('suite_name')
# Used for navbar daily results links
suite_runs = []
for suite in suite_list:
try:
suite_runs.append(SuiteRun.objects.filter(suite_id=suite.id).latest('start_time'))
except Exception:
pass # Do nothing, suite_runs will stay empty and page will load correctly
context = {
'projectenvironment': projectenvironment,
'suite_runs': suite_runs
}
return render(request, 'resoluteqa/upload.html', context)
def summary(request, projenv_id):
projectenvironment = get_object_or_404(ProjectEnvironment, pk=projenv_id)
suite_list = Suite.objects.filter(project_environment_id=projenv_id).order_by('suite_name')
passed_tests = 0
failed_tests = 0
inconclusive_tests = 0
ignored_tests = 0
total_tests = 0
pass_percentage = 0
fail_percentage = 0
suite_runs = []
for suite in suite_list:
try:
run = SuiteRun.objects.filter(suite_id=suite.id).latest('start_time')
passed_tests += run.passed_tests
failed_tests += run.failed_tests
inconclusive_tests += run.inconclusive_tests
ignored_tests += run.ignored_tests
total_tests += run.total_tests
suite_runs.append(run)
except Exception:
pass # Do nothing, suite_runs will stay empty and page will load correctly
if total_tests > 0:
pass_percentage = round(100 * (float(passed_tests) / total_tests), 2)
fail_percentage = round(100 - pass_percentage, 2)
context = {
'projectenvironment': projectenvironment,
'suite_list': suite_list,
'suite_runs': suite_runs,
'passed_tests': passed_tests,
'failed_tests': failed_tests,
'inconclusive_tests': inconclusive_tests,
'ignored_tests': ignored_tests,
'total_tests': total_tests,
'pass_percentage': pass_percentage,
'fail_percentage': fail_percentage
}
return render(request, 'resoluteqa/summary.html', context)
@csrf_exempt
def dailyresults(request, suite_run_id):
request_suite_run = get_object_or_404(SuiteRun, pk=suite_run_id)
request_suite = Suite.objects.get(id=request_suite_run.suite.id)
suite_list = Suite.objects.filter(project_environment_id=request_suite.project_environment.id).order_by('suite_name')
historical_suite_runs = SuiteRun.objects.filter(suite__id=request_suite.id).order_by('-insert_date')
test_results = TestResult.objects.filter(suite_run__id=suite_run_id)
bug_list = Bug.objects.filter(test__suite__project_environment_id=request_suite.project_environment.id).distinct()
projectenvironment = ProjectEnvironment.objects.get(pk=request_suite.project_environment.id)
# Used for navbar daily results links
suite_runs = []
for suite in suite_list:
suite_runs.append(SuiteRun.objects.filter(suite_id=suite.id).latest('start_time'))
context = {
'projectenvironment': projectenvironment,
'request_suite_run': request_suite_run,
'suite': request_suite,
'suite_list': suite_list,
'historical_suite_runs': historical_suite_runs,
'test_results': test_results,
'suite_runs': suite_runs,
'bug_list': bug_list
}
return render(request, 'resoluteqa/dailyresults.html', context)
def bugs(request, projenv_id):
projectenvironment = get_object_or_404(ProjectEnvironment, pk=projenv_id)
suite_list = Suite.objects.filter(project_environment_id=projenv_id).order_by('suite_name')
bug_list = Bug.objects.filter(test__suite__project_environment_id=projenv_id).distinct()
# Used for navbar daily results links
suite_runs = []
for suite in suite_list:
try:
suite_runs.append(SuiteRun.objects.filter(suite_id=suite.id).latest('start_time'))
except Exception:
pass # Do nothing. suite_runs will remain blank and page will load without data
context = {
'projectenvironment': projectenvironment,
'suite_list': suite_list,
'bug_list': bug_list,
'suite_runs': suite_runs
}
return render(request, 'resoluteqa/bugs.html', context)
def testbugs(request, test_id):
if request.method == 'GET':
test = Test.objects.get(id=test_id)
#bug_list = Bug.objects.filter(testbug__test__id=test_id)
bug_list = test.bugs.all()
bug_list = serializers.serialize("json", bug_list)
data = {
"bug_list": bug_list
}
return JsonResponse(data)
def projectbugs(request, projenv_id):
if request.method == 'GET':
bug_list = Bug.objects.filter(test__suite__project_environment_id=projenv_id).distinct().order_by('source_control_id')
bug_list = serializers.serialize("json", bug_list)
data = {
"bug_list": bug_list
}
return JsonResponse(data)
def individualresult(request, test_result_id):
if request.method == 'GET':
result = TestResult.objects.filter(id=test_result_id)
error_list = result[0].error_set.all()
result = serializers.serialize("json", result)
error_list = serializers.serialize("json", error_list)
data = {
"result": result,
"error_list": error_list
}
return JsonResponse(data)
@csrf_exempt
def bug_create(request):
try:
if request.method == "POST":
bug = Bug.objects.create(source_control_id=request.POST["source_control_id"], source_control=request.POST["source_control"], title=request.POST["title"], url=request.POST["url"])
test_ids = request.POST.getlist("test_ids[]")
for test_id in test_ids:
test = Test.objects.get(id=test_id)
test.bugs.add(bug)
return JsonResponse({'success': True})
else:
return JsonResponse({'error': True})
except Exception as e:
return JsonResponse({'error': True})
@csrf_exempt
def bug_update(request, bug_id):
try:
if request.method == "POST":
bug = Bug.objects.get(id=bug_id)
bug.source_control_id = request.POST["source_control_id"]
bug.title = request.POST["title"]
bug.source_control = request.POST["source_control"]
bug.url = request.POST["url"]
bug.save()
return JsonResponse({'success': True})
else:
return JsonResponse({'error': True})
except Exception as e:
return JsonResponse({'error': True})
@csrf_exempt
def bug_delete(request, bug_id):
try:
if request.method == "POST":
bug = Bug.objects.get(id=bug_id)
bug.delete()
return JsonResponse({'success': True})
else:
return JsonResponse({'error': True})
except Exception as e:
return JsonResponse({'error': True})
@csrf_exempt
def bug_add(request):
try:
if request.method == "POST":
bug_id = request.POST["bug_id"]
test_ids = request.POST.getlist("test_ids[]")
for test_id in test_ids:
bug = Bug.objects.get(id=bug_id)
test = Test.objects.get(id=test_id)
test.bugs.add(bug)
return JsonResponse({'success': True})
else:
return JsonResponse({'error': True})
except Exception as e:
return JsonResponse({'error': True})
@csrf_exempt
def bug_remove(request, bug_id, test_id):
try:
if request.method == "POST":
bug = Bug.objects.get(id=bug_id)
test = Test.objects.get(id=test_id)
test.bugs.remove(bug)
return JsonResponse({'success': True, 'bugCount':test.bugs.count()})
else:
return JsonResponse({'error': True})
except Exception as e:
return JsonResponse({'error': True})
@csrf_exempt
def upload_mstest(request):
xmlfile = request.FILES.get('resultfile')
if xmlfile is not None:
upload_project_name = request.POST.get('project')
upload_environment = request.POST.get('environment')
upload_suite_name = request.POST.get('suite_name')
tree = ET.parse(xmlfile)
for test_run in tree.iter('TestRun'):
test_settings = test_run.find('TestSettings')
result_summary = test_run.find('ResultSummary')
test_definitions = test_run.find('TestDefinitions')
results = test_run.find('Results')
# SUITE INFO
project, project_created = Project.objects.get_or_create(project_name=upload_project_name)
environment, environment_created = Environment.objects.get_or_create(environment_name=upload_environment)
projectenvironment, projenv_create = ProjectEnvironment.objects.get_or_create(
project=project,
environment=environment
)
description = test_settings.find('Description').text
suite, suite_created = Suite.objects.get_or_create(
project_environment=projectenvironment,
suite_name=upload_suite_name,
defaults={'description': description}
)
# SUITE_RUN INFO
total_tests = int(result_summary.find('Counters').attrib['total'])
executed_tests = int(result_summary.find('Counters').attrib['executed']) #doesn't go to the database. used for calculation only
passed_tests = int(result_summary.find('Counters').attrib['passed'])
failed_tests = int(result_summary.find('Counters').attrib['failed']) + int(result_summary.find('Counters').attrib['error'])
inconclusive_tests = int(result_summary.find('Counters').attrib['inconclusive'])
ignored_tests = int(result_summary.find('Counters').attrib['notExecuted'])
result_precentage = 100 * (passed_tests/executed_tests)
suite_start_time = get_aware_datetime(test_run.find('Times').attrib['start'])
suite_end_time = get_aware_datetime(test_run.find('Times').attrib['finish'])
total_execution_time = (suite_end_time-suite_start_time).total_seconds()/60 # store time in minutes
suite_run = SuiteRun.objects.create(suite=suite, total_tests=total_tests, passed_tests=passed_tests, failed_tests=failed_tests, inconclusive_tests=inconclusive_tests, ignored_tests=ignored_tests, result_precentage=result_precentage, start_time=suite_start_time, end_time=suite_end_time, total_execution_time=total_execution_time)
for test_result in results.iter('UnitTestResult'):
# TEST INFO
test_id = test_result.attrib['testId'] # not stored in database. Used to query other fields within the results file only.
test_name = test_result.attrib['testName']
test_category = '' #left blank for now
unit_test = test_definitions.findall(".//UnitTest[@id='" + test_id + "']") # not stored in database. Used for other values only.
class_name_path = unit_test[0].find('TestMethod').attrib['className'].split(',')[0] # not stored in database. Used for other values only.
pathArray = class_name_path.rsplit('.', 1) # not stored in database. Used for other values only.
class_name = pathArray[1]
namespace = pathArray[0]
test, test_created = Test.objects.get_or_create(
suite=suite,
test_name=test_name,
class_name=class_name,
namespace=namespace
)
# TEST_RESULT INFO
result = test_result.attrib['outcome']
host = test_result.attrib['computerName']
test_start_time = get_aware_datetime(test_result.attrib['startTime'])
test_end_time = get_aware_datetime(test_result.attrib['endTime'])
test_total_execution_time = parse_seconds(test_result.attrib['duration']) # store time in seconds
console_output = 'N/A'
try:
console_output = test_result.find('Output').find('StdOut').text
except Exception:
pass
tr = TestResult.objects.create(suite_run=suite_run, test=test, result=result, host=host, start_time=test_start_time, end_time=test_end_time, total_execution_time=test_total_execution_time, console_output=console_output)
# ERROR INFO
try:
for error in test_result.find('Output').iter('ErrorInfo'):
error_message = error.find('Message').text
stack_trace = error.find('StackTrace').text
Error.objects.create(test_result=tr, error_message=error_message, stack_trace=stack_trace)
except Exception:
pass
fromForm = False #make form upload this value
if fromForm:
return redirect('resoluteqa:uploadresults', projenv_id)
else:
return JsonResponse({'success': True})
def get_aware_datetime(date_str):
ret = parse_datetime(date_str)
if not is_aware(ret):
ret = make_aware(ret)
return ret
def parse_seconds(time_str):
h, m, s = time_str.split(':')
return float(h) * 3600 + float(m) * 60 + float(s)
``` |
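A small hedged illustration of the two time helpers at the bottom of this file; the input strings are made-up MSTest-style values, and `get_aware_datetime` assumes Django settings (`TIME_ZONE`/`USE_TZ`) are already configured.

```python
# Hypothetical inputs: parse_seconds handles HH:MM:SS.fff durations from .trx files,
# get_aware_datetime normalizes timestamps to timezone-aware datetimes.
print(parse_seconds('00:01:30.500'))              # -> 90.5 seconds
print(get_aware_datetime('2020-01-01T09:10:11'))  # -> aware datetime (needs Django settings)
```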
{
"source": "Jordonkopp/Flask-Vue",
"score": 2
} |
#### File: Jordonkopp/Flask-Vue/manage.py
```python
import pytest
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from server import create_app
from server.models import db
app = create_app()
manager = Manager(app)
migrate = Migrate(app, db)
manager.add_command("db", MigrateCommand)
@manager.command
def runserver():
app.run(debug=True, host="0.0.0.0", port=5000)
@manager.command
def runworker():
app.run(debug=False)
@manager.command
def test():
pytest.main(["tests"])
@manager.command
def recreate_db():
"""
Recreates a local database. Do not use in prod
"""
db.drop_all()
db.create_all()
db.session.commit()
@manager.command
def create_db():
"""
Recreates a local database. Do not use in prod
"""
db.create_all()
db.session.commit()
if __name__ == "__main__":
manager.run()
```
#### File: Flask-Vue/server/__init__.py
```python
import os
import logging
from flask import Flask, request
from flask_cors import CORS
from flask_migrate import Migrate
from sqlalchemy_utils import create_database, database_exists
from server.settings import config
from server.utils.core_utils import all_exception_handler
from server.models import db
# import blueprints
from server.services.key import key
from server.services.main import main
from server.services.user import user
class Log_Formatter(logging.Formatter):
def format(self, record):
record.url = request.url
record.remote_addr = request.remote_addr
return super().format(record)
def create_app(test_config=None):
app = Flask(__name__)
CORS(app) # add CORS
# check environment variables to see which config to load
env = os.environ.get("FLASK_ENV", "dev")
if test_config:
# ignore environment variable config if config was given
app.config.from_mapping(**test_config)
else:
app.config.from_object(config[env])
# logging
formatter = Log_Formatter(
"%(asctime)s %(remote_addr)s: requested %(url)s: %(levelname)s in [%(module)s: %(lineno)d]: %(message)s"
)
# Set stream logger
stream = logging.StreamHandler()
stream.setLevel(logging.DEBUG)
stream.setFormatter(formatter)
app.logger.addHandler(stream)
app.logger.setLevel(logging.DEBUG)
# Set Logging to file
if app.config.get("LOG_FILE"):
file_handler = logging.FileHandler(app.config.get("LOG_FILE"))
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
app.logger.addHandler(file_handler)
    # outside of prod, create the database if it does not already exist
if env != "prod":
db_url = app.config["SQLALCHEMY_DATABASE_URI"]
if not database_exists(db_url):
create_database(db_url)
# register sqlalchemy to this app
db.init_app(app)
Migrate(app, db)
# Register blueprints
app.register_blueprint(main)
app.register_blueprint(key)
app.register_blueprint(user)
# register error Handler
app.register_error_handler(Exception, all_exception_handler)
return app
```
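A minimal sketch of serving the application factory above locally, assuming the "dev" configuration and its database are available; the filename and port are illustrative.

```python
# run_dev.py (hypothetical): create the app via the factory and serve it locally.
from server import create_app

app = create_app()

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000, debug=True)
```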
#### File: server/services/main.py
```python
from flask import Blueprint, redirect, url_for
from server.utils.core_utils import logger
# Create Blueprint
main = Blueprint("main", __name__)
# redirect when you visit /
@main.route("/")
def index():
logger.info("Base redirect")
return redirect(url_for('keys'))
```
#### File: server/services/user.py
```python
from flask import Blueprint, jsonify, request, current_app
from datetime import datetime, timedelta
from server.utils.view_utils import wrapped_response, serialize_list
from server.models.key import Key
from server.models.user import User
from server.utils.core_utils import logger
from server import db
import jwt
user = Blueprint("/users/v1", __name__)
@user.route('/register', methods=['POST'])
def register():
data = request.get_json()
new_user = User(**data)
db.session.add(new_user)
db.session.commit()
return jsonify(new_user.to_dict()), 201
@user.route('/login', methods=['POST'])
def login():
data = request.get_json()
current_user = User.authenticate(**data)
if not current_user:
return jsonify({'message': 'Invalid credentials', 'authenticated': False}), 401
token = jwt.encode({
'sub': current_user.email,
'iat': datetime.utcnow(),
'exp': datetime.utcnow() + timedelta(minutes=30)},
current_app.config['SECRET_KEY'])
return jsonify({'token': token.decode('UTF-8')})
```
#### File: server/utils/view_utils.py
```python
from typing import Tuple, List
from flask import jsonify
from flask.wrappers import Response
def wrapped_response(data: dict = None, status: int = 200, message: str = "") -> Tuple[Response, int]:
"""
Create a wrapped response to have uniform json response objects
"""
if type(data) is not dict and data is not None:
raise TypeError("Expected data to be type Dictionary")
response = {
"success": 200 <= status < 300,
"code": status,
"message": message,
"result": data,
}
return jsonify(response), status
def serialize_list(items: List) -> List:
"""Serializes a list of SQLAlchemy Objects, exposing their attributes.
:param items - List of Objects that inherit from Mixin
:returns List of dictionaries
"""
if not items or items is None:
return []
return [x.to_dict() for x in items]
``` |
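A short hedged sketch of `wrapped_response()` in use; the blueprint, route, and payload are hypothetical, not part of the repository.

```python
# Hypothetical route using the uniform response wrapper defined above.
from flask import Blueprint

health = Blueprint("health", __name__)

@health.route("/health")
def health_check():
    return wrapped_response(data={"status": "ok"}, status=200, message="service is up")
```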
{
"source": "JordonPhillips/pygments",
"score": 3
} |
#### File: pygments/tests/test_mysql.py
```python
import pytest
from pygments.lexers.sql import MySqlLexer
from pygments.token import Comment, Keyword, Literal, Name, Number, Operator, \
Punctuation, String, Whitespace
@pytest.fixture(scope='module')
def lexer():
yield MySqlLexer()
@pytest.mark.parametrize('text', ('1', '22', '22 333', '22 a', '22+', '22)',
'22\n333', '22\r\n333'))
def test_integer_literals_positive_match(lexer, text):
"""Validate that integer literals are tokenized as integers."""
token = list(lexer.get_tokens(text))[0]
assert token[0] == Number.Integer
assert token[1] in {'1', '22'}
@pytest.mark.parametrize('text', ('1a', '1A', '1.', '1ひ', '1$', '1_',
'1\u0080', '1\uffff'))
def test_integer_literals_negative_match(lexer, text):
"""Validate that non-integer texts are not matched as integers."""
assert list(lexer.get_tokens(text))[0][0] != Number.Integer
@pytest.mark.parametrize(
'text',
(
'.123', '1.23', '123.',
'1e10', '1.0e10', '1.e-10', '.1e+10',
),
)
def test_float_literals(lexer, text):
assert list(lexer.get_tokens(text))[0] == (Number.Float, text)
@pytest.mark.parametrize('text', ("X'0af019'", "x'0AF019'", "0xaf019"))
def test_hexadecimal_literals(lexer, text):
assert list(lexer.get_tokens(text))[0] == (Number.Hex, text)
@pytest.mark.parametrize('text', ("B'010'", "b'010'", "0b010"))
def test_binary_literals(lexer, text):
assert list(lexer.get_tokens(text))[0] == (Number.Bin, text)
@pytest.mark.parametrize(
'text',
(
"{d'2020-01-01'}", "{ d ' 2020^01@01 ' }",
"{t'8 9:10:11'}", "{ t ' 09:10:11.12 ' }", "{ t ' 091011 ' }",
'{ts"2020-01-01 09:10:11"}', "{ ts ' 2020@01/01 09:10:11 ' }",
),
)
def test_temporal_literals(lexer, text):
assert list(lexer.get_tokens(text))[0] == (Literal.Date, text)
@pytest.mark.parametrize(
'text, expected_types',
(
(r"'a'", (String.Single,) * 3),
(r"""'""'""", (String.Single,) * 3),
(r"''''", (String.Single, String.Escape, String.Single)),
(r"'\''", (String.Single, String.Escape, String.Single)),
(r'"a"', (String.Double,) * 3),
(r'''"''"''', (String.Double,) * 3),
(r'""""', (String.Double, String.Escape, String.Double)),
(r'"\""', (String.Double, String.Escape, String.Double)),
),
)
def test_string_literals(lexer, text, expected_types):
tokens = list(lexer.get_tokens(text))[:len(expected_types)]
assert all(t[0] == e for t, e in zip(tokens, expected_types))
@pytest.mark.parametrize(
'text',
(
"@a", "@1", "@._.$",
"@'?'", """@'abc''def"`ghi'""",
'@"#"', '''@"abc""def'`ghi"''',
'@`^`', """@`abc``def'"ghi`""",
"@@timestamp",
"@@session.auto_increment_offset",
"@@global.auto_increment_offset",
"@@persist.auto_increment_offset",
"@@persist_only.auto_increment_offset",
'?',
),
)
def test_variables(lexer, text):
tokens = list(lexer.get_tokens(text))
assert all(t[0] == Name.Variable for t in tokens[:-1])
assert ''.join([t[1] for t in tokens]).strip() == text.strip()
@pytest.mark.parametrize('text', ('true', 'false', 'null', 'unknown'))
def test_constants(lexer, text):
assert list(lexer.get_tokens(text))[0] == (Name.Constant, text)
@pytest.mark.parametrize('text', ('-- abc', '--\tabc', '#abc'))
def test_comments_single_line(lexer, text):
# Test the standalone comment.
tokens = list(lexer.get_tokens(text))
assert tokens[0] == (Comment.Single, text)
# Test the comment with mixed tokens.
tokens = list(lexer.get_tokens('select' + text + '\nselect'))
assert tokens[0] == (Keyword, 'select')
assert tokens[1] == (Comment.Single, text)
assert tokens[-2] == (Keyword, 'select')
@pytest.mark.parametrize(
'text',
(
'/**/a', '/*a*b/c*/a', '/*\nabc\n*/a',
'/* /* */a'
)
)
def test_comments_multi_line(lexer, text):
tokens = list(lexer.get_tokens(text))
assert all(token[0] == Comment.Multiline for token in tokens[:-2])
assert ''.join(token[1] for token in tokens).strip() == text.strip()
# Validate nested comments are not supported.
assert tokens[-2][0] != Comment.Multiline
@pytest.mark.parametrize(
'text', ('BKA', 'SEMIJOIN'))
def test_optimizer_hints(lexer, text):
good = '/*+ ' + text + '(), */'
ignore = '/* ' + text + ' */'
bad1 = '/*+ a' + text + '() */'
bad2 = '/*+ ' + text + 'a */'
assert (Comment.Preproc, text) in lexer.get_tokens(good)
assert (Comment.Preproc, text) not in lexer.get_tokens(ignore)
assert (Comment.Preproc, text) not in lexer.get_tokens(bad1)
assert (Comment.Preproc, text) not in lexer.get_tokens(bad2)
@pytest.mark.parametrize(
'text, expected_types',
(
# SET exceptions
('SET', (Keyword,)),
('SET abc = 1;', (Keyword,)),
('SET @abc = 1;', (Keyword,)),
('CHARACTER SET latin1', (Keyword, Whitespace, Keyword,)),
('SET("r", "g", "b")', (Keyword.Type, Punctuation)),
('SET ("r", "g", "b")', (Keyword.Type, Whitespace, Punctuation)),
),
)
def test_exceptions(lexer, text, expected_types):
tokens = list(lexer.get_tokens(text))[:len(expected_types)]
assert all(t[0] == e for t, e in zip(tokens, expected_types))
@pytest.mark.parametrize(
'text',
(
'SHOW', 'CREATE', 'ALTER', 'DROP',
'SELECT', 'INSERT', 'UPDATE', 'DELETE',
'WHERE', 'GROUP', 'ORDER', 'BY', 'AS',
'DISTINCT', 'JOIN', 'WITH', 'RECURSIVE',
'PARTITION', 'NTILE', 'MASTER_PASSWORD', 'XA',
'REQUIRE_TABLE_PRIMARY_KEY_CHECK', 'STREAM',
),
)
def test_keywords(lexer, text):
assert list(lexer.get_tokens(text))[0] == (Keyword, text)
@pytest.mark.parametrize(
'text',
(
# Standard
'INT(', 'VARCHAR(', 'ENUM(', 'DATETIME', 'GEOMETRY', 'POINT', 'JSON',
# Aliases and compatibility
'FIXED', 'MEDIUMINT', 'INT3', 'REAL', 'SERIAL',
'LONG', 'NATIONAL', 'PRECISION', 'VARYING',
),
)
def test_data_types(lexer, text):
assert list(lexer.get_tokens(text))[0] == (Keyword.Type, text.strip('('))
@pytest.mark.parametrize(
'text',
(
# Common
'CAST', 'CONCAT_WS', 'DAYNAME', 'IFNULL', 'NOW', 'SUBSTR',
# Less common
'CAN_ACCESS_COLUMN', 'JSON_CONTAINS_PATH', 'ST_GEOMFROMGEOJSON',
),
)
def test_functions(lexer, text):
assert list(lexer.get_tokens(text + '('))[0] == (Name.Function, text)
assert list(lexer.get_tokens(text + ' ('))[0] == (Name.Function, text)
@pytest.mark.parametrize(
'text',
(
'abc_$123', '上市年限', 'ひらがな', '123_$abc', '123ひらがな',
),
)
def test_schema_object_names_unquoted(lexer, text):
tokens = list(lexer.get_tokens(text))[:-1]
assert all(token[0] == Name for token in tokens)
assert ''.join(token[1] for token in tokens) == text
@pytest.mark.parametrize(
'text',
(
'`a`', '`1`', '`上市年限`', '`ひらがな`', '`select`', '`concat(`',
'`-- `', '`/*`', '`#`',
),
)
def test_schema_object_names_quoted(lexer, text):
tokens = list(lexer.get_tokens(text))[:-1]
assert tokens[0] == (Name.Quoted, '`')
assert tokens[1] == (Name.Quoted, text[1:-1])
assert tokens[2] == (Name.Quoted, '`')
assert ''.join(token[1] for token in tokens) == text
@pytest.mark.parametrize('text', ('````', ))
def test_schema_object_names_quoted_escaped(lexer, text):
"""Test quoted schema object names with escape sequences."""
tokens = list(lexer.get_tokens(text))[:-1]
assert tokens[0] == (Name.Quoted, '`')
assert tokens[1] == (Name.Quoted.Escape, text[1:-1])
assert tokens[2] == (Name.Quoted, '`')
assert ''.join(token[1] for token in tokens) == text
@pytest.mark.parametrize(
'text',
('+', '*', '/', '%', '&&', ':=', '!', '<', '->>', '^', '|', '~'),
)
def test_operators(lexer, text):
assert list(lexer.get_tokens(text))[0] == (Operator, text)
@pytest.mark.parametrize(
'text, expected_types',
(
('abc.efg', (Name, Punctuation, Name)),
('abc,efg', (Name, Punctuation, Name)),
('MAX(abc)', (Name.Function, Punctuation, Name, Punctuation)),
('efg;', (Name, Punctuation)),
),
)
def test_punctuation(lexer, text, expected_types):
tokens = list(lexer.get_tokens(text))[:len(expected_types)]
assert all(t[0] == e for t, e in zip(tokens, expected_types))
```
#### File: pygments/tests/test_using_api.py
```python
from pytest import raises
from pygments.lexer import using, bygroups, this, RegexLexer
from pygments.token import String, Text, Keyword
class MyLexer(RegexLexer):
tokens = {
'root': [
(r'#.*',
using(this, state='invalid')),
(r'(")(.+?)(")',
bygroups(String, using(this, state='string'), String)),
(r'[^"]+', Text),
],
'string': [
(r'.+', Keyword),
],
}
def test_basic():
expected = [(Text, 'a'), (String, '"'), (Keyword, 'bcd'),
(String, '"'), (Text, 'e\n')]
assert list(MyLexer().get_tokens('a"bcd"e')) == expected
def test_error():
def gen():
return list(MyLexer().get_tokens('#a'))
assert raises(KeyError, gen)
``` |
{
"source": "jordotech/joeflow",
"score": 2
} |
#### File: joeflow/joeflow/forms.py
```python
from django import forms
from django.utils.translation import gettext_lazy as t
from . import models
class OverrideForm(forms.ModelForm):
next_tasks = forms.MultipleChoiceField(
label=t("Next tasks"), choices=[], required=False,
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["next_tasks"].choices = [
(name, name) for name in dict(self._meta.model.get_nodes()).keys()
]
def get_next_task_nodes(self):
names = self.cleaned_data["next_tasks"]
for name in names:
yield self._meta.model.get_node(name)
def start_next_tasks(self, user=None):
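        # Cancel any still-active tasks, attach a manual "override" task to them
        # (or to the most recent task if none are active), then fan out to the
        # user-selected next nodes.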
active_tasks = list(self.instance.task_set.filter(completed=None))
for task in active_tasks:
task.cancel(user)
if active_tasks:
parent_tasks = active_tasks
else:
try:
parent_tasks = [self.instance.task_set.latest()]
except models.Task.DoesNotExist:
parent_tasks = []
override_task = self.instance.task_set.create(
name="override", type=models.Task.HUMAN,
)
override_task.parent_task_set.set(parent_tasks)
override_task.finish(user=user)
override_task.start_next_tasks(next_nodes=self.get_next_task_nodes())
``` |
{
"source": "JordParma/Terminus",
"score": 2
} |
#### File: Terminus/Tools/Statics.py
```python
import numpy as np
import sympy as sp
import scipy
import matplotlib.pyplot as plt
from matplotlib import patches
from mpl_toolkits.mplot3d import Axes3D
def simple_support():
L = 15
P = 5
Ploc = 5
plt.rcParams['figure.figsize'] = (10, 8) # (width, height)
fig1 = plt.figure()
ax1 = fig1.add_subplot(311) # , aspect='equal')
def add_beam():
# plt.subplot(3,1,1)
# ax = plt.gca()
plt.xlim([-1, L + 1])
plt.ylim([-1, P * 2])
# add rigid ground
rectangle = plt.Rectangle((-1, -2), L + 2, 2, hatch='//', fill=False)
ax1.add_patch(rectangle)
# add rigid rollers
# circle = plt.Circle((0, 5), radius=1, fc='g')
# ax.add_patch(circle)
e1 = patches.Ellipse((0, 2), L / 20, 4, angle=0, linewidth=2, fill=False, zorder=2)
ax1.add_patch(e1)
# add triangle
points = [[L, 4], [L - L / 40, 0], [L + L / 40, 0]]
polygon = plt.Polygon(points, fill=False)
ax1.add_patch(polygon)
# add beam
rectangle = plt.Rectangle((0, 4), L, 4, fill=False)
ax1.add_patch(rectangle)
def point_load():
# point load shear
x = np.linspace(0, L, 100)
y = np.ones(len(x)) * P / 2
y[x > Ploc] = y[x > Ploc] - P
x[0] = 0
x[-1] = 0
plt.subplot(3, 1, 2)
        plt.ylabel('Shear, V')
plt.title('Shear Diagram')
plt.fill(x, y, 'b', alpha=0.25)
plt.grid(True)
plt.xlim([-1, L + 1])
# point load bending
x = np.linspace(-L / 2, L / 2, 100)
y = -(x ** 2) + (np.max(x ** 2))
x = np.linspace(0, L, 100)
plt.subplot(3, 1, 3)
plt.title('Bending Diagram')
plt.ylabel('Moment, M')
plt.fill(x, y, 'b', alpha=0.25)
plt.grid(True)
plt.xlim([-1, L + 1])
# add point load
plt.subplot(3, 1, 1)
plt.annotate('P=%i' % P, ha='center', va='bottom',
xytext=(Ploc, 15), xy=(Ploc, 7.5),
arrowprops={'facecolor': 'black', 'shrink': 0.05})
plt.title('Free Body Diagram')
plt.axis('off') # removes axis and labels
# # add point load
# ax1.arrow(3, 11+L/10, 0, -3, head_width=L*0.02, head_length=L*0.1, fc='k', ec='k')
# plt.title('Free Body Diagram')
# plt.axis('off') # removes axis and labels
# #ax1.set_yticklabels('')
def dist_load():
# add distributed load
plt.subplot(3, 1, 1)
for k in np.linspace(0, L, 20):
ax1.arrow(k, 11 + L / 10, 0, -3, head_width=L * 0.01, head_length=L * 0.1, fc='k', ec='k')
plt.title('Free Body Diagram')
plt.axis('off') # removes axis and labels
# ax1.set_yticklabels('')
# dist load shear
x = [0, 0, L, L]
y = [0, 5, -5, 0]
plt.subplot(3, 1, 2)
plt.ylabel('Shear, V')
plt.title('Shear Diagram')
plt.fill(x, y, 'b', alpha=0.25)
plt.grid(True)
plt.xlim([-1, L + 1])
# dist load bending
x = np.linspace(-L / 2, L / 2, 100)
y = -(x ** 2) + (np.max(x ** 2))
x = np.linspace(0, L, 100)
plt.subplot(3, 1, 3)
plt.title('Bending Diagram')
plt.ylabel('Moment, M')
plt.fill(x, y, 'b', alpha=0.25)
plt.grid(True)
plt.xlim([-1, L + 1])
add_beam()
dist_load()
# point_load()
plt.tight_layout()
plt.show()
def moment_calc():
fig = plt.figure()
ax = plt.axes(projection='3d')
# bar
x = [0, 0, 4, 4]
y = [0, 5, 5, 5]
z = [0, 0, 0, -2]
# Applied Forces
X = [0, 0, 4]
Y = [5, 5, 5]
Z = [0, 0, -2]
U = [-60, 0, 80]
V = [40, 50, 40]
W = [20, 0, -30]
ax.plot(x, y, z, '-b', linewidth=5)
ax.view_init(45, 45)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_title('Hibbler pg 129 example')
ax.set_xlim([min(X) - 2, max(X) + 2])
ax.set_ylim([min(Y) - 5, max(Y) + 2])
ax.set_zlim([min(Z) - 2, max(Z) + 2])
# plt.tight_layout()
ax.quiver3D(X, Y, Z, U, V, W, pivot='tail');
rA = np.array([0, 5, 0]) # start of F1 and F2
rB = np.array([4, 5, -2]) # start of F3
F1 = np.array([-60, 40, 20])
F2 = np.array([0, 50, 0])
F3 = np.array([80, 40, -30])
M = np.cross(rA, F1) + np.cross(rA, F2) + np.cross(rB, F3)
print('Total Moment vector')
print(M)
print('Total Force Vector about point O')
print(sum([F1, F2, F3]))
print('unit vector of the moment')
u = M / np.linalg.norm(M)
print(u)
print('angles at which the moments react')
print(np.rad2deg(np.arccos(u)))
def point_ss_shear_bending(L, Pin, ain):
# Shear Bending plot of point loads of a simply supported beam
L = 4 # total length of beam
Pin = [5] # point load
ain = [2] # location of point load
# or more multiple points
L = 10
Pin = [3,15]
ain = [2,6]
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(0, L, L * 0.02)
V = np.zeros(len(x))
M = np.zeros(len(x))
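    # Superposition of standard simply-supported-beam results: a point load P at
    # distance a gives reactions R_left = P*(1 - a/L) and R_right = P*a/L, so
    # V(x) = R_left for x <= a and -R_right for x > a, while M(x) varies linearly
    # and peaks at the load point.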
for a, P in zip(ain, Pin):
V[x <= a] += P * (1 - a / L)
V[x > a] += -P * a / L
M[x <= a] += P * (1 - a / L) * x[x <= a]
M[x > a] += -P * a * (x[x > a] / L - 1)
plt.subplot(2, 1, 1)
plt.stem(x, V)
plt.ylabel('V,shear')
plt.subplot(2, 1, 2)
plt.stem(x, M)
plt.ylabel('M,moment')
def moment_ss_shear_bending(L, Pin, ain):
# Shear Bending plot of moment loads of a simply supported beam
L = 4 # total length of beam
Pin = [5] # point moment load
ain = [2] # location of point load
# or more multiple point moments
L = 10
Pin = [3,-15]
ain = [2,6]
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(0, L, L * 0.02)
V = np.zeros(len(x))
M = np.zeros(len(x))
for a, P in zip(ain, Pin):
V += -P / L
M[x <= a] += -P * x[x <= a] / L
M[x > a] += P * (1 - x[x > a] / L)
plt.figure()
plt.title('Point Moment Loads')
plt.subplot(2, 1, 1)
plt.stem(x, V)
plt.ylabel('V,shear')
plt.subplot(2, 1, 2)
plt.stem(x, M)
plt.ylabel('M,moment')
def dist_ss_shear_bending(L, win, ain):
# Shear Bending plot of distributed loads of a simply supported beam
L = 10 # total length of beam
win = [5] # distributed load
ain = [[3,4]] # location of point load
# or more multiple point moments
L = 10
win = [3,6]
ain = [[0,3],[4,6]]
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(0, L, L * 0.02)
V = np.zeros(len(x))
M = np.zeros(len(x))
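    # Each distributed load w over [a0, a1] is replaced by its resultant
    # P = w*(a1 - a0) acting at the centroid l = (a0 + a1)/2 to obtain the support
    # reactions; inside the loaded span the local term w*(x - a0) is subtracted
    # from the shear and the moment is adjusted accordingly.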
for a, w in zip(ain, win):
# a = ain[0]
P = w * (a[1] - a[0]) # convert distributed load to point load
l = (a[1] + a[0]) / 2
        i = x < a[0]
        V[i] += P * (1 - l / L)
        M[i] += x[i] * P * (1 - l / L)
        i = x > a[1]
        V[i] += -P * l / L
        M[i] += x[i] * -P * l / L + P * l
        i = (a[0] <= x) & (x <= a[1])
V[i] += P * (1 - l / L) - w * (x[i] - a[0])
M[i] += (P * (1 - l / L) - w * (x[i] - a[0])) * x[i] + w * (x[i] - a[0]) * (a[0] + x[i]) / 2
# V[i] += P*(1-l/L)-P*x[i]
# M[i] += P/2*(L*x[i] - x[i]**2)
# M[i] += x[i]*P*(1-l/L) - (P*x[i]**2)/2
plt.figure()
    plt.title('Distributed Loads')
plt.subplot(2, 1, 1)
plt.stem(x, V)
plt.ylabel('V,shear')
plt.subplot(2, 1, 2)
plt.stem(x, M)
plt.ylabel('M,moment')
if __name__ == '__main__':
# executed when script is run alone
# moment_calc()
    dist_ss_shear_bending(10, [3, 6], [[0, 3], [4, 6]])
``` |
{
"source": "jordstar20001/demand-response-view-proto",
"score": 2
} |
#### File: demand-response-view-proto/server/server.py
```python
from flask import Flask, request, session, redirect, send_from_directory
import json
from demandresponse import UserManager, PermissionManager
app = Flask(__name__, static_url_path="/static")
config = json.loads(open("data/cfg.json", "r").read())
app.config["SECRET_KEY"] = config["SECRET_KEY"]
USERMANAGER = UserManager(config["USERS_JSON_FN"])
PERMSMANAGER = PermissionManager(config["AUTH_JSON_FN"])
ACC_INDEX_PAGES = config["ACCTYPE_INDEXPAGES"]
del config
def Authed(auth_str, acc_type = None):
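    # The session and permission checks are only enforced when the action is not
    # marked anonymous AND a specific account type is required; otherwise the
    # request is allowed through.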
if not PERMSMANAGER.is_anonymous_action(auth_str) and acc_type != None:
if "USER" not in session: return False
session_user = session["USER"]
if session_user == None or not PERMSMANAGER.user_permitted(session_user, auth_str) or (session_user["acc_type"] != acc_type and acc_type != None):
return False
return True
@app.route("/")
def home():
if "USER" in session: return redirect("/dashboard")
return app.send_static_file("index.html")
@app.route("/login", methods=["POST"])
def login():
loginData = request.form
u, p = loginData["txtUsername"], loginData["txtPassword"]
if USERMANAGER.valid_user(u, p):
session["USER"] = USERMANAGER.get_user_by_username(u)
return "Success!", 200
else:
return "Incorrect username or password", 403
@app.route("/logout", methods=["GET"])
def logout():
if "USER" in session: del session["USER"]
return redirect("/")
@app.route("/dashboard", defaults={'file': None})
@app.route("/dashboard/<path:file>", methods=["GET"])
def dashboard(file):
if not Authed("/dashboard"): return redirect("/")
if file == None:
user = session["USER"]
if user["acc_type"] not in ACC_INDEX_PAGES:
return f"Index webpage not found for account type {user['acc_type']}", 404
return send_from_directory("static/dashboard", ACC_INDEX_PAGES[user["acc_type"]])
return send_from_directory("static/dashboard", file)
@app.route("/dashboard/api/<endpoint>", methods=["GET", "POST"])
def dashboard_api(endpoint):
    if Authed(endpoint): return "", 200
return "", 200
@app.route("/public/<path:path>", methods=["GET"])
def public(path):
print(f"Path: {path}")
return send_from_directory("static/public", path)
# Run the server
if __name__ == "__main__":
app.run("0.0.0.0", 8080)
``` |
{
"source": "jordstar20001/FunctionCtrlPanel",
"score": 3
} |
#### File: FunctionCtrlPanel/server/utils.py
```python
import os, binascii
def random_hex_str(n):
"""
Generate a random hexadecimal string of length n
"""
return binascii.b2a_hex(os.urandom(n // 2)).decode()
``` |
{
"source": "jordsti/hacker-jeopardy",
"score": 3
} |
#### File: webservice/funcs/add_team.py
```python
from ..service_func import service_func, func_error, meta_arg
class add_team(service_func):
def __init__(self):
service_func.__init__(self, "/team/add")
self.name = "Add team"
self.description = "Add a new team to the jeopardy"
self.args.append(meta_arg("key", "Protection Key", "none"))
self.args.append(meta_arg("name", "New Team Name", "none"))
self.team = None
def init(self):
self.team = None
def execute(self, args, server):
key = args["key"]
name = args["name"]
if key != server.key:
raise func_error("Invalid key")
else:
for t in server.game_data.teams:
if t.name == name:
raise func_error("Team name already exists")
new_team = server.game_data.new_team(name)
self.team = new_team
def answer(self):
#returning new team id
data = {"team_id": self.team.id}
return data
```
#### File: webservice/funcs/get_categories_rank.py
```python
from ..service_func import service_func, func_error, meta_arg
class get_categories_rank(service_func):
def __init__(self):
service_func.__init__(self, "/category/ranks")
self.name = "Get categories ranks"
self.description = "-"
self.cats = []
def init(self):
self.cats = []
def execute(self, args, server):
for c in server.game_data.categories:
cat = {
"id": c.id,
"name": c.name,
"ranks_available": []
}
for r in c.ranks_available:
cat["ranks_available"].append(r)
self.cats.append(cat)
def answer(self):
data = {
'categories': self.cats
}
return data
```
#### File: webservice/funcs/get_points_table.py
```python
from ..service_func import service_func, func_error, meta_arg
class get_points_table(service_func):
def __init__(self):
service_func.__init__(self, "/points/table")
self.name = "Get points table"
self.description = "Return the table of points by rank"
self.ranks = []
def init(self):
self.ranks = []
def execute(self, args, server):
ir = 0
for r in server.game_data.points_table.points:
self.ranks.append({"id": ir, "points": r})
ir += 1
def answer(self):
data = {
"ranks": self.ranks
}
return data
```
#### File: webservice/funcs/remove_team.py
```python
from ..service_func import service_func, func_error, meta_arg
class remove_team(service_func):
def __init__(self):
service_func.__init__(self, "/team/remove")
self.name = "Remove team"
self.description = "Remove a team from the Jeopardy Game"
self.args.append(meta_arg("key", "Protection key", "none"))
self.args.append(meta_arg("team", "Team id to remove", "none"))
def execute(self, args, server):
key = args["key"]
team = int(args["team"])
if server.key == key:
teams = server.game_data.teams
server.game_data.teams = []
for t in teams:
if not t.id == team:
server.game_data.teams.append(t)
else:
raise func_error("Key is invalid")
def answer(self):
data = {}
return data
``` |
{
"source": "jordsti/stigame",
"score": 3
} |
#### File: stigame/StiGame/header_list.py
```python
import os
def inner_dir(parent, dirname):
if parent is not None:
full = os.path.join(parent, dirname)
else:
full = dirname
for f in os.listdir(full):
fp = os.path.join(full, f)
if os.path.isfile(fp):
if f.endswith('.h'):
print fp
else:
inner_dir(full, f)
if __name__ == '__main__':
print "StiGame Header List, for Doxygen Doc Review"
print "-------------------------------------------"
inner_dir(None, "./")
```
#### File: gui-editor/wrapper/color.py
```python
__author__ = 'JordSti'
import object_wrap
class color(object_wrap.object_wrap):
def __init__(self, ptr=None):
        object_wrap.object_wrap.__init__(self)
if ptr is None:
self.obj = self.lib.Color_new()
else:
self.obj = ptr
def get_rgba(self):
rgba = [self.lib.Color_getRed(self.obj), self.lib.Color_getGreen(self.obj), self.lib.Color_getBlue(self.obj), self.lib.Color_getAlpha(self.obj)]
return rgba
def set_rgba(self, r, g, b, a):
self.lib.Color_setRGBA(self.obj, r, g, b, a)
```
#### File: gui-editor/wrapper/gui_state.py
```python
__author__ = 'JordSti'
import object_wrap
class gui_state(object_wrap.object_wrap):
def __init__(self):
object_wrap.object_wrap.__init__(self)
self.obj = self.lib.GuiState_new()
def add(self, item):
self.lib.GuiState_add(self.obj, item.obj)
def on_resize(self, width, height):
self.lib.GuiState_onResize(self.obj, width, height)
def render(self):
return self.lib.GuiState_render(self.obj)
```
#### File: gui-editor/wrapper/py_viewport.py
```python
__author__ = 'JordSti'
import surface
class py_viewport:
def __init__(self, width, height):
self.__width = width
self.__height = height
self.current = None
def push(self, state):
self.current = state
state.on_resize(self.__width, self.__height)
def set_dimension(self, width, height):
self.__width = width
self.__height = height
if self.current is not None:
self.current.on_resize(self.__width, self.__height)
def render(self):
bf = self.current.render()
return surface.surface(bf)
def get_dimension(self):
return self.__width, self.__height
```
#### File: gui-editor/wrapper/radio_group.py
```python
__author__ = 'JordSti'
import object_wrap
class radio_group(object_wrap.object_wrap):
def __init__(self, obj=None):
object_wrap.object_wrap.__init__(self)
if obj is not None:
self.obj = obj
else:
self.obj = self.lib.RadioGroup_new()
def add_item(self, item):
self.lib.RadioGroup_addItem(self.obj, item)
```
#### File: gui-editor/wrapper/surface.py
```python
__author__ = 'JordSti'
import object_wrap
import c_library
class surface(object_wrap.object_wrap):
def __init__(self, surface=None):
object_wrap.object_wrap.__init__(self, None)
if surface is not None:
self.obj = surface
else:
self.obj = self.lib.Surface_new()
def save_bmp(self, dest):
self.lib.Surface_saveBmp(self.obj, dest, len(dest))
def get_pixels(self):
pixels = self.lib.Surface_getPixels(self.obj)
return c_library.to_uchar_array(pixels)
def get_width(self):
return self.lib.Surface_getWidth(self.obj)
def get_height(self):
return self.lib.Surface_getHeight(self.obj)
```
#### File: gui-editor/wrapper/value_object.py
```python
__author__ = 'JordSti'
import object_wrap
import c_library
class value_object(object_wrap.object_wrap):
def __init__(self, v_id, text):
object_wrap.object_wrap.__init__(self)
self.obj = self.lib.ValueObject_new(v_id, text)
def get_id(self):
return self.lib.ValueObject_getId(self.obj)
def get_text(self):
return c_library.to_str(self.lib.ValueObject_getText(self.obj))
```
#### File: tools/sprite-editor/resource_widget.py
```python
__author__ = 'JordSti'
from PyQt4 import QtCore, QtGui
import gui
class resource_widget(QtGui.QWidget, gui.Ui_resource_widget):
def __init__(self, resource, parent=None):
super(resource_widget, self).__init__(parent)
self.resource = resource
self.setupUi(self)
self.setLayout(self.layout_main)
self.pixmap = None
self.__init__widget()
def __init__widget(self):
#pixmap init
#self.setMinimumSize(300, 200)
self.pixmap = QtGui.QPixmap()
self.pixmap.loadFromData(self.resource.data)
self.lbl_width_value.setText("%d px" % self.pixmap.width())
self.lbl_height_value.setText("%d px" % self.pixmap.height())
self.le_name.setText(self.resource.name)
#fix lbl image
self.lbl_image.setPixmap(self.pixmap)
self.lbl_image.setFixedSize(self.pixmap.width(), self.pixmap.height())
```
#### File: tools/sprite-editor/var_file.py
```python
__author__ = 'JordSti'
import os
class var_file:
def __init__(self, path=None):
self.path = path
self.__vars_order = []
self.__vars = {}
if self.path is not None:
self.read()
def read(self):
if self.path is not None and os.path.exists(self.path):
fp = open(self.path, 'r')
lines = fp.readlines()
fp.close()
for l in lines:
self.__parse_line(l)
def write(self):
if self.path is not None:
fp = open(self.path, 'w')
for n in self.__vars_order:
line = "%s=%s;\n" % (n, self.__vars[n])
fp.write(line)
fp.close()
def __parse_line(self, line):
data = line.rstrip('\n')
data = data.rstrip('\r')
data = data.rstrip(';')
data = data.split('=')
if len(data) == 2:
self.__vars_order.append(data[0])
self.__vars[data[0]] = data[1]
def get_var(self, key):
return self.__vars[key]
def get_vars_name(self):
return self.__vars_order
if __name__ == '__main__':
vf = var_file('C:\\Users\\JordSti\\git\\stiuniverse-transit\\assets\\sprites\\a0.spr')
vf.path = "test.txt"
vf.write()
for k in vf.get_vars_name():
print "%s = %s" % (k, vf.get_var(k))
``` |
{
"source": "JorDunn/photorename",
"score": 4
} |
#### File: photorename/photorename/photorename.py
```python
import os
import os.path
import sys
from hashlib import md5, sha256, sha512
from docopt import docopt
args = docopt(__doc__, version='photorename v1.0.9')
def get_md5_string(input_file):
m = md5()
m.update(input_file)
md5string = str(m.hexdigest())
return md5string
def get_sha256_string(input_file):
s = sha256()
s.update(input_file)
sha256string = str(s.hexdigest())
return sha256string
def get_sha512_string(input_file):
s = sha512()
s.update(input_file)
sha512string = str(s.hexdigest())
return sha512string
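# Note: the helpers above are fed the file *name* given on the command line
# (see rename() below), so the new file name is a hash of the original
# name/path string, not of the file contents.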
def rename():
try:
print("Input File: " + os.path.realpath(args.input_file))
input_file = os.path.realpath(args.input_file)
except TypeError:
print("You must specify an input file.")
sys.exit(1)
_, file_extention = os.path.splitext(input_file)
if args.output_path:
output_path = os.path.realpath(args.output_path) + "/"
else:
output_path = os.getcwd() + "/"
if args.md5:
file_name = get_md5_string(args.input_file.encode('utf-8')) + file_extention
output_file = f"{output_path}/{file_name}"
elif args.sha256:
file_name = get_sha256_string(args.input_file.encode('utf-8')) + file_extention
output_file = f"{output_path}/{file_name}"
elif args.sha512:
file_name = get_sha512_string(args.input_file.encode('utf-8')) + file_extention
output_file = f"{output_path}/{file_name}"
try:
print(f"Output File: {output_file}")
os.rename(input_file, output_file)
except IOError as error:
print(error)
sys.exit(1)
if __name__ == '__main__':
print(__doc__)
``` |
{
"source": "Jordy19/vTox",
"score": 3
} |
#### File: vTox/src/log.py
```python
import datetime
class Log():
def __init__(self, plugin_name="", debug=False):
""""Constructor.
Args:
plugin_name: The name of the plugin
"""
self.plugin = plugin_name
self.debug_mode = debug
self.now = datetime.datetime.now()
self.timestamp = self.now.strftime("%b-%d-%Y (%H:%M:%S)")
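        # Note: the timestamp is captured once, when the Log object is created,
        # so every message from this instance reuses the same time string.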
def _getFormat(self, message):
"""Returns the string format.
Args:
message: The message
"""
if self.plugin:
return f"{self.timestamp} {self.plugin}: {message}"
else:
return f"{self.timestamp}: {message}"
def info(self, message):
"""Prints the INFO log text.
Args:
message: A string containing text.
"""
text_format = self._getFormat(message)
print(f"{'INFO':5} {text_format}")
def error(self, message):
"""Prints the ERROR log text.
Args:
message: A string containing text.
"""
text_format = self._getFormat(message)
print(f"{'ERROR':5} {text_format}")
def debug(self, message):
"""Prints the DEBUG log text.
Args:
message: A string containing text.
"""
if bool(self.debug_mode):
text_format = self._getFormat(message)
print(f"{'DEBUG':5} {text_format}")
else:
pass
``` |
{
"source": "Jordy24/spacetech-kubesat",
"score": 3
} |
#### File: spacetech-kubesat/examples/hello_service.py
```python
import asyncio
import time
from kubesat.base_service import BaseService
SERVICE_TYPE = 'hello'
hello = BaseService(service_type=SERVICE_TYPE,
config_path='./service.json')
@hello.schedule_callback(2)
# Send a hello message every two seconds.
async def send_hello_message(nats, shared_storage, logger):
"""
Send a hello message.
Args:
nats (NatsHandler): connection to nats used to send and receive messages
shared_storage (dict): dictionary that stores local data for the service
logger (NatsLogger): logger that can be used to communicate the state of the system
"""
message = nats.create_message({"message": "hello"})
# Send a hello message to public.hello subject
await nats.send_message("public.hello", message)
print(f"SEND : {message.encode_json()}")
@hello.subscribe_nats_callback("public.hello")
# Subscribe public.hello subject
async def receive_ping_message(message, nats, shared_storage, logger):
message_json = message.encode_json()
print(f"RECEIVED : {message_json}")
shared_storage["last_sent"] = message_json['time_sent']
@hello.startup_callback
# Invoke the startup function at the start time
async def startup(nats_handler, shared_storage, logger):
print(f"{SERVICE_TYPE} in {hello.sender_id} has started.")
if __name__ == '__main__':
# Start the hello service
hello.run()
```
#### File: spacetech-kubesat/kubesat/base_simulation.py
```python
import asyncio
from aiologger.loggers.json import JsonLogger
from kubesat.base_service import BaseService
from kubesat.message import Message
from kubesat.nats_handler import NatsHandler
from kubesat.nats_logger import NatsLoggerFactory
from kubesat.validation import validate_json, MessageSchemas
class BaseSimulation(BaseService):
def __init__(self, service_type: str, schema: dict, config_path: str = None):
"""
Registers a NATS callback that subscribes to the subject "simulation.timestep". When the callback receives a message,
it updates the current time internal to the simulation's NATS handler object.
"""
super().__init__(service_type, schema, config_path)
# subscribing to timestep by default to update time in nats_handler
@self.subscribe_nats_callback("simulation.timestep", MessageSchemas.TIMESTEP_MESSAGE)
async def simulation_timepulse(message: Message, nats_handler: NatsHandler, shared_storage: dict, logger: JsonLogger):
nats_handler.time_sent = message.data["time"]
async def _load_config(self):
"""
Override _load_config to get the configuration from a cluster service that has a callback registered on channel "initialize.service"
for simulation
"""
try:
# requesting a config from the config service
message = self.nats_client.create_message(
self.service_type, MessageSchemas.SERVICE_TYPE_MESSAGE)
print(
f"Requesting config from config service for node {self.service_type}")
config_response = await self.nats_client.request_message("initialize.service", message, MessageSchemas.CONFIG_MESSAGE, timeout=3)
print(f"Got config from config service: {config_response}")
print(f"Validating ...")
# validate the shared storage section of the config
validate_json(
config_response.data["shared_storage"], self._schema)
self.sender_id = config_response.data["sender_id"]
self.shared_storage = config_response.data["shared_storage"]
# write the shared storage and sender ID to Redis
self.redis_client.set_sender_id(self.sender_id)
self.redis_client.set_shared_storage(self.shared_storage)
print(
f"Successfully initialized {self.sender_id} {self.service_type} from config service")
except:
try:
await super()._load_config()
except Exception as e:
raise ValueError(
f"Failed to load configuration: {e}")
```
#### File: spacetech-kubesat/kubesat/nats_logger.py
```python
import asyncio
import json
from datetime import datetime
from aiologger.loggers.json import JsonLogger
from aiologger.formatters.json import ExtendedJsonFormatter
from aiologger.handlers.base import Handler
from kubesat.message import Message
from kubesat.validation import MessageSchemas
from kubesat.nats_handler import NatsHandler
class NatsLoggingHandler(Handler):
"""
Custom Handler to log Nats
"""
def __init__(self, nats_handler, service_type):
"""
Initialize a NatsLoggingHandler object
Args:
nats_handler: nats_handler objects
service_type: type of service initializing this object
"""
Handler.__init__(self)
self.nats_handler = nats_handler
self.channel = f"logging.{service_type}.{nats_handler.sender_id}"
self.sender_id = nats_handler.sender_id
self.service_type = service_type
async def emit(self, record):
"""
Format and send the record
Args:
record: nats record
"""
msg = json.loads(self.formatter.format(record))
# change the way that the time is accessed
msg = self.nats_handler.create_message(msg, MessageSchemas.LOG_MESSAGE)
if self.nats_handler:
assert await self.nats_handler.send_message(self.channel, msg) is True
async def close(self):
"""
Close the logger when there is no nats_handler
"""
self.nats_handler = None
class NatsLoggerFactory:
"""
Creates Nats loggers
"""
@staticmethod
def get_logger(nats_handler, service_type):
"""
Creates and returns a nats logging handler
Args:
nats_handler: a nats_handler object
service_type: type of service
"""
handler = NatsLoggingHandler(nats_handler, service_type)
formatter = ExtendedJsonFormatter(exclude_fields=["file_path"])
handler.formatter = formatter
logger = JsonLogger(name=nats_handler.sender_id)
logger.add_handler(handler)
return logger
```
#### File: simulation/cluster/cluster_service.py
```python
import socket
from kubesat.message import Message
from kubesat.base_simulation import BaseSimulation
from kubesat.services import ServiceTypes
from kubesat.validation import MessageSchemas, SharedStorageSchemas
simulation = BaseSimulation("cluster", SharedStorageSchemas.CLUSTER_SERVICE_STORAGE)
@simulation.subscribe_nats_callback("simulation.timestep", MessageSchemas.TIMESTEP_MESSAGE)
async def send_ip_address(message, nats_handler, shared_storage, logger):
"""
Broadcasts the nats host on each time step which will be read by the ground station
Args:
message (Message): incoming message with the timestep
nats_handler (NatsHandler): NatsHandler used to interact with NATS
shared_storage (dict): Dictionary to persist memory across callbacks
logger (JSONLogger): Logger that can be used to log info, error, etc,
"""
hostname = nats_handler.api_host
ip_message = nats_handler.create_message(hostname, MessageSchemas.IP_ADDRESS_MESSAGE)
await nats_handler.send_message("cluster.ip", ip_message)
@simulation.subscribe_nats_callback("command.cluster", MessageSchemas.CLUSTER_MESSAGE)
async def cluster(message, nats_handler, shared_storage, logger):
"""
Receives command from groundstation to cluster with other satellites and runs the
bash script/edits config map so that clusters satellites
Args:
message (Message): incoming message with the ip address/host names of other sats
nats_handler (NatsHandler): NatsHandler used to interact with NATS
shared_storage (dict): Dictionary to persist memory across callbacks
logger (JSONLogger): Logger that can be used to log info, error, etc,
"""
if message.data["recipient"] == nats_handler.sender_id:
# This is where the controller needs to be used to cluster the satellites
        # 'message.data["ip_map"]' is a list of the API hosts the satellite can cluster with
await logger.info(f'{nats_handler.sender_id} is clustering with {message.data["ip_map"]}')
print(f"{nats_handler.sender_id} is clustering with {message.data['ip_map']}")
pass
``` |
{
"source": "Jordy281/Tic_Tac_Toe_SuperComputer",
"score": 4
} |
#### File: Jordy281/Tic_Tac_Toe_SuperComputer/gym.py
```python
import numpy as np
import copy
from random import randrange
import game
def trainingAgainstRand1(states, t):
nStates = np.shape(t)[0]
nActions = np.shape(t)[1]
Q = np.zeros((nStates,nActions))
numberOfTimesStateVisted = np.zeros((nStates))
setQNoBacktrack(Q,t)
mu = 0.7
gamma = 0.25
epsilon = .15
epsilon2 = 1
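    # Tabular Q-learning with a visit-count-scaled step size:
    #   Q(s, a) += mu / (N(s') + 1) * (r + gamma * max_a' Q(s', a') - Q(s, a))
    # The learner moves first and acts epsilon-greedily; epsilon2 = 1 makes the
    # opponent play uniformly at random.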
nits = 0
TDWinns=0
TDDraww=0
TDLosss=0
while nits < 1000000:
# Pick initial state
s = 0
# Stop when the accepting state is reached
turn=0
while game.threecheck(states[s]) is False and turn<8:
# epsilon-greedy
if (np.random.rand()<epsilon):
indices=[]
for i in range(0,9):
if t[s][i]>-1:
indices.append(i)
pick = randrange(len(indices))
a = indices[pick]
else:
a = np.argmax(Q[s,:])
sprime = t[s][a]
numberOfTimesStateVisted[sprime]+=1
turn+=1
#If this move wins us the game
if game.threecheck(states[sprime]) is True:
Q[s,a] += mu/(numberOfTimesStateVisted[sprime]+1) * (100 + gamma*np.max(Q[sprime,:]) - Q[s,a])
TDWinns= TDWinns+1
s=sprime
elif turn==8:
TDDraww+=1
#If not, let the computer pick
else:
Q[s,a] += mu/(numberOfTimesStateVisted[sprime]+1) * (gamma*np.max(Q[sprime,:]) - Q[s,a])
# Have the computer chooses a random action -> epsilon2 = 1
if (np.random.rand()<epsilon2):
#we need to chose a random action
indices=[]
for i in range(0,9):
if t[sprime][i]>-1:
indices.append(i)
pick = randrange(len(indices))
a2 = indices[pick]
#a is the index of the next state to move to
else:
a2 = np.argmax(Q[sprime,:])
"""
if threecheck(board) is True:
Q[s,a] += mu/(numberOfTimesStateVisted[sprime]+1) * (r + gamma*np.min(Q[sprime,np.where(t[s,:]>-1)]) - Q[s,a])
"""
sDoublePrime = t[sprime][a2]
if game.threecheck(states[sDoublePrime]):
r=-100.
else:
r=0
Q[sprime,a2] += mu/(numberOfTimesStateVisted[sDoublePrime]+1) * (r + gamma*np.max(Q[sDoublePrime,:]) - Q[sprime,a2])
numberOfTimesStateVisted[sDoublePrime]+=1
s = sDoublePrime
turn+=1
if game.threecheck(states[s])is True:
TDLosss+=1
elif turn ==8:
TDDraww+=1
nits = nits+1
if nits%100==0:
TDWinPercentageTrainingFirst.append(TDWinns/float(nits))
TDDrawPercentageTrainingFirst.append(TDDraww/float(nits))
TDLossPercentageTrainingFirst.append(TDLosss/float(nits))
return Q
#print Q[0]
def trainingAgainstRand2(states, t):
nStates = np.shape(t)[0]
nActions = np.shape(t)[1]
Q = np.zeros((nStates,nActions))
numberOfTimesStateVisted = np.zeros((nStates))
setQNoBacktrack(Q,t)
mu = 0.7
gamma = 0.25
epsilon = 1
epsilon2 = .15
nits = 0
TDWins=0
TDDraw=0
TDLoss=0
while nits < 1000000:
# Pick initial state
s = 0
# Stop when the accepting state is reached
turn=0
while game.threecheck(states[s]) is False and turn<8:
# epsilon-greedy
if (np.random.rand()<epsilon):
"""
we need to chose a random action
"""
indices=[]
for i in range(0,9):
if t[s][i]>-1:
indices.append(i)
pick = randrange(len(indices))
a = indices[pick]
"""
a is the index of the next state to move to
"""
#print s,a
else:
a = np.argmax(Q[s,:])
# For this example, new state is the chosen action
sprime = t[s][a]
numberOfTimesStateVisted[sprime]+=1
turn+=1
#If this move wins us the game
if game.threecheck(states[sprime]) is True:
Q[s,a] += mu/(numberOfTimesStateVisted[sprime]+1) * (-100 + gamma*np.max(Q[sprime,:]) - Q[s,a])
TDLoss+=1
s=sprime
elif turn==8:
Q[s,a] += mu/(numberOfTimesStateVisted[sprime]+1) * (20 + gamma*np.max(Q[sprime,:]) - Q[s,a])
TDDraw+=1
#If not, let the computer pick
else:
Q[s,a] += mu/(numberOfTimesStateVisted[sprime]+1) * (gamma*np.max(Q[sprime,:]) - Q[s,a])
# Have the computer chooses a random action -> epsilon2 = 1
if (np.random.rand()<epsilon2):
#we need to chose a random action
indices=[]
for i in range(0,9):
if t[sprime][i]>-1:
indices.append(i)
pick = randrange(len(indices))
a2 = indices[pick]
#a is the index of the next state to move to
else:
a2 = np.argmax(Q[sprime,:])
"""
if game.threecheck(board) is True:
Q[s,a] += mu/(numberOfTimesStateVisted[sprime]+1) * (r + gamma*np.min(Q[sprime,np.where(t[s,:]>-1)]) - Q[s,a])
"""
sDoublePrime = t[sprime][a2]
if game.threecheck(states[sDoublePrime]):
r=80
elif turn ==7:
r=20
TDDraw+=1
else:
r=0
#print "here"
Q[sprime,a2] += mu/(numberOfTimesStateVisted[sDoublePrime]+1) * (r + gamma*np.max(Q[sDoublePrime,:]) - Q[sprime,a2])
numberOfTimesStateVisted[sDoublePrime]+=1
s = sDoublePrime
turn+=1
if game.threecheck(states[s])is True:
TDWins+=1
nits = nits+1
if nits%100==0:
TDWinPercentageTrainingSec.append(TDWins/float(nits))
TDDrawPercentageTrainingSec.append(TDDraw/float(nits))
TDLossPercentageTrainingSec.append(TDLoss/float(nits))
return Q
#print Q[0]
def trainingAgainstLearner(states, t):
nStates = np.shape(t)[0]
nActions = np.shape(t)[1]
Qplayer1 = np.zeros((nStates,nActions))
Qplayer2 = np.zeros((nStates,nActions))
numberOfTimesStateVisted = np.zeros((nStates))
setQNoBacktrack(Qplayer1,t)
setQNoBacktrack(Qplayer2,t)
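    # Self-play: both players learn from the same games, each keeping its own
    # Q-table (Qplayer1 for the first mover, Qplayer2 for the second) and acting
    # epsilon-greedily with respect to it.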
mu = 0.7
gamma = 0.25
epsilon = .1
epsilon2 = .15
nits = 0
Player1Win=0
Draw=0
Player2Win=0
while nits < 1000000:
# Pick initial state
s = 0
# Stop when the accepting state is reached
turn=0
while game.threecheck(states[s]) is False and turn<8:
# epsilon-greedy
if (np.random.rand()<epsilon):
"""
we need to chose a random action
"""
indices=[]
for i in range(0,9):
if t[s][i]>-1:
indices.append(i)
pick = randrange(len(indices))
a = indices[pick]
"""
a is the index of the next state to move to
"""
#print s,a
else:
a = np.argmax(Qplayer1[s,:])
# For this example, new state is the chosen action
sprime = t[s][a]
turn+=1
numberOfTimesStateVisted[sprime]+=1
#If this move wins us the game
if game.threecheck(states[sprime]) is True:
Qplayer2[s,a] += mu/(numberOfTimesStateVisted[sprime]+1) * (-100 + gamma*np.max(Qplayer2[sprime,:]) - Qplayer2[s,a])
Qplayer1[s,a] += mu/(numberOfTimesStateVisted[sprime]+1) * (100 + gamma*np.max(Qplayer1[sprime,:]) - Qplayer1[s,a])
Player1Win+=1
s=sprime
elif turn==8:
Qplayer2[s,a]+= mu/(numberOfTimesStateVisted[sprime]+1) * (20 + gamma*np.max(Qplayer2[sprime,:]) - Qplayer2[s,a])
Qplayer1[s,a]+= mu/(numberOfTimesStateVisted[sprime]+1) * (gamma*np.max(Qplayer1[sprime,:]) - Qplayer1[s,a])
#If not, let the computer pick
Draw+=1
else:
Qplayer1[s,a] += mu/(numberOfTimesStateVisted[sprime]+1) * (gamma*np.max(Qplayer1[sprime,:]) - Qplayer1[s,a])
Qplayer2[s,a] += mu/(numberOfTimesStateVisted[sprime]+1) * (gamma*np.max(Qplayer2[sprime,:]) - Qplayer2[s,a])
# Have the computer chooses a random action -> epsilon2 = 1
if (np.random.rand()<epsilon2):
#we need to chose a random action
indices=[]
for i in range(0,9):
if t[sprime][i]>-1:
indices.append(i)
pick = randrange(len(indices))
a2 = indices[pick]
#a is the index of the next state to move to
else:
a2 = np.argmax(Qplayer2[sprime,:])
"""
if game.threecheck(board) is True:
Q[s,a] += mu/(numberOfTimesStateVisted[sprime]+1) * (r + gamma*np.min(Q[sprime,np.where(t[s,:]>-1)]) - Q[s,a])
"""
sDoublePrime = t[sprime][a2]
if game.threecheck(states[sDoublePrime]):
r1=-100
r2=80
elif turn==7:
r1=0
r2=20
Draw+=1
else:
r1=0
r2=0
#print "here"
Qplayer2[sprime,a2] += mu/(numberOfTimesStateVisted[sDoublePrime]+1) * (r2 + gamma*np.max(Qplayer2[sDoublePrime,:]) - Qplayer2[sprime,a2])
Qplayer1[sprime,a2] += mu/(numberOfTimesStateVisted[sDoublePrime]+1) * (r1 + gamma*np.max(Qplayer1[sDoublePrime,:]) - Qplayer1[sprime,a2])
numberOfTimesStateVisted[sDoublePrime]+=1
s = sDoublePrime
turn+=1
if game.threecheck(states[s])is True:
Player2Win+=1
nits = nits+1
if nits%100==0:
Player1PercentageTraining.append(Player1Win/float(nits))
DrawPercentageTraining.append(Draw/float(nits))
Player2PercentageTraining.append(Player2Win/float(nits))
#print Q[0]
return [Qplayer1, Qplayer2]
``` |
{
"source": "Jordy281/TomTom",
"score": 3
} |
#### File: TomTom/Listening/__init__.py
```python
def start_listening(sr, r):
    #r = sr.Recognizer()
m = sr.Microphone()
try:
print("A moment of silence, please...")
with m as source: r.adjust_for_ambient_noise(source)
print("Set minimum energy threshold to {}".format(r.energy_threshold))
print("Say something!")
with m as source: audio = r.listen(source)
return audio
#Need to discover what it passes to sphinx_audio (wav file/ mp3 / raw data)
except KeyboardInterrupt:
pass
```
#### File: TomTom/Talking/speak.py
```python
import pyttsx
speech_engine = pyttsx.init('espeak') # see http://pyttsx.readthedocs.org/en/latest/engine.html#pyttsx.init
speech_engine.setProperty('rate', 150)
def speak(text):
speech_engine.say(text)
speech_engine.runAndWait()
print text
``` |
{
"source": "jordy6/Facebooker",
"score": 3
} |
#### File: Facebooker/Facebooker/data_type.py
```python
class PostInfo:
def __init__(self,
post_id:str,
author:str,
content:str,
time:str
):
self.id = post_id
self.author = author
self.content = content
self.time = time
class CommentInfo:
def __init__(self,
comment_id:str,
author:str,
content:str,
time:str,
):
self.id = comment_id
self.author = author
self.content = content
self.time = time
```
#### File: Facebooker/Facebooker/facebook.py
```python
import requests
import pickle
import os
import logging
import time
import json
from requests_toolbelt import MultipartEncoder
from bs4 import BeautifulSoup
try:
import data_type
import privacy_level
import like_action
except ModuleNotFoundError:
from . import data_type
from . import privacy_level
from . import like_action
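# Helper for scraping the mobile site: given a two-character (obfuscated) CSS
# class name, advance its trailing letter by `num`, carrying into the first
# letter when it passes 'z'. get_msg() below uses it (with num=1) to derive the
# neighbouring class name that appears to hold a message's text content.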
def letter_adder(string, num):
if ord(string[1]) + num%26 >= ord('z'):
string = chr(ord(string[0])+1) + chr(ord(string[1])+ num - 26)
else:
string = string[0] + chr(ord(string[1]) + num)
return string
class API:
'''
FB post structure:
post
|
--comment
|
--reply
'''
def __init__(self):
headers={
'scheme': 'https',
'accept': '*/*',
'accept-language': 'zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7,zh-CN;q=0.6,ja;q=0.5',
'user-agent':'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:76.0) ' + \
'Gecko/20100101 Firefox/76.0',
}
self.session = requests.session()
self.session.headers.update(headers)
self.login_check = False
def login(self, email, password):
# get input field
self.session.cookies.clear()
if os.path.isfile(email+'.cookie'):
self._load_cookies(email+'.cookie')
else:
url = 'https://www.facebook.com/'
req = self.session.get(url)
soup = BeautifulSoup(req.text,'lxml')
all_input_data = soup.find('form').findAll('input')
data = {}
for input_data in all_input_data:
data[input_data.get('name')] = input_data.get('value')
# input email and password
data['email'] = email
data['pass'] = password
#login
login_url = 'https://www.facebook.com/login'
req = self.session.post(login_url,data=data)
self.user_id = self.session.cookies.get_dict()['c_user']
# get hidden input data
url = 'https://m.facebook.com/'
req = self.session.get(url)
soup = BeautifulSoup(req.text, 'lxml')
try:
self.fb_dtsg = soup.find('input', {'name':'fb_dtsg'}).get('value')
except Exception as e:
logging.debug(e)
logging.error('username or password is invalid')
return False
self.login_check = True
self._save_cookies(email+'.cookie')
self.send_msg_data = {
'fb_dtsg': self.fb_dtsg,
'body':'',
'send':'傳送',
'wwwupp':'C3'
}
self.post_data_template = {
'fb_dtsg': self.fb_dtsg,
'target': self.user_id,
'c_src': 'feed',
'cwevent': 'composer_entry',
'referrer': 'feed',
'ctype': 'inline',
'cver': 'amber',
'rst_icv': None,
'view_post': 'view_post',
}
def _save_cookies(self, filename):
with open(filename, 'wb') as f:
pickle.dump(self.session.cookies, f)
def _load_cookies(self, filename):
with open(filename,'rb') as f:
self.session.cookies.update(pickle.load(f))
# post methods
def get_post(self, post_id, group_id=None):
if not self.login_check:
logging.error('You should login first')
return
url = 'https://m.facebook.com/story.php?' + \
'story_fbid=%s&id=1'%str(post_id)
if group_id:
            url = 'https://m.facebook.com/groups/%s?'%str(group_id) + \
                  'view=permalink&id=%s'%str(post_id)
req = self.session.get(url)
soup = BeautifulSoup(req.text,'lxml')
post_content = soup.find('div',class_='z')
author = post_content.find('h3').text
content_lines = post_content.find('div', {'data-ft':'{"tn":"*s"}'}).findAll('p')
content = ''
for content_line in content_lines:
content += content_line.text
content += '\n'
time = post_content.find('footer').find('abbr').text
post_info = data_type.PostInfo(post_id,
author,
content,
time)
if not post_content:
logging.error('This post is not supported or you don\'t have acess authority')
return post_info
def like_post(self, post_id, action=like_action.LIKE):
if not self.login_check:
logging.error('You should login first')
return
if action > 6 or action < 0:
logging.error('This action is not supported')
return
url = 'https://m.facebook.com/reactions/picker/?ft_id=' + str(post_id)
req = self.session.get(url)
try:
soup = BeautifulSoup(req.text, 'lxml')
root = soup.find('div', id='root').find('table', role='presentation')
action_href = [a.get('href') for a in root.findAll('a')][:-1]
like_url = 'https://m.facebook.com' + action_href[action]
self.session.get(like_url)
except:
logging.error('You don\'t have access authority')
def get_user_post_list(self, user_id, num=10):
if not self.login_check:
logging.error('You should login first')
return
url = 'https://m.facebook.com/profile/timeline/stream/?' + \
'end_time=%s&'%str(time.time()) + \
'profile_id=%s'%str(user_id)
posts_id = []
while len(posts_id) < num:
req = self.session.get(url)
soup = BeautifulSoup(req.text, 'lxml')
posts = soup.find('section').findAll('article', recursive=False)
for post in posts:
data = json.loads(post.get('data-ft'))
post_id = data['mf_story_key']
posts_id.append(post_id)
if len(posts_id) >= num:
break
if len(posts_id) >= num:
break
next_href = soup.find('div', id='u_0_0').find('a').get('href')
url = 'https://m.facebook.com' + next_href
return posts_id
def get_group_post_list(self, group_id, num=10):
if not self.login_check:
logging.error('You should login first')
return
url = 'https://m.facebook.com/%s'%str(group_id)
posts_id = []
while len(posts_id) < num:
req = self.session.get(url)
soup = BeautifulSoup(req.text, 'lxml')
posts = soup.find('section').findAll('article', recursive=False)
for post in posts:
data = json.loads(post.get('data-ft'))
post_id = data['mf_story_key']
posts_id.append(post_id)
if len(posts_id) >= num:
break
if len(posts_id) >= num:
break
next_href = soup.find('section').next_sibling.find('a').get('href')
url = 'https://m.facebook.com' + next_href
return posts_id
def get_fanpage_post_list(self, fanpage_id, num=10):
if not self.login_check:
logging.error('You should login first')
return
url = 'https://m.facebook.com/%s'%str(fanpage_id)
posts_id = []
while len(posts_id) < num:
req = self.session.get(url)
soup = BeautifulSoup(req.text, 'lxml')
posts = soup.find('section').findAll('article', recursive=False)
for post in posts:
data = json.loads(post.get('data-ft'))
post_id = data['mf_story_key']
posts_id.append(post_id)
if len(posts_id) >= num:
break
if len(posts_id) >= num:
break
next_href = soup.find('div', id='recent').next_sibling.find('a').get('href')
url = 'https://m.facebook.com' + next_href
return posts_id
def post(self,
content,
privacy_level=privacy_level.PUBLIC):
if not self.login_check:
logging.error('You should login first')
return
post_data = self.post_data_template
url = 'https://m.facebook.com/composer/mbasic/'
post_data['xc_message'] = content
post_data['privacyx'] = privacy_level
self.session.post(url, data=post_data)
def post_to_target(self, content, target_id=None, target_type=None):
''' target_type:
0 : user
1 : group
2 : fanpage
'''
if not self.login_check:
logging.error('You should login first')
return
referrer = ['timeline', 'group', 'pages_feed']
c_src = ['timeline_other', 'group', 'page_self']
post_data = self.post_data_template
url = 'https://m.facebook.com/composer/mbasic/'
post_data['xc_message'] = content
post_data['referrer'] = referrer[target_type]
post_data['c_src'] = c_src[target_type]
post_data['target'] = target_id
post_data['id'] = target_id
self.session.post(url, data=post_data)
def fanpage_post(self, content, fanpage_id):
if not self.login_check:
logging.error('You should login first')
return
post_data = self.post_data_template
url = 'https://m.facebook.com/composer/mbasic/?av=%s'%str(fanpage_id)
post_data['xc_message'] = content
post_data['referrer'] = 'pages_feed'
post_data['c_src'] = 'page_self'
post_data['target'] = fanpage_id
self.session.post(url, data=post_data)
def fanpage_post_photo(self, text_content, image, fanpage_id):
url = 'https://m.facebook.com/composer/mbasic/' + \
'?c_src=page_self&referrer=pages_feed&' + \
'target=%s&'%fanpage_id + \
'icv=lgc_view_photo&av=%s'%fanpage_id
req = self.session.get(url)
soup = BeautifulSoup(req.text,'lxml')
form = soup.find('form')
all_input_data = form.findAll('input')
data = {}
for input_data in all_input_data:
data[input_data.get('name')] = input_data.get('value')
url = 'https://upload.facebook.com/_mupload_/composer/?av=%s'%fanpage_id
data['file1'] = ('image',image,'image')
data['xc_message'] = text_content
m_data = MultipartEncoder(
fields = data
)
self.session.post(url, data=m_data, headers={'Content-Type': m_data.content_type})
# comment methods
def get_comments(self, post_id, num=10, start=0):
if not self.login_check:
logging.error('You should login first')
return
comment_info_list = []
while num > 0:
url = 'https://m.facebook.com/story.php?' + \
'story_fbid=%s&id=1&p=%s'%(str(post_id),start)
req = self.session.get(url)
soup = BeautifulSoup(req.text,'lxml')
try:
div = soup.find('div',id='ufi_%s'%str(post_id))
comment_div = div.find('div',id='sentence_%s'%str(post_id)).next_sibling
comments = comment_div.findAll('div', recursive=False)
comments.reverse()
except Exception as e:
logging.debug(e)
logging.error('You don\'t have access authority')
return
for comment in comments:
try:
comment_author = comment.find('h3').find('a').text
comment_id = comment.get('id')
comment_content = comment.find('h3').next_sibling.text
comment_time = comment.find('abbr').text
                    comment_info = data_type.CommentInfo(
                        comment_id,
                        comment_author,
                        comment_content,
                        comment_time,
                    )
comment_info_list.append(comment_info)
num -= 1
except:
pass
pre_page_div = comment_div.find('div', id='see_prev_%s'%str(post_id))
if pre_page_div:
pre_href = pre_page_div.find('a').get('href')
pre_href = pre_href[pre_href.find('p='):]
start = pre_href[2:pre_href.find('&')]
else:
break
return comment_info_list
def delete_comment(self, post_id, comment_id):
if not self.login_check:
logging.error('You should login first')
return
url = 'https://m.facebook.com/ufi/delete/?' + \
'delete_comment_id=%s'%str(comment_id) + \
'&delete_comment_fbid=%s'%str(comment_id) + \
'&ft_ent_identifier=%s'%str(post_id)
data = {'fb_dtsg': self.fb_dtsg}
return self.session.post(url, data=data)
def comment(self, post_id, content):
url = 'https://m.facebook.com/a/comment.php?' + \
'fs=8&actionsource=2&comment_logging' + \
'&ft_ent_identifier=%s'%str(post_id)
comment = {'comment_text':content,'fb_dtsg':self.fb_dtsg}
return self.session.post(url, data=comment)
# reply method
def reply(self, post_id ,comment_id, content):
if not self.login_check:
logging.error('You should login first')
return
url = 'https://m.facebook.com/a/comment.php?' + \
'parent_comment_id=%s'%str(comment_id) + \
'&ft_ent_identifier=%s'%str(post_id)
data = {'fb_dtsg': self.fb_dtsg, 'comment_text':content}
self.session.post(url, data=data)
# messenger method
def get_msg(self, chat_room_id, num=1):
if not self.login_check:
logging.error('You should login first')
return
url = 'https://m.facebook.com/messages/read/?tid=%s'%str(chat_room_id)
send_from = []
content = []
time = []
while num > 0:
req = self.session.get(url)
soup = BeautifulSoup(req.text, 'lxml')
msg_group = soup.find('div', id='messageGroup')
if len(msg_group) == 1:
index = 0
else:
index = 1
msgs = msg_group.findAll('div', recursive=False)[index].findAll('div', recursive=False)
if msgs:
msgs.reverse()
for msg in msgs:
content_class = letter_adder(msg.get('class')[-1], 1)
try:
msg_contents = msg.find('div', class_=content_class). \
find('div').findAll('span')
for msg_content in msg_contents:
send_from.append(msg.find('strong').text)
content.append(msg_content.text)
time.append(msg.find('abbr').text)
num -= 1
if num <= 0:
break
if num <= 0:
break
except:
logging.debug('Get non text message')
pass
pre_page = msg_group.find('div', id='see_older')
if not pre_page:
break
href = pre_page.find('a').get('href')
url = 'https://m.facebook.com' + href
return list(zip(send_from, content, time))
def get_unread_chat(self):
if not self.login_check:
logging.error('You should login first')
return
url = 'https://m.facebook.com/messages/?folder=unread'
req = self.session.get(url)
soup = BeautifulSoup(req.text, 'lxml')
unread_chats = soup.find('div', id='root').find('section').findAll('table')
unread_chat_room_id = []
for unread_chat in unread_chats:
href = unread_chat.find('a').get('href')
if href.find('cid.c') >= 0:
chat_room_id = href[href.find('cid.c.')+6:href.find('%')]
if chat_room_id == self.user_id:
chat_room_id = href[href.find('%')+3:href.find('&')]
else:
chat_room_id = href[href.find('cid.g.')+6:href.find('&')]
unread_chat_room_id.append(chat_room_id)
return unread_chat_room_id
def send_msg(self, chat_room_id, content):
if not self.login_check:
logging.error('You should login first')
return
url = 'https://m.facebook.com/messages/send/'
if len(str(chat_room_id)) > len(self.user_id):
self.send_msg_data['tids'] = 'cid.g.%s'%str(chat_room_id)
else:
self.send_msg_data['tids'] = '%s'%str(chat_room_id)
self.send_msg_data['ids[%s]'%str(chat_room_id)] = str(chat_room_id)
self.send_msg_data['body'] = content
self.session.post(url, data=self.send_msg_data)
``` |
{
"source": "jordyantunes/Imagine",
"score": 2
} |
#### File: src/imagine/goal_sampler.py
```python
import numpy as np
from mpi4py import MPI
from src.imagine.goal_generator.simple_sentence_generator import SentenceGeneratorHeuristic
from src import logger
class GoalSampler:
def __init__(self,
policy_language_model,
reward_language_model,
goal_dim,
one_hot_encoder,
params):
self.policy_language_model = policy_language_model
self.reward_language_model = reward_language_model
self.goal_dim = goal_dim
self.params = params
self.nb_feedbacks = 0
self.nb_positive_feedbacks = 0
self.nb_negative_feedbacks = 0
self.feedback2id = dict()
self.id2feedback = dict()
self.id2oracleid = dict()
self.feedback2one_hot = dict()
self.id2one_hot = dict()
self.feedback_memory = dict(memory_id=[],
string=[],
iter_discovery=[],
target_counter=[],
reached_counter=[],
oracle_id=[],
f1_score=[],
policy_encoding=[],
reward_encoding=[],
imagined=[],
)
self.imagined_goals = dict(string=[],
competence=[],
lp=[])
self.one_hot_encoder = one_hot_encoder
self.goal_generator = SentenceGeneratorHeuristic(params['train_descriptions'],
params['test_descriptions'],
sentences=None,
method=params['conditions']['imagination_method'])
self.nb_discovered_goals = 0
self.score_target_goals = None
self.perceived_learning_progress = None
self.perceived_competence = None
self.feedback_stats = None
self.rank = MPI.COMM_WORLD.Get_rank()
self.num_cpus = params['experiment_params']['n_cpus']
self.rollout_batch_size = params['experiment_params']['rollout_batch_size']
self.not_imagined_goal_ids = np.array([])
self.imagined_goal_ids = np.array([])
def store_reward_function(self, reward_function):
self.reward_function = reward_function
def update_embeddings(self):
        # embeddings must be updated when the language model is updated
for i, goal_str in enumerate(self.feedback_memory['string']):
if self.reward_language_model is not None:
reward_encoding = self.reward_language_model.encode(goal_str)
self.feedback_memory['reward_encoding'][i] = reward_encoding.copy()
policy_encoding = self.policy_language_model.encode(goal_str)
self.feedback_memory['policy_encoding'][i] = policy_encoding.copy()
def add_entries_to_feedback_memory(self, str_list, episode_count, imagined):
for goal_str in str_list:
if goal_str not in self.feedback2id.keys():
memory_id = self.nb_discovered_goals
if goal_str in self.params['train_descriptions']:
oracle_id = self.params['train_descriptions'].index(goal_str)
else:
oracle_id = None
one_hot = self.one_hot_encoder.encode(goal_str.lower().split(" "))
self.feedback2one_hot[goal_str] = one_hot
self.id2one_hot[memory_id] = one_hot
if self.reward_language_model is not None:
reward_encoding = self.reward_language_model.encode(goal_str)
self.feedback_memory['reward_encoding'].append(reward_encoding.copy())
policy_encoding = self.policy_language_model.encode(goal_str)
self.feedback2id[goal_str] = memory_id
self.id2oracleid[memory_id] = oracle_id
self.id2feedback[memory_id] = goal_str
self.feedback_memory['memory_id'].append(memory_id)
self.feedback_memory['oracle_id'].append(oracle_id)
self.feedback_memory['string'].append(goal_str)
self.feedback_memory['target_counter'].append(0)
self.feedback_memory['reached_counter'].append(0)
self.feedback_memory['iter_discovery'].append(episode_count)
self.feedback_memory['f1_score'].append(0)
self.feedback_memory['policy_encoding'].append(policy_encoding.copy())
self.feedback_memory['imagined'].append(imagined)
self.nb_discovered_goals += 1
elif goal_str in self.feedback2id.keys() and not imagined: # if goal previously imagined is discovered later, change its status
ind = self.feedback_memory['string'].index(goal_str)
if self.feedback_memory['imagined'][ind] == 1:
self.feedback_memory['imagined'][ind] = 0
logger.info('Goal already imagined:', goal_str)
def update_discovered_goals(self,
new_goals_str,
episode_count,
epoch):
# only done in cpu 0
self.add_entries_to_feedback_memory(str_list=new_goals_str,
episode_count=episode_count,
imagined=0)
# Decide whether to generate new goals
goal_invention = self.params['conditions']['goal_invention']
imagined = False
if 'from_epoch' in goal_invention:
from_epoch = int(goal_invention.split('_')[-1])
if epoch > from_epoch:
imagined = True
if len(new_goals_str) > 0 and imagined:
new_imagined_goals = []
inds_not_imagined = np.argwhere(np.array(self.feedback_memory['imagined']) == 0).flatten()
self.goal_generator.update_model(np.array(self.feedback_memory['string'])[inds_not_imagined])
generated_goals = self.goal_generator.generate_sentences(n='all')
for gen_g in generated_goals:
if gen_g not in self.imagined_goals['string']:
self.imagined_goals['string'].append(gen_g)
self.imagined_goals['competence'].append(0)
self.imagined_goals['lp'].append(0)
new_imagined_goals.append(gen_g)
self.add_entries_to_feedback_memory(str_list=new_imagined_goals,
episode_count=episode_count,
imagined=1)
def update(self,
current_episode,
all_episodes,
partner_available,
goals_reached_str,
goals_not_reached_str):
imagined_inds = np.argwhere(np.array(self.feedback_memory['imagined']) == 1).flatten()
not_imagined_inds = np.argwhere(np.array(self.feedback_memory['imagined']) == 0).flatten()
self.not_imagined_goal_ids = np.array(self.feedback_memory['memory_id'])[not_imagined_inds]
self.imagined_goal_ids = np.array(self.feedback_memory['memory_id'])[imagined_inds]
# only done in cpu 0
n_episodes = len(all_episodes)
attempted_goals_ids = []
exploit = []
for ep in all_episodes:
exploit.append(ep['exploit'])
attempted_goals_ids.append(ep['g_id'])
if partner_available:
# if partner is available, simply encodes what it said
assert n_episodes == len(goals_reached_str) == len(goals_not_reached_str) == len(exploit) == len(attempted_goals_ids)
# Get indexes in the order of discovery of the attempted goals, reached_goals, not reached_goals
goals_reached_ids = []
goals_not_reached_ids = []
for i in range(n_episodes):
goals_reached_ids.append([])
goals_not_reached_ids.append([])
for goal_str in goals_reached_str[i]:
goals_reached_ids[-1].append(self.feedback2id[goal_str])
for goal_str in goals_not_reached_str[i]:
goals_not_reached_ids[-1].append(self.feedback2id[goal_str])
else:
goals_reached_ids = []
goals_not_reached_ids = []
final_obs = np.array([ep['obs'][-1] for ep in all_episodes])
# test up to 50 discovered goals for each episode
discovered_goal_ids = np.array(self.feedback_memory['memory_id'])
not_imagined_ind = np.argwhere(np.array(self.feedback_memory['imagined']) == 0).flatten()
discovered_goal_ids = discovered_goal_ids[not_imagined_ind]
n_attempts = min(50, len(discovered_goal_ids))
goals_to_try = np.random.choice(discovered_goal_ids, size=n_attempts, replace=False)
obs = np.repeat(final_obs, n_attempts, axis=0)
goals = np.tile(goals_to_try, final_obs.shape[0])
rewards = self.reward_function.predict(state=obs, goal_ids=goals)[0]
for i in range(len(all_episodes)):
pos_goals = goals_to_try[np.where(rewards[i * n_attempts: (i + 1) * n_attempts] == 0)].tolist()
goals_reached_ids.append(pos_goals)
neg_goals = goals_to_try[np.where(rewards[i * n_attempts: (i + 1) * n_attempts] == -1)].tolist()
goals_not_reached_ids.append(neg_goals)
return goals_reached_ids, goals_not_reached_ids
def share_info_to_all_cpus(self):
# share data across cpus
self.feedback_memory = MPI.COMM_WORLD.bcast(self.feedback_memory, root=0)
self.feedback2id = MPI.COMM_WORLD.bcast(self.feedback2id, root=0)
self.id2oracleid = MPI.COMM_WORLD.bcast(self.id2oracleid, root=0)
self.id2feedback = MPI.COMM_WORLD.bcast(self.id2feedback, root=0)
self.feedback2one_hot = MPI.COMM_WORLD.bcast(self.feedback2one_hot, root=0)
self.nb_discovered_goals = MPI.COMM_WORLD.bcast(self.nb_discovered_goals, root=0)
self.imagined_goals = MPI.COMM_WORLD.bcast(self.imagined_goals, root=0)
self.one_hot_encoder = MPI.COMM_WORLD.bcast(self.one_hot_encoder, root=0)
def sample_targets(self, epoch):
"""
Sample targets for all CPUs and all batches, then scatter them to the different CPUs
"""
# Decide whether to exploit or not
exploit = np.random.random() < 0.1
strategy = 'random'
goal_invention = self.params['conditions']['goal_invention']
imagined = False
if 'from_epoch' in goal_invention:
from_epoch = int(goal_invention.split('_')[-1])
if epoch > from_epoch:
imagined = np.random.random() < self.params['conditions']['p_imagined']
if self.rank == 0:
all_goals_str = []
all_goals_encodings = []
all_goals_ids = []
for i in range(self.num_cpus):
goals_str = []
goals_encodings = []
goals_ids = []
for j in range(self.rollout_batch_size):
# when there is no goal in memory, sample random goal from standard normal distribution
if len(self.feedback_memory['memory_id']) == 0:
goals_encodings.append(np.random.normal(size=self.goal_dim))
goals_str.append('Random Goal')
goals_ids.append(-1)
else:
if strategy == 'random':
if imagined and self.imagined_goal_ids.size > 0:
ind = np.random.choice(self.imagined_goal_ids)
else:
ind = np.random.choice(self.not_imagined_goal_ids)
else:
raise NotImplementedError
goals_encodings.append(self.feedback_memory['policy_encoding'][ind])
goals_str.append(self.id2feedback[ind])
goals_ids.append(ind)
all_goals_str.append(goals_str)
all_goals_encodings.append(goals_encodings)
all_goals_ids.append(goals_ids)
else:
all_goals_str = []
all_goals_encodings = []
all_goals_ids = []
goals_str = MPI.COMM_WORLD.scatter(all_goals_str, root=0)
goals_encodings = MPI.COMM_WORLD.scatter(all_goals_encodings, root=0)
goals_ids = MPI.COMM_WORLD.scatter(all_goals_ids, root=0)
return exploit, goals_str, goals_encodings, goals_ids, imagined
class EvalGoalSampler:
def __init__(self, policy_language_model, one_hot_encoder, params):
self.descriptions = params['train_descriptions']
self.nb_descriptions = len(self.descriptions)
self.count = 0
self.policy_language_model = policy_language_model
self.rollout_batch_size = params['evaluation_rollout_params']['rollout_batch_size']
self.params = params
def reset(self):
self.count = 0
def sample(self, method='robin'):
# print(self.descriptions[self.count])
goals_str = []
goals_encodings = []
goals_ids = []
if method == 'robin':
ind = self.count
elif method == 'random':
ind = np.random.randint(self.nb_descriptions)
else:
raise NotImplementedError
for _ in range(self.rollout_batch_size):
g_str = self.descriptions[ind]
goals_str.append(g_str)
policy_encoding = self.policy_language_model.encode(g_str).flatten()
goals_encodings.append(policy_encoding)
goals_ids.append(ind)
self.count += 1
return True, goals_str, goals_encodings, goals_ids
```
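A minimal usage sketch of the `EvalGoalSampler` defined above. The language model and the `params` entries below are made-up stand-ins that only exercise the interface; the real project wires in its trained policy language model and full parameter dict.
```python
import numpy as np

class DummyLanguageModel:
    # Stand-in for the policy language model: returns a fixed-size random encoding.
    def encode(self, sentence):
        return np.random.normal(size=(1, 100))

params = dict(train_descriptions=['Grasp any door', 'Grow any plant'],
              evaluation_rollout_params=dict(rollout_batch_size=2))

sampler = EvalGoalSampler(policy_language_model=DummyLanguageModel(),
                          one_hot_encoder=None,
                          params=params)
sampler.reset()
# 'robin' walks through the train descriptions in order, one description per call.
_, goals_str, goals_encodings, goals_ids = sampler.sample(method='robin')
print(goals_str, goals_ids)
```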
#### File: src/imagine/interaction.py
```python
from collections import deque
import pickle
import numpy as np
from mpi4py import MPI
class RolloutWorker:
"""Rollout worker generates experience by interacting with one or many environments.
Args:
make_env (function): a factory function that creates a new instance of the environment
when called
policy (object): the policy that is used to act
T (int): number of timesteps in an episode
eval_bool (bool): whether it is an evaluator rollout worker or not
rollout_batch_size (int): the number of parallel rollouts that should be used
exploit (boolean): whether or not to exploit, i.e. to act optimally according to the
current policy without any exploration
use_target_net (boolean): whether or not to use the target net for rollouts
compute_Q (boolean): whether or not to compute the Q values alongside the actions
noise_eps (float): scale of the additive Gaussian noise
random_eps (float): probability of selecting a completely random action
history_len (int): length of history for statistics smoothing
render (boolean): whether or not to render the rollouts
"""
def __init__(self,
make_env,
policy,
T,
eval_bool,
reward_function,
rollout_batch_size=1,
exploit=False,
use_target_net=False,
compute_Q=False,
noise_eps=0,
random_eps=0,
history_len=100,
render=False,
save_obs=False,
params={},
**kwargs):
self.T = T
self.policy = policy
self.reward_function = reward_function
self.eval = eval_bool
self.rollout_batch_size = rollout_batch_size
self.exploit = exploit
self.use_target_net = use_target_net
self.compute_Q = compute_Q
self.noise_eps = noise_eps
self.random_eps = random_eps
self.history_len = history_len
self.render = render
self.save_obs = save_obs
self.params = params.copy()
self.rank = MPI.COMM_WORLD.Get_rank()
self.env = make_env()
assert self.T > 0
self.Q_history = deque(maxlen=history_len)
def generate_rollouts(self, exploit, imagined, goals_str, goals_encodings, goals_ids):
"""Performs `rollout_batch_size` rollouts in parallel for time horizon `T` with the current
policy acting on it accordingly.
"""
self.exploit = exploit
self.imagined = imagined
assert len(goals_str) == self.rollout_batch_size == len(goals_encodings) == len(goals_ids)
episodes = []
for i in range(self.rollout_batch_size):
# Reset the environment
env_seed = np.random.randint(int(1e6))
self.env.seed(env_seed)
self.env.reset()
initial_o = self.env.unwrapped.reset_with_goal(goals_str[i])
o = initial_o.copy()
Qs = []
obs = [o.copy()]
acts = []
goal = goals_encodings[i].copy()
# Run a rollout
for t in range(self.T):
# Get next action from policy
policy_output = self.policy.get_actions(o.copy(),
goal,
compute_Q=self.compute_Q,
noise_eps=self.noise_eps if not self.exploit else 0.,
random_eps=self.random_eps if not self.exploit else 0.,
use_target_net=self.use_target_net
)
if self.compute_Q:
u, Q = policy_output
Qs.append(Q)
else:
u = policy_output
# Env step
o, _, _, _ = self.env.step(u)
if self.render:
self.env.render()
obs.append(o.copy())
acts.append(u.copy())
episode = dict(obs=np.array(obs),
acts=np.array(acts),
g_encoding=goals_encodings[i],
g_id=goals_ids[i],
g_str=goals_str[i],
exploit=self.exploit,
imagined=self.imagined
)
episodes.append(episode)
# stats
if self.compute_Q:
self.Q_history.append(np.mean(Qs))
return episodes
def current_mean_Q(self):
return np.mean(self.Q_history)
def save_policy(self, path):
"""Pickles the current policy for later inspection.
"""
self.policy.save_model(path)
def seed(self, seed):
"""Seeds each environment with a distinct seed derived from the passed in global seed.
"""
self.env.seed(seed + 1000 * self.rank)
def clear_history(self):
"""Clears all histories that are used for statistics
"""
self.Q_history.clear()
```
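A hedged sketch of how a `RolloutWorker` can be driven end to end. The environment and policy below are dummy objects that only mimic the interfaces `generate_rollouts` touches (`seed`, `reset`, `unwrapped.reset_with_goal`, `step`, `get_actions`); the real project plugs in the playground environment and a trained policy, and running it still requires `mpi4py`, which the module imports.
```python
import numpy as np

class _DummyEnv:
    # Mimics only the pieces of the gym interface that generate_rollouts uses.
    def seed(self, s): pass
    def reset(self): return np.zeros(4)
    def step(self, u): return np.zeros(4), 0.0, False, {}
    def render(self): pass
    @property
    def unwrapped(self): return self
    def reset_with_goal(self, goal_str): return np.zeros(4)

class _DummyPolicy:
    # Returns a zero action (and a zero Q value when compute_Q is requested).
    def get_actions(self, obs, goal, compute_Q=False, noise_eps=0., random_eps=0., use_target_net=False):
        action = np.zeros(2)
        return (action, 0.0) if compute_Q else action

worker = RolloutWorker(make_env=_DummyEnv, policy=_DummyPolicy(), T=5,
                       eval_bool=False, reward_function=None,
                       rollout_batch_size=1, compute_Q=True)
episodes = worker.generate_rollouts(exploit=False, imagined=False,
                                    goals_str=['Grasp any door'],
                                    goals_encodings=[np.zeros(100)],
                                    goals_ids=[0])
print(len(episodes), episodes[0]['obs'].shape, worker.current_mean_Q())
```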
#### File: imagine/rl/her.py
```python
import numpy as np
import time
def make_sample_her_transitions(goal_sampler,
goal_invention,
p_imagined,
rl_positive_ratio,
reward_fun):
"""Creates a sample function that can be used for HER experience replay.
Args:
goal_sampler (object): contains the list of discovered goals
goal_invention (str): strategy string (e.g. 'from_epoch_X') controlling from which epoch
imagined goals start being used in replay
p_imagined (float): fraction of goal-replay attempts drawn from imagined goals once goal invention is active
rl_positive_ratio (float): target fraction of transitions relabeled with a positive reward in each batch
reward_fun (function): function to re-compute the reward with substituted goals
"""
strategy_goal_invention = goal_invention
p_goal_invention = p_imagined
ratio_positive = rl_positive_ratio
n_goals_attempts = 50
def _sample_her_transitions(episode_batch, goal_ids, batch_size_in_transitions, epoch):
"""episode_batch is {key: array(buffer_size x T x dim_key)}
"""
time_dict = dict()
goal_ids_len = [len(gid) for gid in goal_ids]
t_init = time.time()
T = episode_batch['acts'].shape[1]
batch_size = batch_size_in_transitions
# whether to use imagined goals
goal_invention = strategy_goal_invention
p_imagined = 0.
if 'from_epoch' in goal_invention:
from_epoch = int(goal_invention.split('_')[-1])
if epoch > from_epoch:
p_imagined = p_goal_invention
# find valid buffers (with more than 10 episodes)
valid_buffers = []
for i in range(len(goal_ids_len)):
if goal_ids_len[i] > 0:
valid_buffers.append(i)
# sample uniformly in the task buffers, then random episodes from them
t_sample_ind = time.time()
if len(valid_buffers) > 0:
buffer_ids = np.random.choice(valid_buffers, size=batch_size)
unique, counts = np.unique(buffer_ids, return_counts=True)
episode_idxs = []
for i in range(unique.size):
count = counts[i]
index_goal = unique[i]
ids = np.random.randint(goal_ids_len[index_goal], size=count)
episode_idxs += list(np.array(goal_ids[index_goal])[ids])
else:
episode_idxs = np.random.randint(episode_batch['obs'].shape[0], size=batch_size)
time_dict['time_sample_1'] = time.time() - t_sample_ind
t_sample_shuffle = time.time()
np.random.shuffle(episode_idxs)
time_dict['time_sample_shuffle'] = time.time() - t_sample_shuffle
t_samples = np.random.randint(T, size=batch_size)
t_transition_batch = time.time()
transitions = dict()
for key in episode_batch.keys():
if 'g' in key:
if key != 'g_str':
transitions[key] = episode_batch[key][episode_idxs, 0].copy()
else:
transitions[key] = episode_batch[key][episode_idxs].copy()
else:
transitions[key] = episode_batch[key][episode_idxs, t_samples].copy()
time_dict['time_transition_batch'] = time.time() - t_transition_batch
time_dict['time_sample_ind'] = time.time() - t_sample_ind
# get list of discovered goals
discovered_goals_encodings = np.array(goal_sampler.feedback_memory['policy_encoding']).copy()
discovered_goal_ids = np.array(goal_sampler.feedback_memory['memory_id']).copy()
all_discovered_goal_ids = np.array(goal_sampler.feedback_memory['memory_id']).copy()
imagined = np.array(goal_sampler.feedback_memory['imagined']).copy()
imagined_ind = np.argwhere(imagined==1).flatten()
not_imagined_ind = np.argwhere(imagined==0).flatten()
all_perceived_lp = np.array(goal_sampler.perceived_learning_progress).copy()
perceived_lp = all_perceived_lp.copy()
nb_discovered_goals = discovered_goal_ids.size
time_dict['time_pre_replay'] = time.time() - t_init
time_dict.update(time_reward_func_replay=0,
time_argwhere=0,
time_random=0)
t_init = time.time()
if nb_discovered_goals > 0:
n_attempts = min(n_goals_attempts, nb_discovered_goals)
# for all observations, compute the reward with each of the sampled goals.
# This is done at once to leverage numpy vectorization
# sample goal inds to attempt, first from imagined
n_imagined = min(int(p_imagined * n_attempts), len(discovered_goal_ids[imagined_ind]))
inds_to_attempt = []
if n_imagined > 0:
inds_to_attempt += np.random.choice(discovered_goal_ids[imagined_ind], size=n_imagined, replace=False).tolist()
n_not_imagined = min(len(not_imagined_ind), n_attempts - n_imagined)
inds_to_attempt += np.random.choice(discovered_goal_ids[not_imagined_ind], size=n_not_imagined, replace=False).tolist()
inds_to_attempt = np.array(inds_to_attempt)
n_attempts = inds_to_attempt.size
obs = np.repeat(transitions['obs'], n_attempts, axis=0)
goals = np.tile(inds_to_attempt, batch_size)
t_ir = time.time()
rewards = reward_fun(state=obs, goal=goals)[0]
# print(time.time() - t_ir)
time_dict['time_reward_func_replay'] += (time.time() - t_ir)
# figure out where the positive and negative rewards are
# to balance the ratio of positive vs negative samples
t_ir = time.time()
where_neg = (rewards == -1)
where_pos = (rewards == 0)
time_dict['time_argwhere'] += (time.time() - t_ir)
n_positives = int(ratio_positive * batch_size)
n_negatives = batch_size - n_positives
t_ir = time.time()
positives_idx = []
i = 0
# scan transitions from the first to the last looking for positive rewards;
# stop once n_positives rewards are found or the whole batch is covered
while len(positives_idx) < n_positives and i < batch_size:
ind_pos = np.atleast_1d(np.argwhere(where_pos[i * n_attempts: (i + 1) * n_attempts]).squeeze())
if ind_pos.size > 0:
positives_idx.append(i * n_attempts + np.random.choice(ind_pos))
i += 1
# if not enough positives in the whole batch, replace by more negatives
if len(positives_idx) < n_positives:
n_negatives = batch_size - len(positives_idx)
n_positives = len(positives_idx)
positive_transition_idx = list(np.array(positives_idx) // n_attempts)
transition_to_search_negatives_in = list(range(batch_size))
for i in positive_transition_idx:
transition_to_search_negatives_in.remove(i)
transition_to_search_negatives_in += positive_transition_idx
# search for negatives in the non-positive transitions first,
# then in the positive transitions as well
negatives_idx = []
for i in transition_to_search_negatives_in:
ind_neg = np.atleast_1d(np.argwhere(where_neg[i * n_attempts: (i + 1) * n_attempts]).squeeze())
if ind_neg.size > 0:
negatives_idx.append(i * n_attempts + np.random.choice(ind_neg))
if len(negatives_idx) == n_negatives:
break
negatives_idx = np.array(negatives_idx)
positives_idx = np.array(positives_idx)
n_replayed = positives_idx.size + negatives_idx.size
if n_replayed < batch_size:
ind_transitions_not_replayed = set(range(batch_size)) - set(negatives_idx // n_attempts).union(set(positives_idx // n_attempts))
ind_transitions_not_replayed = list(ind_transitions_not_replayed)
if len(ind_transitions_not_replayed) > batch_size - n_replayed:
ind_transitions_not_replayed = ind_transitions_not_replayed[:batch_size - n_replayed]
left = batch_size - len(ind_transitions_not_replayed) - n_replayed
if left > 0:
ind_transitions_not_replayed += list(np.random.choice(range(batch_size), size=left))
ind_transitions_not_replayed = np.array(ind_transitions_not_replayed)
else:
ind_transitions_not_replayed = np.array([])
left = 0
# # # # # # # # # # # # # # # # # # # #
# Build the batch of transitions
# # # # # # # # # # # # # # # # # # # #
# first build an empty dict
transitions2 = dict()
for key in transitions.keys():
shape = list(transitions[key].shape)
shape[0] = 0
shape = tuple(shape)
transitions2[key] = np.array([]).reshape(shape)
transitions2['r'] = np.array([]).reshape((0,))
# then add negative samples if there are
if len(negatives_idx) > 0:
for key in transitions.keys():
if key not in ['g_encoding', 'r', 'g_id']:
if 'g' in key:
transitions2[key] = np.concatenate([transitions2[key], transitions[key][negatives_idx // n_attempts].copy()], axis=0)
else:
transitions2[key] = np.concatenate([transitions2[key], transitions[key][negatives_idx // n_attempts, :].copy()], axis=0)
negative_replay_id = goals[negatives_idx].copy()
transitions2['g_encoding'] = np.concatenate([transitions2['g_encoding'], discovered_goals_encodings[negative_replay_id]], axis=0)
transitions2['g_id'] = np.concatenate([transitions2['g_id'], negative_replay_id.copy()], axis=0)
transitions2['r'] = np.concatenate([transitions2['r'], - np.ones([len(negatives_idx)])], axis=0)
if len(positives_idx) > 0:
for key in transitions.keys():
if key not in ['g_encoding', 'r', 'g_id']:
if 'g' in key:
transitions2[key] = np.concatenate([transitions2[key], transitions[key][positives_idx // n_attempts].copy()], axis=0)
else:
transitions2[key] = np.concatenate([transitions2[key], transitions[key][positives_idx // n_attempts, :].copy()], axis=0)
positive_replay_id = goals[positives_idx].copy()
transitions2['g_encoding'] = np.concatenate([transitions2['g_encoding'], discovered_goals_encodings[positive_replay_id]], axis=0)
transitions2['g_id'] = np.concatenate([transitions2['g_id'], positive_replay_id.copy()], axis=0)
transitions2['r'] = np.concatenate([transitions2['r'], np.zeros([len(positives_idx)])], axis=0)
if len(ind_transitions_not_replayed) > 0:
for key in transitions.keys():
if key not in ['r']:
if 'g' in key:
transitions2[key] = np.concatenate([transitions2[key], transitions[key][ind_transitions_not_replayed].copy()], axis=0)
else:
transitions2[key] = np.concatenate([transitions2[key], transitions[key][ind_transitions_not_replayed, :].copy()], axis=0)
rewards = reward_fun(state=np.atleast_2d(transitions['obs'][ind_transitions_not_replayed]),
goal=np.atleast_1d(transitions['g_id'][ind_transitions_not_replayed]))[0]
transitions2['r'] = np.concatenate([transitions2['r'], rewards], axis=0)
# msg = '{} {} {} {} {}'.format(transitions2['obs'].shape[0], len(ind_transitions_not_replayed), len(negatives_idx),
# len(positives_idx), left)
# logger.info(msg)
assert transitions2['obs'].shape[0] == batch_size
ratio_per_goal_in_batch = []
ind_positive_replay = np.atleast_1d(np.argwhere(transitions2['r'] == 0).squeeze())
ind_negative_replay = np.atleast_1d(np.argwhere(transitions2['r'] == -1).squeeze())
for i in range(all_discovered_goal_ids.size):
g_id = all_discovered_goal_ids[i]
nb_positive_g_id = np.argwhere(transitions2['g_id'][ind_positive_replay] == g_id).size
ratio_per_goal_in_batch.append(nb_positive_g_id / batch_size)
time_dict['time_random'] += (time.time() - t_ir)
transitions = transitions2
time_dict['time_replay'] = time.time() - t_init
t_init2 = time.time()
# shuffle transitions
shuffled_inds = np.arange(batch_size)
np.random.shuffle(shuffled_inds)
for key in transitions.keys():
transitions[key] = transitions[key][shuffled_inds].reshape(batch_size, *transitions[key].shape[1:])
else:
t_init2 = time.time()
transitions['r'] = reward_fun(state=transitions['obs'], goal=transitions['g_id'])[0]
ratio_positive_rewards = (transitions['r']==0).mean()
time_dict['time_recompute_reward'] = time.time() - t_init2
assert(transitions['acts'].shape[0] == batch_size_in_transitions)
lp_scores = all_perceived_lp
return transitions, ratio_positive_rewards, lp_scores, ratio_per_goal_in_batch, time_dict
return _sample_her_transitions
```
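The heart of the sampler above is the balancing of positive versus negative relabels according to `rl_positive_ratio`. Below is a small self-contained sketch of just that selection step on a toy reward matrix (shapes and values made up), independent of the replay-buffer machinery:
```python
import numpy as np

batch_size, n_attempts, ratio_positive = 4, 3, 0.5
# One row per sampled transition, one column per attempted goal;
# 0 means the goal is satisfied, -1 means it is not (same convention as above).
rewards = np.array([[ 0, -1, -1],
                    [-1, -1, -1],
                    [ 0,  0, -1],
                    [-1, -1,  0]]).flatten()
where_pos = (rewards == 0)
n_positives = int(ratio_positive * batch_size)
positives_idx = []
i = 0
# Scan transitions in order, keeping at most one positive relabel per transition,
# until the positive quota is met or the batch is exhausted.
while len(positives_idx) < n_positives and i < batch_size:
    ind_pos = np.atleast_1d(np.argwhere(where_pos[i * n_attempts:(i + 1) * n_attempts]).squeeze())
    if ind_pos.size > 0:
        positives_idx.append(i * n_attempts + np.random.choice(ind_pos))
    i += 1
print(positives_idx)  # flat (transition, goal) indices chosen as positive relabels
```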
#### File: src/playground_env/color_generation.py
```python
import numpy as np
import matplotlib.pyplot as plt
from gym.spaces import Box
n_colors = 10
def plot_colors(color, shade):
"""
Plots a sample of colors from the color x shade color class.
Parameters
----------
color: str
Color in red, blue, green.
shade: str
Shade in light, dark.
"""
color_class = Color(color, shade)
array = np.zeros([n_colors, n_colors, 3])
for i in range(n_colors):
for j in range(n_colors):
array[i, j, :] = color_class.sample()
plt.figure()
plt.imshow(array)
class Color:
def __init__(self, color, shade):
"""
Implements a color class characterized by a color and shade attributes.
Parameters
----------
color: str
Color in red, blue, green.
shade: str
Shade in light, dark.
"""
self.color = color
self.shade = shade
if color == 'blue':
if shade == 'light':
self.space = Box(low=np.array([0.3, 0.7, 0.9]), high=np.array([0.5, 0.8, 1.]), dtype=np.float32)
elif shade == 'dark':
self.space = Box(low=np.array([0.0, 0., 0.8]), high=np.array([0.2, 0.2, 0.9]), dtype=np.float32)
else:
raise NotImplementedError("shade is either 'light' or 'dark'")
elif color == 'red':
if shade == 'light':
self.space = Box(low=np.array([0.9, 0.4, 0.35]), high=np.array([1, 0.6, 0.65]), dtype=np.float32)
elif shade == 'dark':
self.space = Box(low=np.array([0.5, 0., 0.]), high=np.array([0.7, 0.1, 0.1]), dtype=np.float32)
else:
raise NotImplementedError("shade is either 'light' or 'dark'")
elif color == 'green':
if shade == 'light':
self.space = Box(low=np.array([0.4, 0.8, 0.4]), high=np.array([0.6, 1, 0.5]), dtype=np.float32)
elif shade == 'dark':
self.space = Box(low=np.array([0., 0.4, 0.]), high=np.array([0.1, 0.6, 0.1]), dtype=np.float32)
else:
raise NotImplementedError
elif color == 'dark':
if shade == 'dark':
self.space = Box(low=np.array([0., 0., 0.]), high=np.array([0.3, 0.3, 0.3]), dtype=np.float32)
elif shade == 'light':
self.space = Box(low=np.array([1., 1., 1.]), high=np.array([2., 2., 2.]), dtype=np.float32)
else:
raise NotImplementedError
else:
raise NotImplementedError("color is 'red', 'blue' or 'green'")
def contains(self, rgb):
"""
Whether the class contains a given rgb code.
Parameters
----------
rgb: 1D np.ndarray of size 3
Returns
-------
contains: Bool
True if the rgb code is in the given Color class.
"""
contains = self.space.contains(rgb)
if self.color == 'red' and self.shade == 'light':
contains = contains and (rgb[2] - rgb[1] <= 0.05)
return contains
def sample(self):
"""
Sample an rgb code from the Color class
Returns
-------
rgb: 1D np.ndarray of size 3
"""
rgb = np.random.uniform(self.space.low, self.space.high, 3)
if self.color == 'red' and self.shade == 'light':
rgb[2] = rgb[1] + np.random.uniform(-0.05, 0.05)
return rgb
def sample_color(color, shade):
"""
Sample an rgb code from the Color class
Parameters
----------
color: str
Color in red, blue, green.
shade: str
Shade in light, dark.
Returns
-------
rgb: 1D np.ndarray of size 3
"""
color_class = Color(color, shade)
return color_class.sample()
```
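A quick usage sketch of the `Color` class and the `sample_color` helper defined above; printed values differ between runs since sampling is random.
```python
import numpy as np

dark_blue = Color('blue', 'dark')
rgb = dark_blue.sample()
# Membership check against the same class; cast to float32 to match the Box dtype.
print(rgb, dark_blue.contains(rgb.astype(np.float32)))

# One-shot convenience wrapper around the class.
print(sample_color('green', 'light'))

# plot_colors('red', 'light') would display a 10x10 grid of sampled light reds.
```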
#### File: src/playground_env/reward_function.py
```python
from src.playground_env.env_params import get_env_params
from src.playground_env.descriptions import generate_all_descriptions
train_descriptions, test_descriptions, extra_descriptions = generate_all_descriptions(get_env_params())
def get_move_descriptions(get_agent_position_attributes, current_state):
"""
Get all move descriptions from the current state (if any).
Parameters
----------
get_agent_position_attributes: function
Function that extracts the absolute position of the agent from the state.
current_state: nd.array
Current state of the environment.
Returns
-------
descr: list of str
List of Move descriptions satisfied by the current state.
"""
move_descriptions = []
position_attributes = get_agent_position_attributes(current_state)
for pos_att in position_attributes:
move_descriptions.append('Go ' + pos_att)
return move_descriptions.copy()
def get_grasp_descriptions(get_grasped_ids, current_state, sort_attributes, obj_attributes, params, check_if_relative, combine_two):
"""
Get all Grasp descriptions from the current state (if any).
Parameters
----------
get_grasped_ids: function
Function that extracts the id of objects that are being grasped.
current_state: nd.array
Current state of the environment.
sort_attributes: function
Function that separates adjective and name attributes.
obj_attributes: list of list
List of the list of object attributes for each object.
params: dict
Environment params.
check_if_relative: function
Checks whether an attribute is a relative attribute.
combine_two: function
Function that combines two attributes to form new attributes.
Returns
-------
descr: list of str
List of Grasp descriptions satisfied by the current state.
"""
obj_grasped = get_grasped_ids(current_state)
verb = 'Grasp'
grasp_descriptions = []
for i_obj in obj_grasped:
att = obj_attributes[i_obj]
adj_att, name_att = sort_attributes(att)
if params['attribute_combinations']:
adj_att += combine_two(adj_att, adj_att)
for adj in adj_att:
quantifier = 'any' # 'the' if check_if_relative(adj) else 'a'
if not check_if_relative(adj):
for name in name_att:
# grasp_descriptions.append('{} {} {} {}'.format(verb, quantifier, adj, name))
grasp_descriptions.append('{} {} {}'.format(verb, adj, name))
grasp_descriptions.append('{} {} {} thing'.format(verb, quantifier, adj))
for name in name_att:
grasp_descriptions.append('{} any {}'.format(verb, name))
# grasp_descriptions.append('{} a {}'.format(verb, name))
return grasp_descriptions.copy()
def get_grow_descriptions(get_grown_ids, initial_state, current_state, params, obj_attributes, sort_attributes, combine_two, check_if_relative):
"""
Get all Grow descriptions from the current state (if any).
Parameters
----------
get_grown_ids: function
Function that extracts the id of objects that are being grown.
initial_state: nd.array
Initial state of the environment.
current_state: nd.array
Current state of the environment.
sort_attributes: function
Function that separates adjective and name attributes.
obj_attributes: list of list
List of the list of object attributes for each object.
params: dict
Environment params.
check_if_relative: function
Checks whether an attribute is a relative attribute.
combine_two: function
Function that combines two attributes to form new attributes.
Returns
-------
descr: list of str
List of Grow descriptions satisfied by the current state.
"""
obj_grown = get_grown_ids(initial_state, current_state)
verb = 'Grow'
grow_descriptions = []
list_excluded = params['categories']['furniture'] + params['categories']['supply'] + ('furniture', 'supply')
for i_obj in obj_grown:
att = obj_attributes[i_obj]
adj_att, name_att = sort_attributes(att)
if params['attribute_combinations']:
adj_att += combine_two(adj_att, adj_att)
for adj in adj_att:
if adj not in list_excluded:
quantifier = 'any' # 'the' if check_if_relative(adj) else 'a'
if not check_if_relative(adj):
for name in name_att:
# grow_descriptions.append('{} {} {} {}'.format(verb, quantifier, adj, name))
grow_descriptions.append('{} {} {}'.format(verb, adj, name))
grow_descriptions.append('{} {} {} thing'.format(verb, quantifier, adj))
for name in name_att:
# grow_descriptions.append('{} a {}'.format(verb, name))
grow_descriptions.append('{} any {}'.format(verb, name))
return grow_descriptions.copy()
def get_extra_grow_descriptions(get_supply_contact_ids, initial_state, current_state, params, obj_attributes, sort_attributes, combine_two, check_if_relative):
"""
Equivalent of the Grow descriptions for attempts to grow furniture (used to track funny behaviors of the agent).
"""
obj_grown = get_supply_contact_ids(current_state)
verb = 'Attempted grow'
grow_descriptions = []
list_excluded = params['categories']['living_thing'] + ('living_thing', 'animal', 'plant')
for i_obj in obj_grown:
att = obj_attributes[i_obj]
adj_att, name_att = sort_attributes(att)
if params['attribute_combinations']:
adj_att += combine_two(adj_att, adj_att)
for adj in adj_att:
if adj not in list_excluded:
quantifier = 'any' # 'the' if check_if_relative(adj) else 'a'
if not check_if_relative(adj):
for name in name_att:
# grow_descriptions.append('{} {} {} {}'.format(verb, quantifier, adj, name))
grow_descriptions.append('{} {} {}'.format(verb, adj, name))
grow_descriptions.append('{} {} {} thing'.format(verb, quantifier, adj))
for name in name_att:
# grow_descriptions.append('{} a {}'.format(verb, name))
grow_descriptions.append('{} any {}'.format(verb, name))
return grow_descriptions.copy()
def sample_descriptions_from_state(state, params):
"""
This function samples all description of the current state
Parameters
----------
state: nd.array
Current environment state.
params: dict
Dict of env parameters.
Returns
-------
descr: list of str
List of descriptions satisfied by the current state.
"""
get_grasped_ids = params['extract_functions']['get_interactions']['get_grasped']
get_grown_ids = params['extract_functions']['get_interactions']['get_grown']
get_supply_contact = params['extract_functions']['get_interactions']['get_supply_contact']
get_attributes_functions=params['extract_functions']['get_attributes_functions']
admissible_attributes = params['admissible_attributes']
admissible_actions = params['admissible_actions']
get_obj_features = params['extract_functions']['get_obj_features']
count_objects = params['extract_functions']['count_objects']
get_agent_position_attributes = params['extract_functions']['get_agent_position_attributes']
check_if_relative = params['extract_functions']['check_if_relative']
combine_two = params['extract_functions']['combine_two']
current_state = state[:len(state) // 2]
initial_state = current_state - state[len(state) // 2:]
assert len(current_state) == len(initial_state)
nb_objs = count_objects(current_state)
obj_features = [get_obj_features(initial_state, i_obj) for i_obj in range(nb_objs)]
# extract object attributes
obj_attributes = []
for i_obj in range(nb_objs):
obj_att = []
for k in admissible_attributes:
obj_att += get_attributes_functions[k](obj_features, i_obj)
obj_attributes.append(obj_att)
def sort_attributes(attributes):
adj_attributes = []
name_attributes = []
for att in attributes:
if att in tuple(params['categories'].keys()) + params['attributes']['types']:
name_attributes.append(att)
else:
adj_attributes.append(att)
return adj_attributes, name_attributes
descriptions = []
# Add Move descriptions
if 'Move' in admissible_actions:
descriptions += get_move_descriptions(get_agent_position_attributes, current_state)
# Add Grasp descriptions
if 'Grasp' in admissible_actions:
descriptions += get_grasp_descriptions(get_grasped_ids, current_state, sort_attributes, obj_attributes, params, check_if_relative, combine_two)
# Add Grow descriptions
if 'Grow' in admissible_actions:
descriptions += get_grow_descriptions(get_grown_ids, initial_state, current_state, params, obj_attributes, sort_attributes, combine_two, check_if_relative)
descriptions += get_extra_grow_descriptions(get_supply_contact, initial_state, current_state, params, obj_attributes, sort_attributes, combine_two,
check_if_relative)
train_descr = []
test_descr = []
extra_descr = []
for descr in descriptions:
if descr in train_descriptions:
train_descr.append(descr)
elif descr in test_descriptions:
test_descr.append(descr)
elif descr in extra_descriptions:
extra_descr.append(descr)
else:
print(descr)
raise ValueError
return train_descr.copy(), test_descr.copy(), extra_descr.copy()
def get_reward_from_state(state, goal, params):
"""
Reward function. Whether the state satisfies the goal.
Parameters
----------
state: nd.array
Current environment state.
goal: str
Description of the goal.
params: dict
Environment parameters.
Returns
-------
bool
"""
get_grasped_ids = params['extract_functions']['get_interactions']['get_grasped']
get_grown_ids = params['extract_functions']['get_interactions']['get_grown']
get_attributes_functions = params['extract_functions']['get_attributes_functions']
admissible_attributes = params['admissible_attributes']
admissible_actions = params['admissible_actions']
get_obj_features = params['extract_functions']['get_obj_features']
count_objects = params['extract_functions']['count_objects']
get_agent_position_attributes = params['extract_functions']['get_agent_position_attributes']
check_if_relative = params['extract_functions']['check_if_relative']
combine_two = params['extract_functions']['combine_two']
current_state = state[:len(state) // 2]
initial_state = current_state - state[len(state) // 2:]
assert len(current_state) == len(initial_state)
nb_objs = count_objects(current_state)
obj_features = [get_obj_features(initial_state, i_obj) for i_obj in range(nb_objs)]
# extract object attributes
obj_attributes = []
for i_obj in range(nb_objs):
obj_att = []
for k in admissible_attributes:
obj_att += get_attributes_functions[k](obj_features, i_obj)
obj_attributes.append(obj_att)
def sort_attributes(attributes):
adj_attributes = []
name_attributes = []
for att in attributes:
if att in tuple(params['categories'].keys()) + params['attributes']['types']:
name_attributes.append(att)
else:
adj_attributes.append(att)
return adj_attributes, name_attributes
words = goal.split(' ')
reward = False
if words[0] == 'Go':
go_descr = get_move_descriptions(get_agent_position_attributes, current_state)
if goal in go_descr:
reward = True
if words[0] == 'Grasp':
grasp_descr = get_grasp_descriptions(get_grasped_ids, current_state, sort_attributes, obj_attributes, params, check_if_relative, combine_two)
if goal in grasp_descr:
reward = True
# Add Grow descriptions
if words[0] == 'Grow':
grow_descr = get_grow_descriptions(get_grown_ids, initial_state, current_state, params, obj_attributes, sort_attributes, combine_two, check_if_relative)
if goal in grow_descr:
reward = True
return reward
```
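Both `sample_descriptions_from_state` and `get_reward_from_state` assume the observation concatenates the current features with the delta from the initial state, which is why they reconstruct `initial_state` by subtraction. A tiny sketch of that layout with made-up feature vectors:
```python
import numpy as np

initial = np.array([0.0, 1.0, 2.0])
current = np.array([0.5, 1.0, 3.0])
# Assumed observation layout: [current_features, current_features - initial_features].
state = np.concatenate([current, current - initial])

current_state = state[:len(state) // 2]
initial_state = current_state - state[len(state) // 2:]
assert np.allclose(initial_state, initial)
```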
#### File: src/utils/utils.py
```python
import random
from collections import Counter
import numpy as np
from tqdm import tqdm
def add_new_data(dataset_to_update, state, embedded_instructions, instruction_index, lab, true_lab=None):
"""
Update the dataset as a list with a new list of length (state_dim + embedding_dim + 3)
The last three dimensions are:
- the label inferred by the system according to the heuristic used,
- the instruction index,
- the true label given by the oracle but potentially unknown by the system,
:param dataset_to_update: list
:param state: np.array of dimension (1,state_dim)
:param embedded_instructions: np.array of dimension (n_instruction, embedding_dim)
:param instruction_index: int between 0 and n_instruction - 1
:param lab: guessed label by the system according to the heuristic used 0 or 1
:param true_lab: oracle label 0 or 1
:return: void
"""
new_data = state + embedded_instructions[instruction_index].tolist() + [lab, instruction_index, true_lab]
dataset_to_update.append(new_data)
def get_discovered_instructions(current_discovered_instructions, labels):
_, encountered_instruction = labels.nonzero()
result = set(encountered_instruction).union(current_discovered_instructions)
return sorted(result)
def update_dataset_from_exhaustive_feedback(dataset_to_update, states, labels, embedded_instructions,
discovered_instructions):
"""
:param dataset_to_update: list of lists containing [state_feature, embedding_feature, label, instruction_idx]
:param states: np array of shape [n_state, state_feature]
:param labels: np.array of shape [n_state, discovered_instruction]
:param embedded_instructions: np.array of shape [discovered_instruction, embedding_dimension]
"""
assert (len(states) == len(labels))
for s, labs in zip(states, labels):
s = s.tolist()
for lab, idx in zip(labs, discovered_instructions):
add_new_data(dataset_to_update, s, embedded_instructions, idx, lab, true_lab=lab)
def update_train_from_exhaustive_feedback(train_set, states, labels, embedded_instructions,
discovered_instruction_record):
current_discovered_instruction = [] if len(discovered_instruction_record) == 0 else discovered_instruction_record[
-1]
discovered_instruction = get_discovered_instructions(current_discovered_instruction, labels[:, -1])
update_dataset_from_exhaustive_feedback(train_set,
states[:, -1],
labels[:, -1, discovered_instruction],
embedded_instructions,
discovered_instruction)
return discovered_instruction
def get_most_complex_feedback(labels, oracle_complexity, instruction_complexity):
"""
Get the most complex positive feedback if it exists. Complexity is measured by the oracle by grouping
instructions by level of difficulty. If positive labels for tasks of the same complexity are available,
the oracle chooses one randomly.
Note: if an instruction has a positive label, it has necessarily been discovered (?).
:param labels:
:param oracle_complexity:
:param instruction_complexity:
:return:
"""
feedbacks_instruction = []
for labs in labels:
feedback = -1
candidate_instruction = set(np.argwhere(labs == 1).flatten())
for _, ins_list in reversed(oracle_complexity.items()):
c = candidate_instruction.intersection(ins_list)
if c:
feedback = random.choice(tuple(c))
instruction_complexity.update([feedback])
break
feedbacks_instruction.append(feedback)
return feedbacks_instruction
def update_dataset_from_most_complex_feedback(dataset_to_update, states, labels, embedded_instructions,
instruction_complexity, oracle_complexity):
assert (len(states) == len(labels))
feedbacks_instruction = get_most_complex_feedback(labels, oracle_complexity, instruction_complexity)
for s, labs, feedback in zip(states, labels, feedbacks_instruction):
s = s.tolist()
for ins, freq in reversed(instruction_complexity.most_common()):
if ins == feedback:
add_new_data(dataset_to_update, s, embedded_instructions, ins, 1, true_lab=labs[ins])
break
else:
add_new_data(dataset_to_update, s, embedded_instructions, ins, 0, true_lab=labs[ins])
def update_train_from_complex_feedback(train_set, states, labels, embedded_instructions, instruction_complexity_record,
oracle_complexity, **kwargs):
instruction_complexity = Counter() if len(instruction_complexity_record) == 0 else instruction_complexity_record[
-1].copy()
update_dataset_from_most_complex_feedback(train_set, states[:, -1],
labels[:, -1],
embedded_instructions,
instruction_complexity,
oracle_complexity)
instruction_complexity_record.append(instruction_complexity)
discovered_instruction = sorted(map(int, instruction_complexity.keys()))
return discovered_instruction
def compute_training_set(train_update_func, states, labels, embedded_instructions, batch, max_episode, **kwargs):
"""
:param train_update_func:
:param states:
:param labels:
:param embedded_instructions:
:param batch:
:param max_episode:
:param train_set_length_record:
:param discovered_instruction_record:
:param kwargs:
:return:
"""
train_set = []
train_set_length = []
discovered_instruction_record = []
for episode in tqdm(range(max_episode)):
# for episode in tqdm(range(len(states) // (num_workers * num_rollout))):
episode_state = states[batch * episode: batch * (episode + 1)]
episode_label = labels[batch * episode: batch * (episode + 1)]
discovered_instruction = train_update_func(train_set=train_set, states=episode_state, labels=episode_label,
embedded_instructions=embedded_instructions,
discovered_instruction_record=discovered_instruction_record,
**kwargs)
train_set_length.append(len(train_set))
discovered_instruction_record.append(discovered_instruction)
return np.array(train_set), train_set_length, discovered_instruction_record
``` |
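A self-contained sketch of the exhaustive-feedback pipeline above on toy arrays. All dimensions and values are made up; the point is only the expected shapes: `states` is (n_samples, T, state_dim), `labels` is (n_samples, T, n_instructions), and each dataset row ends with the inferred label, the instruction index and the oracle label.
```python
import numpy as np

n_samples, T, state_dim, n_instructions, emb_dim = 6, 3, 4, 5, 2
batch, max_episode = 2, 3

states = np.random.normal(size=(n_samples, T, state_dim))
labels = np.random.randint(0, 2, size=(n_samples, T, n_instructions))
embedded_instructions = np.random.normal(size=(n_instructions, emb_dim))

train_set, lengths, discovered = compute_training_set(
    update_train_from_exhaustive_feedback,
    states, labels, embedded_instructions,
    batch, max_episode)
print(train_set.shape, lengths, discovered[-1])
```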
{
"source": "JordyBottelier/arpsas",
"score": 4
} |
#### File: JordyBottelier/arpsas/dataset_modifications.py
```python
from schema_matching import *
from schema_matching.misc_func import *
from sklearn.metrics import accuracy_score
from pandas_ml import ConfusionMatrix
from os.path import isfile
import os
import math
import string
def get_letter(classname):
"""
For the ckan dataset, the classnames are very long, therefore we created a mapping to letters.
This method is used to actually retrieve a letter based on the classname.
"""
mapping = read_file("ckan_subset/classname_reverse")
letter_to_ckan = read_file("ckan_subset/classname_map")
try:
return mapping[classname]
except: # class is apparently not present
alphabet = list(string.ascii_uppercase + string.ascii_lowercase)
for letter in alphabet:
if letter not in letter_to_ckan:
mapping[classname] = letter
letter_to_ckan[letter] = classname
store_file("ckan_subset/classname_map", letter_to_ckan)
store_file("ckan_subset/classname_reverse", mapping)
return mapping[classname]
"""
Used to create the new, bigger, dataset -----------
"""
def prep_dataset(test_folder):
"""
From a folder of csv files, create folders with the classes separated. Use this to create a learnset
for the data collector.
"""
sr = Schema_Reader()
all_data = {}
for filename in sorted(os.listdir(test_folder)):
print(filename)
path = test_folder + filename
if(isfile(path)):
headers, columns = sr.get_duplicate_columns(path)
i = 0
for header in headers:
col = columns[i]
col = remove_nan_from_list(col)
col = remove_values_from_list(col, "")
col = remove_values_from_list(col, " ")
col = remove_values_from_list(col, "- ")
col = remove_values_from_list(col, "UNKNOWN")
col = remove_values_from_list(col, 'UNKNOWN ')
if header in all_data:
all_data[header] += col
else:
all_data[header] = col
i += 1
for classname in all_data:
path = test_folder + classname
if not os.path.exists(path):
os.makedirs(path)
data_path = path + "/" + classname + ".txt"
data = all_data[classname]
file = open(data_path,"w")
file.write(str(data))
def prep_dataset_ckan(test_folder):
"""
From a folder of csv files, create folders with the classes separated. Use this to create a learnset
for the data collector. This variant also builds the classname-to-letter mapping for the CKAN data.
"""
sr = Schema_Reader()
all_data = {}
for filename in sorted(os.listdir(test_folder)):
print(filename)
path = test_folder + filename
if(isfile(path)):
headers, columns = sr.get_duplicate_columns(path)
i = 0
for header in headers:
col = columns[i]
col = remove_nan_from_list(col)
col = remove_values_from_list(col, "")
col = remove_values_from_list(col, " ")
col = remove_values_from_list(col, "- ")
col = remove_values_from_list(col, "UNKNOWN")
col = remove_values_from_list(col, 'UNKNOWN ')
if header in all_data:
all_data[header] += col
else:
all_data[header] = col
i += 1
classname_map = {}
classname_reverse = {}
alphabet = list(string.ascii_uppercase + string.ascii_lowercase)
i = 0
for classname in all_data:
letter = alphabet[i]
i += 1
if len(classname) > 20:
classname_map[letter] = classname
classname_reverse[classname] = letter
else:
classname_map[classname] = classname
classname_reverse[classname] = classname
path = test_folder + classname
if not os.path.exists(path):
os.makedirs(path)
data_path = path + "/" + classname + ".txt"
data = all_data[classname]
file = open(data_path,"w")
file.write(str(data))
classname_map['unknown'] = 'unknown'
classname_reverse['unknown'] = 'unknown'
print_dict(classname_reverse)
store_file("ckan_subset/classname_map", classname_map)
store_file("ckan_subset/classname_reverse", classname_reverse)
def prep_columns(column):
""" Make sure every element of a column is a string.
"""
for i in range(0, len(column)):
column[i] = str(column[i])
# Remove all occurrences of a value from a list
def remove_values_from_list(the_list, val):
return [value for value in the_list if value != val]
# Remove NaN values from a list
def remove_nan_from_list(the_list):
return [value for value in the_list if not isNaN(value) ]
def isNaN(num):
return num != num
"""
------------------------------------------------------------------
"""
def get_dataset_stats(test_folder, title="Company.Info Testset Statistics"):
"""
For a folder with csv files, read all the columns and plot how many columns there are
and how many instances in total
"""
sr = Schema_Reader()
headers_dict = {}
for filename in sorted(os.listdir(test_folder)):
print(filename)
path = test_folder + filename
if(isfile(path)):
headers, columns = sr.get_duplicate_columns(path)
i = 0
for header in headers:
col = columns[i]
col = remove_nan_from_list(col)
col = remove_values_from_list(col, "")
col = remove_values_from_list(col, " ")
col = remove_values_from_list(col, "- ")
col = remove_values_from_list(col, "UNKNOWN")
col = remove_values_from_list(col, 'UNKNOWN ')
if header in headers_dict:
headers_dict[header][0] += 1
headers_dict[header][1] += len(col)
else:
headers_dict[header] = [1, len(col)]
i += 1
y1 = []
y2 = []
x = []
for header in headers_dict:
avg = round(headers_dict[header][1] / float(headers_dict[header][0]), 2)
headers_dict[header].append(avg)
x.append(header)
y1.append(headers_dict[header][0])
y2.append(headers_dict[header][1])
print_dict(headers_dict)
gm = Graph_Maker()
gm.add_x(x)
gm.append_y(y1)
gm.append_y(y2)
print(gm)
gm.plot_bar_double_scale("Column Type", "Number of Occurences", "Total Number of Entries",
title, "Number of Occurences", "Total Number of Entries")
"""
------------------------------------------------------------------
"""
def get_dataset_stats_ckan(test_folder, title="Company.Info Testset Statistics"):
"""
For a folder with csv files, read all the columns and plot how many columns there are
and how many instances in total, but use the ckan mapping to retrieve the correct letters for
the classnames.
"""
sr = Schema_Reader()
headers_dict = {}
for filename in sorted(os.listdir(test_folder)):
print(filename)
path = test_folder + filename
if(isfile(path)):
headers, columns = sr.get_duplicate_columns(path)
i = 0
for header in headers:
col = columns[i]
col = remove_nan_from_list(col)
col = remove_values_from_list(col, "")
col = remove_values_from_list(col, " ")
col = remove_values_from_list(col, "- ")
col = remove_values_from_list(col, "UNKNOWN")
col = remove_values_from_list(col, 'UNKNOWN ')
if header in headers_dict:
headers_dict[header][0] += 1
headers_dict[header][1] += len(col)
else:
headers_dict[header] = [1, len(col)]
i += 1
y1 = []
y2 = []
x = []
for header in headers_dict:
avg = round(headers_dict[header][1] / float(headers_dict[header][0]), 2)
headers_dict[header].append(avg)
x.append(get_letter(header))
y1.append(headers_dict[header][0])
y2.append(headers_dict[header][1])
print_dict(headers_dict)
gm = Graph_Maker()
gm.add_x(x)
gm.append_y(y1)
gm.append_y(y2)
print(gm)
gm.plot_bar_double_scale("Column Type", "Number of Occurences", "Total Number of Entries",
title, "Number of Occurences", "Total Number of Entries")
def plot_learn_set():
"""
Simply plot the learnset. This is all the data in the data folders; it is hard-coded here
rather than read from disk.
"""
headers_dict = {
'address': 152539,
'city': 180253,
'company_name': 146991,
'country': 95607,
'date': 193302,
'domain_name': 42557,
'email': 33480,
'gender': 15639,
'house_number': 81807,
'kvk_number': 16268,
'legal_type': 18869,
'person_name': 29530,
'postcode': 176048,
'province': 16953,
'sbi_code': 28342,
'sbi_description': 31448,
'telephone_nr': 94419
}
gm = Graph_Maker()
y1 = []
x = []
for header in headers_dict:
x.append(header)
y1.append(headers_dict[header])
gm.add_x(x)
gm.add_y(y1)
gm.plot_bar("Column Type","Number of Occurences", "Company.Info Learnset Statistics")
def plot_learn_set_ckan():
"""
Simply plot the learnset. This is all the data in the data folders; it is hard-coded here
rather than read from disk.
"""
headers_dict = { 'has_description': 2400,
'has_identifier_has_URI': 2400,
'has_identifier_has_id_value': 2400,
'has_identifier_is_source_of_has_classification_type': 804,
'has_identifier_is_source_of_has_endDate': 804,
'has_identifier_is_source_of_has_startDate': 804,
'has_identifier_is_source_of_type': 804,
'has_identifier_label': 2400,
'has_identifier_type': 2400,
'has_name': 2400,
'is_destination_of_has_classification_type': 2400,
'is_destination_of_has_endDate': 988,
'is_destination_of_has_source_has_identifier_has_URI': 924,
'is_destination_of_has_source_has_identifier_has_id_value': 183,
'is_destination_of_has_source_has_identifier_type': 924,
'is_destination_of_has_source_is_source_of_has_classification_type': 952,
'is_destination_of_has_source_is_source_of_has_destination_has_name': 141,
'is_destination_of_has_source_is_source_of_has_destination_type': 952,
'is_destination_of_has_source_is_source_of_has_endDate': 952,
'is_destination_of_has_source_is_source_of_has_startDate': 952,
'is_destination_of_has_source_is_source_of_type': 952,
'is_destination_of_has_source_type': 2400,
'is_destination_of_has_startDate': 988,
'is_destination_of_type': 2400,
'is_source_of_has_classification_type': 2400,
'is_source_of_has_destination_type': 1679,
'is_source_of_has_endDate': 2400,
'is_source_of_has_startDate': 2400,
'is_source_of_type': 2400,
'label': 2400,
'type': 2400}
gm = Graph_Maker()
y1 = []
x = []
for header in headers_dict:
x.append(get_letter(header))
y1.append(headers_dict[header])
gm.add_x(x)
gm.add_y(y1)
gm.plot_bar("Column Type","Number of Occurences", "CERIF Learnset Statistics", rotation=90)
if __name__ == '__main__':
plot_learn_set_ckan()
plot_learn_set()
get_dataset_stats('data_test/')
test_folder = 'ckan_subset/testset/xml_csv/'
get_dataset_stats_ckan(test_folder, title="CKAN Testset Statistics")
```
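A small sketch of the cleaning helpers on a single column; the folder-level functions above apply the same filters to every CSV column they read.
```python
column = ['Amsterdam', '', ' ', 'UNKNOWN', float('nan'), 'Rotterdam']
column = remove_nan_from_list(column)
for junk in ('', ' ', '- ', 'UNKNOWN', 'UNKNOWN '):
    column = remove_values_from_list(column, junk)
prep_columns(column)  # make sure every remaining entry is a string
print(column)         # ['Amsterdam', 'Rotterdam']
```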
#### File: JordyBottelier/arpsas/experiment4.py
```python
from schema_matching import *
from schema_matching.misc_func import *
from sklearn.metrics import accuracy_score
from pandas_ml import ConfusionMatrix
from sklearn.metrics import confusion_matrix
from os.path import isfile
import os
import math
gm = Graph_Maker()
rounds = 3
def execute_test(sm, test_folder, skip_unknown=False, iterations=0):
"""
For all the schemas in the test folder, read them and classify them;
return the actual and predicted headers so accuracy and a confusion matrix can be computed.
"""
sr = Schema_Reader()
actual = []
predicted = []
i = 0
for filename in sorted(os.listdir(test_folder)):
i += 1
print(filename)
path = test_folder + filename
if(isfile(path)):
headers, columns = sr.get_duplicate_columns(path, skip_unknown)
result_headers = None
if skip_unknown:
result_headers = sm.test_schema_matcher(columns, 0, False)
else:
result_headers = sm.test_schema_matcher(columns, 0.4, True)
predicted += result_headers
actual += headers
# print(accuracy_score(actual, predicted))
# break
if i == iterations:
break
return actual, predicted
def get_confusion(pred, actual , data_map):
"""
Modify the actual classes according to the datamap, so we can look at the confusion matrix.
"""
result = []
for ac in actual:
for cl in data_map:
if ac in data_map[cl]:
result.append(cl)
for i in range(0, len(actual)):
if pred[i] != result[i]:
print(actual[i])
return result
def experiment4_inliers1():
data_folder = 'data_train/'
number_of_columns = 80
examples_per_class = 60
gm.append_x(0)
gm.append_y(0.88)
total_actual = []
total_predicted = []
tmp = []
exp_actual = []
exp_predicted = []
sf_main = Storage_Files(data_folder, ['city', 'country', 'date', 'gender', 'house_number',\
'legal_type', 'province', 'sbi_code', 'sbi_description', 'telephone_nr', 'postcode'])
sf_legal = Storage_Files(data_folder, ['legal_type', 'postcode'])
sf_province = Storage_Files(data_folder, ['province', 'postcode'])
for i in range(0, rounds):
ccc = Column_Classification_Config()
# ------------------------------------------- CONFIG ------------------------------------------
ccc.add_feature('main', 'Corpus', [sf_main, 60, 0, False, False])
ccc.add_feature('legal', 'Syntax_Feature_Model', [sf_legal, 1, 0, False, False])
ccc.add_feature('province', 'Syntax_Feature_Model', [sf_province, 1, 0, False, False])
ccc.add_matcher('main', 'Word2Vec_Matcher', {'main': 'corpus'}) # main classifier
ccc.add_matcher('legal_matcher', 'Syntax_Matcher', {'legal': 'syntax'}, ('main', 'legal_type'))
ccc.add_matcher('province_matcher', 'Syntax_Matcher', {'province': 'syntax'}, ('main', 'province'))
# ccc.add_matcher('dom_email_matcher', 'Syntax_Matcher', {'dom_email': 'syntax'}, ('main', 'domain_email'))
# ------------------------------------------- END CONFIG ------------------------------------------
sm = Schema_Matcher(ccc)
actual, predicted = execute_test(sm, 'data_test/', True, 0)
# actual = get_confusion(predicted, actual, data_map_main)
exp_actual += actual
exp_predicted += predicted
accuracy = accuracy_score(actual, predicted)
tmp.append(accuracy)
gm.append_x(1)
accuracy = round(sum(tmp) / float(rounds), 2)
gm.append_y(accuracy)
gm.store(filename="/graph_maker/exp1.4a_1")
classnames = get_class_names(exp_actual)
cm = confusion_matrix(exp_actual, exp_predicted, labels=classnames)
gm.plot_confusion_matrix(cm, classnames, normalize=True, title="Confusion Matrix Experiment 4a_1")
subtitle = "Accuracy was averaged over " + str(rounds) + " tests"
def experiment4_inliers2():
data_folder = 'data_train/'
number_of_columns = 80
examples_per_class = 60
gm.append_x(0)
gm.append_y(0.88)
total_actual = []
total_predicted = []
tmp = []
exp_actual = []
exp_predicted = []
classes = ['city', 'country', 'date', 'gender', 'house_number',\
'legal_type', 'province', 'sbi_code', 'sbi_description', 'telephone_nr']
sf_main = Storage_Files(data_folder, ['city', 'country', 'date', 'gender', 'house_number',\
'legal_type', 'province', 'sbi_code', 'sbi_description', 'telephone_nr', 'postcode'])
sf_all = Storage_Files(data_folder, classes)
for i in range(0, rounds):
ccc = Column_Classification_Config()
# ------------------------------------------- CONFIG ------------------------------------------
ccc.add_feature('main', 'Syntax_Feature_Model', [sf_main, 1, 5000, False, False])
ccc.add_feature('all', 'Corpus', [sf_all, 50, 0, False, False])
# ccc.add_feature('city', 'Corpus', [sf_city, 50, 0, False, False])  # disabled: sf_city is never defined and 'city' is not used by any matcher
ccc.add_matcher('main', 'Syntax_Matcher', {'main': 'syntax'}) # main classifier
ccc.add_matcher('legal_matcher', 'Word2Vec_Matcher', {'all': 'corpus'}, ('main', 'legal_type'))
ccc.add_matcher('1', 'Word2Vec_Matcher', {'all': 'corpus'}, ('main', 'city'))
ccc.add_matcher('2', 'Word2Vec_Matcher', {'all': 'corpus'}, ('main', 'country'))
ccc.add_matcher('3', 'Word2Vec_Matcher', {'all': 'corpus'}, ('main', 'date'))
ccc.add_matcher('4', 'Word2Vec_Matcher', {'all': 'corpus'}, ('main', 'gender'))
ccc.add_matcher('5', 'Word2Vec_Matcher', {'all': 'corpus'}, ('main', 'house_number'))
ccc.add_matcher('6', 'Word2Vec_Matcher', {'all': 'corpus'}, ('main', 'province'))
ccc.add_matcher('7', 'Word2Vec_Matcher', {'all': 'corpus'}, ('main', 'sbi_code'))
ccc.add_matcher('8', 'Word2Vec_Matcher', {'all': 'corpus'}, ('main', 'sbi_description'))
ccc.add_matcher('9', 'Word2Vec_Matcher', {'all': 'corpus'}, ('main', 'telephone_nr'))
# ------------------------------------------- END CONFIG ------------------------------------------
sm = Schema_Matcher(ccc)
actual, predicted = execute_test(sm, 'data_test/', True, 0)
# actual = get_confusion(predicted, actual, data_map_main)
exp_actual += actual
exp_predicted += predicted
accuracy = accuracy_score(actual, predicted)
tmp.append(accuracy)
gm.append_x(2)
accuracy = round(sum(tmp) / float(rounds), 2)
gm.append_y(accuracy)
gm.store(filename="/graph_maker/exp1.4a_2")
classnames = get_class_names(exp_actual)
cm = confusion_matrix(exp_actual, exp_predicted, labels=classnames)
gm.plot_confusion_matrix(cm, classnames, normalize=True, title="Confusion Matrix Experiment 4a_2")
subtitle = "Accuracy was averaged over " + str(rounds) + " rounds"
def get_class_names(ytrue):
res = []
for c in ytrue:
if c not in res:
res.append(c)
return res
if __name__ == '__main__':
experiment4_inliers1()
experiment4_inliers2()
```
#### File: schema_matching/feature_classes/fingerprint.py
```python
from .feature_base import *
import string
import os
import sys
import numpy as np
import re
from collections import Counter
import collections
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from random import shuffle
from sklearn.preprocessing import normalize
import copy
from operator import methodcaller
class Fingerprint(Feature_Base):
"""
Feature class: Fingerprint
Uses the distribution of characters and character n-grams (1 to 2 characters) of its corpus as
datapoints. It also uses metafeatures (descriptive statistics) of these distributions.
"""
def __init__(self, sf=None, num_columns=10, examples_per_column=0, unique=False, use_map=False):
Feature_Base.__init__(self)
if num_columns < 10:
print("Warning, low amount of classes might cause the program to crash")
if isinstance(sf, Storage_Files):
self.dc = Data_Collector(sf,
num_columns=num_columns, examples_per_column=examples_per_column, unique=unique, use_map=use_map)
self.columns = self.dc.get_columns()
elif type(sf) == dict:
self.columns=sf
self.cv = CountVectorizer(analyzer = 'char_wb',
ngram_range = (1, 2),
min_df = 5,
decode_error = 'ignore')
self.collect_features()
def collect_features(self):
""" For the entire dictionairy, collect the features for each column """
corpus = []
for entity in self.columns:
column_chunks = self.columns[entity]
for column in column_chunks:
self.prep_columns(column)
corpus_txt = " ".join(column)
corpus.append(corpus_txt)
# Prep the features and their targets
features = self.extract_features(column)
self.features.append(features)
self.targets.append(entity)
n_grams = self.cv.fit_transform(corpus).toarray()
self.features = np.hstack((self.features, n_grams))
self.features = normalize(self.features)
def get_features_targets_test(self, learnset_ratio=0.7):
"""
Created specifically for this fingerprint so the CountVectorizer can be refitted on the learnset
and evaluated along with the rest of the datapoints.
"""
learnset_corpus = []
learnset_features = []
learnset_targets = []
testset_corpus = []
testset_features = []
testset_targets = []
for entity in self.columns:
column_chunks = self.columns[entity]
for column in column_chunks:
self.prep_columns(column)
features_list = self.extract_features(column)
corpus_txt = " ".join(column)
# Split in learn and testset and prepare the features
if np.random.uniform() < learnset_ratio:
learnset_corpus.append(corpus_txt)
learnset_features.append(features_list)
learnset_targets.append(entity)
else:
testset_corpus.append(corpus_txt)
testset_features.append(features_list)
testset_targets.append(entity)
# Reinitialize countvectorizer for testing
self.cv = CountVectorizer(analyzer = 'char_wb',
ngram_range = (1, 2),
min_df = 5,
decode_error = 'ignore')
learn_n_grams = self.cv.fit_transform(learnset_corpus).toarray()
learnset_features = np.hstack((learnset_features, learn_n_grams))
learnset_features = normalize(learnset_features)
test_n_grams = self.cv.transform(testset_corpus).toarray()
testset_features = np.hstack((testset_features, test_n_grams))
testset_features = normalize(testset_features)
return learnset_features, learnset_targets, testset_features, testset_targets
def extract_features(self, column):
"""
Extract raw features from column, three types:
- char distributions
- char-length metafeatures
- token-length metafeatures
"""
feature_vector = []
feature_vector += self.char_distributions(column)
feature_vector += self.metafeatures(list(map(len, column))) # char length metafeatures
words_per_entry = list(map(len, list(map(methodcaller("split", " "), column)))) # Get the amount of words per entry
feature_vector += self.metafeatures(words_per_entry) # Amount of words per entry metafeatures
return np.array(feature_vector)
def extract_features_column(self, column):
"""
This function is used upon classification; we use it because we also need the fitted CountVectorizer
Extract raw features from column, three types:
- char distributions
- char-length metafeatures
- token-length metafeatures
"""
feature_vector = []
feature_vector += self.char_distributions(column)
feature_vector += self.metafeatures(list(map(len, column))) # char length metafeatures
words_per_entry = list(map(len, list(map(methodcaller("split", " "), column)))) # Get the amount of words per entry
feature_vector += self.metafeatures(words_per_entry) # Amount of words per entry metafeatures
ngrams = self.cv.transform([" ".join(column)]).toarray()[0].tolist()
feature_vector += ngrams
feature_vector = np.array(feature_vector).reshape(1, -1)
return np.array(normalize(feature_vector))
def metafeatures(self, a):
""" Return metafeatures (i.e., descriptive statistics) of an array. """
return [np.average(a), np.median(a), np.std(a), np.amin(a),
np.amax(a), np.percentile(a, 25), np.percentile(a, 75)]
def char_distributions(self, column):
""" Counts over characters (i.e., distribution of chars per column). """
c = Counter(string.punctuation + string.ascii_letters.lower() + string.digits)
c = dict.fromkeys(c, 0)
for t in " ".join(column).lower():
if t in c:
c[t] += 1
items = sorted(c.items(), key=lambda tup: tup[0])
distribution = [val[1] for val in items]
distribution = np.array(distribution) / float(sum(distribution))
return list(distribution)
```
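A minimal, hypothetical usage sketch of the `Fingerprint` class above: the tiny `columns` dict and the RandomForest classifier are illustrative assumptions (the real pipeline builds the columns through `Data_Collector` and `Storage_Files`), but they show how the split helper returns vectorized learn/test sets that can be fed to any scikit-learn classifier.
```python
# Hypothetical usage sketch; the columns dict and the classifier choice are assumptions.
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score

columns = {
    "email": [["a@b.com", "c@d.org", "e@f.net"] for _ in range(20)],
    "telephone": [["0612345678", "020-7654321", "0031612345678"] for _ in range(20)],
}
fp = Fingerprint(columns)  # passing a dict skips the Data_Collector path
learn_X, learn_y, test_X, test_y = fp.get_features_targets_test(learnset_ratio=0.7)
clf = RandomForestClassifier(n_estimators=100).fit(learn_X, learn_y)
print(accuracy_score(test_y, clf.predict(test_X)))
```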
#### File: arpsas/schema_matching/graph_maker.py
```python
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
import numpy as np
import matplotlib
import pickle
from .misc_func import *
import itertools
class Graph_Maker():
"""
Class used to collect x and y values and then plot them all at once.
It should be stored every time; this is useful when you want to collect data from multiple
points in the program and are unsure of how it should be plotted.
"""
def __init__(self, load=False, filename="graph_maker/obj1", fontsize=15):
self.x = []
self.y = []
self.colors = ['r', 'b', 'g', 'y', 'black', 'cyan', 'magenta', 'orange']
matplotlib.rcParams.update({'font.size': fontsize})
if load:
self.load(filename=filename)
def add_x(self, x, if_exists=True):
"""
Set the new x-axis of the graph; the boolean should be set to False if you don't want to overwrite it when
it already exists.
"""
if if_exists:
self.x = x
def add_y(self, y, if_exists=True):
"""
Set the new y-axis of the graph; the boolean should be set to False if you don't want to overwrite it when
it already exists.
"""
if if_exists:
self.y = y
def append_y(self, y):
"""
add another y value
"""
self.y.append(y)
def append_x(self, x):
"""
add another x value
"""
self.x.append(x)
def subplot_n(self, xlabel, ylabel, main_title, subtitles, labels, xticks=None):
"""
Plot n subplots. These are all line graphs.
"""
num_subplots = len(self.x) # The number of lists in the x variable = number of subplots
num_ys = len(self.y) / num_subplots
fig, axes = plt.subplots(num_subplots, sharex=True, sharey=True)
fig.suptitle(main_title)
i = 0
for ax in axes:
subtitle = subtitles[i]
ax.set_title(subtitle)
x = self.x[i]
for f in range(0, int(num_ys)):
color = self.colors[f]
label = labels.pop(0)
y = self.y.pop(0)
ax.plot(x, y, label=label, color=color)
ax.set_xticks(x)
if xticks != None:
ax.set_xticklabels(xticks)
ax.set_ylabel(ylabel)
i += 1
plt.xlabel(xlabel)
plt.legend(bbox_to_anchor=(1.1, 2.05))
plt.show()
def plot_line_n(self, xlabel, ylabel, title, labels, subtitle=None, xticks=None):
"""
Plot n lines in a single figure
"""
colors=self.colors
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
if subtitle != None:
plt.suptitle(subtitle, fontsize=12)
for i in range(0, len(self.y)):
y = self.y[i]
label = labels[i]
color = colors[i]
plt.plot(self.x, y, color=color, label=label, alpha=0.5)
if xticks != None:
plt.xticks(self.x, xticks)
plt.tight_layout()
plt.legend()
plt.show()
def plot_bar(self, xlabel, ylabel, title, xticks_x=None, xticks_label=None, subtitle=None, show_value=False, rotation=45):
"""
Create a bar graph of the data
"""
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if xticks_x != None:
plt.xticks(xticks_x, xticks_label, fontsize=10, rotation=rotation)
plt.title(title)
if subtitle != None:
plt.suptitle(subtitle)
plt.xticks(rotation=rotation)
plt.bar(self.x, self.y, color='r', alpha=0.5)
if show_value:
for x in range(0, len(self.x)):
plt.text(x, self.y[x]/2.0, str(self.y[x]))
plt.tight_layout()
plt.show()
def plot_bar_enlarged(self, xlabel, ylabel, title, xticks_x=None, \
xticks_label=None, subxax=None):
"""
Make a bar graph with a section of the graph highlighted; requires some manual fiddling
"""
xdata = self.x
ydata = self.y
fig, ax = plt.subplots() # create a new figure with a default 111 subplot
ax.bar(xdata, ydata, color='g')
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
if xticks_x != None:
plt.xticks(xticks_x, xticks_label, fontsize=10, rotation=45)
# Smaller subsection
desired_portion = 0.5
zoom = desired_portion / (len(subxax) / len(ydata))
axins = zoomed_inset_axes(ax, zoom, loc=5)
subydata = ydata[0:len(subxax)]
height = 0.02
newydata = np.array(subydata) * (height / max(subydata))
axins.bar(subxax, newydata)
plt.yticks(visible=False)
axins.set_xlabel("Characters")
axins.set_title("Subplot of character distribution")
mark_inset(ax, axins, loc1=2, loc2=4, fc="none", ec="0.5")
plt.tight_layout()
plt.show()
def plot_scatter(self, xlabel, ylabel, title, avg=False, text=None):
"""
Scatter plot from the data
"""
xdata = self.x
ydata = self.y
fig = plt.figure(dpi=200, figsize=(10, 5))
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.scatter(xdata, ydata, label="Scatter Accuracy")
if avg:
# Also plot the average scatter value
x, y = zip(*sorted((xVal, np.mean([yVal for a, yVal in zip(xdata, ydata) if xVal==a])) for xVal in set(xdata)))
plt.plot(x, y, label="Average Accuracy", color='r')
plt.text(7, 0.6, text, fontsize=10)
plt.plot()
plt.tight_layout()
axes = plt.gca()
axes.set_ylim([0.5,1])
plt.legend()
plt.show()
def plot_bar_double(self, xlabel, ylabel, title, label1, label2):
"""
Plot two bar graphs over one another
"""
xdata = self.x
ydata1 = self.y[0]
ydata2 = self.y[1]
fig = plt.figure(dpi=200, figsize=(10, 5))
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.xticks(rotation=90)
indices = np.arange(len(xdata))
plt.title(title)
plt.bar(indices, ydata1, color='b', label=label1, alpha=1)
plt.bar(indices, ydata2, width=0.5, color='r', alpha=1, label=label2)
plt.xticks(indices, xdata)
plt.tight_layout()
plt.legend()
plt.show()
def plot_bar_n(self, xlabel, ylabel, title, labels, subtitle=None, rotation=45):
"""
Plot n bars per class
"""
colors=self.colors
width = 0.9 / (float(len(self.y))) # always leave a little room
sep = width / float(len(self.y))
xdata = self.x
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.xticks(rotation=rotation)
if subtitle != None:
plt.suptitle(subtitle, fontsize=12)
indices = np.arange(len(xdata))
plt.title(title)
plot_indices = []
minimal = -len(self.y) /2.0 * width
for i in range(0, len(self.y)):
tmp_sep = minimal + (i * width)
y = self.y[i]
label = labels[i]
color = colors[i]
plt.bar(indices - tmp_sep, y, width=width, color=color, label=label, alpha=0.5)
r = 0
for x in (indices - tmp_sep):
plt.text(x, y[r]/2.0, str(y[r]), horizontalalignment="center")
r += 1
plt.xticks(indices, xdata)
plt.tight_layout()
plt.legend()
plt.show()
def plot_bar_double_scale(self, xlabel, ylabel1, ylabel2, title, label1, label2):
"""
Plot 2 bar graphs but with a double scale
"""
fig = plt.figure()
indices = np.arange(len(self.x))
width = 0.4
sep = width / 2
ax1 = fig.add_subplot(111)
p1 = ax1.bar(indices+sep, self.y[0], width=width, color='b', label=label1, alpha=1)
ax1.set_ylabel(ylabel1)
plt.xticks(rotation=70)
ax2 = ax1.twinx()
p2 = ax2.bar(indices-sep, self.y[1], width=width, color='r', alpha=1, label=label2)
ax2.set_ylabel(ylabel2, color='r')
for tl in ax2.get_yticklabels():
tl.set_color('r')
ax1.set_xlabel(xlabel)
plt.xticks(indices, self.x)
plt.legend(handles=[p1, p2])
plt.title(title)
plt.tight_layout()
plt.show()
def plot_confusion_matrix(self, cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=90)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black", fontsize=7)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
def plot_confusion_small_font(self, cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
for i in range(0, len(classes)):
if len(classes[i]) > 30:
classes[i] = classes[i][-30::]
plt.xticks(tick_marks, classes, rotation=90)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black", fontsize=7)
# plt.tick_params(labelsize=8)
# plt.gcf().subplots_adjust(bottom=0.9, top=10)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
def store(self, filename="/graph_maker/obj1"):
"""
Store the object
"""
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
filename = dir_path + filename
store_file(filename, self)
def load(self, filename="/graph_maker/obj1"):
"""
Load the configuration of a previously stored object
"""
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
filename = dir_path + filename
gm = read_file(filename)
self.x = gm.x
self.y = gm.y
def __repr__(self):
print("x: ", str(self.x))
if self.y != []:
if type(self.y[0]) == list:
print("y values: ")
print_list(self.y)
else:
print("y: ", str(self.y))
else:
print("y: ", str(self.y))
return ""
```
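A short sketch of the workflow described in the `Graph_Maker` docstring: append points while an experiment runs, store the object, then reload and plot later. The values below are invented, and the `graph_maker/` directory next to the module is assumed to exist, since `store`/`load` resolve the path relative to the package.
```python
# Invented data, only to illustrate the collect -> store -> load -> plot cycle.
gm = Graph_Maker()
for num_columns, accuracy in [(2, 0.91), (4, 0.87), (8, 0.80)]:
    gm.append_x(num_columns)
    gm.append_y(accuracy)
gm.store(filename="/graph_maker/example_run")
gm2 = Graph_Maker(load=True, filename="/graph_maker/example_run")
gm2.plot_bar("Number of columns", "Accuracy", "Accuracy per column count", show_value=True)
```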
#### File: arpsas/schema_matching/misc_func.py
```python
import pickle
import pprint
import sys
import collections
def store_file(filename, data):
pickle.dump(data, open(filename, "wb"))
def store_matcher(filename, data):
pickle.dump(data, open(('trained_matchers/' + filename), "wb"))
def read_matcher(filename):
return pickle.load(open(('trained_matchers/' + filename), "rb" ), encoding='latin1')
def read_file(filename):
return pickle.load(open(filename, "rb" ), encoding='latin1')
def store_file_string(filename, data):
f = open(filename, "w")
f.write(str(data))
f.close()
def print_dict(data):
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(data)
def print_list(data):
for n in data:
print(n)
def throw_error(errorstring):
print(errorstring)
sys.exit()
def precision(actual, predicted):
""" Fraction of committed (non-"unknown") predictions that are correct: tp / (tp + fp). """
tp = 0
fp = 0
for i in range(0, len(actual)):
predic = predicted[i]
act = actual[i]
if predic != "unknown":
if predic == act:
tp += 1
else:
fp += 1
return tp / float(tp + fp)
def recall(actual, predicted):
""" Fraction of true (non-"unknown") labels that are predicted correctly: tp / (tp + fn). """
tp = 0
fn = 0
for i in range(0, len(actual)):
predic = predicted[i]
act = actual[i]
if act != "unknown":
if predic == act:
tp += 1
else:
fn += 1
return tp / float(tp + fn)
def f_measure(actual, predicted):
prec = precision(actual, predicted)
rec = recall(actual, predicted)
return 2 * (prec * rec) / (prec + rec)
``` |
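A small worked example of the evaluation helpers above, where "unknown" marks the columns a matcher declined to classify: abstaining twice keeps precision at 1.0 but drops recall to 0.5, and the F-measure becomes 2·(1.0·0.5)/1.5 ≈ 0.67. The labels are illustrative.
```python
# Illustrative labels; "unknown" is a prediction the matcher refused to commit to.
actual    = ["email", "phone", "email", "phone"]
predicted = ["email", "unknown", "unknown", "phone"]
print(precision(actual, predicted))  # 1.0  -> every committed prediction was correct
print(recall(actual, predicted))     # 0.5  -> only half of the true labels were recovered
print(f_measure(actual, predicted))  # ~0.67
```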
{
"source": "JordyCabannes/Hades",
"score": 2
} |
#### File: Hades/others/testproxmox.py
```python
from pyproxmox import *
from pprint import pprint
import configparser
#Default values, just for testing the functions
vmid = 101
interface = 'eth0'
service = 'pveproxy'
upid = 'UPID:stock:00000684:00001088:5846C0CD:startall::root@pam:'
ostemplate = 'local:vztmpl/debian-8.0-standard_8.4-1_amd64.tar.gz'
hostname='test.example.org'
password='<PASSWORD>'
description='python'
available_flavors = { #Or put this in an INI file
'xsmall': {'cpus': 1,
'memory': 512,
'disk': 10,
'swap': 1024},
'small': {'cpus': 1,
'memory': 1024,
'disk': 15,
'swap': 1024},
'medium': {'cpus': 2,
'memory': 2048,
'disk': 20,
'swap': 4096},
'large': {'cpus': 2,
'memory': 4096,
'disk': 50,
'swap': 8192},
'xlarge': {'cpus': 4,
'memory': 8192,
'disk': 100,
'swap': 16384}}
flavor = available_flavors['xsmall'] #just a default flavor for testing
available_shared_storage_sizes = [10, 50, 100, 500, 1000]
class test:
def __init__(self):
pass
def ok(response):
return response['status']['ok']
def init(local=False):
"""Initialise les clients proxmox pour chaque node (à partir du fichier INI)"""
global b #Node par défaut, pratique pour faire des tests
global nodes
def read_nodes_fichier_ini():
config = configparser.SafeConfigParser()
try:
config.read('proxmoxnodes.ini')
nodes = {}
for node in [s for s in config.sections() if s.startswith('Local' if local else 'Serveur')]:
nodes[node] = dict(config[node])
return nodes
except:
input('ERROR: The INI file is malformed or missing.')
raise
def initialiser_clients_nodes(credentials_list):
def _initialiser_clients_nodes(credentials_list):
for node_name, node_creds in credentials_list.items():
a = prox_auth(node_creds['ip'], node_creds['login']+'@pam', node_creds['pass'])
yield pyproxmox(a)
return list(_initialiser_clients_nodes(credentials_list))
def add_node_attributes(node):
"""Ajoute les attributs name et ip au node (on en a souvent besoin pour appeler les fonctions de l'API)"""
node_info = node.getClusterStatus()['data'][0]
node.name = node_info['name']
node.ip = node_info['ip']
return node
#These are actually proxmox servers that we manipulate.
#In our case there is only one node per server, so we can consider that server == node.
nodes = initialiser_clients_nodes(read_nodes_fichier_ini())
nodes = list(map(add_node_attributes, nodes)) #materialize the map so the list can be indexed
b = nodes[0]
def creer_container_test(flavor=flavor,
ostemplate=ostemplate,
hostname=hostname,
password=password,
description=description):
post_data = {'ostemplate':ostemplate,
'vmid':int(b.getClusterVmNextId()['data']),
'description':description,
'hostname':hostname,
'memory':flavor['memory'],
'password':password,
'swap':flavor['swap']}
response = b.createOpenvzContainer(b.name, post_data)
if not ok(response):
pprint(response['status'])
print(response) #Possibly do something useful with the elements of the response
return response
def liste_tout_dans_node(node):
"""Liste toutes les VMs et containers d'un certain b."""
# Attributs spécifiques à 'containers' et 'vms':
# container : pid
# virtual : lock, maxswap, swap, type
liste_containers = node.getNodeContainerIndex(node.name)['data']
liste_vms = node.getNodeVirtualIndex(node.name)['data']
return dict(containers=liste_containers, vms=liste_vms)
def liste_tout():
"""Liste toutes les VMs et containers sur tous les nodes."""
result = liste_tout_dans_node(nodes[0])
for node in nodes[1:]:
for k in result.keys():
result[k].extend(liste_tout_dans_node(node)[k])
return result
def liste_vms_user(user):
#Not possible yet, we will probably have to check in the database
return [vm for vm in liste_tout() if vm['user'] == user]
raise NotImplementedError
def creer_vm():
pprint(b.createVirtualMachine(b.name,post_data)['data'])
pprint(b.startVirtualMachine(b.name,vmid)['data'])
# raise NotImplementedError
def cloner_vm():
pprint(b.cloneVirtualMachine(b.name,vmid,post_data)['data'])
# raise NotImplementedError
def tuer_vm():
pprint(b.deleteVirtualMachine(b.name,vmid)['data'])
# raise NotImplementedError
def migrer_vm():
pprint(b.migrateVirtualMachine(b.name,vmid,target,online=False,force=False)['data'])
# raise NotImplementedError
def resize_vm():
pprint(b.setVirtualMachineOptions(b.name,vmid,post_data)['data'])
# raise NotImplementedError
def relever_metriques_vm():
pprint(b.getVirtualStatus(b.name,vmid))
# raise NotImplementedError
def relever_metriques_node():
pprint(b.getNodeStatus(b.name)['data'])
# raise NotImplementedError
def calculer_montant_facturation_vm():
#Look at the flavor of the VM (each flavor has a corresponding price multiplier)
#Look at the average resource usage and the running time, or something along those lines
#Multiply by the base price per minute or second
raise NotImplementedError
def calculer_montant_facturation_user(user):
user_vms = liste_vms_user(user)
billing_per_vm = map(calculer_montant_facturation_vm, user_vms)
return sum(billing_per_vm)
def creer_stockage_partage():
raise NotImplementedError
def creer_reseau_virtuel():
raise NotImplementedError
def ajouter_vm_au_reseau_virtuel():
raise NotImplementedError
init(local=False)
if __name__ == '__main__':
obj = test()
'''
pprint(b.getClusterNodeList()['data']) #list of the nodes with their name, cpu/disk/mem and usage
pprint(b.getClusterLog()['data'])
pprint(b.getNodeContainerIndex(b.name)['data'])
pprint(b.getNodeVirtualIndex(b.name)['data'])
pprint(b.getNodeNetworks(b.name)['data'])
pprint(b.getNodeInterface(b.name,interface)['data'])
pprint(b.deleteNodeInterface(b.name,interface)['data'])
pprint(b.getNodeScanMethods(b.name)['data'])
pprint(b.deleteNodeNetworkConfig(b.name)['data'])
pprint(b.setNodeDNSDomain(b.name,domain)['data'])
pprint(b.getNodeServiceList(b.name)['data'])
pprint(b.getNodeServiceState(b.name,service)['data'])
pprint(b.getNodeStorage(b.name)['data'])
pprint(b.getNodeFinishedTasks(b.name)['data'])
pprint(b.getNodeDNS(b.name)['data'])
pprint(b.getNodeStatus(b.name)['data'])
pprint(b.getNodeSyslog(b.name)['data'])
pprint(b.getNodeRRD(b.name)) #---Missing parameters['data']
pprint(b.getNodeRRDData(b.name)) #---Missing parameters['data']
pprint(b.getNodeBeans(b.name)['data'])
pprint(b.getNodeTaskByUPID(b.name,upid)['data'])
pprint(b.getNodeTaskLogByUPID(b.name,upid)['data'])
pprint(b.getNodeTaskStatusByUPID(b.name,upid)['data'])
pprint(b.setNodeSubscriptionKey(b.name,key)['data'])
pprint(b.setNodeTimeZone(b.name,timezone)['data'])
pprint(b.deletePool(poolid)['data'])
pprint(b.setPoolData(poolid, post_data)['data'])
pprint(b.getRemoteiSCSI(b.name)['data'])
pprint(b.getNodeLVMGroups(b.name)['data'])
pprint(b.getRemoteNFS(b.name)['data'])
pprint(b.getNodeUSB(b.name)['data'])
pprint(b.getClusterACL()['data'])
pprint(b.getContainerIndex(b.name,vmid)['data'])
pprint(b.getContainerStatus(b.name,vmid)['data'])
pprint(b.getContainerBeans(b.name,vmid)['data'])
pprint(b.getContainerConfig(b.name,vmid)['data'])
pprint(b.getContainerInitLog(b.name,vmid)['data'])
pprint(b.getContainerRRD(b.name,vmid)) #---Missing parameters['data'])
pprint(b.getContainerRRDData(b.name,vmid)) #---Missing parameters['data'])
pprint(b.getVirtualIndex(b.name,vmid)['data'])
pprint(b.getVirtualStatus(b.name,vmid)) #Voir l'allocation et l'utilisation des ressources en live d'une VM['data'])
pprint(b.getVirtualConfig(b.name,vmid,current=False)) #Autres infos sur une VM (nom, config réseau...)['data'])
pprint(b.getVirtualRRD(b.name,vmid)) #---Manque paramètres['data'])
pprint(b.getVirtualRRDData(b.name,vmid)) #---Manque paramètres['data'])
pprint(b.getStorageVolumeData(b.name,storage,volume)['data'])
pprint(b.getStorageConfig(storage)['data'])
pprint(b.deleteStorageConfiguration(storageid)['data'])
pprint(b.updateStorageConfiguration(storageid,post_data)['data'])
pprint(b.getNodeStorageContent(b.name,storage)['data'])
pprint(b.getNodeStorageRRD(b.name,storage)) #---Missing parameters['data'])
pprint(b.getNodeStorageRRDData(b.name,storage)) #---Missing parameters['data'])
pprint(b.createOpenvzContainer(b.name,post_data)['data'])
pprint(b.mountOpenvzPrivate(b.name,vmid)['data'])
pprint(b.shutdownOpenvzContainer(b.name,vmid)['data'])
pprint(b.startOpenvzContainer(b.name,vmid)['data'])
pprint(b.stopOpenvzContainer(b.name,vmid)['data'])
pprint(b.unmountOpenvzPrivate(b.name,vmid)['data'])
pprint(b.migrateOpenvzContainer(b.name,vmid,target)['data'])
pprint(b.deleteOpenvzContainer(b.name,vmid)['data'])
pprint(b.setOpenvzContainerOptions(b.name,vmid,post_data)['data'])
pprint(b.createVirtualMachine(b.name,post_data)['data'])
pprint(b.cloneVirtualMachine(b.name,vmid,post_data)['data'])
pprint(b.resetVirtualMachine(b.name,vmid)['data'])
pprint(b.resumeVirtualMachine(b.name,vmid)['data'])
pprint(b.shutdownVirtualMachine(b.name,vmid)['data'])
pprint(b.startVirtualMachine(b.name,vmid)['data'])
pprint(b.stopVirtualMachine(b.name,vmid)['data'])
pprint(b.suspendVirtualMachine(b.name,vmid)['data'])
pprint(b.migrateVirtualMachine(b.name,vmid,target,online=False,force=False)['data'])
pprint(b.monitorVirtualMachine(b.name,vmid,command)['data'])
pprint(b.vncproxyVirtualMachine(b.name,vmid)['data'])
pprint(b.rollbackVirtualMachine(b.name,vmid,snapname)['data'])
pprint(b.deleteVirtualMachine(b.name,vmid)['data'])
pprint(b.setVirtualMachineOptions(b.name,vmid,post_data)['data'])
pprint(b.sendKeyEventVirtualMachine(b.name,vmid, key)['data'])
pprint(b.unlinkVirtualMachineDiskImage(b.name,vmid, post_data)['data'])
pprint(b.getSnapshotConfigVirtualMachine(b.name,vmid,snapname)['data'])
pprint(b.getSnapshotsVirtualMachine(b.name,vmid)['data'])
pprint(b.createSnapshotVirtualMachine(b.name,vmid,snapname,description='',vmstate=False)['data'])
pprint(b.deleteSnapshotVirtualMachine(b.name,vmid,title,force=False)['data'])
'''
``` |
{
"source": "JordyCabannes/Projet_tut",
"score": 3
} |
#### File: Projet_tut/lib/elevation.py
```python
import array
import math
import zipfile
import os.path
import urllib
def filename(lat, lon):
if lat > 0 :
filename = "N%02d" % math.trunc(lat)
else:
filename = "S%02d" % -math.trunc(lat - 1)
if lon > 0 :
filename += "E%03d.hgt" % math.trunc(lon)
else:
filename += "W%03d.hgt" % -math.trunc(lon - 1)
return filename
class Tile:
nb_coords = 1201;
def __init__(self, lat, lon):
zf = zipfile.ZipFile(filename(lat, lon) + ".zip")
self.data = array.array("h", zf.read(filename(lat, lon)))
self.data.byteswap()
def altitude(self, lat, lon):
lon_dec = abs(lon) - math.trunc(abs(lon))
lat_dec = abs(lat) - math.trunc(abs(lat))
if lon > 0 :
lon_idx = math.trunc(lon_dec * self.nb_coords)
else:
lon_idx = math.trunc((1 - lon_dec) * self.nb_coords -1)
if lat > 0 :
lat_idx = math.trunc((1 - lat_dec) * self.nb_coords - 1)
else:
lat_idx = math.trunc(lat_dec * self.nb_coords)
return self.data[lat_idx * self.nb_coords + lon_idx]
class ElevationData:
def __init__(self, continent):
if continent not in ["Africa", "Australia", "Eurasia", "Islands", "North_America", "South_America"]:
print "Error: unknow continent %s." % continent
raise Exception
self.tiles = {}
self.continent = continent
def altitude(self, lat, lon):
fn = filename(lat, lon)
if not self.tiles.has_key(fn):
if not os.path.exists(fn + ".zip"):
# url = "ftp://e0srp01u.ecs.nasa.gov/srtm/version2/SRTM3/%s/%s.zip" % (self.continent, fn)
url = "http://dds.cr.usgs.gov/srtm/version2_1/SRTM3/%s/%s.zip" % (self.continent, fn)
print "Tile not in cache. Downloading %s " %url
urllib.urlretrieve(url, fn + ".zip")
print " Done!"
self.tiles[fn] = Tile(lat, lon)
return self.tiles[fn].altitude(lat, lon)
```
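A quick check of the SRTM tile-naming convention implemented by `filename`: positive latitudes map to `N` and negative ones to `S` (shifted so that, e.g., -33.9 lands in the S34 tile), and likewise `E`/`W` for longitude. The coordinates below are arbitrary examples, and `print` is called with parentheses so the snippet also runs under the Python 2 used by the module above.
```python
# Arbitrary example coordinates, just to illustrate the tile naming.
print(filename(43.6, 1.44))     # N43E001.hgt  (Toulouse area)
print(filename(-33.9, 18.4))    # S34E018.hgt  (Cape Town area)
print(filename(40.7, -73.9))    # N40W074.hgt  (New York area)
```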
#### File: Projet_tut/lib/kalkati_reader.py
```python
from datastructures import *
import sys
import math
import datetime
from optparse import OptionParser
from sqlalchemy.orm import mapper, sessionmaker
import xml.sax
from xml.sax.handler import ContentHandler
import iso8601
def distance(c1, c2):
try:
delta = c2[0] - c1[0]
a = math.radians(c1[1])
b = math.radians(c2[1])
C = math.radians(delta)
x = math.sin(a) * math.sin(b) + math.cos(a) * math.cos(b) * math.cos(C)
distance = math.acos(x) # in radians
distance = math.degrees(distance) # in degrees
distance = distance * 60 # 60 nautical miles / lat degree
distance = distance * 1852 # conversion to meters
return distance;
except:
return 0
def normalize_service(start, end, services, service_start):
original = services
start_delta = (start - service_start).days
if start_delta < 0:
services = services + ("0" * abs(start_delta))
else:
services = services[:-start_delta]
end_delta = len(services) - (end - start).days
if end_delta < 0:
services = ("0" * abs(end_delta)) + services
else:
services = services[end_delta :]
if len(services) != (end - start).days:
print "Crapp!!!! {0} {1} {2} {3} {4} {5}".format(services, start_delta, end_delta, service_start, original, start)
return services
class KalkatiHandler(ContentHandler):
def __init__(self, session, start_date, end_date):
# self.nodes_insert = nodes.insert()
# self.edges_insert = edges.insert()
self.start = datetime.datetime.strptime(start_date, "%Y%m%d")
self.end = datetime.datetime.strptime(end_date, "%Y%m%d")
self.session = session
self.company = {}
self.stations = {}
self.changes = []
self.map = {}
self.synonym = False
self.count = 0
def startElement(self, name, attrs):
if name == "Delivery": #[1..1]
self.firstday = iso8601.parse_date(attrs["Firstday"])
self.lastday = iso8601.parse_date(attrs["Lastday"])
elif name == "Company": #[1..*]
self.company[attrs["CompanyId"]] = attrs["Name"]
elif name == "Country": #[1..*]
pass
elif name == "Timezone": #[1..*]
pass
elif name == "Period": #[1..*] in Timezone
pass
elif name == "Language": #[1..*]
pass
elif name == "Station" and not self.synonym: #[0..*]
s = {}
if attrs["Name"]:
s["name"] = attrs["Name"]
if attrs.has_key("X") and attrs.has_key("Y"):
s["x"], s["y"] = float(attrs["X"]), float(attrs["Y"])
else:
print "Warning! No X/Y for station {0}, {1}".format(s["id"], s["name"].encode("iso-8859-1" ) )
self.stations[attrs["StationId"]] = (s)
elif name == "Trnsattr": #[0..*]
pass
elif name == "Trnsmode": #[0..*]
pass
elif name == "Synonym": #[0..*]
self.synonym = True
elif name == "Change": #[0..*]
c = {}
c["service1"] = attrs["ServiceId1"]
c["service2"] = attrs["ServiceId2"]
if attrs["ChangeTime"]:
c["change_time"] = attrs["ChangeTime"]
elif name == "Timetbls": #[0..1]
pass # It's just a container for Services
elif name == "Service": #[0..*] in Timetbls
self.service = attrs["ServiceId"]
self.prev_stop = None
self.prev_time = None
elif name == "ServiceNbr": #[1..1] in Service
self.trip = attrs["ServiceNbr"]
if not self.map.has_key(self.trip):
self.map[self.trip] = {}
elif name == "ServiceValidity": # in Service
self.footnote = attrs["FootnoteId"]
elif name == "ServiceTrnsmode": # in Service
self.mode = attrs["TrnsmodeId"]
elif name == "ServiceAttribute": # in Service
pass
elif name == "Stop": #[1..*] in Service
station = attrs["StationId"]
if not self.map[self.trip].has_key(station):
self.map[self.trip][station] = self.count
self.count += 1
self.session.add(PT_Node(station, self.stations[station]["x"],self.stations[station]["y"], self.trip))
current_stop = self.map[self.trip][station]
if attrs.has_key("Arrival"):
arrival = int(attrs["Arrival"]) * 60
else:
arrival = int(attrs["Departure"]) * 60
if attrs.has_key("Departure"):
departure = int(attrs["Departure"]) * 60
else:
departure = int(attrs["Arrival"]) * 60
if self.prev_stop:
length = distance( (self.stations[station]["x"],self.stations[station]["y"]), (self.prev_lon, self.prev_lat))
self.session.add(PT_Edge(
source = self.prev_stop,
target = current_stop,
length = length * 1.1,
start_secs = self.prev_time,
arrival_secs = arrival,
services = self.footnote,
mode = self.mode
))
self.prev_stop = current_stop
self.prev_time = departure
self.prev_lon = self.stations[station]["x"]
self.prev_lat = self.stations[station]["y"]
if self.count % 100 == 0:
self.session.flush()
if self.count % 10000 == 0:
print "Added {0} timetable elements".format(self.count)
elif name == "Footnote":
if attrs.has_key("Firstdate"):
date = datetime.datetime.strptime(attrs["Firstdate"], "%Y-%m-%d")
else:
date = self.firstday
services = normalize_service(self.start, self.end, attrs["Vector"], date)
self.session.add(PT_Service(int(attrs["FootnoteId"]), services))
def endElement(self, name):
if name == "Synonym":
self.synonym = False
def endDocument(self):
self.session.commit()
def convert(filename, session, start_date, end_date):
handler = KalkatiHandler(session, start_date, end_date)
session.commit()
xml.sax.parse(filename, handler)
```
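The `distance` helper above is the spherical law of cosines expressed in nautical miles (one arc-minute of great circle ≈ 1 NM ≈ 1852 m), with coordinates passed as (lon, lat) tuples as at the call site. Two rough sanity checks on made-up points:
```python
# One degree of longitude along the equator: 60 NM = 111,120 m.
print(distance((0.0, 0.0), (1.0, 0.0)))        # ~111120.0
# Roughly Toulouse -> Paris; coordinates are approximate, in (lon, lat) order.
print(distance((1.44, 43.60), (2.35, 48.86)))  # ~590000 (about 590 km)
```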
#### File: Projet_tut/lib/layer.py
```python
from lib.datastructures import *
import core.mumoro as mumoro
import sys
from sqlalchemy import *
from sqlalchemy.orm import *
class NotAccessible(Exception):
pass
class NoLength(Exception):
pass
class DataIncoherence(Exception):
pass
def max_speed_to_average(max_speed):
if max_speed <= 0:
return 0
elif max_speed <= 30:
return 15
elif max_speed <= 90:
return max_speed - 20
else:
return max_speed - 30
def duration(length, property, type):
if not length and length != 0.0:
raise NoLength()
if type == mumoro.FootEdge:
if property == 0:
raise NotAccessible()
else:
return length * 3.6 / 5
elif type == mumoro.BikeEdge:
if property == 0:
raise NotAccessible()
else:
return length * 3.6 / 15
elif type == mumoro.CarEdge:
if property <= 0:
raise NotAccessible()
speed = max_speed_to_average( property )
return length * 3.6 / speed
else:
raise NotAccessible()
class BaseLayer(object):
def __init__(self, name, data, metadata):
self.data = data
self.name = name
self.metadata = metadata
self.nodes_table = Table(data['nodes'], metadata, autoload = True)
self.edges_table = Table(data['edges'], metadata, autoload = True)
self.count = select([func.max(self.nodes_table.c.id)]).execute().first()[0] + 1
def map(self, o_id):
s = self.nodes_table.select(self.nodes_table.c.original_id==o_id)
rs = s.execute()
result = None
for row in rs:
result = row
if result:
return result[0] + self.offset
else:
print "Unable to find id {0}".format(o_id)
return None
def borders(self):
max_lon = select([func.max(self.nodes_table.c.lon, type_=Float )]).execute().first()[0]
min_lon = select([func.min(self.nodes_table.c.lon, type_=Float )]).execute().first()[0]
max_lat = select([func.max(self.nodes_table.c.lat, type_=Float )]).execute().first()[0]
min_lat = select([func.min(self.nodes_table.c.lat, type_=Float )]).execute().first()[0]
return {'max_lon': max_lon,'min_lon': min_lon,'max_lat':max_lat,'min_lat':min_lat}
def average(self):
avg_lon = select([func.avg(self.nodes_table.c.lon, type_=Float )]).execute().first()[0]
avg_lat = select([func.avg(self.nodes_table.c.lat, type_=Float )]).execute().first()[0]
return {'avg_lon':avg_lon, 'avg_lat':avg_lat }
def match(self, ln, lt, epsilon = 0.002):
ln = float(ln)
lt = float(lt)
res = self.nodes_table.select(
(self.nodes_table.c.lon >= (ln - epsilon)) &
(self.nodes_table.c.lon <= (ln + epsilon)) &
(self.nodes_table.c.lat >= (lt - epsilon)) &
(self.nodes_table.c.lat <= (lt + epsilon)),
order_by = ((self.nodes_table.c.lon - ln) * (self.nodes_table.c.lon -ln)) + ((self.nodes_table.c.lat - lt) * (self.nodes_table.c.lat - lt))
).execute().first()
if res:
return res.id + self.offset
else:
return None
def nearest(self, ln, lt):
print "Trying to match {0}, {1}".format(ln, lt)
nearest = None
epsilon = 0.002
while not nearest and epsilon < 0.008:
nearest = self.match(ln, lt, epsilon)
epsilon += 0.001
return nearest
def coordinates(self, nd):
res = self.nodes_table.select(self.nodes_table.c.id == (nd - self.offset)).execute().first()
if res:
return (res.lon, res.lat, res.original_id, self.name)
else:
print "Unknow node {0} on layer {1}, offset ".format(nd, self.name, self.offset)
def nodes(self):
for row in self.nodes_table.select().execute():
yield row
# A street layer containing only Car or Foot or Bike (depending on the value of mode)
class Layer(BaseLayer):
def __init__(self, name, mode, data, metadata):
super(Layer, self).__init__(name, data, metadata)
self.mode = mode
def edges(self):
for edge in self.edges_table.select().execute():
e = mumoro.Edge()
e.length = edge.length
if self.mode == mumoro.Foot:
property = edge.foot
property_rev = edge.foot
e.type = mumoro.FootEdge
elif self.mode == mumoro.Bike:
property = edge.bike
property_rev = edge.bike_rev
e.type = mumoro.BikeEdge
elif self.mode == mumoro.Car:
property = edge.car
property_rev = edge.car_rev
e.type = mumoro.CarEdge
else:
property = 0
property_rev = 0
e.type = mumoro.UnkwownEdgeType
node1 = self.map(edge.source)
node2 = self.map(edge.target)
try:
dur = duration(e.length, property, e.type)
e.duration = mumoro.Duration(dur)
e.elevation = 0
# if self.mode == mumoro.Bike:
# e.elevation = max(0, target_alt - source_alt)
yield {
'source': node1,
'target': node2,
'properties': e
}
except NotAccessible:
pass
try:
dur = duration(e.length, property_rev, e.type)
e.duration = mumoro.Duration(dur)
e.elevation = 0
# if self.mode == mumoro.Bike:
# e.elevation = max(0, source_alt - target_alt)
yield {
'source': node2,
'target': node1,
'properties': e,
}
except NotAccessible:
pass
except NoLength:
print "Error no length : ("+str(source)+", "+str(dest)+") "
pass
# A street layer with mixed bike, foot and car
class MixedStreetLayer(BaseLayer):
def __init__(self, name, data, metadata):
super(MixedStreetLayer, self).__init__(name, data, metadata)
def edges(self):
count_total = 0
count_ko = 0
for edge in self.edges_table.select().execute():
source = self.map(edge.source)
dest = self.map(edge.target)
# forward arc
properties = [ {'prop': edge.foot, 'type': mumoro.FootEdge},
{'prop': edge.bike, 'type': mumoro.BikeEdge},
{'prop': edge.car, 'type': mumoro.CarEdge} ]
for property in properties:
count_total += 1
if count_total % 50000 == 0:
print "Treated edges : " + str(count_total)
try:
dur = duration(edge.length, property['prop'], property['type'])
yield {
'source': source,
'target': dest,
'properties': { 'road_edge': True,
'type': property['type'],
'length': edge.length,
'duration': dur }
}
except NotAccessible:
pass
except NoLength:
count_ko += 1
print "Error no length : ("+str(source)+", "+str(dest)+") "+str(count_ko) + " / " + str(count_total)
pass
# backward arc
properties = [ {'prop': edge.foot, 'type': mumoro.FootEdge},
{'prop': edge.bike_rev, 'type': mumoro.BikeEdge},
{'prop': edge.car_rev, 'type': mumoro.CarEdge} ]
for property in properties:
count_total += 1
if count_total % 50000 == 0:
print "Treated edges : " + str(count_total)
try:
dur = duration(edge.length, property['prop'], property['type'])
yield {
'source': dest,
'target': source,
'properties': { 'road_edge': True,
'type': property['type'],
'length': edge.length,
'duration': dur }
}
except NotAccessible:
pass
except NoLength:
count_ko += 1
print "Error no length : ("+str(source)+", "+str(dest)+") "+str(count_ko) + " / " + str(count_total)
pass
class GTFSLayer(BaseLayer):
"""A layer for public transport described by the General Transit Feed Format"""
def __init__(self, name, data, metadata):
super(GTFSLayer, self).__init__(name, data, metadata)
self.services = Table(data['services'], metadata, autoload = True)
self.mode = mumoro.PublicTransport
def edges(self):
for row in self.edges_table.select().execute():
services = self.services.select(self.services.c.id == int(row.services)).execute().first().services
yield {
'source': row.source + self.offset,
'target': row.target + self.offset,
'duration_type': row.duration_type,
'departure': row.start_secs,
'arrival': row.arrival_secs,
'duration': row.duration,
'services': services,
'type': row.mode
}
# Connects every node corresponding to a same stop:
# if a stop is used by 3 routes, the stop will be represented by 3 nodes
n1 = self.nodes_table.alias()
n2 = self.nodes_table.alias()
res = select([n1.c.id,n2.c.id], (n1.c.original_id == n2.c.original_id) & (n1.c.route != n2.c.route)).execute()
count = 0
for r in res:
count += 1
yield {
'source': r[0] + self.offset,
'target': r[1] + self.offset,
'duration_type': mumoro.ConstDur,
'departure': 0,
'arrival': 0,
'duration': 60,
'services': "",
'type': mumoro.TransferEdge
}
print "{0} transfer edge inserted".format(count)
class MultimodalGraph(object):
def __init__(self, layers, id, filename = None, binary_archive = True):
nb_nodes = 0
self.node_to_layer = []
self.layers = layers
for l in layers:
l.offset = nb_nodes
nb_nodes += l.count
self.node_to_layer.append((nb_nodes, l.name))
print "Layer " + l.name + " for nodes from " + str(l.offset) +" to "+ str(nb_nodes - 1)
if filename:
self.graph_facto = mumoro.GraphFactory(filename, binary_archive)
else:
self.graph_facto = mumoro.GraphFactory(nb_nodes)
self.graph_facto.set_id(id)
count = 0
for l in layers:
for e in l.edges():
if e.has_key('properties') and e['properties']['road_edge']:
self.graph_facto.add_road_edge(e['source'], e['target'], e['properties']['type'], int(e['properties']['duration']))
count += 1
elif e.has_key('properties'):
if self.graph_facto.add_public_transport_edge(e['source'], e['target'], e['properties']['type'], int(e['properties']['duration'])):
count += 1
else:
if self.graph_facto.add_public_transport_edge(e['source'], e['target'], e['duration_type'], e['departure'], e['arrival'], e['duration'],
str(e['services']), e['type']):
count += 1
print "On layer {0}, {1} edges, {2} nodes".format(l.name, count, l.count)
for n in l.nodes():
self.graph_facto.set_coord(n.id + l.offset, n.lon, n.lat)
print "The multimodal graph has been built and has {0} nodes and {1} edges".format(nb_nodes, count)
def graph(self):
return self.graph_facto.get()
def save_to_bin(self, filename):
self.graph_facto.setAll()
print"goaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaal"
self.graph_facto.save_to_bin(filename)
def save_to_txt(self, filename):
self.graph_facto.setAll()
print"yeaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaah"
self.graph_facto.save_to_txt(filename)
#def load(self, filename):
#self.graph_facto.load(filename)
def layer(self, node):
for l in self.node_to_layer:
if int(node) <= l[0]: # nodes from 1 to l[0] are in layer 0, etc
return l[1]
print sys.exc_info()[0]
print "Unable to find the right layer for node {0}".format(node)
print self.node_to_layer
def coordinates(self, node):
name = self.layer(node)
for l in self.layers:
if l.name == name:
return l.coordinates(node)
print sys.exc_info()[0]
print "Unknown node: {0} on layer: {1}".format(node, name)
def match(self, name, lon, lat):
for l in self.layers:
if l.name == name:
return l.nearest(lon, lat)
def connect_same_nodes(self, layer1, layer2, property):
count = 0
for n1 in layer1.nodes():
n2 = layer2.map(n1.original_id)
if n2:
self.graph_facto.add_edge(n1.id + layer1.offset, n2, property)
count += 1
print count
return count
def connect_same_nodes_random(self, layer1, layer2, property, freq):
count = 0
for n1 in layer1.nodes():
n2 = layer2.map(n1.original_id)
if n2 and count % freq == 0:
self.graph_facto.add_edge(n1.id + layer1.offset, n2, property)
count += 1
return count
def connect_nodes_from_list(self, layer1, layer2, list, property, property2 = None):
count = 0
if property2 == None:
property2 = property
for coord in list:
n1 = layer1.match(coord['lon'], coord['lat'])
n2 = layer2.match(coord['lon'], coord['lat'])
if n1 and n2:
self.graph_facto.add_edge(n1, n2, property)
self.graph_facto.add_edge(n2, n1, property2)
count += 2
else:
print "Uho... no connection possible"
return count
def connect_nearest_nodes(self, layer1, layer2, property, property2 = None):
count = 0
if property2 == None:
property2 = property
for n in layer1.nodes():
# Only connect nodes flaged as linkable
if not n.linkable:
continue
nearest = layer2.nearest(n.lon, n.lat)
if nearest:
self.graph_facto.add_public_transport_edge(n.id + layer1.offset, nearest, property['duration'], property['type'])
self.graph_facto.add_public_transport_edge(nearest, n.id + layer1.offset, property2['duration'], property2['type'])
count += 2
return count
```
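The travel-time model in `duration` above is deliberately simple: fixed 5 km/h walking and 15 km/h cycling speeds, and a car speed derived from the edge's max-speed attribute through `max_speed_to_average`. A small worked example of the arithmetic (the edge-type constants come from the `mumoro` C++ binding, so this is only a sketch):
```python
# Sketch of the arithmetic; mumoro.FootEdge / mumoro.CarEdge are the binding's edge types.
print(max_speed_to_average(50))            # 30  -> a 50 km/h limit becomes a 30 km/h average
print(duration(1000, 1, mumoro.FootEdge))  # 720.0 seconds for 1 km at 5 km/h
print(duration(1000, 50, mumoro.CarEdge))  # 120.0 seconds for 1 km at the derived 30 km/h
```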
#### File: Projet_tut/test/testunit.py
```python
import unittest
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
pass
def test_import_street_data(self):
self.assertTrue(False)
def test_import_municipal_data(self):
self.assertTrue(False)
def test_import_bike_service(self):
self.assertTrue(False)
def test_load_layer(self):
self.assertTrue(False)
def test_cost(self):
self.assertTrue(False)
def test_connect_layers_same_nodes(self):
self.assertTrue(False)
def test_connect_layers_from_node_list(self):
self.assertTrue(False)
def test_create_multimodal_graph(self):
self.assertTrue(False)
if __name__ == '__main__':
unittest.main()
```
#### File: JordyCabannes/Projet_tut/toolbox.py
```python
__author__ = '<NAME>'
from time import time
class Output(object):
def __init__(self):
super(Output, self).__init__()
def timer(func, *pargs, **kargs):
"""
Measures the time required to run func with the given parameters.
Returns the time as well as the result of the computation.
"""
start = time()
ret = func(*pargs, **kargs)
elapsed = time() - start
return elapsed, ret
``` |
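A minimal usage sketch of the `timer` helper: it forwards its positional and keyword arguments to `func` and returns the elapsed wall-clock time together with the function's result. The workload below is arbitrary.
```python
# Time an arbitrary throwaway computation.
elapsed, total = timer(sum, range(1000000))
print("{0:.3f}s -> {1}".format(elapsed, total))  # e.g. 0.031s -> 499999500000
```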
{
"source": "JordyMoos/Fake-images-please",
"score": 3
} |
#### File: Fake-images-please/app/tests.py
```python
import unittest
from main import app
from PIL import Image
from io import BytesIO
class AppTestCase(unittest.TestCase):
def setUp(self):
app.config['TESTING'] = True
self.app = app.test_client()
@staticmethod
def _open_image(image_data):
strio = BytesIO()
strio.write(image_data)
strio.seek(0)
return Image.open(strio)
# Routes
def testIndex(self):
with self.app.get('/') as r:
self.assertEqual(r.status_code, 200)
def test404(self):
with self.app.get('/this-does-not-exist-bitch') as r:
self.assertEqual(r.status_code, 404)
def testHeaders(self):
with self.app.get('/') as r:
headers = r.headers
self.assertEqual(headers['X-UA-Compatible'], 'IE=Edge,chrome=1')
self.assertEqual(headers['Cache-Control'], 'public,max-age=36000')
def testFavicon(self):
with self.app.get('/favicon.ico') as r:
self.assertEqual(r.status_code, 200)
def testRobotsTxt(self):
with self.app.get('/robots.txt') as r:
self.assertEqual(r.status_code, 200)
self.assertEqual(r.mimetype, 'text/plain')
def testHumansTxt(self):
with self.app.get('/humans.txt') as r:
self.assertEqual(r.status_code, 200)
self.assertEqual(r.mimetype, 'text/plain')
def testTrailingSlash(self):
# redirected to /100/
with self.app.get('/100') as r:
self.assertEqual(r.status_code, 308)
def testPlaceholder1(self):
with self.app.get('/300/') as r:
self.assertEqual(r.status_code, 200)
self.assertEqual(r.mimetype, 'image/png')
img = self._open_image(r.data)
width, height = img.size
self.assertEqual(width, 300)
self.assertEqual(height, 300)
with self.app.get('/5000/') as r:
self.assertEqual(r.status_code, 404)
def testPlaceholder2(self):
with self.app.get('/200x100/') as r:
self.assertEqual(r.status_code, 200)
self.assertEqual(r.mimetype, 'image/png')
img = self._open_image(r.data)
width, height = img.size
self.assertEqual(width, 200)
self.assertEqual(height, 100)
with self.app.get('/4005x300/') as r:
self.assertEqual(r.status_code, 404)
with self.app.get('/200x4050/') as r:
self.assertEqual(r.status_code, 404)
def testPlaceholder3(self):
with self.app.get('/200x100/CCCCCC/') as r:
self.assertEqual(r.status_code, 200)
self.assertEqual(r.mimetype, 'image/png')
img = self._open_image(r.data)
width, height = img.size
self.assertEqual(width, 200)
self.assertEqual(height, 100)
with self.app.get('/200x100/CCCCCC,50/') as r:
self.assertEqual(r.status_code, 200)
self.assertEqual(r.mimetype, 'image/png')
img = self._open_image(r.data)
width, height = img.size
self.assertEqual(width, 200)
self.assertEqual(height, 100)
with self.app.get('/200x100/prout/') as r:
self.assertEqual(r.status_code, 404)
with self.app.get('/200x100/CCCCCC,5123/') as r:
self.assertEqual(r.status_code, 404)
def testPlaceholder4(self):
with self.app.get('/200x100/eee/000/') as r:
self.assertEqual(r.status_code, 200)
self.assertEqual(r.mimetype, 'image/png')
img = self._open_image(r.data)
width, height = img.size
self.assertEqual(width, 200)
self.assertEqual(height, 100)
with self.app.get('/200x100/eee,10/000/') as r:
self.assertEqual(r.status_code, 200)
self.assertEqual(r.mimetype, 'image/png')
img = self._open_image(r.data)
width, height = img.size
self.assertEqual(width, 200)
self.assertEqual(height, 100)
with self.app.get('/200x100/eee/000,25/') as r:
self.assertEqual(r.status_code, 200)
self.assertEqual(r.mimetype, 'image/png')
img = self._open_image(r.data)
width, height = img.size
self.assertEqual(width, 200)
self.assertEqual(height, 100)
with self.app.get('/200x100/eee,15/000,15/') as r:
self.assertEqual(r.status_code, 200)
self.assertEqual(r.mimetype, 'image/png')
img = self._open_image(r.data)
width, height = img.size
self.assertEqual(width, 200)
self.assertEqual(height, 100)
with self.app.get('/200x100/fff/ee/') as r:
self.assertEqual(r.status_code, 404)
with self.app.get('/200x100/eee,25555/000/') as r:
self.assertEqual(r.status_code, 404)
with self.app.get('/200x100/eee/000,b/') as r:
self.assertEqual(r.status_code, 404)
with self.app.get('/200x100/eee,458/000,2555/') as r:
self.assertEqual(r.status_code, 404)
def testRetina(self):
with self.app.get('/200x100/eee/000/?retina=1') as r:
self.assertEqual(r.status_code, 200)
self.assertEqual(r.mimetype, 'image/png')
img = self._open_image(r.data)
width, height = img.size
self.assertEqual(width, 400)
self.assertEqual(height, 200)
with self.app.get('/200x100/eee,10/000,10/?retina=1') as r:
self.assertEqual(r.status_code, 200)
self.assertEqual(r.mimetype, 'image/png')
img = self._open_image(r.data)
width, height = img.size
self.assertEqual(width, 400)
self.assertEqual(height, 200)
def testFontsize(self):
with self.app.get('/200x100/eee/000/?font_size=1') as r:
self.assertEqual(r.status_code, 200)
self.assertEqual(r.mimetype, 'image/png')
img = self._open_image(r.data)
width, height = img.size
self.assertEqual(width, 200)
self.assertEqual(height, 100)
# Make it work with wrong value (ie. not crash)
with self.app.get('/200x100/eee/000/?font_size=0') as r:
self.assertEqual(r.status_code, 200)
self.assertEqual(r.mimetype, 'image/png')
img = self._open_image(r.data)
width, height = img.size
self.assertEqual(width, 200)
self.assertEqual(height, 100)
with self.app.get('/200x100/eee/000/?font_size=-1') as r:
self.assertEqual(r.status_code, 200)
self.assertEqual(r.mimetype, 'image/png')
img = self._open_image(r.data)
width, height = img.size
self.assertEqual(width, 200)
self.assertEqual(height, 100)
def testCORSHeaders(self):
with self.app.get('/200x100/') as r:
self.assertEqual(r.headers['Access-Control-Allow-Origin'], '*')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JordyMoos/raspberry-pi-pokemon-catcher",
"score": 3
} |
#### File: JordyMoos/raspberry-pi-pokemon-catcher/main.py
```python
from picamera.array import PiRGBArray
from picamera import PiCamera
import numpy as np
import cv2
import RPi.GPIO as GPIO
import time
# Config
PIN_PAIR = 27
PIN_SERVO_PAIR = 12
PIN_SERVO_POKEBALL = 18
REPAIR_IDLE_TIME = 60 * 7
REPAIR_DELAY_TIME = 30
MINIMUM_PIXELS = 10000
RESOLUTION = (640, 480)
def setup_servo(pin):
GPIO.setup(pin, GPIO.OUT)
servo = GPIO.PWM(pin, 50)
servo.start(0.1)
time.sleep(0.2)
servo.ChangeDutyCycle(0)
return servo
def trigger_servo(servo):
# Turn servo on
servo.ChangeDutyCycle(0.1)
time.sleep(0.1)
# Activate the button
servo.ChangeDutyCycle(3.5)
time.sleep(0.2)
# Go back to the off position
servo.ChangeDutyCycle(0.1)
time.sleep(0.1)
# Turn servo off
servo.ChangeDutyCycle(0)
# GPIO Setup
GPIO.setwarnings(False) # Do not tell anyone
GPIO.setmode(GPIO.BCM)
# Servo's
pairServo = setup_servo(PIN_SERVO_PAIR)
pokeballServo = setup_servo(PIN_SERVO_POKEBALL)
# Pair button
GPIO.setup(PIN_PAIR, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Camera
camera = PiCamera()
camera.resolution = RESOLUTION
camera.framerate = 6
rawCapture = PiRGBArray(camera, size=RESOLUTION)
time.sleep(1) # Camera needs some time for itself
# Image transformation
blueLower = np.array([10, 70, 70])
blueUpper = np.array([30, 255, 255])
greenLower = np.array([35, 70, 70])
greenUpper = np.array([70, 255, 255])
lastInteraction = time.time() - 1000000
for rgbFrame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
print("Quit")
break
hsvFrame = cv2.cvtColor(rgbFrame.array, cv2.COLOR_RGB2HSV)
# Pokestop
blueMask = cv2.inRange(hsvFrame, blueLower, blueUpper)
blueCount = cv2.countNonZero(blueMask)
# Pokemon
greenMask = cv2.inRange(hsvFrame, greenLower, greenUpper)
greenCount = cv2.countNonZero(greenMask)
# res = cv2.bitwise_and(rgbFrame.array, rgbFrame.array, mask = blueMask)
# cv2.imshow('rgbFrame', rgbFrame.array)
# cv2.imshow('blueMask', blueMask)
# cv2.imshow('greenMask', greenMask)
# cv2.imshow('res', res)
rawCapture.truncate(0)
if not GPIO.input(PIN_PAIR):
print("Button Pressed")
trigger_servo(pairServo)
trigger_servo(pokeballServo)
time.sleep(1)
elif blueCount > MINIMUM_PIXELS and blueCount > greenCount:
lastInteraction = time.time()
time.sleep(2)
elif greenCount > MINIMUM_PIXELS:
lastInteraction = time.time()
trigger_servo(pokeballServo)
# Sleep so we do not keep hitting the button
time.sleep(3)
elif lastInteraction < time.time() - REPAIR_IDLE_TIME:
trigger_servo(pairServo)
trigger_servo(pokeballServo)
lastInteraction = time.time() - REPAIR_IDLE_TIME - REPAIR_DELAY_TIME
time.sleep(5)
pokeballServo.stop()
pairServo.stop()
GPIO.cleanup()
cv2.destroyAllWindows()
``` |
{
"source": "jordynmackool/sdl_java_suite",
"score": 3
} |
#### File: generator/transformers/common_producer.py
```python
import logging
import re
from abc import ABC
from collections import namedtuple, OrderedDict
from model.array import Array
from model.enum import Enum
from model.struct import Struct
class InterfaceProducerCommon(ABC):
"""
Common transformation
"""
version = '1.0.0'
def __init__(self, container_name, enums_package, structs_package, package_name,
enum_names=(), struct_names=(), key_words=()):
self.logger = logging.getLogger('Generator.InterfaceProducerCommon')
self.container_name = container_name
self.enum_names = enum_names
self.struct_names = struct_names
self.key_words = key_words
self.enums_package = enums_package
self.structs_package = structs_package
self.package_name = package_name
self._params = namedtuple('params', 'deprecated description key last mandatory origin return_type since title '
'param_doc name')
@property
def get_version(self):
return self.version
@property
def params(self):
"""
:return: namedtuple params(name='', origin='')
"""
return self._params
@staticmethod
def key(param: str):
"""
Convert a camelCase param string to uppercase, inserting underscores
:param param: camel case string
:return: string in uppercase with underscores
"""
if re.match(r'^[A-Z_\d]+$', param):
return param
else:
result = re.sub(r'([a-z]|[A-Z]{2,})([A-Z]|\d$)', r'\1_\2', param).upper()
result = re.sub('IDPARAM', 'ID_PARAM', result)
return result
@staticmethod
def ending_cutter(n: str):
"""
If the string contains lowercase letters (i.e., is not all-uppercase) and ends with 'ID', delete the trailing 'ID'
:param n: string to evaluate
:return: if match cut string else original string
"""
if re.match(r'^\w+[a-z]+([A-Z]{2,})?ID$', n):
return n[:-2]
else:
return n
@staticmethod
def extract_description(d):
"""
Extract description
:param d: list with description
:return: evaluated string
"""
return re.sub(r'(\s{2,}|\n)', ' ', ''.join(d)).strip() if d else ''
@staticmethod
def extract_values(param):
p = OrderedDict()
if hasattr(param.param_type, 'min_size'):
p['array_min_size'] = param.param_type.min_size
if hasattr(param.param_type, 'max_size'):
p['array_max_size'] = param.param_type.max_size
if hasattr(param, 'default_value'):
if hasattr(param.default_value, 'name'):
p['default_value'] = param.default_value.name
else:
p['default_value'] = param.default_value
elif hasattr(param.param_type, 'default_value'):
if hasattr(param.param_type.default_value, 'name'):
p['default_value'] = param.param_type.default_value.name
else:
p['default_value'] = param.param_type.default_value
if hasattr(param.param_type, 'min_value'):
p['num_min_value'] = param.param_type.min_value
elif hasattr(param.param_type, 'element_type') and hasattr(param.param_type.element_type, 'min_value'):
p['num_min_value'] = param.param_type.element_type.min_value
if hasattr(param.param_type, 'max_value'):
p['num_max_value'] = param.param_type.max_value
elif hasattr(param.param_type, 'element_type') and hasattr(param.param_type.element_type, 'max_value'):
p['num_max_value'] = param.param_type.element_type.max_value
if hasattr(param.param_type, 'min_length'):
p['string_min_length'] = param.param_type.min_length
elif hasattr(param.param_type, 'element_type') and hasattr(param.param_type.element_type, 'min_length'):
p['string_min_length'] = param.param_type.element_type.min_length
if hasattr(param.param_type, 'max_length'):
p['string_max_length'] = param.param_type.max_length
elif hasattr(param.param_type, 'element_type') and hasattr(param.param_type.element_type, 'max_length'):
p['string_max_length'] = param.param_type.element_type.max_length
# Filter None values
filtered_values = {k: v for k, v in p.items() if v is not None}
return filtered_values
@staticmethod
def replace_sync(name):
"""
:param name: string with item name
:return: string with replaced 'sync' to 'Sdl'
"""
if name:
return re.sub(r'^([sS])ync(.+)$', r'\1dl\2', name)
return name
def replace_keywords(self, name: str = '') -> str:
"""
if :param name in self.key_words, :return: name += 'Param'
:param name: string with item name
"""
if any(map(lambda k: re.search(r'^(get|set|key_)?{}$'.format(name.casefold()), k), self.key_words)):
origin = name
if name.isupper():
name += '_PARAM'
else:
name += 'Param'
self.logger.debug('Replacing %s with %s', origin, name)
return self.replace_sync(name)
def extract_type(self, param):
"""
Evaluate and extract type
:param param: sub-element Param of element from initial Model
:return: string with sub-element type
"""
def evaluate(t1):
if isinstance(t1, (Struct, Enum)):
return t1.name
else:
return type(t1).__name__
if isinstance(param.param_type, Array):
return 'List<{}>'.format(evaluate(param.param_type.element_type))
else:
return evaluate(param.param_type)
``` |
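A few quick checks of the naming helpers above; the input names are arbitrary RPC-style identifiers chosen only to illustrate the transformations, not taken from an actual spec.
```python
# Arbitrary example names, shown only to illustrate the conversions.
print(InterfaceProducerCommon.key('ambientLightStatus'))        # AMBIENT_LIGHT_STATUS
print(InterfaceProducerCommon.key('ALREADY_UPPER'))             # returned unchanged
print(InterfaceProducerCommon.ending_cutter('correlationID'))   # correlation
print(InterfaceProducerCommon.replace_sync('SyncMsgVersion'))   # SdlMsgVersion
```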
{
"source": "jordyn/onefuzz",
"score": 2
} |
#### File: __app__/proxy/__init__.py
```python
from typing import Optional
import azure.functions as func
from onefuzztypes.enums import ErrorCode, VmState
from onefuzztypes.models import Error
from onefuzztypes.requests import ProxyCreate, ProxyDelete, ProxyGet, ProxyReset
from onefuzztypes.responses import BoolResult, ProxyGetResult, ProxyInfo, ProxyList
from ..onefuzzlib.endpoint_authorization import call_if_user
from ..onefuzzlib.proxy import Proxy
from ..onefuzzlib.proxy_forward import ProxyForward
from ..onefuzzlib.request import not_ok, ok, parse_request
from ..onefuzzlib.workers.scalesets import Scaleset
def get_result(proxy_forward: ProxyForward, proxy: Optional[Proxy]) -> ProxyGetResult:
forward = proxy_forward.to_forward()
if (
proxy is None
or proxy.state not in [VmState.running, VmState.extensions_launch]
or proxy.heartbeat is None
or forward not in proxy.heartbeat.forwards
):
return ProxyGetResult(forward=forward)
return ProxyGetResult(ip=proxy.ip, forward=forward)
def get(req: func.HttpRequest) -> func.HttpResponse:
request = parse_request(ProxyGet, req)
if isinstance(request, Error):
return not_ok(request, context="ProxyGet")
if (
request.scaleset_id is not None
and request.machine_id is not None
and request.dst_port is not None
):
scaleset = Scaleset.get_by_id(request.scaleset_id)
if isinstance(scaleset, Error):
return not_ok(scaleset, context="ProxyGet")
proxy = Proxy.get_or_create(scaleset.region)
forwards = ProxyForward.search_forward(
scaleset_id=request.scaleset_id,
machine_id=request.machine_id,
dst_port=request.dst_port,
)
if not forwards:
return not_ok(
Error(
code=ErrorCode.INVALID_REQUEST,
errors=["no forwards for scaleset and node"],
),
context="debug_proxy get",
)
return ok(get_result(forwards[0], proxy))
else:
proxies = [
ProxyInfo(region=x.region, proxy_id=x.proxy_id, state=x.state)
for x in Proxy.search()
]
return ok(ProxyList(proxies=proxies))
def post(req: func.HttpRequest) -> func.HttpResponse:
request = parse_request(ProxyCreate, req)
if isinstance(request, Error):
return not_ok(request, context="ProxyCreate")
scaleset = Scaleset.get_by_id(request.scaleset_id)
if isinstance(scaleset, Error):
return not_ok(scaleset, context="debug_proxy create")
forward = ProxyForward.update_or_create(
region=scaleset.region,
scaleset_id=scaleset.scaleset_id,
machine_id=request.machine_id,
dst_port=request.dst_port,
duration=request.duration,
)
if isinstance(forward, Error):
return not_ok(forward, context="debug_proxy create")
proxy = Proxy.get_or_create(scaleset.region)
if proxy:
forward.proxy_id = proxy.proxy_id
forward.save()
proxy.save_proxy_config()
return ok(get_result(forward, proxy))
def patch(req: func.HttpRequest) -> func.HttpResponse:
request = parse_request(ProxyReset, req)
if isinstance(request, Error):
return not_ok(request, context="ProxyReset")
proxy = Proxy.get(request.region)
if proxy is not None:
proxy.state = VmState.stopping
proxy.save()
return ok(BoolResult(result=True))
return ok(BoolResult(result=False))
def delete(req: func.HttpRequest) -> func.HttpResponse:
request = parse_request(ProxyDelete, req)
if isinstance(request, Error):
return not_ok(request, context="debug_proxy delete")
regions = ProxyForward.remove_forward(
scaleset_id=request.scaleset_id,
machine_id=request.machine_id,
dst_port=request.dst_port,
)
for region in regions:
proxy = Proxy.get_or_create(region)
if proxy:
proxy.save_proxy_config()
return ok(BoolResult(result=True))
def main(req: func.HttpRequest) -> func.HttpResponse:
methods = {"GET": get, "POST": post, "DELETE": delete, "PATCH": patch}
method = methods[req.method]
result = call_if_user(req, method)
return result
```
#### File: utils/add-corpus-storage-accounts/add-corpus-storage-account.py
```python
import argparse
import json
import uuid
from azure.common.client_factory import get_client_from_cli_profile
from azure.mgmt.eventgrid import EventGridManagementClient
from azure.mgmt.eventgrid.models import EventSubscription
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.storage.models import (
AccessTier,
Kind,
Sku,
SkuName,
StorageAccountCreateParameters,
)
# This was generated randomly and should be preserved moving forwards
STORAGE_GUID_NAMESPACE = uuid.UUID("f7eb528c-d849-4b81-9046-e7036f6203df")
def get_base_event(
client: EventGridManagementClient, resource_group: str, location: str
) -> EventSubscription:
for entry in client.event_subscriptions.list_regional_by_resource_group(
resource_group, location
):
if (
entry.name == "onefuzz1"
and entry.type == "Microsoft.EventGrid/eventSubscriptions"
and entry.event_delivery_schema == "EventGridSchema"
and entry.destination.endpoint_type == "StorageQueue"
and entry.destination.queue_name == "file-changes"
):
return entry
raise Exception("unable to find base eventgrid subscription")
def add_event_grid(src_account_id: str, resource_group: str, location: str) -> None:
client = get_client_from_cli_profile(EventGridManagementClient)
base = get_base_event(client, resource_group, location)
event_subscription_info = EventSubscription(
destination=base.destination,
filter=base.filter,
retry_policy=base.retry_policy,
)
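# Added note: uuid5 is deterministic, so re-running this script for the same
# storage account id always produces the same "corpus<hex>" subscription name.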
topic_id = uuid.uuid5(STORAGE_GUID_NAMESPACE, src_account_id).hex
result = client.event_subscriptions.create_or_update(
src_account_id, "corpus" + topic_id, event_subscription_info
).result()
if result.provisioning_state != "Succeeded":
raise Exception(
"eventgrid subscription failed: %s"
% json.dumps(result.as_dict(), indent=4, sort_keys=True),
)
def create_storage(resource_group: str, account_name: str, location: str) -> str:
params = StorageAccountCreateParameters(
sku=Sku(name=SkuName.premium_lrs),
kind=Kind.block_blob_storage,
location=location,
tags={"storage_type": "corpus"},
access_tier=AccessTier.hot,
allow_blob_public_access=False,
minimum_tls_version="TLS1_2",
)
client = get_client_from_cli_profile(StorageManagementClient)
account = client.storage_accounts.begin_create(
resource_group, account_name, params
).result()
if account.provisioning_state != "Succeeded":
raise Exception(
"storage account creation failed: %s",
json.dumps(account.as_dict(), indent=4, sort_keys=True),
)
return account.id
def create(resource_group: str, account_name: str, location: str) -> None:
new_account_id = create_storage(resource_group, account_name, location)
add_event_grid(new_account_id, resource_group, location)
def main():
formatter = argparse.ArgumentDefaultsHelpFormatter
parser = argparse.ArgumentParser(formatter_class=formatter)
parser.add_argument("resource_group")
parser.add_argument("account_name")
parser.add_argument("location")
args = parser.parse_args()
create(args.resource_group, args.account_name, args.location)
if __name__ == "__main__":
main()
``` |
{
"source": "jordyril/PythonLaTeX",
"score": 3
} |
#### File: PythonLaTeX/pythonlatex/figure.py
```python
from pylatex import Figure as FigureOriginal
from pylatex import Package, NoEscape
from .saving import LatexSaving
from .float import FloatAdditions
import matplotlib.pyplot as plt
class Figure(FloatAdditions, LatexSaving, FigureOriginal):
"""A class that represents a Figure environment with modified methods compared to parent"""
def __init__(
self,
*args,
folders_path="Latex/",
outer_folder_name="Figures",
inner_folder_name="Graphics",
position=None,
**kwargs,
):
LatexSaving.__init__(
self,
outer_folder=outer_folder_name,
inner_folder=inner_folder_name,
folders_path=folders_path,
)
FigureOriginal.__init__(self, *args, position=position, **kwargs)
self._label = "fig"
def save_plot(self, filename, *args, extension="png", **kwargs):
"""Saves the plot in the 'inner' folder
Args
----
filename: str
Name of the plot for saving
args:
Arguments passed to plt.savefig for displaying the plot.
extension : str
extension of image file indicating figure file type
kwargs:
Keyword arguments passed to plt.savefig for displaying the plot. In
case these contain ``width`` or ``placement``, they will be used
for the same purpose as in the add_image command. Namely the width
and placement of the generated plot in the LaTeX document.
Returns
-------
str
The relative path/name with which the plot has been saved.
(original package stored figure in temp directory, here the naming is added
and it is being saved in a known directory)
"""
name = f"{filename}.{extension}"
plt.savefig(self._absolute_inner_path(name), *args, **kwargs)
return self._relative_inner_path(name)
def add_plot(
self,
filename,
*args,
caption=None,
description=None,
above=True,
label=None,
zref=False,
extension="png",
**kwargs,
):
"""Add the current Matplotlib plot to the figure.
The plot that gets added is the one that would normally be shown when
using ``plt.show()``. Replaced feature of random temp saving with dedicated filename saving
compared to original package
Args
----
filename: str
Name of the figure for saving
args:
Arguments passed to plt.savefig for displaying the plot.
caption: : str
Optional caption to be added to the figure
above: bool
In case caption is given, position of caption can be above or below the figure
extension : str
Extension of image file indicating figure file type
kwargs:
Keyword arguments passed to plt.savefig for displaying the plot. In
case these contain ``width`` or ``placement``, they will be used
for the same purpose as in the add_image command. Namely the width
and placement of the generated plot in the LaTeX document.
"""
label, caption = self._check_label_caption(label, caption, filename)
add_image_kwargs = {}
for key in ("width", "placement"):
if key in kwargs:
add_image_kwargs[key] = kwargs.pop(key)
path = self.save_plot(filename, *args, extension=extension, **kwargs)
self.add_image(path, **add_image_kwargs)
if caption is not None:
self.add_caption_description_label(
caption, label, above, description, zref)
def reset(self, show=True, close=False, *args, **kwargs):
"""Resets the Figure instance, this way the same set-up
can be used for following figures without having to create
a Figure instance with the same path, folder and extension option every time
Args
----
close: bool
if set to True, plt.close() will be called. Default is False
args:
Arguments passed to plt.close.
kwargs:
Keyword arguments passed to plt.close for displaying the plot.
"""
if show:
plt.show(*args, **kwargs)
if close:
plt.close()
self.data = []
def create_input_latex(
self,
filename,
*args,
add_plot=True,
caption=None,
description=None,
above=True,
label=None,
zref=False,
**kwargs,
):
"""Creates separate input tex-file that can be used to input Figure
Args
----
filename: str
Name of the plot for saving
args:
Arguments passed to plt.savefig for displaying the plot.
add_plot: bool
In case of normal figure this is True, in case of subfigure, no new figure needs
to be added so this option should be set to False
caption: str
Optional caption to be added to the figure
above: bool
In case caption is given, position of caption can be above or below the figure
extension : str
Extension of image file indicating figure file type
kwargs:
Keyword arguments passed to plt.savefig for displaying the plot. In
case these contain ``width`` or ``placement``, they will be used
for the same purpose as in the add_image command. Namely the width
and placement of the generated plot in the LaTeX document.
"""
label, caption = self._check_label_caption(label, caption, filename)
if add_plot:
self.add_plot(
filename, *args, caption=caption, above=above, label=label, description=description, zref=zref, ** kwargs
)
else:
self.add_caption_description_label(
caption, label, above, description, zref)
# creating + opening the final input file in the 'outer' folder
with open(f"{self._absolute_outer_path(filename)}.tex", "w+") as tex_file:
tex_file.write(self.dumps())
latex_input = self._print_latex_input(filename)
self._write_input_to_txt_file(latex_input)
return NoEscape(latex_input)
def _check_label_caption(self, label, caption, filename):
if label is None:
label = filename
# create automatic caption
if caption is None:
caption = filename
# Allow for no caption
if caption is False:
caption = None
return label, caption
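# Minimal usage sketch (added for illustration; the data and file name are
# assumptions, not part of the original file):
# fig = Figure()
# plt.plot([1, 2, 3], [1, 4, 9])
# tex_input = fig.create_input_latex("squares", caption="Squared values")
# # tex_input is an \input{...} command that can be appended to a pylatex Document.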
class SubFigure(Figure):
"""A class that represents a subfigure from the subcaption package.
Methods are almost exact copy of original pylatex package"""
packages = [Package("subcaption")]
#: By default a subfigure is not on its own paragraph since that looks
#: weird inside another figure.
separate_paragraph = False
_repr_attributes_mapping = {"width": "arguments"}
def __init__(self, width=NoEscape(r"0.49\linewidth"), **kwargs):
"""
Args
----
width: str
Width of the subfigure itself. It needs a width because it is
inside another figure.
"""
super().__init__(arguments=width, **kwargs)
def add_image(self, filename, *, width=NoEscape(r"\linewidth"), placement=None):
"""Add an image to the subfigure.
Args
----
filename: str
Filename of the image.
width: str
Width of the image in LaTeX terms.
placement: str
Placement of the figure, `None` is also accepted.
"""
super().add_image(filename, width=width, placement=placement)
```
#### File: PythonLaTeX/pythonlatex/float.py
```python
from pylatex import Command, NoEscape, Package
from pylatex.base_classes import Float
class FloatAdditions(Float):
def __init__(self):
self._label = ""
def add_caption_description(self, caption, above=True, description=None):
"""Add a caption to the float.
Args
----
caption: str
The text of the caption.
above: bool
Position of caption
description: str
The text for an accompanying description below the caption
"""
if above:
if description:
self.insert(0, Command('caption*', description))
self.insert(0, Command("caption", caption))
else:
self.append(Command("caption", caption))
if description:
self.append(Command('caption*', description))
def add_label(self, label, above=True, zref=False):
if zref:
self.packages.add(Package("zref-user"))
lbl = 'zlabel'
else:
lbl = 'label'
if above:
self.insert(0, Command(
lbl, NoEscape(f"{self._label}:{label}")))
else:
self.append(Command(lbl, NoEscape(f"{self._label}:{label}")))
def add_caption_description_label(self, caption, label, above=True, description=None, zref=False):
if above:
# note that we do label first here, so in final label is after caption
self.add_label(NoEscape(label), above, zref)
self.add_caption_description(caption, above, description)
else:
self.add_caption_description(caption, above, description)
self.add_label(NoEscape(label), above, zref)
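# Ordering note (added comment): with above=True the label is inserted before the
# caption via insert(0, ...), so after both calls the rendered order is caption
# (and optional description) followed by \label or \zlabel, as LaTeX
# cross-referencing expects.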
```
#### File: PythonLaTeX/pythonlatex/table.py
```python
from pylatex import Table as TableOriginal
from pylatex import Tabular as TabularOriginal
from pylatex import Package, NoEscape, UnsafeCommand, Command
# from pylatex.base_classes import Arguments
from pylatex.utils import fix_filename
from .saving import LatexSaving
from .float import FloatAdditions
import pandas as pd
class Table(FloatAdditions, LatexSaving, TableOriginal):
"""A class that represents a Table environment with modified methods
compared to parent TableOriginal
"""
def __init__(
self,
*args,
folders_path="Latex/",
outer_folder_name="Tables",
inner_folder_name="Tabulars",
position=None,
**kwargs,
):
LatexSaving.__init__(
self,
outer_folder=outer_folder_name,
inner_folder=inner_folder_name,
folders_path=folders_path,
)
TableOriginal.__init__(self, *args, position=position, **kwargs)
self._label = "tbl"
def _set_tabular(self, tabular, *args, **kwargs):
"""
Store the tabular body as a LaTeX string: accepts a pylatex Tabular, a raw LaTeX string, or a pandas DataFrame (converted via DataFrame.to_latex).
"""
if isinstance(tabular, TabularOriginal):
self.tabular = tabular.dumps()
elif isinstance(tabular, str):
self.tabular = tabular
elif isinstance(tabular, pd.DataFrame):
self.tabular = tabular.to_latex(*args, **kwargs)
def _save_tabular(self, filename):
try:
self.tabular
except AttributeError:
raise AttributeError("No tabular set to save")
with open(self._absolute_inner_path(f"{filename}.tex"), "w+") as file:
file.write(self.tabular)
return self._relative_inner_path(filename)
def add_table(
self,
tabular,
filename,
*args,
caption=None,
description=None,
above=True,
label=None,
zref=False,
placement=NoEscape(r"\centering"),
adjustbox=True,
adjustbox_arguments=NoEscape(
r"max totalsize={\textwidth}{0.95\textheight}"),
**kwargs,
):
"""Add an image to the figure.
Args
----
filename: str
Filename of the image.
placement: str
Placement of the table, `None` is also accepted.
"""
if label is None:
label = filename
self._set_tabular(tabular, *args, **kwargs)
path = self._save_tabular(filename)
if placement is not None:
self.append(placement)
tabular_input = NoEscape(StandAloneTabular(
filename=fix_filename(path)).dumps())
if adjustbox:
tabular_input = Command(
command="adjustbox",
arguments=adjustbox_arguments,
extra_arguments=tabular_input,
packages=[Package("adjustbox")],
)
self.append(tabular_input)
if caption is not None:
self.add_caption_description_label(
caption, label, above, description, zref)
def reset(self):
self.data = []
self.tabular = None
def create_input_latex(
self,
tabular,
filename,
*args,
add_table=True,
caption=None,
description=None,
above=True,
label=None,
zref=False,
placement=NoEscape(r"\centering"),
adjustbox=True,
adjustbox_arguments=NoEscape(
r"max totalsize={\textwidth}{0.95\textheight}"),
reset=True,
**kwargs,
):
"""Creates separate input tex-file that can be used to input tabular within table environment
Args
----
filename: str
Name of the table for saving
tabular: str, pandas.DataFrame, Tabular
tabular that will be saved and created into a proper table
args:
Arguments passed to pd.df.to_latex for displaying the tabular.
add_table: bool
In case of normal table this is True, in case of subtable, no new table needs
to be added so this option should be set to False
caption: str
Optional caption to be added to the table
above: bool
In case caption is given, position of caption can be above or below the table
kwargs:
Keyword arguments passed to pd.DataFrame.to_latex when a pandas DataFrame is given as tabular.
"""
# create automatic caption
if caption is None:
caption = filename
# Allow for no caption
if caption is False:
caption = None
if add_table:
self.add_table(
tabular,
filename,
*args,
caption=caption,
description=description,
above=above,
label=label,
placement=placement,
adjustbox=adjustbox,
adjustbox_arguments=adjustbox_arguments,
**kwargs,
)
else:
self.add_caption_description_label(
caption, label, above, description, zref)
# creating + opening the file
with open(self._absolute_outer_path(f"{filename}.tex"), "w") as tex_file:
tex_file.write(self.dumps())
latex_input = self._print_latex_input(filename)
self._write_input_to_txt_file(latex_input)
if reset:
self.reset()
return NoEscape(latex_input)
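# Minimal usage sketch (added for illustration; the DataFrame and file name are
# assumptions, not part of the original file):
# df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
# table = Table()
# tex_input = table.create_input_latex(df, "example_table", caption="Example")
# # The tabular is written to the inner folder, the wrapping table environment to
# # the outer folder, and an \input{...} command is returned.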
class SubTable(Table):
"""
"""
def __init__(self, width=NoEscape(r"0.49\linewidth"), **kwargs):
super().__init__(arguments=width, **kwargs)
class StandAloneTabular(UnsafeCommand):
r"""A class representing a stand alone tabular. (\input{tabularfile})"""
_latex_name = "input"
_repr_attributes_mapping = {"filename": "arguments"}
def __init__(self, filename):
r"""
Args
----
filename: str
The path to the tabular file
"""
arguments = [NoEscape(filename)]
super().__init__(command=self._latex_name, arguments=arguments)
```
#### File: PythonLaTeX/tests/test_figure.py
```python
import numpy as np
import matplotlib.pyplot as plt
from pythonlatex import Figure
from pylatex import Document, NoEscape
import unittest
import os
import shutil
a = 0.0
b = 2.0
n = 50
x = np.array(range(1, n + 1))
y = a + b * x
try:
shutil.rmtree("Latex")
except FileNotFoundError:
pass
class TestFigures(unittest.TestCase):
def test_path(self):
path = "./Latex/test_path/"
fig = Figure(folders_path=path)
self.assertEqual(fig._folders_path, path)
def test_inner_folder(self):
folder = "Inner_test"
fig = Figure(inner_folder_name=folder)
self.assertEqual(fig._inner_folder_name, folder)
def test_outer_folder(self):
folder = "Outer_test"
fig = Figure(outer_folder_name=folder)
self.assertEqual(fig._outer_folder_name, folder)
def test_position(self):
position = "h"
fig = Figure(position=position)
self.assertEqual(fig.options, position)
def test_save_plot(self):
fig = Figure()
name = "test"
plt.figure()
plt.plot(x, y)
fig.save_plot(name)
# check if graph was saved
path = fig._absolute_inner_path(f"{name}.png")
self.assertTrue(os.path.isfile(path))
def test_add_plot(self):
fig = Figure()
name = "test"
plt.figure()
plt.plot(x, y)
fig.add_plot(name)
# check if correct latex output is produced
self.assertEqual(
fig.dumps(),
(
"\\begin{figure}%\n\\centering%\n\\includegraphics[width=0.8"
+ "\\textwidth]{Graphs/test.jpg}%\n\\end{figure}"
),
)
plt.close()
def test_reset(self):
fig = Figure()
# figure 1
for i in range(1, 3):
name = f"test{i}"
plt.figure()
plt.plot(x, y, label=i)
fig.add_plot(name)
# check if correct latex output is produced
self.assertEqual(
fig.dumps(),
(
f"\\begin{{figure}}%\n\\centering%\n\\includegraphics[width=0.8\\textwidth]"
+ f"{{Graphs/test{i}.jpg}}%\n\\end{{figure}}"
),
)
fig.reset(show=False, close=True)
def test_texinput(self):
fig = Figure()
name = "test_tex"
caption = "caption"
plt.figure()
plt.plot(x, y)
input_tex = fig.create_input_latex(name, caption=caption, above=False)
# create document for testing input statement
doc = Document()
doc.append(input_tex)
doc.preamble.append(NoEscape(r"\usepackage{graphicx}"))
doc.preamble.append(NoEscape(r"\usepackage{zref-user}"))
doc.generate_pdf("Latex/test_tex", clean_tex=False)
def test_label(self):
fig = Figure()
name = "test_label"
caption = "caption"
label = "label"
plt.figure()
plt.plot(x, y)
input_tex = fig.create_input_latex(
name, caption=caption, above=True, label=label
)
# create document for testing input statement
doc = Document()
doc.append(input_tex)
doc.preamble.append(NoEscape(r"\usepackage{graphicx}"))
doc.preamble.append(NoEscape(r"\usepackage{zref-user}"))
doc.generate_pdf("Latex/test_label", clean_tex=False)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jordyril/UZH_mensa_menu",
"score": 3
} |
#### File: jordyril/UZH_mensa_menu/day.py
```python
from mensa import Mensa
from util import underline
class Day(object):
def __init__(self, day, urls):
self.URLS = urls
self.day = day.lower()
self.mensas = []
self.__retrieve_menus()
def __str__(self):
return self.day.capitalize()
def __repr__(self):
return self.day.capitalize()
def __retrieve_menus(self):
for url in self.URLS:
self.mensas.append(Mensa(url, self.day))
@property
def summary(self):
summary = ""
for mensa in self.mensas:
summary += f"{underline(mensa.mensa.upper())}"
summary += mensa.summary
return summary
# def optimal(self, value, max_min):
# mensa_optimals = []
# test = Day("montag", URLS)
# test
# test.mensas[0].menu.meals[0].values["energy"]
# print(test.summary)
# test.URLS
# x = [111, 1234.3, 4.0]
# import numpy as numpy
# n
# max(x)
# import operator
# index, value = max(enumerate(x), key=operator.itemgetter(1))
# max(x)
# List.index(max(x))
``` |
{
"source": "jordytheboy/FishStock",
"score": 4
} |
#### File: FishStock/chess/piece.py
```python
import pygame
import copy
def chk_move(self, color, x, y, board):
"""
Checks if a move is valid, if the coordinate on the board is empty,
the move is valid, if the coordinate contains an enemy, the move is
valid, if the coordinate is out of bounds or contains a friend, the
move is invalid
"""
if x < 0 or x > 7 or y < 0 or y > 7:
# Out of bounds
return False
piece = board.board_[y][x]
if piece == None:
# Empty Space
return True
else:
if piece.color != color:
piece.attacked_by.add((self.x, self.y))
self.attacking.add((x, y))
# Enemy piece
return True
if piece.color == color:
# Friend
return False
def get_straight_moves(self,board):
"""
Generates all possible legal horizontal and vertical moves
Args:
Instance of chess board
Returns:
plegal_moves - set containing tuples (x,y) of coordinates of each valid move
"""
# Vertical moves
plegal_moves = set()
for i in(-1, 1):
possible_y = self.y
while(True):
possible_y += i
if chk_move(self, self.color, self.x, possible_y, board):
if (board.board_[possible_y][self.x] != None):
plegal_moves.add((self.x, possible_y))
break
else:
plegal_moves.add((self.x, possible_y))
else:
break
# Horizontal moves
for i in(-1, 1):
possible_x = self.x
while(True):
possible_x += i
if chk_move(self, self.color, possible_x, self.y, board):
if (board.board_[self.y][possible_x] != None): # If there is an enemy piece
plegal_moves.add((possible_x, self.y))
break
else: # If board is empty here
plegal_moves.add((possible_x, self.y))
else:
break
return plegal_moves
def get_diag_moves(self,board):
"""
Generates all possible legal diagonal moves
Args:
Instance of chess board
Returns:
plegal_moves - set containing tuples (x,y) of coordinates of each valid move
"""
plegal_moves = set()
for movement in [(-1, -1), (-1, 1), (1, 1), (1, -1)]:
possible_x = self.x
possible_y = self.y
while(True):
possible_x += movement[0]
possible_y += movement[1]
if chk_move(self, self.color, possible_x, possible_y, board):
if (board.board_[possible_y][possible_x] != None): # If there is an enemy piece
plegal_moves.add((possible_x, possible_y))
break
else: # If the board is empty here
plegal_moves.add((possible_x, possible_y))
else:
break
return plegal_moves
class Piece():
"""
Basic piece object, stores color and position
"""
def __init__(self,color,x,y):
self.x = x
self.y = y
self.color = color
self.hasmoved = False
self.attacking = set()
self.attacked_by = set()
self.legal_moves = set()
def generate_moves(self, board):
return set()
def generate_legal_moves(self, board):
"""
Generates the legal moveset of any given piece
Legal meaning not putting the King into check
Simulates the board for each possible pseudo-legal
move that can be made by every single piece
Very slow but it works
Returns:
legal_moves: A set of tuples contain the legal moves for a given piece
"""
self.legal_moves = set() # Reset the legal moves
pseudo_moves = self.generate_moves(board)
for move in pseudo_moves:
# Copy the current piece
copyx = copy.copy(self.x)
copyy = copy.copy(self.y)
testpiece = copy.deepcopy(self)
testpiece.x = move[0]
testpiece.y = move[1]
# Copy the board
testboard = copy.deepcopy(board)
# Remove the current piece from the board
testboard.board_[copyy][copyx] = None
# Move it to the test location
testboard.board_[move[1]][move[0]] = testpiece
board.bottom_king.check_status()
board.top_king.check_status()
testboard.bottom_king.check_status()
testboard.top_king.check_status()
# Reset the attacked_by and attacking sets,
# They need to reset each move
for row in testboard.board_:
for item in row:
if item is not None:
item.attacked_by = set()
item.attacking = set()
# Generate the pseudo-legal moves for the testboard pieces
# This entire method is probably inefficient, will need to optimize
# This is to check and see if the move results in the king being taken out of check
for row in testboard.board_:
for item in row:
if item is not None:
item.generate_moves(testboard)
# Now set the king status again
testboard.bottom_king.check_status()
testboard.top_king.check_status()
if testboard.top_king.color == 'w':
white_king = testboard.top_king
black_king = testboard.bottom_king
if testboard.top_king.color == 'b':
white_king = testboard.bottom_king
black_king = testboard.top_king
# Now check if that moves results in the king going into check
if self.color == 'w':
if (white_king.is_in_check == False):
self.legal_moves.add((testpiece.x, testpiece.y))
if self.color == 'b':
if (black_king.is_in_check == False):
self.legal_moves.add((testpiece.x, testpiece.y))
return self.legal_moves
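# Usage sketch (added comment; the Board class is defined elsewhere and the
# coordinates are assumptions): given a board instance,
# piece.generate_legal_moves(board) returns a set of (x, y) tuples, excluding
# any move that would leave the piece's own king in check.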
def move(self, board, x, y):
temp_piece = copy.copy(self)
temp_piece.x = x
temp_piece.y = y
board.board_[self.y][self.x] = None
board.board_[y][x] = temp_piece
temp_piece.hasmoved = True
class Pawn(Piece):
def __init__(self, color, x, y, h):
"""
h: direction to move, 0 if up, 1 if down
"""
super().__init__(color, x, y)
self.heading = h
if (color == 'w'):
self.sprite = 'resources\\wpawn.png'
else: self.sprite = 'resources\\bpawn.png'
def generate_moves(self, board):
"""
Generates the possible legal moves, not accounting for check
Args:
board: Instance of the chess board
Returns:
plegal_moves - set containing tuples (x,y) of coordinates of
each valid move
"""
plegal_moves = set()
direction = {0: -1, 1: 1}
col = self.color
if self.heading == 0:
possible_y = max(self.y + direction[self.heading], 0) # One square ahead of the pawn
if self.heading == 1:
possible_y = min(self.y + direction[self.heading], 7)
"""
Cannot use the chk_move method, as the pawn cannot capture piece in occupied
squares directly ahead of it
"""
if (possible_y >= 0 and possible_y <= 7):
piece = board.board_[possible_y][self.x]
if piece == None:
plegal_moves.add((self.x, possible_y))
if self.hasmoved == False: # If the pawn hasn't moved yet, check if it can move 2 spaces
double_y = possible_y + direction[self.heading]
piece = board.board_[double_y][self.x]
if (double_y >= 0 and double_y <= 7) and piece == None:
plegal_moves.add((self.x, double_y))
# Out of bounds or occupied, so check if the diagonally adjacent squares contain an enemy piece
if (self.x+1 <= 7): # Check if the piece you're looking at is off the board
enemy1 = board.board_[possible_y][self.x+1]
else: enemy1 = None
if (self.x-1 >= 0): # Check if the piece you're looking at is off the board
enemy2 = board.board_[possible_y][self.x-1]
else: enemy2 = None
# Check if the two spaces contain an enemy
if ((enemy1 is not None) and (enemy1.color != col)):
enemy1.attacked_by.add((self.x, self.y))
self.attacking.add((self.x+1, possible_y))
plegal_moves.add((self.x+1, possible_y))
if ((enemy2 is not None) and (enemy2.color != col)):
enemy2.attacked_by.add((self.x, self.y))
self.attacking.add((self.x-1, possible_y))
plegal_moves.add((self.x-1, possible_y))
return plegal_moves
class Rook(Piece):
def __init__(self, color, x, y):
super().__init__(color, x, y)
if (color == 'w'):
self.sprite = 'resources\\wrook.png'
else: self.sprite = 'resources\\brook.png'
def generate_moves(self, board):
return get_straight_moves(self, board)
class Bishop(Piece):
def __init__(self, color, x, y):
super().__init__(color, x, y)
if (color == 'w'):
self.sprite = 'resources\\wbishop.png'
else: self.sprite = 'resources\\bbishop.png'
def generate_moves(self, board):
return get_diag_moves(self,board)
class Knight(Piece):
def __init__(self, color, x, y):
super().__init__(color, x, y)
if (color == 'w'):
self.sprite = 'resources\\wknight.png'
else: self.sprite = 'resources\\bknight.png'
def generate_moves(self, board):
plegal_moves = set()
# Possible offset of the moves the knight can make from his starting position - starting(x,y) + (x, y)
possible_moves = [(-1, 2), (1, 2), (-1, -2), (1, -2), (2, -1), (2, 1), (-2, -1), (-2, 1)]
for move in possible_moves:
possible_x = self.x + move[0]
possible_y = self.y + move[1]
if chk_move(self, self.color, possible_x, possible_y, board):
plegal_moves.add((possible_x, possible_y))
if (board.board_[possible_y][possible_x] is not None) and (board.board_[possible_y][possible_x].color != self.color):
board.board_[possible_y][possible_x].attacked_by.add((self.x, self.y))
self.attacking.add((possible_x, possible_y))
return plegal_moves
class King(Piece):
def __init__(self, color, x, y):
super().__init__(color, x, y)
if (color == 'w'):
self.sprite = 'resources\\wking.png'
else: self.sprite = 'resources\\bking.png'
self.is_in_check = False
def generate_moves(self, board):
plegal_moves = set()
# Possible offset of the moves the king can make from his starting position
possible_moves = [(-1, -1), (-1, 1), (1, -1), (1, 1), (1, 0), (-1, 0), (0, -1), (0, 1)]
for move in possible_moves:
possible_x = self.x + move[0]
possible_y = self.y + move[1]
if chk_move(self, self.color, possible_x, possible_y, board):
plegal_moves.add((possible_x, possible_y))
if (board.board_[possible_y][possible_x] is not None) and (board.board_[possible_y][possible_x].color != self.color):
board.board_[possible_y][possible_x].attacked_by.add((self.x, self.y))
self.attacking.add((possible_x, possible_y))
return plegal_moves
def check_status(self):
if len(self.attacked_by):
self.is_in_check = True
else: self.is_in_check = False
class Queen(Piece):
def __init__(self, color, x, y):
super().__init__(color, x, y)
if (color == 'w'):
self.sprite = 'resources\\wqueen.png'
else: self.sprite = 'resources\\bqueen.png'
def generate_moves(self, board):
return get_diag_moves(self,board).union(get_straight_moves(self,board))
``` |
{
"source": "jordy-u/COVID-19-Dashboard-NL",
"score": 3
} |
#### File: COVID-19-Dashboard-NL/Backend/JSON_helper.py
```python
"""
_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/
Name: structure_for_date
Purpose: Take an SQL result set of (Gemeentecode, Aantal) pairs
and structure it into a target array in such a way that the JSON file can be generated
Expected input: Date as Datetime.date format, Target Array as array, Source as SQL list
Expected output: target array as array
Dependencies: none
"""
def structure_for_date(Date, Target, Source):
temp_data_set ={}
for Gemeentecode, Aantal in Source:
temp_data_set[str(Gemeentecode)] = Aantal
Target[str(Date)] = temp_data_set
return(Target)
"""
_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/
Name: save_JSON
Purpose: save a given structured array as JSON file to the drive
Expected input: Target Array as array, file name as string, possible alternative location as string
Expected output: file saved as /outputs/[filename].json
Dependencies: json library
"""
import json
def save_JSON(target, filename="output", location = "../outputs/"):
with open('{}{}.json'.format(location,filename), 'w') as outfile:
json.dump(target, outfile)
"""
_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/
Name: structure_array
Purpose: get an array of dates and get the corresponding data from the database,
Structure this data into an array for the json export
Expected input: table name as string, SQL query result as array, object for mysql helper to initialize the cursor
Expected output: structured array for JSON save.
Dependencies: mysql connect
"""
import mysql.connector
def structure_array(table_name, source, cnx):
json_output_data = {}
sellect_all_gemeentes_per_date = ("SELECT Gemeentecode, Aantal FROM `{}` WHERE Datum ='{}'")
search_for_datum_cursor = cnx.cursor()
for Datum in source:
search_for_datum_cursor.execute(sellect_all_gemeentes_per_date.format(table_name,Datum[0]))
json_output_data=structure_for_date(Datum[0],json_output_data,search_for_datum_cursor)
return json_output_data
"""
_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/
Name: get_date
Purpose: to get all unique dates from a database
Expected input: table name as string , object for mysql helper to initialize the cursor
Expected output: sql array
Dependencies: mysql connect
"""
def get_date(table_name, cnx):
select_all_datums_in_database = ("SELECT DISTINCT Datum FROM {} ORDER BY Datum ASC")
check_datum_cursor = cnx.cursor()
check_datum_cursor.execute(select_all_datums_in_database.format(table_name)) #select all Unique datums from the database
return check_datum_cursor.fetchall()
"""
_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/
Name: create_json
Purpose: produce a JSON file from a table name
Expected input: table name as string, JSON file name as string, object for mysql helper to initialize the cursor
Expected output: JSON file saved to the given location
Dependencies: mysql connect and json
"""
def create_json(table_name,filename ,cnx , location = '../outputs/'):
result = get_date(table_name, cnx)
json_output_data = structure_array(table_name,result, cnx)
save_JSON(json_output_data,filename, location)
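# Minimal end-to-end sketch (added for illustration; connection details and the
# table name are assumptions, not part of the original file):
# cnx = mysql.connector.connect(host="localhost", user="user", password="...", database="covid")
# create_json("Gemeente_totalen", "gemeente_totalen", cnx)
# # -> writes ../outputs/gemeente_totalen.json shaped as
# # {"YYYY-MM-DD": {"<Gemeentecode>": Aantal, ...}, ...}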
```
#### File: COVID-19-Dashboard-NL/Backend/SQL_helper.py
```python
"""
_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/
Name: insert_new_entry
Purpose: to insert new entries into a given table
Expected input: table name as string, Datum as datetime.date, gemeentecode as string, aantal as int, mysql object for cursor
Expected output: sql queries to insert into a database
Dependencies: mysql connect, log_lib
"""
from log_lib import event
def insert_new_entry(table_name, datum, gemeentecode, aantal, cnx):
insert_new_data_query = ("INSERT INTO {} (`Datum`, `Gemeentecode`, `Aantal`) VALUES ('{}', '{}', '{}')")
inser_cursor = cnx.cursor()
inser_cursor.execute(insert_new_data_query.format(table_name, datum, gemeentecode,aantal))
cnx.commit()
event.new_entry(datum,gemeentecode)
"""
_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/
Name: does_table_exist
Purpose: to check if the given table exists
Expected input: table name as string, mysql object for cursor
Expected output: boolean indicating whether the table exists
Dependencies: mysql connect
"""
def does_table_exist(table_name, cnx):
search_query = ("SHOW TABLES LIKE '{}'")
search_cursor = cnx.cursor()
search_cursor.execute(search_query.format(table_name))
return search_cursor.fetchone() != None
"""
_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/
Name: create_new_table_for_gemeente
Purpose: create a new table for the counts per municipality (Aantal per Gemeente) with the correct structure
Expected input: table name as string, mysql object for cursor
Expected output: mysql generated table in the database
Dependencies: mysql connect
"""
def create_new_table_for_gemeente(table_name, cnx):
creation_query = ("CREATE TABLE {} ( `ID` INT NOT NULL AUTO_INCREMENT , `Datum` DATE NOT NULL , `Gemeentecode` INT NOT NULL , `Aantal` INT NOT NULL , PRIMARY KEY (`ID`)) ENGINE = InnoDB")
creation_cursor = cnx.cursor()
creation_cursor.execute(creation_query.format(table_name))
"""
_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/
Name: does_entry_exist
Purpose: check if the entry already exists
Expected input: table name as string, datum as datetime.date, gemeentecode as string, mysql object for cursor
Expected output: boolean if the entry exists
Dependencies: mysql connect
"""
def does_entry_exist(table_name,date,gemeentecode,cnx):
search_query = ("SELECT Datum, Gemeentecode, Aantal FROM {} WHERE Datum ='{}' AND Gemeentecode ='{}'")
search_cursor = cnx.cursor()
search_cursor.execute(search_query.format(table_name, date, gemeentecode))
return search_cursor.fetchone() != None
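# Typical call-order sketch (added for illustration; the table name and values
# are assumptions, not part of the original file):
# if not does_table_exist("Gemeente_totalen", cnx):
#     create_new_table_for_gemeente("Gemeente_totalen", cnx)
# if not does_entry_exist("Gemeente_totalen", datum, gemeentecode, cnx):
#     insert_new_entry("Gemeente_totalen", datum, gemeentecode, aantal, cnx)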
``` |
{
"source": "jordyvanekelenCB/bad-bots",
"score": 3
} |
#### File: tests/unit/test_bad_bots.py
```python
import sys
import os
import inspect
import configparser
# pylint: disable=E0401
import pytest
# Fix module import from parent directory error.
# Reference: https://stackoverflow.com/questions/55933630/
# python-import-statement-modulenotfounderror-when-running-tests-and-referencing
CURRENT_DIR = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
PROJECT_ROOT = os.path.dirname(CURRENT_DIR)
PROJECT_ROOT_SRC = "%s/LambdaCode" % os.path.dirname(PROJECT_ROOT)
# Set up configuration path
CONFIG_PATH = os.path.join(os.path.dirname(PROJECT_ROOT_SRC + "/LambdaCode"), 'config', 'config.ini')
# Set up sys path
sys.path.insert(0, PROJECT_ROOT_SRC)
# Import project classes
# pylint: disable=C0413
from bad_bots import BadBots
from bad_bots import Bot
@pytest.fixture()
def setup_config():
""" Fixture for setting up configuration parser """
config = configparser.ConfigParser()
config.read(CONFIG_PATH)
return config
@pytest.fixture()
def get_mock_event():
""" Fixture for retrieving mock event """
event = {
"httpMethod": "GET",
"//body": "{\"name\": \"Sam\"}",
"resource": "/{proxy+}",
"queryStringParameters": {},
"pathParameters": {
"proxy": "users"
},
"requestContext": {
"accountId": "222222222",
"identity": {
"sourceIp": "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b",
"userAgent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_1_6) "
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2743.116 Safari/537.36",
},
"resourcePath": "/{proxy+}",
"httpMethod": "GET",
"apiId": "xxxxxxxxxx"
}
}
return event
# pylint: disable=W0621
# pylint: disable=R0914
def test_get_ip_type_by_address(setup_config, get_mock_event):
""" Unit test get_ip_type_by_address method of the Bad Bots class """
# !ARRANGE!
bad_bots = BadBots(setup_config, get_mock_event)
ipv4_address_1 = '1.1.1.1'
ipv4_address_2 = '11.22.33.44'
ipv4_address_3 = '172.16.17.32'
ipv6_address_1 = 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b'
ipv6_address_2 = 'fc00:db20:35b:7399::5'
ipv6_address_3 = 'fd07:a47c:3742:823e:3b02:76:982b:463'
# !ACT!
# Detect the IP type of provided IP addresses
ipv4_address_1_type = bad_bots.get_ip_type_by_address(ipv4_address_1)
ipv4_address_2_type = bad_bots.get_ip_type_by_address(ipv4_address_2)
ipv4_address_3_type = bad_bots.get_ip_type_by_address(ipv4_address_3)
ipv6_address_1_type = bad_bots.get_ip_type_by_address(ipv6_address_1)
ipv6_address_2_type = bad_bots.get_ip_type_by_address(ipv6_address_2)
ipv6_address_3_type = bad_bots.get_ip_type_by_address(ipv6_address_3)
# !ASSERT!
# Assert IP addresses are of type IPv4
assert ipv4_address_1_type.value == BadBots.SourceIPType.IPV4.value
assert ipv4_address_2_type.value == BadBots.SourceIPType.IPV4.value
assert ipv4_address_3_type.value == BadBots.SourceIPType.IPV4.value
# Assert IP addresses are of type IPv6
assert ipv6_address_1_type.value == BadBots.SourceIPType.IPV6.value
assert ipv6_address_2_type.value == BadBots.SourceIPType.IPV6.value
assert ipv6_address_3_type.value == BadBots.SourceIPType.IPV6.value
def test_check_bot_confidence(setup_config, get_mock_event):
""" Unit test check_bot_confidence method of the Bad Bots class """
# !ARRANGE!
bad_bots = BadBots(setup_config, get_mock_event)
bot_1 = Bot()
bot_1.source_ip = '1.1.1.1'
bot_1.http_query_string_parameters = '<script></script>'
bot_1.http_body = 'EXEC'
bot_1.geolocation = 'United States'
bot_1.source_ip_type = BadBots.SourceIPType.IPV4
bot_1.http_method = "CONNECT"
bot_1.http_user_agent = "Mozilla/5.0 (compatible; Sosospider/2.0; +http://help.soso.com/webspider.htm)"
bot_2 = Bot()
bot_2.source_ip = '172.16.58.3'
bot_2.http_query_string_parameters = 'hello'
bot_2.http_body = 'hello!'
bot_2.geolocation = 'Netherlands'
bot_2.source_ip_type = BadBots.SourceIPType.IPV4
bot_2.http_method = "GET"
bot_2.http_user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36"
bot_3 = Bot()
bot_3.source_ip = 'fc00:db20:35b:7399::5'
bot_3.http_query_string_parameters = 'param=true'
bot_3.http_body = 'username=xxx'
bot_3.geolocation = 'United States'
bot_3.source_ip_type = BadBots.SourceIPType.IPV6
bot_3.http_method = "GET"
bot_3.http_user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36"
# !ACT!
# Do confidence check on potential bots
confidence_score_bot_1 = bad_bots.check_bot_confidence(bot_1)
confidence_score_bot_2 = bad_bots.check_bot_confidence(bot_2)
confidence_score_bot_3 = bad_bots.check_bot_confidence(bot_3)
# !ASSERT!
# Assert IP addresses are of type IPv4
assert(confidence_score_bot_1 == 25)
assert(confidence_score_bot_2 == 0)
assert(confidence_score_bot_3 == 5)
``` |
{
"source": "jordyvanekelenCB/ip-list-parser",
"score": 3
} |
#### File: tests/unit/test_ip_list_parser.py
```python
import sys
import os
import inspect
import configparser
import random
import struct
import socket
# pylint: disable=E0401
import pytest
# Fix module import from parent directory error.
# Reference: https://stackoverflow.com/questions/55933630/
# python-import-statement-modulenotfounderror-when-running-tests-and-referencing
CURRENT_DIR = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
PROJECT_ROOT = os.path.dirname(CURRENT_DIR)
PROJECT_ROOT_SRC = "%s/LambdaCode" % os.path.dirname(PROJECT_ROOT)
# Set up configuration path
CONFIG_PATH = os.path.join(os.path.dirname(PROJECT_ROOT_SRC + "/LambdaCode"), 'config', 'config.ini')
# Set up sys path
sys.path.insert(0, PROJECT_ROOT_SRC)
# Import project classes
# pylint: disable=C0413
from ip_list_parser import IPListParser
@pytest.fixture()
def setup_config():
""" Fixture for setting up configuration parser """
config = configparser.ConfigParser()
config.read(CONFIG_PATH)
return config
# pylint: disable=W0621
# pylint: disable=R0914
def test_compile_lists(setup_config):
""" This method tests the filter_block_list_queue method """
# !ARRANGE!
ip_list_parser = IPListParser(setup_config)
list_1 = []
list_2 = []
# pylint: disable=W0612
# Add 400 addresses containing 4 unique IP addresses
for i in range(0, 100):
list_1.append('1.1.1.1')
list_1.append('2.2.2.2')
list_1.append('3.3.3.3')
list_1.append('4.4.4.4')
# Add 9997 randomly generated IP addresses
for i in range(0, 9997):
list_2.append(socket.inet_ntoa(struct.pack('>I', random.randint(1, 0xffffffff))))
# Merge the lists
master_list = [list_1, list_2]
# !ACT!
# Call compile_lists
master_ip_list = ip_list_parser.compile_lists(master_list)
# !ASSERT!
# Assert the length of the list is equal to or lower than 10000 because of the AWS WAF threshold
assert len(master_ip_list) <= 10000
# Assert the correct IP addresses are in the list
assert '1.1.1.1' in master_ip_list
assert '2.2.2.2' in master_ip_list
assert '3.3.3.3' in master_ip_list
assert '4.4.4.4' in master_ip_list
# Assert the count of the entries is one
assert master_ip_list.count('1.1.1.1') == 1
assert master_ip_list.count('2.2.2.2') == 1
assert master_ip_list.count('3.3.3.3') == 1
assert master_ip_list.count('4.4.4.4') == 1
``` |
{
"source": "jordyvanraalte/piqcer-client-python",
"score": 2
} |
#### File: picqer_client_python/config/config.py
```python
class Config:
instance = None
def __new__(cls, *args, **kwargs):
if cls.instance is None:
cls.instance = super(Config, cls).__new__(cls)
cls.instance.api_key = ""
cls.instance.base_url = ""
return cls.instance
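# Usage sketch (added for illustration; the key and URL are placeholders):
# config = Config()
# config.api_key = "your-api-key"
# config.base_url = "https://example.picqer.com/api/v1/"
# assert Config() is config  # every later call returns the same configured instance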
```
#### File: picqer_client_python/resources/customers.py
```python
import requests
from requests.auth import HTTPBasicAuth
from ..resources.resource import Resource
class Customers(Resource):
def __init__(self):
super().__init__("customers")
def get_customer_addresses(self, id):
return requests.get(self.config.base_url + self.path + "/" + str(id) + "/addresses", verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def post_customer_address(self, id, addresses):
return requests.post(self.config.base_url + self.path + "/" + str(id) + "/addresses", data=addresses,
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def put_customer_address(self, id, address_id, address):
return requests.put(self.config.base_url + self.path + "/" + str(id) + "/addresses/" + str(address_id),
data=address,
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def delete_customer_address(self, id, address_id):
return requests.delete(self.config.base_url + self.path + "/" + str(id) + "/addresses/" + address_id,
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def delete(self, id):
raise NotImplementedError("Not possible to delete a customer")
```
#### File: picqer_client_python/resources/locations.py
```python
from ..resources.resource import Resource
import requests
from requests.auth import HTTPBasicAuth
class Locations(Resource):
def __init__(self):
super().__init__("locations")
def get_products_on_location(self, id):
return requests.get(self.config.base_url + self.path + '/' + str(id) + "/products",
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
```
#### File: picqer_client_python/resources/picklists.py
```python
import requests
from requests.auth import HTTPBasicAuth
from ..resources.resource import Resource
class Picklists(Resource):
def __init__(self):
super().__init__("picklists")
def close_picklist(self, id):
return requests.post(self.config.base_url + self.path + "/" + str(id) + "/close", verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def pick_product(self, id, product_object):
return requests.post(self.config.base_url + self.path + "/" + str(id) + "/pick", data=product_object,
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def pick_all(self, id):
return requests.post(self.config.base_url + self.path + "/" + str(id) + "/pickall", verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def get_shipments(self, id):
return requests.get(self.config.base_url + self.path + "/" + str(id) + '/shipments', verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def get_shipping_methods(self, id):
return requests.get(self.config.base_url + self.path + "/" + str(id) + '/shippingmethods', verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def create_shipments(self, id, shipments_object):
return requests.post(self.config.base_url + self.path + "/" + str(id) + '/shipments', data=shipments_object,
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def assign_user(self, id, user_object):
return requests.post(self.config.base_url + self.path + "/" + str(id) + "/assign", data=user_object,
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def unassign_user(self, id):
return requests.post(self.config.base_url + self.path + "/" + str(id) + "/unassign",
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def get_pdf_json(self, id):
headers = {"Accept": "application/json"}
return requests.get(self.config.base_url + self.path + "/" + str(id) + '/picklistpdf', verify=True,
headers=headers,
auth=HTTPBasicAuth(self.config.api_key, ''))
def get_packing_list_pdf_json(self, id):
headers = {"Accept": "application/json"}
return requests.get(self.config.base_url + self.path + "/" + str(id) + '/packinglistpdf', verify=True,
headers=headers,
auth=HTTPBasicAuth(self.config.api_key, ''))
def snooze(self, id, snooze_object):
return requests.post(self.config.base_url + self.path + "/" + str(id) + "/snooze", data=snooze_object,
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def cancel(self, id):
return requests.post(self.config.base_url + self.path + "/" + str(id) + "/cancel",
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def delete(self, id):
raise NotImplementedError("Not possible to delete a picklist")
```
#### File: picqer_client_python/resources/products.py
```python
import requests
from requests.auth import HTTPBasicAuth
from ..resources.resource import Resource
class Products(Resource):
def __init__(self):
super().__init__("products")
def activate_product(self, id):
return requests.post(self.config.base_url + self.path + '/' + str(id),
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def get_warehouse_settings(self, id):
return requests.get(self.config.base_url + self.path + '/' + str(id) + "/warehouses",
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def update_warehouse_settings(self, product_id, warehouse_id, settings_object):
return requests.put(self.config.base_url + self.path + "/" + str(product_id) + "/warehouses/" + str(warehouse_id), data=settings_object, verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def get_images(self, id):
return requests.get(self.config.base_url + self.path + '/' + str(id) + "/images",
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def post_images(self, id, image_object):
return requests.post(self.config.base_url + self.path + '/' + str(id) + "/images", data=image_object,
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def delete_image(self, id, image_id):
return requests.delete(self.config.base_url + self.path + '/' + str(id) + "/images/" + str(image_id),
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def get_locations(self, id):
return requests.get(self.config.base_url + self.path + '/' + str(id) + "/locations",
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def link_product(self, id, link_product_object):
return requests.post(self.config.base_url + self.path + '/' + str(id) + "/locations",
data=link_product_object,
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def unlink_product(self, id, location_id):
return requests.post(self.config.base_url + self.path + '/' + str(id) + "/locations/" + location_id,
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def get_tag(self, id):
return requests.get(self.config.base_url + self.path + '/' + str(id) + "/tags",
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def post_tag(self, id, tag_object):
return requests.post(self.config.base_url + self.path + '/' + str(id) + "/tags",
data=tag_object,
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def remove_tag(self, id, tags_id):
return requests.delete(self.config.base_url + self.path + '/' + str(id) + "/tags/" + tags_id,
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def get_stock(self, id):
return requests.get(self.config.base_url + self.path + '/' + str(id) + "/stock",
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def get_stock_in_single_warehouse(self, product_id, warehouse_id):
return requests.get(self.config.base_url + self.path + '/' + str(product_id) + "/stock/" + str(warehouse_id),
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def change_stock(self, product_id, warehouse_id, stock_object):
return requests.post(self.config.base_url + self.path + '/' + str(product_id) + "/stock/" + str(warehouse_id),
data=stock_object,
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def move_stock(self, product_id, warehouse_id, stock_object):
return requests.post(
self.config.base_url + self.path + '/' + str(product_id) + "/stock/" + str(warehouse_id) + "/move",
data=stock_object,
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def delete(self, id):
raise NotImplementedError("Not possible to delete a product")
```
#### File: picqer_client_python/resources/resource.py
```python
import requests
from requests.auth import HTTPBasicAuth
from ..config.config import Config
class Resource:
def __init__(self, path):
self.path = path
self.config = Config()
def get_all(self):
return requests.get(self.config.base_url + self.path, verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def get_all_with_offset(self, offset):
return requests.get(self.config.base_url + self.path + "?offset=" + str(offset), verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def get_with_filter(self, filters):
return requests.get(self.config.base_url + self.path + str(filters), verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def get(self, id):
return requests.get(self.config.base_url + self.path + "/" + str(id), verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def post(self, object):
return requests.post(self.config.base_url + self.path, data=object, verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def put(self, id, object):
return requests.put(self.config.base_url + self.path + "/" + str(id), data=object, verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def delete(self, id):
return requests.delete(self.config.base_url + self.path + "/" + str(id), verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
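# Usage sketch (added for illustration; Products is defined elsewhere in this
# package and the endpoint layout is an assumption): any subclass inherits these
# helpers, so Products().get(1234) issues GET {base_url}products/1234 and returns
# the raw requests.Response for the caller to inspect.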
``` |
{
"source": "jordyvanvorselen/quote-generator",
"score": 2
} |
#### File: quote-generator/quotes/tests.py
```python
from django.test import TestCase
from bs4 import BeautifulSoup
from .views import home_page
from django.http import request
class HomePageTest(TestCase):
def test_uses_home_template(self):
response = self.client.get('/')
self.assertTemplateUsed(response, 'home.html')
def get_quote_from_response(self, response):
return BeautifulSoup(response.content, "html.parser").find(id='Quote').string
def test_home_page_view_should_return_different_quote_each_refresh(self):
first_quote = self.get_quote_from_response(home_page(request))
second_quote = self.get_quote_from_response(home_page(request))
self.assertNotEqual(first_quote, second_quote)
def test_quote_on_home_page_should_change_upon_refresh(self):
first_quote = self.get_quote_from_response(self.client.get('/'))
second_quote = self.get_quote_from_response(self.client.get('/'))
self.assertNotEqual(first_quote, second_quote)
``` |
{
"source": "Jordy-VL/uncertainty-baselines",
"score": 2
} |
#### File: baselines/diabetic_retinopathy_detection/dropoutensemble.py
```python
import os
from absl import app
from absl import flags
from absl import logging
import numpy as np
import robustness_metrics as rm
import tensorflow as tf
import tensorflow_datasets as tfds
import uncertainty_baselines as ub
import utils # local file import
# Data load / output flags.
flags.DEFINE_string(
'checkpoint_dir', '/tmp/diabetic_retinopathy_detection/dropout',
'The directory from which the trained dropout '
'model weights are retrieved.')
flags.DEFINE_string(
'output_dir', '/tmp/diabetic_retinopathy_detection/dropoutensemble',
'The directory where the dropout ensemble model weights '
'and training/evaluation summaries are stored.')
flags.DEFINE_string('data_dir', None, 'Path to training and testing data.')
flags.mark_flag_as_required('data_dir')
# General model flags.
flags.DEFINE_integer('seed', 42, 'Random seed.')
flags.DEFINE_integer('eval_batch_size', 32,
'The per-core validation/test batch size.')
# Metric flags.
flags.DEFINE_integer('num_bins', 15, 'Number of bins for ECE.')
# Dropout-related flags -- should be consistent with the trained
# dropout models in the checkpoint dir above.
flags.DEFINE_float('dropout_rate', 0.1, 'Dropout rate, between [0.0, 1.0).')
flags.DEFINE_bool(
'filterwise_dropout', False,
'Dropout whole convolutional filters instead of individual '
'values in the feature map.')
# Accelerator flags.
flags.DEFINE_bool('use_gpu', True, 'Whether to run on GPU, otherwise CPU.')
flags.DEFINE_bool('use_bfloat16', False, 'Whether to use mixed precision.')
flags.DEFINE_integer(
'num_cores', 1,
'Number of TPU cores or number of GPUs - only support 1 GPU for now.')
FLAGS = flags.FLAGS
def main(argv):
del argv # unused arg
tf.io.gfile.makedirs(FLAGS.output_dir)
logging.info('Saving Ensemble MC Dropout predictions to %s', FLAGS.output_dir)
tf.random.set_seed(FLAGS.seed)
if FLAGS.num_cores > 1:
raise ValueError('Only a single accelerator is currently supported.')
if FLAGS.use_gpu:
logging.info('Use GPU')
else:
logging.info('Use CPU')
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# As per the Kaggle challenge, we have split sizes:
# train: 35,126
# validation: 10,906 (currently unused)
# test: 42,670
ds_info = tfds.builder('diabetic_retinopathy_detection').info
eval_batch_size = FLAGS.eval_batch_size * FLAGS.num_cores
steps_per_eval = ds_info.splits['test'].num_examples // eval_batch_size
dataset_test_builder = ub.datasets.get(
'diabetic_retinopathy_detection', split='test', data_dir=FLAGS.data_dir)
dataset_test = dataset_test_builder.load(batch_size=eval_batch_size)
if FLAGS.use_bfloat16:
policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
tf.keras.mixed_precision.experimental.set_policy(policy)
# TODO(nband): debug, switch from keras.models.save to Checkpoint
logging.info('Building Keras ResNet-50 Ensemble MC Dropout model')
ensemble_filenames = utils.parse_keras_models(FLAGS.checkpoint_dir)
ensemble_size = len(ensemble_filenames)
logging.info('Ensemble size: %s', ensemble_size)
logging.info('Ensemble Keras model dir names: %s', str(ensemble_filenames))
# Write model predictions to files.
for member, ensemble_filename in enumerate(ensemble_filenames):
model = tf.keras.models.load_model(ensemble_filename, compile=False)
logging.info('Model input shape: %s', model.input_shape)
logging.info('Model output shape: %s', model.output_shape)
logging.info('Model number of weights: %s', model.count_params())
filename = f'{member}.npy'
filename = os.path.join(FLAGS.output_dir, filename)
if not tf.io.gfile.exists(filename):
logits = []
test_iterator = iter(dataset_test)
for i in range(steps_per_eval):
inputs = next(test_iterator) # pytype: disable=attribute-error
images = inputs['features']
logits.append(model(images, training=False))
if i % 100 == 0:
logging.info(
'Ensemble member %d/%d: Completed %d of %d eval steps.',
member + 1,
ensemble_size,
i + 1,
steps_per_eval)
logits = tf.concat(logits, axis=0)
with tf.io.gfile.GFile(filename, 'w') as f:
np.save(f, logits.numpy())
percent = (member + 1) / ensemble_size
message = (
'{:.1%} completion for prediction: ensemble member {:d}/{:d}.'.format(
percent, member + 1, ensemble_size))
logging.info(message)
metrics = {
'test/negative_log_likelihood': tf.keras.metrics.Mean(),
'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
'test/accuracy': tf.keras.metrics.BinaryAccuracy(),
'test/auprc': tf.keras.metrics.AUC(curve='PR'),
'test/auroc': tf.keras.metrics.AUC(curve='ROC'),
'test/ece': rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins)
}
for i in range(ensemble_size):
metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
metrics['test/accuracy_member_{}'.format(i)] = (
tf.keras.metrics.BinaryAccuracy())
test_diversity = {
'test/disagreement': tf.keras.metrics.Mean(),
'test/average_kl': tf.keras.metrics.Mean(),
'test/cosine_similarity': tf.keras.metrics.Mean()
}
metrics.update(test_diversity)
# Evaluate model predictions.
logits_dataset = []
for member in range(ensemble_size):
filename = f'{member}.npy'
filename = os.path.join(FLAGS.output_dir, filename)
with tf.io.gfile.GFile(filename, 'rb') as f:
logits_dataset.append(np.load(f))
logits_dataset = tf.convert_to_tensor(logits_dataset)
test_iterator = iter(dataset_test)
for step in range(steps_per_eval):
inputs = next(test_iterator) # pytype: disable=attribute-error
labels = inputs['labels']
logits = logits_dataset[:, (step * eval_batch_size):((step + 1) *
eval_batch_size)]
labels = tf.cast(labels, tf.float32)
logits = tf.cast(logits, tf.float32)
negative_log_likelihood_metric = rm.metrics.EnsembleCrossEntropy(
binary=True)
negative_log_likelihood_metric.add_batch(
logits, labels=tf.expand_dims(labels, axis=-1))
negative_log_likelihood = list(
negative_log_likelihood_metric.result().values())[0]
per_probs = tf.nn.sigmoid(logits)
probs = tf.reduce_mean(per_probs, axis=0)
gibbs_ce_metric = rm.metrics.GibbsCrossEntropy(binary=True)
gibbs_ce_metric.add_batch(logits, labels=tf.expand_dims(labels, axis=-1))
gibbs_ce = list(gibbs_ce_metric.result().values())[0]
metrics['test/negative_log_likelihood'].update_state(
negative_log_likelihood)
metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
metrics['test/accuracy'].update_state(labels, probs)
metrics['test/auprc'].update_state(labels, probs)
metrics['test/auroc'].update_state(labels, probs)
metrics['test/ece'].add_batch(probs, label=labels)
for i in range(ensemble_size):
member_probs = per_probs[i]
member_loss = tf.keras.losses.binary_crossentropy(labels, member_probs)
metrics['test/nll_member_{}'.format(i)].update_state(member_loss)
metrics['test/accuracy_member_{}'.format(i)].update_state(
labels, member_probs)
diversity = rm.metrics.AveragePairwiseDiversity()
diversity.add_batch(per_probs, num_models=ensemble_size)
diversity_results = diversity.result()
for k, v in diversity_results.items():
test_diversity['test/' + k].update_state(v)
total_results = {name: metric.result() for name, metric in metrics.items()}
# Metrics from Robustness Metrics (like ECE) will return a dict with a
# single key/value, instead of a scalar.
total_results = {
k: (list(v.values())[0] if isinstance(v, dict) else v)
for k, v in total_results.items()
}
logging.info('Metrics: %s', total_results)
if __name__ == '__main__':
app.run(main)
```
#### File: uncertainty_baselines/datasets/mnli.py
```python
from typing import Any, Dict, Optional
import tensorflow as tf
import tensorflow_datasets as tfds
from uncertainty_baselines.datasets import base
class MnliDataset(base.BaseDataset):
"""Multi-NLI dataset builder class."""
def __init__(
self,
split: str,
shuffle_buffer_size: int = None,
num_parallel_parser_calls: int = 64,
mode: str = 'matched',
try_gcs: bool = False,
download_data: bool = False,
is_training: Optional[bool] = None,
**unused_kwargs: Dict[str, Any]):
"""Create an Genomics OOD tf.data.Dataset builder.
Args:
split: a dataset split, either a custom tfds.Split or one of the
        tfds.Split enums [TRAIN, VALIDATION, TEST] or their lowercase string
names.
shuffle_buffer_size: the number of example to use in the shuffle buffer
for tf.data.Dataset.shuffle().
num_parallel_parser_calls: the number of parallel threads to use while
preprocessing in tf.data.Dataset.map().
mode: Type of data to import. If mode = "matched", import the in-domain
data (glue/mnli_matched). If mode = "mismatched", import the
out-of-domain data (glue/mnli_mismatched).
try_gcs: Whether or not to try to use the GCS stored versions of dataset
files. Currently unsupported.
download_data: Whether or not to download data before loading. Currently
unsupported.
is_training: Whether or not the given `split` is the training split. Only
required when the passed split is not one of ['train', 'validation',
'test', tfds.Split.TRAIN, tfds.Split.VALIDATION, tfds.Split.TEST].
"""
if mode not in ('matched', 'mismatched'):
raise ValueError('"mode" must be either "matched" or "mismatched".'
'Got {}'.format(mode))
if mode == 'mismatched' and split == tfds.Split.TRAIN:
raise ValueError('No training data for mismatched domains.')
if is_training is None:
is_training = split in ['train', tfds.Split.TRAIN]
if split == tfds.Split.VALIDATION:
split = 'validation_' + mode
if split == tfds.Split.TEST:
split = 'test_' + mode
name = 'glue/mnli'
dataset_builder = tfds.builder(name, try_gcs=try_gcs)
super(MnliDataset, self).__init__(
name=name,
dataset_builder=dataset_builder,
split=split,
is_training=is_training,
shuffle_buffer_size=shuffle_buffer_size,
num_parallel_parser_calls=num_parallel_parser_calls,
fingerprint_key='idx',
download_data=download_data)
def _create_process_example_fn(self) -> base.PreProcessFn:
"""Create a pre-process function to return labels and sentence tokens."""
def _example_parser(example: Dict[str, tf.Tensor]) -> Dict[str, Any]:
"""Parse sentences and labels from a serialized tf.train.Example."""
idx = example['idx']
label = example['label']
text_a = example['premise']
text_b = example['hypothesis']
return {
'text_a': text_a,
'text_b': text_b,
'labels': label,
'idx': idx
}
return _example_parser
``` |
{
"source": "Jordyvm/Django_portfolio",
"score": 2
} |
#### File: src/pages/views.py
```python
from django.shortcuts import render
def home_view(request, *args, **kwargs):
context = {"home_active": "active"}
return render(request, 'home.html', context)
def about_view(request, *args, **kwargs):
context = {"about_active": "active"}
return render(request, 'about.html', context)
def contact_view(request, *args, **kwargs):
context = {"contact_active": "active"}
return render(request, 'contact.html', context)
```
#### File: src/posts/models.py
```python
from django.db import models
# Create your models here.
class Skill(models.Model):
skill = models.CharField(max_length=30)
def __str__(self):
return self.skill
class Meta:
ordering = ('skill',)
class Post(models.Model):
title = models.CharField(max_length=120)
date = models.DateField()
description = models.TextField()
image = models.ImageField(default='')
skills = models.ManyToManyField(Skill)
MediaTechnology = models.BooleanField(default=False)
def __str__(self):
return self.title
``` |
{
"source": "Jordzman/explorer",
"score": 2
} |
#### File: explorer/blockexplorer/middleware.py
```python
from django.http import HttpResponseRedirect
class SSLMiddleware(object):
# http://stackoverflow.com/a/9207726/1754586
def process_request(self, request):
if not any([request.is_secure(), request.META.get("HTTP_X_FORWARDED_PROTO", "") == 'https']):
url = request.build_absolute_uri(request.get_full_path())
secure_url = url.replace("http://", "https://")
return HttpResponseRedirect(secure_url)
```
#### File: explorer/users/token_api.py
```python
import requests
import json
from tokens.settings import BLOCKCYPHER_API_KEY
def register_new_token(email, new_token, first=None, last=None):
assert new_token and email
    post_params = {
        "first": first,
        "last": last,
        "email": email,
        "token": new_token,
    }
url = 'https://api.blockcypher.com/v1/tokens'
get_params = {'token': BLOCKCYPHER_API_KEY}
r = requests.post(url, data=json.dumps(post_params), params=get_params,
verify=True, timeout=20)
assert 'error' not in json.loads(r.text)
return new_token
```
#### File: Jordzman/explorer/utils.py
```python
import re
import random
from blockexplorer.settings import BASE_URL
def get_max_pages(num_items, items_per_page):
if num_items < items_per_page:
return 1
elif num_items % items_per_page == 0:
return num_items // items_per_page
else:
return num_items // items_per_page + 1
def get_client_ip(request):
"""
Get IP from a request
"""
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
def get_user_agent(request):
return request.META.get('HTTP_USER_AGENT')
def is_good_status_code(status_code):
return str(status_code).startswith('2')
def assert_good_status_code(status_code):
err_msg = 'Expected status code 2XX but got %s' % status_code
assert is_good_status_code(status_code), err_msg
def simple_csprng(num_chars=32, eligible_chars='abcdefghjkmnpqrstuvwxyzABCDEFGHJKMNPQRSTUVWXYZ23456789'):
"""
Generate a random password using the characters in `chars` and with a length of `num_chars`.
http://stackoverflow.com/a/2257449
Cryptographically secure but may not work on all OSs.
Shouldn't cause blocking but it's possible.
"""
return ''.join(random.SystemRandom().choice(eligible_chars) for x in range(num_chars))
def simple_pw_generator(num_chars=10, eligible_chars='abcdefghjkmnpqrstuvwxyz23456789'):
"""
Generate a random password using the characters in `chars` and with a
length of `size`.
http://stackoverflow.com/a/2257449
"""
return ''.join(random.choice(eligible_chars) for x in range(num_chars))
def uri_to_url(uri, base_url=BASE_URL):
"""
Take a URI and map it a URL:
/foo -> http://coinsafe.com/foo
"""
if not uri:
return base_url
if uri.startswith('/'):
return '%s%s' % (base_url, uri)
return '%s/%s' % (base_url, uri)
def cat_email_header(name, email):
assert '@' in email
if name:
return '%s <%s>' % (name, email)
return email
def split_email_header(header):
if '<' in header and '>' in header:
name, email = re.findall('(.*)<(.*)>', header)[0]
else:
name = None
email = header
assert '@' in email
return name, email
``` |
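As a quick sanity check of the pagination helper above, a minimal sketch (assuming `utils.py` is importable, which requires `blockexplorer.settings` to be available for the `BASE_URL` import):

```python
from utils import get_max_pages, uri_to_url

assert get_max_pages(0, 25) == 1     # fewer items than a page still yields one page
assert get_max_pages(100, 25) == 4   # an exact multiple of the page size
assert get_max_pages(101, 25) == 5   # a remainder adds one more page

print(uri_to_url('/foo'))            # BASE_URL + '/foo'
```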
{
"source": "jore731/generic-package-manager",
"score": 3
} |
#### File: jore731/generic-package-manager/package_manager.py
```python
import argparse
import sys
from package import Package, DependentPackageFoundError
def parse_arguments():
parser = argparse.ArgumentParser(prog='package_manager.py')
parser.add_argument('-f', type=argparse.FileType('r'), help='Input File', required=True)
parser.add_argument('-o', type=argparse.FileType('w'), help='Output File (If not specified, stdout will be used)')
_args = parser.parse_args()
return _args
def register_package(package_name: str):
if package_name not in packages:
packages[package_name] = Package(package_name)
def install(package_name: str):
register_package(package_name)
packages[package_name].install(explicitly_installed=True)
def depend(main_package_name: str, *dependent_package_names: [str]):
register_package(main_package_name)
for dependent_package_name in dependent_package_names:
register_package(dependent_package_name)
packages[main_package_name].depends_on(packages[dependent_package_name])
def remove(package_name: str):
register_package(package_name)
try:
packages[package_name].remove()
except DependentPackageFoundError as exc:
print(exc)
def list_packages():
for package in packages.values():
if package.installed:
print(f" {package.name}")
def process_command(command, *args):
if command == 'DEPEND':
depend(args[0], *args[1:])
elif command == 'INSTALL':
assert len(args) == 1, "Invalid amount of packages required to install"
install(args[0])
elif command == 'LIST':
assert len(args) == 0, "LIST requires no arguments"
list_packages()
elif command == 'REMOVE':
assert len(args) == 1, "Invalid amount of packages required to install"
remove(args[0])
elif command == 'END':
exit(0)
if __name__ == '__main__':
args = parse_arguments()
if args.o is not None:
sys.stdout = args.o
packages = {}
for command in args.f.readlines():
command = command.strip()
print(command)
process_command(*command.split(" "))
``` |
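A minimal sketch of driving the command dispatcher above directly from Python instead of an input file. `package.py` is not shown here, so the exact install/remove semantics live in the `Package` class; the module-level `packages` dict normally only exists when the script runs as `__main__`, so it is created by hand:

```python
import package_manager

package_manager.packages = {}  # normally initialised in the __main__ block

commands = ["DEPEND TELNET TCPIP NETCARD", "INSTALL TELNET", "LIST", "REMOVE TCPIP"]
for line in commands:
    # same parsing the script applies to each line of the input file
    package_manager.process_command(*line.split(" "))
```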
{
"source": "Jore93/CryptoCourse",
"score": 2
} |
#### File: Week4Exercises/app/admin.py
```python
from flask import Blueprint, render_template, request, redirect, abort, current_app
admin = Blueprint('admin', __name__)
@admin.route("/admin", methods=["GET"])
def admin_home():
if not current_app.db.is_admin(request):
abort(403, description="Wooow, chill down. No access on here.")
return render_template("admin.html", user="admin")
@admin.route("/admin/top-secret", methods=["GET"])
def view_content():
if not current_app.db.is_admin(request):
abort(403, description="Wooow, chill down. No access on here.")
return '"Every secret creates a potential failure point." — <NAME>'
```
#### File: Week4Exercises/app/security.py
```python
import logging
import os
from base64 import b64decode, b64encode
from hashlib import sha256
from random import randrange
SECRET = os.urandom(randrange(10, 20))
logger = logging.getLogger("app")
def hash_password(password) -> str:
""" Hash password with a known secure hashing function """
return sha256(password.encode()).hexdigest()
def sign(message: bytes) -> bytes:
"""Sign message with a super secret key"""
return sha256(SECRET + message).digest()
def verify(data: bytes, sig: bytes) -> bool:
""" Verify supplied signature. Return boolean based on result"""
return sign(data) == sig
def parse_session(cookie: bytes) -> dict:
""" Parse cookie and return dict
@cookie: "key1=value1;key2=value2"
return {"key1":"value1","key2":"value2"}
"""
parsed = {}
b64_data, b64_sig = cookie.split('.')
data = b64decode(b64_data)
sig = b64decode(b64_sig)
if not verify(data, sig):
raise ValueError
for group in data.split(b';'):
try:
if not group:
continue
key, val = group.split(b'=')
parsed[key.decode()] = val
except Exception:
continue
return parsed
def create_session(user_data: dict) -> bytes:
""" Create session based on dict
param data: {"username": username, "secret": password}
return: key value pairs in "key1=value1;key2=value2;"
"""
session = ""
for k, v in user_data.items():
session += f"{k}={v};"
return session.encode()
def get_session(request) -> dict:
""" Get user specific session and verify signature """
if not request.cookies or "auth" not in request.cookies:
return
cookie = request.cookies.get("auth")
try:
user_data = parse_session(cookie)
except ValueError:
logger.warning("Invalid signature detected! Session will get killed.")
return {"message": "Invalid signature", "error": 403}
return user_data
def create_cookie(session):
"""Create cookie for continuous authentication"""
cookie_sig = sign(session)
return b64encode(session) + b'.' + b64encode(cookie_sig)
``` |
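A small round-trip sketch of the helpers above (assuming the module is importable as `security`). `parse_session` works on the textual cookie value as delivered by Flask, so the bytes returned by `create_cookie` are decoded first:

```python
from security import create_session, create_cookie, parse_session

session = create_session({"username": "alice", "admin": "false"})
cookie = create_cookie(session)           # b64(data) + b'.' + b64(signature)
fields = parse_session(cookie.decode())   # verifies the signature, then parses
assert fields["username"] == b"alice"     # parsed values stay as bytes
```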
{
"source": "jorelius/Lux-Design-2021-CSharp",
"score": 2
} |
#### File: jorelius/Lux-Design-2021-CSharp/main.py
```python
from subprocess import Popen, PIPE
from threading import Thread
from queue import Queue, Empty
import atexit
import os
import sys
agent_processes = [None, None]
t = None
q = None
def cleanup_process():
global agent_processes
for proc in agent_processes:
if proc is not None:
proc.kill()
def enqueue_output(out, queue):
for line in iter(out.readline, b''):
queue.put(line)
out.close()
def dotnet_agent(observation, configuration):
"""
a wrapper around a dotnet agent
"""
global agent_processes, t, q
agent_process = agent_processes[observation.player]
### Do not edit ###
if agent_process is None:
if "__raw_path__" in configuration:
cwd = os.path.dirname(configuration["__raw_path__"])
else:
cwd = os.path.dirname(__file__)
agent_process = Popen(["./run.sh"], stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=cwd)
agent_processes[observation.player] = agent_process
atexit.register(cleanup_process)
# following 4 lines from https://stackoverflow.com/questions/375427/a-non-blocking-read-on-a-subprocess-pipe-in-python
q = Queue()
t = Thread(target=enqueue_output, args=(agent_process.stderr, q))
t.daemon = True # thread dies with the program
t.start()
if observation.step == 0:
# fixes bug where updates array is shared, but the first update is agent dependent actually
observation["updates"][0] = f"{observation.player}"
# print observations to agent
agent_process.stdin.write(("\n".join(observation["updates"]) + "\n").encode())
agent_process.stdin.flush()
# wait for data written to stdout
agent1res = (agent_process.stdout.readline()).decode()
_end_res = (agent_process.stdout.readline()).decode()
while True:
try: line = q.get_nowait()
except Empty:
# no standard error received, break
break
else:
# standard error output received, print it out
print(line.decode(), file=sys.stderr, end='')
outputs = agent1res.split("\n")[0].split(",")
actions = []
for cmd in outputs:
if cmd != "":
actions.append(cmd)
return actions
``` |
{
"source": "jorendorff/tinysearch",
"score": 3
} |
#### File: jorendorff/tinysearch/web.py
```python
import tiny
from flask import Flask, render_template, request
my_index = tiny.Index("small-sample")
app = Flask(__name__)
@app.route("/")
def root():
return render_template("index.html")
@app.route("/search")
def search():
q = request.args['q']
results = my_index.search(q)
return render_template("results.html", q=q, results=results)
``` |
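For local testing the app can be exercised without a browser through Flask's test client (assuming `tiny.py`, the `small-sample` index and the `templates/` directory sit next to `web.py`):

```python
from web import app

client = app.test_client()
print(client.get("/").status_code)                              # 200, renders index.html
print(client.get("/search", query_string={"q": "tiny"}).status_code)
```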
{
"source": "jorenham/asynced",
"score": 2
} |
#### File: asynced/asynced/_states.py
```python
from __future__ import annotations
__all__ = ('StateBase', 'State', 'StateCollection')
import abc
import asyncio
import collections
import itertools
from typing import (
Any,
AsyncIterable,
AsyncIterator,
Awaitable,
Callable,
cast,
Collection,
Coroutine,
Final,
Generator,
Generic,
Iterator,
Literal,
Mapping,
NoReturn,
overload,
TypeVar,
Union,
)
from typing_extensions import ParamSpec, Self, TypeAlias
from . import amap_iter
from ._typing import awaitable, Comparable, Maybe, Nothing, NothingType
from .exceptions import StateError, StopAnyIteration
_T = TypeVar('_T')
_KT = TypeVar('_KT')
_VT = TypeVar('_VT')
_RT = TypeVar('_RT')
_S = TypeVar('_S', bound=object)
_RS = TypeVar('_RS', bound=object)
_SS = TypeVar('_SS', bound=Collection)
_P = ParamSpec('_P')
_FutureOrCoro: TypeAlias = Union[asyncio.Future[_T], Coroutine[Any, Any, _T]]
_MaybeAsync: TypeAlias = Union[Callable[_P, _T], Callable[_P, Awaitable[_T]]]
_DoneStatus: TypeAlias = Literal['stop', 'error', 'cancel']
_DONE_STATUS_KEYS: Final[tuple[_DoneStatus, ...]] = 'stop', 'error', 'cancel'
_ST = TypeVar('_ST', bound='State')
class StateBase(Generic[_S]):
__slots__ = ('__loop', '__future', )
__match_args__ = ('_value_raw',)
__loop: asyncio.AbstractEventLoop | None
__future: asyncio.Future[_S] | None
def __init__(self):
self.__loop = None
self.__future = None
def __del__(self):
if (future := self.__future) is None:
return
try:
future.cancel()
except RuntimeError:
pass
def __await__(self) -> Generator[Any, None, _S]:
return self._future.__await__()
def __repr__(self) -> str:
return f'<{type(self).__name__}{self._format()} at {id(self):#x}>'
def __str__(self) -> str:
return f'<{type(self).__name__}{self._format()}>'
@property
def readonly(self) -> bool:
"""Returns true if an asyncio.Task will set the result, otherwise, the
asyncio.Future can be set manually.
"""
return isinstance(self._future, asyncio.Task)
@property
def is_done(self) -> bool:
return self._future.done()
@property
def is_set(self) -> bool:
future = self._future
if not future.done():
return False
if future.cancelled():
return False
return future.exception() is None
@property
def is_error(self) -> bool:
future = self._future
if not future.done():
return False
if future.cancelled():
return False
if (exc := future.exception()) is None:
return False
return not isinstance(exc, asyncio.CancelledError)
@property
def is_cancelled(self) -> bool:
future = self._future
if not future.done():
return False
if future.cancelled():
return True
return isinstance(future.exception(), asyncio.CancelledError)
@property
def _loop(self) -> asyncio.AbstractEventLoop:
if (loop := self.__loop) is None:
if (future := self.__future) is not None:
loop = future.get_loop()
else:
loop = asyncio.get_running_loop()
self.__loop = loop
return loop
@property
def _future(self) -> asyncio.Future[_S]:
if (future := self.__future) is None:
future = self.__future = self._loop.create_future()
return future
@_future.setter
def _future(self, value: asyncio.Future[_S] | _S):
if done := (future := self._future).done():
future.result()
if asyncio.isfuture(value):
future = value
else:
if done:
future = future.get_loop().create_future()
future.set_result(cast(_S, value))
self.__future = future
@_future.deleter
def _future(self):
if (future := self.__future) is None or not future.done():
return
if future.cancelled():
future.result() # will raise asyncio.CancelledError
self.__future = future.get_loop().create_future()
@property
def _value_raw(self) -> _S | BaseException | None:
"""For pattern matching."""
fut = self._future
if not fut.done():
return None
try:
return fut.result()
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as exc:
return exc
def _set(self, value: _S) -> None:
self.__as_unset_future().set_result(value)
self._on_set(value)
def _raise(self, exc: BaseException) -> None:
if isinstance(exc, asyncio.CancelledError):
self._cancel()
return
self.__as_unset_future().set_exception(exc)
self._on_error(exc)
def _cancel(self) -> bool:
cancelled = self._future.cancel()
self._on_cancel()
return cancelled
def _on_set(self, value: _S) -> None:
...
def _on_error(self, exc: BaseException) -> None:
...
def _on_cancel(self) -> None:
...
def _format(self) -> str:
fut = self._future
if fut.done():
return f'({self._value_raw!r})'
return ''
def __as_unset_future(self):
future = self._future
if isinstance(future, asyncio.Task):
raise StateError(f'{self!r} is readonly')
if future.done():
current = future.result()
raise StateError(f'{self!r} is already set: {current!r}')
return future
class State(AsyncIterator[_S], StateBase[_S], Generic[_S]):
__slots__ = (
'_key',
'_collections',
'_producer',
'_consumer',
'_is_set',
'_is_stopped',
'_is_error',
'_is_cancelled',
'__waiters',
'__waiter_counter',
)
# when provided, states are mapped before they're compared
_key: Callable[[_S], Comparable]
# change notification to e.g. StateVarTuple
_collections: list[tuple[Any, StateCollection[Any, _S, Any]]]
# see _set_from(), can be set only once
_producer: Maybe[AsyncIterable[Any]]
_consumer: Maybe[asyncio.Task[None | NoReturn]]
def __init__(
self,
*,
key: Callable[[_S], Comparable] = lambda s: s
) -> None:
super().__init__()
self._key = key
self._consumer = Nothing
self._producer = Nothing
# TODO use weakref
self._collections = []
self._is_set = False
self._is_stopped = False
self._is_error = False
self._is_cancelled = False
self.__waiters = {}
self.__waiter_counter = itertools.count(0).__next__
def __del__(self):
super().__del__()
try:
self._future.cancel()
for waiter in self.__waiters.values():
waiter.cancel()
except RuntimeError:
pass
self._collections.clear()
def __eq__(self, other: _S) -> bool:
if not self.is_set:
return False
value = self._get()
return bool(value is other or value == other)
def __await__(self) -> Generator[Any, None, _S]:
if not self.is_set:
if (future := self._future).done():
future.result() # reraise if needed
return self._wait_next().__await__()
return super().__await__()
async def __aiter__(self, *, buffer: int | None = 4) -> AsyncIterator[_S]:
futures = collections.deque(maxlen=buffer)
if self.is_set:
futures.append(self._future)
waiters = self.__waiters
waiter_id = self.__waiter_counter()
def _schedule_next(present=None):
if present:
if present.cancelled():
return
if isinstance(present.exception(), StopAsyncIteration):
return
loop = present.get_loop()
else:
loop = asyncio.get_running_loop()
future = loop.create_future()
future.add_done_callback(_schedule_next)
assert waiter_id not in waiters or waiters[waiter_id].done()
waiters[waiter_id] = future
futures.append(future)
try:
_schedule_next()
while not self.is_done:
if not len(futures):
await asyncio.sleep(0)
assert len(futures)
try:
yield await futures.popleft()
except StopAnyIteration:
break
finally:
if waiter_id in waiters:
del waiters[waiter_id]
async def __anext__(self) -> _S:
try:
return await self._wait_next()
except asyncio.CancelledError:
raise StopAsyncIteration
__hash__ = None # type: ignore
async def _wait_next(self) -> _S:
waiters = self.__waiters
waiter_id = self.__waiter_counter()
future = waiters[waiter_id] = self._loop.create_future()
try:
return await future
finally:
del waiters[waiter_id]
@property
def readonly(self) -> bool:
"""Returns true if an asyncio.Task will set the result, otherwise, the
asyncio.Future can be set manually.
"""
return self._producer is not Nothing or super().readonly
@property
def is_set(self) -> bool:
return self._is_set
@property
def is_done(self) -> bool:
return self._is_stopped or self._is_error or self._is_cancelled
@property
def is_stopped(self) -> bool:
return self._is_stopped
@property
def is_error(self) -> bool:
return self._is_error
@property
def is_cancelled(self) -> bool:
return self._is_cancelled
@overload
def map(self, function: Callable[[_S], Awaitable[_S]]) -> Self: ...
@overload
def map(self, function: Callable[[_S], _S]) -> Self: ...
@overload
def map(
self,
function: Callable[[_S], Awaitable[object]],
cls: type[_ST],
*cls_args: Any,
**cls_kwargs: Any,
) -> _ST:
...
@overload
def map(
self,
function: Callable[[_S], object],
cls: type[_ST],
*cls_args: Any,
**cls_kwargs: Any,
) -> _ST:
...
def map(
self,
function: Callable[[_S], Awaitable[_RS]] | Callable[[_S], _RS],
cls: type[_ST] | None = None,
*cls_args: Any,
**cls_kwargs: Any,
) -> _ST:
"""Create a new instance from this state, with the function applied to
its value.
The function can be either sync or async.
Unless specified, the mapped state type will be type(self).
"""
if cls_kwargs is None:
cls_kwargs = {}
if 'name' not in cls_kwargs:
cls_kwargs['name'] = f'{function.__name__}({self})'
res: _ST
if cls is None:
res = cast(_ST, type(self)(*cls_args, **cls_kwargs))
else:
res = cls(*cls_args, **cls_kwargs)
if self.is_set:
initial = function(self._get())
if awaitable(initial):
async def _set_after():
res._set(cast(_RS, await initial))
asyncio.create_task(_set_after())
else:
res._set(cast(_RS, initial))
res._set_from(amap_iter(function, self))
return res
async def _consume(self) -> None | NoReturn:
assert self._producer is not Nothing
try:
async for state in self._producer:
self._set_item(state)
except (SystemExit, KeyboardInterrupt) as exc:
self._raise(exc)
raise
except GeneratorExit:
# don't attempt to "stop" if the loop was closed
try:
asyncio.get_running_loop()
except RuntimeError:
self._cancel()
else:
self._stop()
except StopAnyIteration:
self._stop()
except asyncio.CancelledError:
self._cancel()
except BaseException as exc:
self._raise(exc)
else:
await asyncio.sleep(0)
self._stop()
@overload
def _get(self, default: _T) -> _S | _T: ...
@overload
def _get(self, default: NothingType = ...) -> _S: ...
def _get(self, default: Maybe[_T] = Nothing) -> _S | _T:
if (future := self._future).done():
try:
return future.result()
except (GeneratorExit, StopAsyncIteration, asyncio.CancelledError):
if default is Nothing:
raise
return default
if default is Nothing:
raise LookupError(repr(self))
return default
def _set_from(self, producer: AsyncIterable[Any]) -> None:
if self._producer is not Nothing:
raise StateError(f'{self!r} is already being set')
self._producer = producer
task_name = f'{self}.consumer'
self._consumer = asyncio.create_task(self._consume(), name=task_name)
def _set_item(self, value: Any):
"""Used by the consumer to set the next item"""
self._set(cast(_S, value))
def _set(self, value: _S, always: bool = False):
if not always and self._equals(value):
return 0
del self._future
cast(asyncio.Future[_S], self._future).set_result(value)
self._on_set(value)
self.__notify_waiters(value)
def _clear(self) -> None:
del self._future
self._on_clear()
def _stop(self) -> None:
exc = StopAsyncIteration
future = self._future
if not future.done():
future.set_exception(exc)
self._on_stop()
self.__notify_waiters(exc=exc)
def _raise(self, exc: type[BaseException] | BaseException) -> None:
if isinstance(exc, type):
exc = exc()
if isinstance(exc, asyncio.CancelledError):
self._cancel()
elif isinstance(exc, StopAsyncIteration):
self._stop()
else:
del self._future
cast(asyncio.Future[_S], self._future).set_exception(exc)
self._on_error(exc)
self.__notify_waiters(exc=exc)
def _cancel(self) -> None:
if self._is_cancelled:
return
self._future.cancel()
self._on_cancel()
self.__notify_waiters(cancel=True)
def _on_set(self, state: _S) -> None:
self._is_set = True
for j, parent in self._collections:
# noinspection PyProtectedMember
parent._on_item_set(j, state)
def _on_clear(self) -> None:
assert not self._is_cancelled
self._is_set = False
self._is_error = False
self._is_stopped = False
for j, parent in self._collections:
# noinspection PyProtectedMember
parent._on_item_del(j)
def _on_stop(self) -> None:
self._is_stopped = True
for j, parent in self._collections:
# noinspection PyProtectedMember
parent._on_item_stop(j)
def _on_error(self, exc: BaseException) -> None:
if isinstance(exc, asyncio.CancelledError):
self._on_cancel()
elif isinstance(exc, StopAnyIteration):
self._on_stop()
else:
self._is_error = True
for j, parent in self._collections:
# noinspection PyProtectedMember
parent._on_item_error(j, exc)
def _on_cancel(self) -> None:
self._is_cancelled = True
for j, parent in self._collections:
# noinspection PyProtectedMember
parent._on_item_cancel(j)
self._collections.clear()
def _equals(self, state: _S) -> bool:
"""Returns True if set and the argument is equal to the current state"""
future = self._future
if not future.done():
return False
# raises exception if thrown or cancelled
key = self._key(state)
key_current = self._key(future.result())
return key is key_current or key == key_current
def _check(self) -> None | NoReturn:
future = self._future
if future.done():
# raises after set_exception() or cancel()
future.result()
consumer = self._consumer
if consumer is not Nothing and consumer.done():
if consumer.exception() is not None:
consumer.result() # raises exception
elif consumer.cancelled() and not self.is_set:
consumer.result() # raises asyncio.CancelledError
def _check_next(self) -> None | NoReturn:
consumer = self._consumer
if consumer is not Nothing and consumer.done():
consumer.result()
raise StopAsyncIteration
self._check()
if self.is_done:
raise StopAsyncIteration
def _ensure_mutable(self) -> None | NoReturn:
if self.readonly:
raise StateError(f'{self!r} is readonly')
def __notify_waiters(self, result=None, exc=None, cancel=False,):
for waiter in self.__waiters.values():
if waiter.done():
continue
if cancel:
waiter.cancel()
elif exc is not None:
waiter.set_exception(exc)
else:
waiter.set_result(result)
def __get_fresh_future(self) -> asyncio.Future[_S]:
del self._future
return self._future
class StateCollection(State[_SS], Generic[_KT, _S, _SS]):
@abc.abstractmethod
def __iter__(self) -> Iterator[State[_S]]: ...
@abc.abstractmethod
def __contains__(self, item: object) -> bool: ...
@abc.abstractmethod
def _get_states(self) -> Mapping[_KT, State[_S]]: ...
@overload
@abc.abstractmethod
def _get_data(self) -> _SS: ...
@overload
@abc.abstractmethod
def _get_data(self, default: _SS = ...) -> _SS: ...
@abc.abstractmethod
def _get_data(self, default: Maybe[_S] = Nothing) -> _SS: ...
def __len__(self) -> int:
return len(self._get_states())
def __getitem__(self, key: _KT) -> State[_S]:
return self._get_states()[key]
def __setitem__(self, key: _KT, value: _S) -> None:
self._get_states()[key]._set(value)
@property
def readonly(self) -> bool:
return super().readonly or any(
s.readonly for s in self._get_states().values()
)
@property
def any_done(self) -> bool:
return any(s.is_done for s in self._get_states().values())
@property
def all_done(self) -> bool:
return all(s.is_done for s in self._get_states().values())
@property
def any_set(self) -> bool:
return any(s.is_set for s in self._get_states().values())
@property
def all_set(self) -> bool:
return all(s.is_set for s in self._get_states().values())
@property
def any_stopped(self) -> bool:
return any(s.is_stopped for s in self._get_states().values())
@property
def all_stopped(self) -> bool:
return all(s.is_stopped for s in self._get_states().values())
@property
def any_error(self) -> bool:
return any(s.is_error for s in self._get_states().values())
@property
def all_error(self) -> bool:
return all(s.is_error for s in self._get_states().values())
@property
def any_cancelled(self) -> bool:
return any(s.is_cancelled for s in self._get_states().values())
@property
def all_cancelled(self) -> bool:
return all(s.is_cancelled for s in self._get_states().values())
@overload
def get(self, key: NothingType = ..., /) -> _SS: ...
@overload
def get(self, key: _KT, /) -> _S: ...
@overload
def get(self, key: _KT, /, default: _T = ...) -> _S | _T: ...
def get(
self,
key: Maybe[_KT] = Nothing,
/,
default: Maybe[_T] = Nothing
) -> _SS | _S | _T:
if key is Nothing:
return self._get_data()
if key not in (states := self._get_states()):
if default is Nothing:
raise KeyError(key)
return default
return states[key]._get(default)
# Internal: following methods are called by a statevar after it was updated
def _sync_soon(self):
self._loop.call_soon(lambda: self._set(self._get_data()))
# noinspection PyUnusedLocal
def _on_item_set(self, item: _KT, value: _S) -> None:
if self.all_set:
self._sync_soon()
# noinspection PyUnusedLocal
def _on_item_del(self, item: _KT) -> None:
if self.all_set:
self._sync_soon()
# noinspection PyUnusedLocal
def _on_item_stop(self, item: _KT) -> None:
self._loop.call_soon(self._stop)
def _on_item_error(self, item: _KT, exc: BaseException) -> None:
if isinstance(exc, (StopIteration, StopAsyncIteration, GeneratorExit)):
self._on_item_stop(item)
return
if isinstance(exc, asyncio.CancelledError):
self._on_item_cancel(item)
return
self._loop.call_soon(self._raise, exc)
# noinspection PyUnusedLocal
def _on_item_cancel(self, item: _KT) -> None:
loop = self._loop
if loop.is_closed():
self._cancel()
else:
loop.call_soon(self._cancel)
```
#### File: asynced/tests/test_asyncio_utils.py
```python
import asyncio
from asynced import race
async def test_race():
async def slowrange(t0, dt, *args):
await asyncio.sleep(t0)
for i in range(*args):
yield i
await asyncio.sleep(dt)
dt = 0.05
n = 3
it_0_2 = slowrange(dt * 0, dt * 2, n)
it_1_2 = slowrange(dt * 1, dt * 2, n)
res = [(i, j) async for i, j in race(it_0_2, it_1_2)]
assert len(res) == 2 * n
res_i = [i for i, _ in res]
assert not any(res_i[::2])
assert all(res_i[1::2])
res_j = [j for _, j in res]
assert res_j[::2] == res_j[1::2]
``` |
{
"source": "jorenham/p-y",
"score": 3
} |
#### File: p-y/hall/analysis.py
```python
from __future__ import annotations
__all__ = ["Function", "Interval", "DiscreteInterval", "supp", "convolve"]
import abc
from typing import (
Callable,
Generic,
List,
Optional,
Protocol,
TypeVar,
Union,
cast,
runtime_checkable,
)
import mpmath
from hall.numbers import (
AnyNumber,
CleanNumber,
ComplexType,
FloatType,
IntType,
Number,
clean_number,
is_complex,
is_float,
is_int,
is_number,
)
def _cmax(*args: Number) -> Number:
res = None
for arg in args:
if res is None:
res = arg
elif is_int(res) or is_float(res): # not complex
if arg > res:
res = arg
else:
if arg.real > res.real and arg.imag > res.imag:
res = arg
elif arg.real > res.real:
res = type(res)(arg.real, res.imag)
elif arg.imag > res.imag:
res = type(res)(res.real, arg.imag)
if res is None:
raise ValueError("cmax expected 1 argument, got 0")
return res
def _cmin(*args: Number) -> Number:
return cast(Number, -_cmax(*(-arg for arg in args)))
class Interval(Generic[Number]):
"""Closed interval (endpoints a and b are included)"""
a: Number
b: Number
def __init__(self, a: AnyNumber, b: AnyNumber, /):
if a.real > b.real:
a, b = b, a
if a.imag > b.imag:
a, b = ComplexType(a.real, b.imag), ComplexType(b.real, a.imag)
self.a, self.b = clean_number(a), clean_number(b)
def __repr__(self):
return f"{type(self).__name__}({self.a!r}, {self.b!r})"
def __str__(self):
return f"[{self.a}, {self.b}]"
def __contains__(self, x: Union[Number, Interval[Number], object]):
if isinstance(x, Interval):
# subinterval check
return x.a in self and x.b in self
if is_number(x):
y = clean_number(x)
return (
self.a.real <= y.real <= self.b.real
and self.a.imag <= y.imag <= self.b.imag
)
return False
def __eq__(self, other) -> bool:
if isinstance(other, Interval):
return bool(self.a == other.a and self.b == other.b)
elif self.is_degenerate and is_number(other):
return bool(self.a == clean_number(other))
return False
def __lt__(self, other) -> bool:
if isinstance(other, Interval):
# subinterval check
other = other.a
if is_number(other):
x = clean_number(other)
return bool(self.b.real < x.real or self.b.imag < x.imag)
return NotImplemented
def __le__(self, other) -> bool:
if isinstance(other, Interval) or is_number(other):
return bool(self == other or self < other)
return NotImplemented
def __gt__(self, other) -> bool:
if isinstance(other, Interval):
# subinterval check
other = other.b
if is_number(other):
x = clean_number(other)
            return bool(self.a.real > x.real or self.a.imag > x.imag)
return NotImplemented
def __ge__(self, other) -> bool:
if isinstance(other, Interval) or is_number(other):
return bool(self == other or self > other)
return NotImplemented
def __and__(self, other: Interval) -> Optional[Interval]:
if isinstance(other, Interval):
if self.isdisjoint(other):
return None
return type(self)(_cmax(self.a, other.a), _cmin(self.b, other.b))
return NotImplemented
def __or__(self, other: Interval) -> Interval:
if isinstance(other, Interval):
if self.isdisjoint(other):
# TODO mask the gap, or create a disjoint interval class
raise NotImplementedError
return type(self)(_cmin(self.a, other.a), _cmax(self.b, other.b))
return NotImplemented
def __add__(self, other) -> Interval:
if isinstance(other, Interval):
return type(self)(self.a + other.a, self.b + other.b)
if is_number(other):
x = clean_number(other)
return type(self)(self.a + x, self.b + x)
return NotImplemented
def __sub__(self, other) -> Interval:
if isinstance(other, Interval):
return type(self)(self.a - other.a, self.b - other.b)
if is_number(other):
x = clean_number(other)
return type(self)(self.a - x, self.b - x)
return NotImplemented
def __mul__(self, other) -> Interval:
if isinstance(other, Interval):
return type(self)(self.a * other.a, self.b * other.b)
if is_number(other):
x = clean_number(other)
return type(self)(self.a * x, self.b * x)
return NotImplemented
def __truediv__(self, other) -> Interval:
if isinstance(other, Interval):
return type(self)(self.a / other.a, self.b / other.b)
if is_number(other):
x = clean_number(other)
return type(self)(self.a / x, self.b / x)
return NotImplemented
def __pow__(self, other) -> Interval:
if isinstance(other, Interval):
return type(self)(self.a ** other.a, self.b ** other.b)
if is_number(other):
x = clean_number(other)
return type(self)(self.a ** x, self.b ** x)
return NotImplemented
def __radd__(self, other) -> Interval:
return self.__add__(other)
def __rsub__(self, other) -> Interval:
return self.__sub__(other)
def __rmul__(self, other) -> Interval:
return self.__mul__(other)
def __neg__(self) -> Interval:
return type(self)(-self.a, -self.b)
def __bool__(self) -> bool:
return True
def __hash__(self):
return hash((self.a, self.b))
@property
def is_complex(self) -> bool:
if self.a.imag or self.b.imag:
return True
return is_complex(self.a) or is_complex(self.b)
@property
def is_discrete(self) -> bool:
if self.is_complex:
return False
return is_int(self.a) and is_int(self.b)
@property
def is_bounded(self) -> bool:
return bool(
mpmath.isfinite(self.a.real)
and mpmath.isfinite(self.a.imag)
and mpmath.isfinite(self.b.real)
and mpmath.isfinite(self.b.imag)
)
@property
def is_degenerate(self) -> bool:
return bool(self.a == self.b)
@property
def size(self) -> Number:
return self.b - self.a
@property
def radius(self) -> Union[Number, FloatType]: # lower type bound for int
return self.size / 2
@property
def mid(self) -> Union[Number, FloatType]: # lower type bound for bool
return (self.a + self.b) / 2
@property
def d(self) -> Number:
"""
the difference between two consecutive values for the given precision
of the endpoint types
"""
if self.is_complex:
d = ComplexType(mpmath.mp.eps, mpmath.mp.eps)
elif self.is_discrete:
d = IntType(1)
else:
d = FloatType(mpmath.mp.eps)
return cast(Number, d)
    def isdisjoint(self, other: Interval) -> bool:
        # for collections.AbstractSet compatibility
        return bool(
            self.a.real > other.b.real
            or self.a.imag > other.b.imag
            or self.b.real < other.a.real
            or self.b.imag < other.a.imag
        )
class DiscreteInterval(Interval[IntType]):
def __contains__(self, x: Union[IntType, Interval[IntType], object]):
if not isinstance(x, DiscreteInterval) and not is_int(x):
return False
return super().__contains__(x)
_N_co = TypeVar("_N_co", bound=CleanNumber, covariant=True)
@runtime_checkable
class Function(Protocol[Number, _N_co]):
__slots__ = ()
@property
@abc.abstractmethod
def __discrete__(self) -> bool:
...
@property
@abc.abstractmethod
def __support__(self) -> Interval[Number]:
...
@abc.abstractmethod
def f(self, x: Number) -> _N_co:
...
class _FunctionAlias(Function[Number, _N_co], Generic[Number, _N_co]):
__slots__ = ("__wrapped__", "__support") # noqa
__support: Interval[Number]
def __init__(
self, fn: Callable[[Number], _N_co], support: Interval[Number]
):
self.__wrapped__: Callable[[Number], _N_co] = fn
self.__support = support
@property
def __discrete__(self):
return False
@property
def __support__(self) -> Interval[Number]:
return self.__support
def f(self, x: Number) -> _N_co:
return self.__wrapped__(x)
class _DiscreteFunctionAlias(_FunctionAlias[IntType, _N_co], Generic[_N_co]):
__discrete__ = True
def supp(f: Function[Number, AnyNumber]) -> Interval[Number]:
return f.__support__
def convolve(
f: Function[Number, _N_co], g: Function[Number, _N_co]
) -> Function[Number, _N_co]:
"""
Convolution (lazy):
https://en.wikipedia.org/wiki/Convolution
"""
res_support = f.__support__ + g.__support__
fa, fb = f.__support__.a, f.__support__.b
ga, gb = g.__support__.a, g.__support__.b
integrator: Callable[[Callable[[Number], _N_co], List[Number]], _N_co]
if f.__discrete__ and g.__discrete__:
integrator = mpmath.nsum
else:
integrator = mpmath.quad
def res(n: Number) -> _N_co:
if not is_complex(n):
# clip the support bounds
a = fb if n - ga > fb else ga
b = fa if n - gb < fa else gb
else:
a, b = ga, gb
def dres(m: Number):
return mpmath.fmul(f.f(n - m), g.f(m))
return integrator(dres, [a, b])
if f.__discrete__ and g.__discrete__:
return _DiscreteFunctionAlias(res, res_support)
else:
return _FunctionAlias(res, res_support)
```
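A short sketch of the closed-interval arithmetic defined above; endpoints are normalised through `clean_number`, so plain Python ints and floats work directly:

```python
from hall.analysis import Interval

a = Interval(0, 10)
b = Interval(5, 20)

print(a & b)                         # [5, 10]  (intersection)
print(a | b)                         # [0, 20]  (hull of overlapping intervals)
print(a + b)                         # [5, 30]  (endpoint-wise addition)
print(7 in a, Interval(2, 3) in a)   # True True  (membership / subinterval check)
```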
#### File: p-y/hall/_core.py
```python
from __future__ import annotations
__all__ = ["Distribution", "RandomVar"]
import abc
from typing import (
Generic,
Optional,
Protocol,
TypeVar,
Union,
cast,
runtime_checkable,
)
import mpmath
from hall._event import EventEq, EventInterval
from hall.analysis import Function, Interval
from hall.numbers import (
CleanNumber,
ComplexType,
FloatType,
IntType,
Number,
Probability,
clean_number,
is_number,
)
@runtime_checkable
class BaseDistribution(Function[Number, Probability], Protocol[Number]):
__slots__ = ()
@property
@abc.abstractmethod
def mean(self) -> Union[Number, FloatType]:
"""
The mean/expected value of the distribution
"""
...
@property
@abc.abstractmethod
def variance(self) -> Union[Number, FloatType]:
"""
The variance of the distribution
"""
...
@abc.abstractmethod
def f(self, x: Number) -> Probability:
"""
Probability Mass/Density Function (PMF/PDF) that must integrate to 1.
"""
...
@abc.abstractmethod
def F(self, x: Number) -> Probability:
"""
Cumulative Distribution Function (CDF); the integral of `f`.
"""
...
@abc.abstractmethod
def G(self, y: Probability) -> Number:
"""
Percent Point Function (PPF); the inverse of the CDF `F`
"""
...
class Distribution(BaseDistribution[Number], Protocol[Number]):
__slots__ = ()
def __invert__(self) -> RandomVar[Number]:
return RandomVar(self)
_MapValue = TypeVar("_MapValue", IntType, FloatType, ComplexType)
@runtime_checkable
class BaseRandomVar(BaseDistribution[Number], Protocol[Number]):
__slots__ = ()
# discrete events
def __eq__(self, other) -> EventEq: # type: ignore
if isinstance(other, BaseRandomVar):
return self - other == 0
return EventEq(self, other)
def __ne__(self, other) -> EventEq[Number]: # type: ignore
if isinstance(other, BaseRandomVar):
return self - other != 0
return EventEq(self, other, _inv=True)
def __invert__(self) -> EventEq[Number]:
return EventEq(self, 0)
# continuous events
def __lt__(self, other) -> EventInterval[Number]:
if not is_number(other):
raise TypeError(type(other).__name__)
return EventInterval(self, b=clean_number(other - self.__support__.d))
def __le__(self, other) -> EventInterval[Number]:
if not is_number(other):
raise TypeError(type(other).__name__)
return EventInterval(self, b=other)
def __gt__(self, other) -> EventInterval[Number]:
if not is_number(other):
raise TypeError(type(other).__name__)
return EventInterval(self, a=other)
def __ge__(self, other) -> EventInterval[Number]:
if not is_number(other):
raise TypeError(type(other).__name__)
if self.__discrete__:
return EventInterval(self, a=other + 1)
return EventInterval(self, a=other) # TODO some tiny amount
# algebra
def __add__(self, other) -> MappedRandomVar[Number, _MapValue]:
if is_number(other):
return self._as_mapped(add=clean_number(other))
return NotImplemented
def __sub__(self, other) -> MappedRandomVar[Number, _MapValue]:
if is_number(other):
return self._as_mapped(add=clean_number(-other))
return NotImplemented
def __mul__(self, other) -> MappedRandomVar[Number, _MapValue]:
if is_number(other):
return self._as_mapped(mul=clean_number(other))
return NotImplemented
def __truediv__(self, other) -> MappedRandomVar[Number, _MapValue]:
if is_number(other):
return self._as_mapped(div=clean_number(other))
return NotImplemented
def __radd__(self, other) -> MappedRandomVar[Number, _MapValue]:
return self.__add__(other)
def __rsub__(self, other) -> MappedRandomVar[Number, _MapValue]:
return self.__neg__().__add__(other)
def __rmul__(self, other) -> MappedRandomVar[Number, _MapValue]:
return self.__mul__(other)
def __neg__(self) -> MappedRandomVar[Number, _MapValue]:
return self._as_mapped(mul=-1)
# utility
def __contains__(self, x) -> bool:
"""returns True iff x is a valid outcome"""
return x in self.__support__
@abc.abstractmethod
def _as_mapped(
self,
*,
add: Optional[_MapValue] = None,
mul: Optional[_MapValue] = None,
div: Optional[_MapValue] = None,
) -> MappedRandomVar[Number, _MapValue]:
...
class RandomVar(BaseRandomVar[Number], Generic[Number]):
__slots__ = ("distribution",) # noqa
distribution: Distribution[Number]
def __init__(self, distribution: Distribution[Number], /):
self.distribution = distribution
@property
def __discrete__(self) -> bool:
return self.distribution.__discrete__
@property
def __support__(self) -> Interval[Number]:
return self.distribution.__support__
@property
def mean(self) -> Number:
return self.distribution.mean
@property
def variance(self) -> Number:
return self.distribution.variance
def f(self, x: CleanNumber) -> Probability:
return self.distribution.f(x)
def F(self, x: CleanNumber) -> Probability:
return self.distribution.F(x)
def G(self, y: Probability) -> Number:
return self.distribution.G(y)
def _as_mapped(
self,
*,
add: Optional[_MapValue] = None,
mul: Optional[_MapValue] = None,
div: Optional[_MapValue] = None,
) -> MappedRandomVar[Number, _MapValue]:
kwargs = {}
if add is not None:
kwargs["add"] = add
if mul is not None:
kwargs["mul"] = mul
if div is not None:
kwargs["div"] = div
return MappedRandomVar(self, **kwargs)
_NumberV = TypeVar(
"_NumberV", IntType, FloatType, ComplexType, contravariant=True
)
_NumberR = TypeVar(
"_NumberR", IntType, FloatType, ComplexType, contravariant=True
)
class MappedRandomVar(BaseRandomVar[_NumberR], Generic[_NumberV, _NumberR]):
"""Linearly mapped random var, i.e. Y -> aX + b"""
__arg__: RandomVar[_NumberV]
__add: _NumberR
__mul: _NumberR
__div: _NumberR # kept separately to avoid rounding errors
def __init__(
self,
X: RandomVar[_NumberV],
/,
*,
add: _NumberR = 0,
mul: _NumberR = 1,
div: _NumberR = 1,
):
self.__arg__ = X
if not is_number(add):
raise TypeError("addend must be a number")
if not is_number(mul):
raise TypeError("multiplier must be a number")
if not is_number(div):
raise TypeError("divisor must be a number")
self.__add = add
self.__mul = mul
self.__div = div
@property
def __discrete__(self) -> bool:
return self.__arg__.__discrete__
@property
def __support__(self) -> Interval[_NumberR]:
a = self.__arg__.__support__.a
b = self.__arg__.__support__.b
return Interval(self._apply(a), self._apply(b))
@property
def mean(self) -> _NumberR:
return self._apply(self.__arg__.mean)
@property
def variance(self) -> _NumberR:
        a = mpmath.fraction(self.__mul, self.__div)
return cast(_NumberR, self.__arg__.variance * a * a)
def f(self, x: _NumberR) -> Probability:
return self.__arg__.f(self._unapply(x))
def F(self, x: _NumberR) -> Probability:
return self.__arg__.F(self._unapply(x))
def G(self, y: Probability) -> _NumberR:
return self._apply(self.__arg__.G(y))
def _apply(self, x: _NumberV) -> _NumberR:
return cast(
_NumberR, x * mpmath.fraction(self.__mul, self.__div) + self.__add
)
def _unapply(self, y: _NumberR) -> _NumberV:
x = mpmath.fraction((y - self.__add) * self.__div, self.__mul)
if self.__discrete__:
return IntType(round(x))
return cast(_NumberV, clean_number(x))
def _as_mapped(
self,
*,
add: Optional[_MapValue] = None,
mul: Optional[_MapValue] = None,
div: Optional[_MapValue] = None,
) -> MappedRandomVar[Number, _MapValue]:
kwargs = dict(add=self.__add, mul=self.__mul, div=self.__div)
if add is not None:
kwargs["add"] += add
if mul is not None:
kwargs["add"] *= mul
kwargs["mul"] *= mul
if div is not None:
kwargs["add"] /= div # type: ignore
kwargs["div"] = div
return MappedRandomVar(self.__arg__, **kwargs)
```
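To illustrate the `Y = aX + b` mapping algebra above, here is a toy distribution (not part of the library, defined only for this example) that satisfies the `Distribution` protocol just enough to be wrapped in a `RandomVar`:

```python
from hall._core import Distribution, RandomVar
from hall.analysis import DiscreteInterval

class FairDie(Distribution):
    """Hypothetical six-sided die, used only to exercise the mapping algebra."""
    __discrete__ = True
    __support__ = DiscreteInterval(1, 6)
    mean = 3.5
    variance = 35 / 12

    def f(self, x):
        return 1 / 6

    def F(self, x):
        return x / 6

    def G(self, y):
        return round(6 * y)

X = ~FairDie()      # Distribution.__invert__ wraps the distribution in a RandomVar
Y = 2 * X + 1       # builds a MappedRandomVar with mul=2, add=1
print(Y.mean)       # 2 * 3.5 + 1 = 8.0
print(Y.variance)   # Var(2X + 1) = 4 * Var(X)
```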
#### File: p-y/hall/numbers.py
```python
from __future__ import annotations
__all__ = [
"INT_TYPES",
"FLOAT_TYPES",
"COMPLEX_TYPES",
"NUMBER_TYPES",
"NUMBER_TYPES_RAW",
"IntType",
"FloatType",
"Probability",
"ComplexType",
"AnyInt",
"AnyFloat",
"AnyComplex",
"AnyNumber",
"CleanNumber",
"Float",
"Complex",
"Number",
"is_int",
"is_float",
"is_probability",
"is_complex",
"is_number",
"is_mp_number",
"clean_number",
]
import sys
from decimal import Decimal
from fractions import Fraction
from typing import Any, TypeVar, Union, overload
import mpmath
from typing_extensions import TypeGuard
if sys.version_info >= (3, 10):
from types import EllipsisType # noqa
else:
EllipsisType = None
from mpmath import ctx_mp_python, libmp
"""
for all `numbers.*`, `T = TypeVar("T", bound=numbers.Real)` is bugged in mypy:
https://github.com/python/mypy/issues/3186
"""
# integers
# IntType: Any = libmp.MPZ_TYPE # either `gmpy.mpz`, `sage.Integer` or `int`
IntType = int
INT_TYPES = (int, IntType)
AnyInt = Union[int, IntType]
Int = TypeVar("Int", bound=AnyInt)
def is_int(x: object) -> TypeGuard[AnyInt]:
return isinstance(x, INT_TYPES)
# floats
FloatType: Any = mpmath.mpf
ConstantType: Any = mpmath.mp.constant
# float alias that implies the value to be within [0, 1]
Probability = FloatType
FLOAT_TYPES = (float, FloatType, ConstantType)
AnyFloat = Union[float, FloatType, ConstantType]
Float = TypeVar("Float", bound=AnyFloat)
def is_float(x: object) -> TypeGuard[AnyFloat]:
return isinstance(x, FLOAT_TYPES)
def is_probability(x: object) -> TypeGuard[AnyFloat]:
return is_float(x) and bool(0.0 <= FloatType(x) <= 1.0)
# complex
ComplexType: Any = ctx_mp_python._mpc # noqa
COMPLEX_TYPES = (complex, ComplexType)
AnyComplex = Union[complex, ComplexType]
Complex = TypeVar("Complex", bound=AnyComplex)
def is_complex(x: object) -> TypeGuard[AnyComplex]:
return isinstance(x, COMPLEX_TYPES)
# all supported numeric types
NUMBER_TYPES_RAW = INT_TYPES + FLOAT_TYPES + COMPLEX_TYPES + (Decimal, Fraction)
AnyNumber = Union[AnyInt, AnyFloat, AnyComplex, Decimal, Fraction]
RawNumber = TypeVar("RawNumber", bound=AnyNumber)
NUMBER_TYPES = IntType, FloatType, ComplexType
CleanNumber = Union[IntType, FloatType, ComplexType]
Number = TypeVar("Number", IntType, FloatType, ComplexType)
NUMBER_TYPES_PY = (int, float, complex, Decimal, Fraction)
def is_number(x: object):
return isinstance(x, NUMBER_TYPES_RAW)
def is_mp_number(x: object) -> bool:
return is_number(x) and not isinstance(x, NUMBER_TYPES_PY)
@overload
def clean_number(x: AnyInt) -> IntType:
...
@overload
def clean_number(x: Union[AnyFloat, Decimal, Fraction]) -> FloatType:
...
@overload
def clean_number(x: AnyComplex) -> ComplexType:
...
def clean_number(x: AnyNumber) -> CleanNumber:
if not is_number(x):
raise TypeError("not a number")
if is_int(x):
return IntType(x)
if is_float(x):
return FloatType(x)
if isinstance(x, (Decimal, Fraction)):
return FloatType(x)
if is_complex(x):
return ComplexType(x)
raise TypeError(f"unknown number type {type(x).__name__!r}")
``` |
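A couple of quick calls showing what the normalisation above does with ordinary Python values:

```python
from hall.numbers import clean_number, is_probability, is_int

x = clean_number(3)
print(x, is_int(x))                  # 3 True  (integers stay integers)
print(repr(clean_number(0.25)))      # mpf('0.25')  (floats become mpmath floats)
print(is_probability(0.25))          # True
print(is_probability(1.5))           # False  (a float, but outside [0, 1])
```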
{
"source": "jorenham/webcam-vr",
"score": 3
} |
#### File: jorenham/webcam-vr/position.py
```python
import numpy as np
from imutils.video import VideoStream
from imutils import face_utils
import argparse
import imutils
import time
import dlib
import cv2
SHAPE_PREDICTOR = 'shape_predictor_5_face_landmarks.dat'
def get_stream():
# initialize the video stream and allow the cammera sensor to warmup
print("[INFO] camera sensor warming up...")
vs = VideoStream().start()
time.sleep(2.0)
return vs
def get_frame(stream: VideoStream, width):
# grab the frame from the threaded video stream, resize it to
# have a maximum width
frame = stream.read()
return imutils.resize(frame, width=width)
def get_eye_position(frame):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
dims = np.array([frame.shape[1], frame.shape[0]])
# detect faces in the grayscale frame
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(SHAPE_PREDICTOR)
rects = detector(gray, 0)
# nothing detected
if not rects:
return None
# only use first detected first face
rect = rects[0]
# determine the facial landmarks for the face region, then
# convert the facial landmark (x, y)-coordinates to a NumPy
# array
shape = predictor(gray, rect)
shape = face_utils.shape_to_np(shape)
# use the mean of the first and second two points to find the pupillary
# distance in relative coords
pupils = np.vstack((
(shape[0, :] + shape[1, :]) / 2,
(shape[2, :] + shape[3, :]) / 2,
))
pupils /= dims
pupillary_distance = np.abs(np.diff(pupils))
# find x, y position of eye center
position = (pupils[0, :] + pupils[1, :]) / 2
# append z [0, 1] coordinate based on pd
position = np.append(position, pupillary_distance[1])
return position
def show_frame_with_position(position, frame):
# draw virtual position
position_pixels = (
int((1 - position[0]) * frame.shape[1]),
int((1 - position[1]) * frame.shape[0])
)
size = int((frame.shape[1] / 10) * position[2])
color = (0, 255, 0)
cv2.line(frame, (int(frame.shape[1]/2), int(frame.shape[0]/2)), position_pixels, color)
cv2.circle(frame, position_pixels, size, color, -1)
# show the frame
cv2.imshow("Frame", frame)
if __name__ == '__main__':
vs = get_stream()
try:
prev = None
while True:
f = get_frame(stream=vs, width=1200)
pos = get_eye_position(f)
            # fall back to the previous position if no face is detected;
            # skip the frame entirely if there is nothing to fall back to
            if pos is None:
                if prev is None:
                    continue
                pos = prev
            prev = pos
            show_frame_with_position(pos, f)
            # imshow needs a waitKey call to actually refresh the window
            cv2.waitKey(1)
except KeyboardInterrupt:
pass
finally:
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
``` |
{
"source": "jorenretel/Malandro",
"score": 2
} |
#### File: jorenretel/Malandro/installMalandro.py
```python
import sys
import os
def runInstaller(argServer) :
pathToPython = sys.executable
print pathToPython
workingDirectory = os.path.dirname(__file__)
import subprocess
print 'Compiling malandro for your version of ccpn analysis...'
print 'using python version %s' %sys.version
process = subprocess.Popen([pathToPython, 'setupC.py', 'build_ext', '--inplace'], cwd=workingDirectory, stdout=subprocess.PIPE)
process.wait()
if process.returncode == 0:
print 'Compiling is done, you can now run the macro.'
```
#### File: malandro/backend/generalFactory.py
```python
def generalFactory(ClassOfObject, *args):
return ClassOfObject.__new__(ClassOfObject, *args)
```
#### File: malandro/gui/benchMark.py
```python
import csv
from time import localtime
def createBenchmark(results):
    '''Compare the assignment made by malandro to the manual
    assignment and write the result to a timestamped CSV file.
    '''
dirName = '/home/joren/malandroBenchmarks/'
ts = timeStamp()
#projectName = project.shortName
filePath = '%s%s_%s' % (dirName, 'benchMark', ts)
with open(filePath, 'wb') as csvfile:
writer = csv.writer(csvfile, dialect='excel')
writer.writerow([ts])
spectraNames = [spectrum.getName()
for spectrum in results.getSpectra()]
writer.writerow(spectraNames)
writer.writerow(['% agree',
'% disagree',
'% agreeing Joker',
'% disagreeing Joker'])
residues = results.getChain().getResidues()
for residue in residues:
writer.writerow(calculatePercentages(residue))
def calculatePercentages(res):
'''Calculate how often 4 types of spin realSpinSystems
assignments are made to this residue:
1) agreeing with solution in project
2) highest scoring assignment that does
not agree with the assignment in the
project.
3) agreeing joker. A joker is placed on
a residue that does not have any spin
system assignment in the project either.
4) disagreeing joker. A joker is placed
on a residue that does have a spin
system assignment in the project.
args: res (src.cython.malandro.Residue)
return: list [sequence code,
agree percentage,
disagree percentage,
agreeing joker percentage,
disagreeing joker percentage]
'''
data = []
jokers = []
realSpinSystems = []
resSeqCode = res.getSeqCode()
if res.getCcpnResidue().findFirstResonanceGroup():
manualAssigned = True
else:
manualAssigned = False
solutions = res.getSolutions()
Nsolutions = len(solutions)
solutionSet = set(solutions)
for spinSys in solutionSet:
if spinSys.getIsJoker():
jokers.append(spinSys)
else:
realSpinSystems.append(spinSys)
for spinsys in realSpinSystems:
assignmentPercentage = int(
float(solutions.count(spinsys)) / Nsolutions * 100.0)
data.append((assignmentPercentage, spinsys))
if jokers:
NumberOfAssignmentsToJoker = 0
for spinSys in jokers:
NumberOfAssignmentsToJoker += solutions.count(spinSys)
assignmentPercentage = int(
float(NumberOfAssignmentsToJoker) / Nsolutions * 100.0)
data.append((assignmentPercentage, None))
data = sorted(data, reverse=True)
bestWrongPercentage = 0
agreePercentage = 0
goodJoker = 0
badJoker = 0
foundBad = False
for percent, spinSys in data:
if spinSys:
# print '----'
# print spinSys.getCcpnResonanceGroup().residue.seqCode
# print resSeqCode
if spinSys.getCcpnResonanceGroup().residue and spinSys.getCcpnResonanceGroup().residue.seqCode == resSeqCode:
agreePercentage = percent
elif not foundBad:
bestWrongPercentage = percent
foundBad = True
else: # Joker
if manualAssigned:
if not foundBad:
badJoker = percent
foundBad = True
else:
goodJoker = percent
return [resSeqCode,
agreePercentage,
bestWrongPercentage * -1,
goodJoker, badJoker * -1]
def timeStamp():
'''Returns string timestamp.'''
t = localtime()
return '%s_%s_%s_%s_%s' % (str(t.tm_year), str(t.tm_mon), str(t.tm_mday), str(t.tm_hour), str(t.tm_min))
```
#### File: malandro/gui/color.py
```python
def pick_color_by_percentage(percentage, asHex=True):
'''Get a color from a predefined scale for a percentage.
args: percentage: the percentage on the scale
asHex: boolean: if true return hex (default)
else: return rgb tuple
'''
if not 0 <= percentage <= 100:
raise ValueError('percentage should be between 0 and 100')
RGB = pick_color_from_scale(percentage,
_standardBackGroundColorRGB,
_niceGreenRGB)
if asHex:
return rgb_to_hex(RGB)
else:
return RGB
def pick_color_from_scale(percentage, minRGB, maxRGB):
'''Pick color from scale.
args: percentage: percentage on scale
minRGB: color corresponding to 0%
maxRGB: color corresponding to 100%
returns: rgb tuple
'''
percentage = float(percentage)
newRGB = []
for minimal, maximal in zip(minRGB, maxRGB):
colorPart = int(minimal + (percentage / 100.0) * (maximal - minimal))
newRGB.append(colorPart)
return tuple(newRGB)
def pickColorByPercentage(percentage):
'''deprecated'''
percentage = float(percentage)
if percentage < 1:
return 'grey83'
if percentage > 80:
red = 0
green = int(percentage / 100.0 * 255.0)
blue = int(255.0 - (percentage / 100.0 * 255.0))
elif percentage > 50:
green = 0
blue = int(percentage / 80.0 * 255.0)
red = int(255.0 - (percentage / 80.0 * 255.0))
else:
red = 255
green = 0
blue = 0
red = rgb_to_hex(red)
green = rgb_to_hex(green)
blue = rgb_to_hex(blue)
color = '#' + red + green + blue
return color
def rgb_to_hex(rgb):
'''Returns hex for rgb iterable'''
return '#' + ''.join([format(x, '02x') for x in rgb])
def grey_scale(rgb):
'''Get grey scale for rgb tuple or list'''
return tuple([int(((rgb[0] * 299) + (rgb[1] * 587) + (rgb[2] * 114)) / 1000)] * 3)
highLightRed = rgb_to_hex((227, 26, 28))
highLightYellow = rgb_to_hex((253, 191, 111))
_standardBackGroundColorRGB = (212, 212, 212) # This is grey83
_niceGreenRGB = (0, 130, 60)
#_niceGreenRGB = (51,160,44)
# http://colorbrewer2.org/ qualitative 12 colors
colorSeries = ['#a6cee3',
'#1f78b4',
'#b2df8a',
'#33a02c',
'#fb9a99',
'#e31a1c',
'#fdbf6f',
'#ff7f00',
'#cab2d6',
'#6a3d9a',
'#ffff99',
'#b15928']
```
#### File: jorenretel/Malandro/start_malandro.py
```python
import malandro.gui.malandroGUI as GUI
from version import __version__
def open_malandro(argServer):
"""Descrn: Opens the macro.
Inputs: ArgumentServer
Output: None
"""
print 'Malandro Version {}'.format(__version__)
reload(GUI)
GUI.Connector(argServer.parent)
``` |
{
"source": "Joreshic/python-for-android",
"score": 2
} |
#### File: recipes/libx264/__init__.py
```python
from pythonforandroid.toolchain import Recipe, shprint, current_directory, ArchARM
from os.path import exists, join, realpath
from os import uname
import glob
import sh
class LibX264Recipe(Recipe):
version = 'x264-snapshot-20170608-2245-stable' # using mirror url since can't use ftp
url = 'http://mirror.yandex.ru/mirrors/ftp.videolan.org/x264/snapshots/{version}.tar.bz2'
md5sum = 'adf3b87f759b5cc9f100f8cf99276f77'
def should_build(self, arch):
build_dir = self.get_build_dir(arch.arch)
return not exists(join(build_dir, 'lib', 'libx264.a'))
def build_arch(self, arch):
with current_directory(self.get_build_dir(arch.arch)):
env = self.get_recipe_env(arch)
configure = sh.Command('./configure')
shprint(configure,
'--cross-prefix=arm-linux-androideabi-',
'--host=arm-linux',
'--disable-asm',
'--disable-cli',
'--enable-pic',
'--disable-shared',
'--enable-static',
'--prefix={}'.format(realpath('.')),
_env=env)
shprint(sh.make, '-j4', _env=env)
shprint(sh.make, 'install', _env=env)
recipe = LibX264Recipe()
```
#### File: recipes/mysqldb/__init__.py
```python
from pythonforandroid.recipe import CompiledComponentsPythonRecipe
from os.path import join
class MysqldbRecipe(CompiledComponentsPythonRecipe):
name = 'mysqldb'
version = '1.2.5'
url = 'https://pypi.python.org/packages/source/M/MySQL-python/MySQL-python-{version}.zip'
site_packages_name = 'MySQLdb'
depends = ['python2', 'setuptools', 'libmysqlclient']
patches = ['override-mysql-config.patch',
'disable-zip.patch']
# call_hostpython_via_targetpython = False
def convert_newlines(self, filename):
print('converting newlines in {}'.format(filename))
with open(filename, 'rb') as f:
data = f.read()
with open(filename, 'wb') as f:
f.write(data.replace(b'\r\n', b'\n').replace(b'\r', b'\n'))
def prebuild_arch(self, arch):
super(MysqldbRecipe, self).prebuild_arch(arch)
setupbase = join(self.get_build_dir(arch.arch), 'setup')
self.convert_newlines(setupbase + '.py')
self.convert_newlines(setupbase + '_posix.py')
def get_recipe_env(self, arch=None):
env = super(MysqldbRecipe, self).get_recipe_env(arch)
hostpython = self.get_recipe('hostpython2', self.ctx)
# TODO: fix hardcoded path
env['PYTHONPATH'] = (join(hostpython.get_build_dir(arch.arch),
'build', 'lib.linux-x86_64-2.7') +
':' + env.get('PYTHONPATH', ''))
libmysql = self.get_recipe('libmysqlclient', self.ctx)
mydir = join(libmysql.get_build_dir(arch.arch), 'libmysqlclient')
# env['CFLAGS'] += ' -I' + join(mydir, 'include')
# env['LDFLAGS'] += ' -L' + join(mydir)
libdir = self.ctx.get_libs_dir(arch.arch)
env['MYSQL_libs'] = env['MYSQL_libs_r'] = '-L' + libdir + ' -lmysql'
env['MYSQL_cflags'] = env['MYSQL_include'] = '-I' + join(mydir,
'include')
return env
recipe = MysqldbRecipe()
```
#### File: recipes/opencv/__init__.py
```python
import os
import sh
from pythonforandroid.toolchain import (
NDKRecipe,
Recipe,
current_directory,
info,
shprint,
)
from multiprocessing import cpu_count
class OpenCVRecipe(NDKRecipe):
version = '2.4.10.1'
url = 'https://github.com/Itseez/opencv/archive/{version}.zip'
#md5sum = '2ddfa98e867e6611254040df841186dc'
depends = ['numpy']
patches = ['patches/p4a_build-2.4.10.1.patch']
generated_libraries = ['cv2.so']
def prebuild_arch(self, arch):
self.apply_patches(arch)
def get_recipe_env(self,arch):
env = super(OpenCVRecipe, self).get_recipe_env(arch)
env['PYTHON_ROOT'] = self.ctx.get_python_install_dir()
env['ANDROID_NDK'] = self.ctx.ndk_dir
env['ANDROID_SDK'] = self.ctx.sdk_dir
env['SITEPACKAGES_PATH'] = self.ctx.get_site_packages_dir()
return env
def build_arch(self, arch):
with current_directory(self.get_build_dir(arch.arch)):
env = self.get_recipe_env(arch)
cvsrc = self.get_build_dir(arch.arch)
lib_dir = os.path.join(self.ctx.get_python_install_dir(), "lib")
shprint(sh.cmake,
'-DP4A=ON','-DANDROID_ABI={}'.format(arch.arch),
'-DCMAKE_TOOLCHAIN_FILE={}/platforms/android/android.toolchain.cmake'.format(cvsrc),
'-DPYTHON_INCLUDE_PATH={}/include/python2.7'.format(env['PYTHON_ROOT']),
'-DPYTHON_LIBRARY={}/lib/libpython2.7.so'.format(env['PYTHON_ROOT']),
'-DPYTHON_NUMPY_INCLUDE_DIR={}/numpy/core/include'.format(env['SITEPACKAGES_PATH']),
'-DANDROID_EXECUTABLE={}/tools/android'.format(env['ANDROID_SDK']),
'-DBUILD_TESTS=OFF', '-DBUILD_PERF_TESTS=OFF', '-DBUILD_EXAMPLES=OFF', '-DBUILD_ANDROID_EXAMPLES=OFF',
'-DPYTHON_PACKAGES_PATH={}'.format(env['SITEPACKAGES_PATH']),
cvsrc,
_env=env)
shprint(sh.make,'-j',str(cpu_count()),'opencv_python')
shprint(sh.cmake,'-DCOMPONENT=python','-P','./cmake_install.cmake')
sh.cp('-a',sh.glob('./lib/{}/lib*.so'.format(arch.arch)),lib_dir)
recipe = OpenCVRecipe()
```
#### File: recipes/python3crystax/__init__.py
```python
from pythonforandroid.recipe import TargetPythonRecipe
from pythonforandroid.toolchain import shprint, current_directory, ArchARM
from pythonforandroid.logger import info, error
from pythonforandroid.util import ensure_dir, temp_directory
from os.path import exists, join
import glob
import sh
prebuilt_download_locations = {
'3.6': ('https://github.com/inclement/crystax_python_builds/'
'releases/download/0.1/crystax_python_3.6_armeabi_armeabi-v7a.tar.gz')}
class Python3Recipe(TargetPythonRecipe):
version = '3.5'
url = ''
name = 'python3crystax'
depends = ['hostpython3crystax']
conflicts = ['python2', 'python3']
from_crystax = True
def get_dir_name(self):
name = super(Python3Recipe, self).get_dir_name()
name += '-version{}'.format(self.version)
return name
def build_arch(self, arch):
# We don't have to actually build anything as CrystaX comes
# with the necessary modules. They are included by modifying
# the Android.mk in the jni folder.
# If the Python version to be used is not prebuilt with the CrystaX
# NDK, we do have to download it.
crystax_python_dir = join(self.ctx.ndk_dir, 'sources', 'python')
if not exists(join(crystax_python_dir, self.version)):
info(('The NDK does not have a prebuilt Python {}, trying '
'to obtain one.').format(self.version))
if self.version not in prebuilt_download_locations:
                error(('No prebuilt version for Python {} could be found, '
                       'the build cannot continue.').format(self.version))
exit(1)
with temp_directory() as td:
self.download_file(prebuilt_download_locations[self.version],
join(td, 'downloaded_python'))
shprint(sh.tar, 'xf', join(td, 'downloaded_python'),
'--directory', crystax_python_dir)
if not exists(join(crystax_python_dir, self.version)):
error(('Something went wrong, the directory at {} should '
'have been created but does not exist.').format(
join(crystax_python_dir, self.version)))
if not exists(join(
crystax_python_dir, self.version, 'libs', arch.arch)):
error(('The prebuilt Python for version {} does not contain '
'binaries for your chosen architecture "{}".').format(
self.version, arch.arch))
exit(1)
# TODO: We should have an option to build a new Python. This
# would also allow linking to openssl and sqlite from CrystaX.
dirn = self.ctx.get_python_install_dir()
ensure_dir(dirn)
# Instead of using a locally built hostpython, we use the
# user's Python for now. They must have the right version
# available. Using e.g. pyenv makes this easy.
self.ctx.hostpython = 'python{}'.format(self.version)
recipe = Python3Recipe()
``` |
{
"source": "Jorewang/LeetCode_Solutions",
"score": 3
} |
#### File: Jorewang/LeetCode_Solutions/10. Regular Expression Matching.py
```python
class Solution(object):
def isMatch(self, s, p):
def helper(s, i, p, j):
if j == -1:
return i == j
if i == -1:
if p[j] != '*':
return False
return helper(s, i, p, j-2)
if p[j] == '*':
if p[j-1] == '.' or p[j-1] == s[i]:
if helper(s, i-1, p, j):
return True
return helper(s, i, p, j-2)
if p[j] == '.' or p[j] == s[i]:
return helper(s, i-1, p, j-1)
return False
return helper(s, len(s)-1, p, len(p)-1)
def dp(self, s, p):
dp_list = [[0]*(len(p)+1) for _ in range(len(s)+1)]
dp_list[0][0] = 1
for j in range(1, len(p)+1):
if p[j-1] == '*':
dp_list[0][j] = dp_list[0][j-2]
for i in range(1, len(s)+1):
for j in range(1, len(p)+1):
if p[j-1] == '*':
if (p[j-2] == '.' or p[j-2] == s[i-1]) and dp_list[i-1][j] == 1:
dp_list[i][j] = 1
continue
dp_list[i][j] = dp_list[i][j-2]
if p[j-1] == '.' or p[j-1] == s[i-1]:
dp_list[i][j] = dp_list[i-1][j-1]
print(dp_list)
return dp_list[len(s)][len(p)] == 1
if __name__ == '__main__':
c = Solution()
print(c.isMatch('aab', 'b.*'))
print(c.dp('aab', 'b.*'))
```
#### File: Jorewang/LeetCode_Solutions/125. Valid Palindrome.py
```python
class Solution(object):
def isPalindrome(self, s):
res = []
for char in s:
if char.isalnum():
res.append(char.lower())
return res == res[::-1]
def isPalindrome_2(self, s):
if not s:
return True
l, r = 0, len(s)-1
while l < r:
if not s[l].isalnum():
l += 1
continue
if not s[r].isalnum():
r -= 1
continue
if s[l].lower() == s[r].lower():
l += 1
r -= 1
else:
return False
return True
if __name__ == '__main__':
s = "A man, a plan, a canal: Panama"
print(Solution().isPalindrome(s))
```
#### File: Jorewang/LeetCode_Solutions/198. House Robber.py
```python
class Solution(object):
def rob(self, nums):
def helper(nums, i):
le = len(nums)
if i == le - 1:
return nums[le-1]
if i == le - 2:
return max(nums[le-1], nums[le-2])
if i == le - 3:
return max(nums[le-3] + nums[le-1], nums[le-2])
return max(helper(nums, i+2) + nums[i], helper(nums, i+1))
return helper(nums, 0)
def rob_2(self, nums):
le = len(nums)
ans = [0]*(le+1)
ans[-2] = nums[-1]
for i in range(le-2, -1, -1):
ans[i] = max(nums[i] + ans[i+2], ans[i+1])
return ans[0]
if __name__ == '__main__':
print(Solution().rob_2([2, 7, 9, 3, 1]))
```
#### File: Jorewang/LeetCode_Solutions/242. Valid Anagram.py
```python
from collections import Counter
class Solution(object):
def isAnagram(self, s, t):
return Counter(s) == Counter(t)
if __name__ == '__main__':
print(Solution().isAnagram('a', 'b'))
```
#### File: Jorewang/LeetCode_Solutions/279. Perfect Squares.py
```python
import math
import random
class Solution(object):
def numSquares(self, n):
def helper(i, target):
if target == 0 and i >= 0:
return 0
if target < 0:
return 1000
if i == 0:
return 1000
return min(helper(i, target-i**2) + 1, helper(i-1, target))
return helper(math.floor(math.sqrt(n)), n)
def numSquares_2(self, n):
i, j = n+1, math.floor(math.sqrt(n))+1
dp = [[-1]*j for _ in range(i)]
for t in range(j):
dp[0][t] = 0
for t in range(1, i):
for k in range(1, j):
if t-k**2 < 0:
dp[t][k] = dp[t][k-1]
else:
if dp[t-k**2][k] != -1 and dp[t][k-1] != -1:
dp[t][k] = min(dp[t-k**2][k] + 1, dp[t][k-1])
elif dp[t-k**2][k] == -1:
dp[t][k] = dp[t][k-1]
else:
dp[t][k] = dp[t-k**2][k] + 1
return dp[-1][-1]
def numSquares_3(self, n):
q = []
visit = [0]*(n+1)
q.append((n, 0))
visit[n] = 1
while q:
num, step = q.pop(0)
i = 1
t = num - i**2
while t >= 0:
if t == 0:
return step + 1
if visit[t] == 0:
q.append((t, step+1))
visit[t] = 1
i += 1
t = num - i**2
if __name__ == '__main__':
for _ in range(1000):
n = random.randint(1, 444)
if Solution().numSquares_2(n) != Solution().numSquares_3(n):
print(n)
print("Shit")
break
else:
print("Done")
```
#### File: Jorewang/LeetCode_Solutions/733. Flood Fill.py
```python
class Solution(object):
def floodFill(self, image, sr, sc, newColor):
if not image or not image[0]:
return
dx = [-1, 0, 1, 0]
dy = [0, 1, 0, -1]
oldColor = image[sr][sc]
if oldColor == newColor:
return image
image[sr][sc] = newColor
for i in range(4):
x = sr + dx[i]
y = sc + dy[i]
if 0 <= x < len(image) and 0 <= y < len(image[0]) and image[x][y] == oldColor:
self.floodFill(image, x, y, newColor)
return image
```
#### File: Jorewang/LeetCode_Solutions/946. Validate Stack Sequences.py
```python
class Solution(object):
def validateStackSequences(self, pushed, popped):
if not pushed and not popped:
return True
pushed_seq = [pushed.pop(0)]
while pushed:
if pushed_seq and pushed_seq[-1] == popped[0]:
popped.pop(0)
pushed_seq.pop(-1)
else:
while pushed and (not pushed_seq or pushed_seq[-1] != popped[0]):
pushed_seq.append(pushed.pop(0))
return pushed_seq[::-1] == popped
def validateStackSequences_2(self, pushed, popped):
        # keep a separate pointer into popped so the loop variable does not
        # clobber it
        j, stack = 0, []
        for x in pushed:
            stack.append(x)
            while stack and j < len(popped) and stack[-1] == popped[j]:
                stack.pop()
                j += 1
        return not stack
if __name__ == '__main__':
print(Solution().validateStackSequences_2([1, 2, 3, 4, 5], [4, 5, 3, 2, 1]))
```
#### File: Jorewang/LeetCode_Solutions/962. Maximum Width Ramp.py
```python
class Solution(object):
def maxWidthRamp(self, A):
ans = 0
for i, value in enumerate(A):
next = i+1
j = i
while next < len(A):
if A[next] >= A[i]:
j = next
next += 1
if j-i > ans:
ans = j-i
return ans
def maxWidthRamp_2(self, A):
def f(x):
return x[1]
li = []
for i, value in enumerate(A):
li.append((value, i))
li.sort()
print(li)
ans = 0
for i, tup in enumerate(li):
ans = max(sorted(li[i:], key=f)[-1][1]-tup[1], ans)
return ans
def maxWidthRamp_3(self, A):
s = []
for i, value in enumerate(A):
if not s or A[s[-1]] > value:
s.append(i)
ans = 0
for i in range(len(A))[::-1]:
while s and A[s[-1]] <= A[i]:
ans = max(ans, i-s.pop())
return ans
if __name__ == '__main__':
print(Solution().maxWidthRamp_3([6, 0, 8, 2, 1, 5]))
print(Solution().maxWidthRamp_3([9, 8, 1, 0, 1, 9, 4, 0, 4, 1]))
```
#### File: Jorewang/LeetCode_Solutions/Compare Strings.py
```python
from collections import defaultdict
class Solution(object):
def compareStrings(self, A, B):
letters = defaultdict(int)
for a in A:
letters[a] += 1
for b in B:
letters[b] -= 1
if letters[b] < 0:
return False
return True
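# e.g. Solution().compareStrings("ABCD", "ACD") -> True (every letter of B
# occurs in A often enough), while compareStrings("ABCD", "AABC") -> False.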
```
#### File: Jorewang/LeetCode_Solutions/Longest Common Substring.py
```python
class Solution(object):
def longestCommonSubstring(self, A, B):
if not A or not B:
return 0
lcs = 0
for i in range(len(A)):
for j in range(len(B)):
lcs_tmp = 0
while i+lcs_tmp < len(A) and j+lcs_tmp < len(B) and A[i+lcs_tmp] == B[j+lcs_tmp]:
lcs_tmp += 1
if lcs < lcs_tmp:
lcs = lcs_tmp
return lcs
def longestCommonSubstring_dp(self, A, B):
if not A or not B:
return 0
n = len(A)
m = len(B)
f = [[0 for _ in range(m+1)] for _ in range(n+1)]
for i in range(n):
for j in range(m):
if A[i] == B[j]:
f[i+1][j+1] = 1+f[i][j]
lcs = max(map(max, f))
print(f)
return lcs
if __name__ == '__main__':
sa = 'ABCD'
sb = 'CBCE'
print(Solution().longestCommonSubstring_dp(sa, sb))
```
#### File: Jorewang/LeetCode_Solutions/two_sum.py
```python
def two_sum(nums, target):
lookup = {}
for i, num in enumerate(nums):
if target-num in lookup:
return i, lookup[target-num]
lookup[num] = i
return "fail"
``` |
{
"source": "Jorewin/ok_scheduler",
"score": 4
} |
#### File: scheduler/algorithms/brute_force_iterative.py
```python
from scheduler.problem import Instance, InstanceSolution
from typing import Iterator
class Lists:
def __init__(self, number):
self.current = 0
self.number = number
self.used = -1
self.storage = []
def reset(self):
self.current = 0
def next(self):
if self.current < self.number - 1:
self.current += 1
return True
return False
def previous(self):
if self.current > 0:
self.current -= 1
return True
return False
def get_current(self):
return self.storage[self.current]
def set_current(self, value):
self.storage[self.current] = value
class Tasks(Lists):
"""Class that simplifies operations on the tasks list.
:ivar current: current task index
:type current: int
:ivar number: total number of tasks
:type number: int
:ivar storage: storage[task_index] = last processor that was assigned to the process
:ivar storage: list
"""
def __init__(self, n_tasks):
super().__init__(n_tasks)
self.storage = [-1 for _ in range(n_tasks)]
def free(self):
return self.number - self.current + 1
class Processors(Lists):
"""Class that simplifies operations on the processors list.
:ivar current: current processor index
:type current: int
:ivar number: total number of processors
:type number: int
:ivar used: number of processors that have at least one task assigned to them
:ivar storage: storage[processor_index] = list of tasks that are currently assigned to the processor
    :type storage: list
"""
def __init__(self, n_processors):
super().__init__(n_processors)
self.storage = [[] for _ in range(n_processors)]
def free(self):
return self.number - self.used
def add_to_current(self, value):
if len(self.storage[self.current]) == 0 and self.used < self.number - 1:
self.used += 1
self.storage[self.current].append(value)
def pop_from_current(self):
result = self.storage[self.current].pop()
if len(self.storage[self.current]) == 0 and self.used > 0:
self.used = self.current - 1
return result
def len_current(self):
return len(self.storage[self.current])
def brute_generator(tasks_number: int, processors_number: int) -> Iterator[list]:
"""Yields all of the possible combinations of process assignment.
:param tasks_number:
:param processors_number:
:return: list of processors with tasks assigned to them
"""
tasks = Tasks(tasks_number)
processors = Processors(processors_number)
lower_bound = True
upper_bound = True
while lower_bound:
if not upper_bound:
yield [list(processor) for processor in processors.storage]
upper_bound = True
continue
if tasks.get_current() == -1:
if tasks.free() > processors.free():
processors.reset()
else:
processors.current = processors.used
processors.next()
tasks.set_current(processors.current)
processors.add_to_current(tasks.current)
upper_bound = tasks.next()
continue
processors.current = tasks.get_current()
if processors.len_current() == 1 or processors.current == processors.number - 1:
tasks.set_current(-1)
processors.pop_from_current()
lower_bound = tasks.previous()
else:
processors.pop_from_current()
processors.next()
tasks.set_current(processors.current)
processors.add_to_current(tasks.current)
upper_bound = tasks.next()
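# Usage sketch (illustrative only): iterating the generator enumerates every
# way of distributing the task indices over at most `processors_number`
# machines, e.g.
#
#   for assignment in brute_generator(3, 2):
#       print(assignment)  # each item is a list of task-index lists, one per processor
#
# The number of assignments grows exponentially with the number of tasks,
# which is why solve() below is only practical for small instances.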
def solve(instance: Instance) -> InstanceSolution:
"""Solves the P||Cmax problem by using an iterative version of a brute force algorithm.
:param instance: valid problem instance
:return: generated solution of a given problem instance
"""
all_possible_partitions = brute_generator(len(instance.tasks_durations), instance.processors_number)
solutions = map(lambda p: InstanceSolution(instance, p), all_possible_partitions)
best_solution = min(solutions, key=lambda s: s.total_time)
return best_solution
__all__ = ["solve"]
``` |
{
"source": "joreynajr/dchallenge",
"score": 2
} |
#### File: scripts/loop_analysis/Annotate_Coloc_SNPs_with_Nearby_Genes_Loops.py
```python
import os
import sys
import pybedtools as pbt
import pandas as pd
import numpy as np
import subprocess as sp
import json
os.chdir('/mnt/BioHome/jreyna/jreyna/projects/dchallenge/')
pbt.set_bedtools_path('/mnt/BioApps/bedtools/bin/')
bgzip = '/mnt/BioApps/tabix/tabix-0.2.6/bgzip'
tabix = '/mnt/BioApps/tabix/tabix-0.2.6/tabix'
bedpe_6cols = ['chrA', 'startA', 'endA', 'chrB', 'startB', 'endB']
bedpe_10cols = ['chrA', 'startA', 'endA', 'chrB', 'startB', 'endB', 'name', 'score', 'strand1', 'strand2']
# In[2]:
# ## default values for the command line
# sys.argv = [0] * 8
# sys.argv[1] = 'results/main/2021_Nikhil_eQTL/Results/Colocalization/T1D_34012112_Gaulton/'
# sys.argv[1] += 'DICE_eQTL_CD4_NAIVE/FINAL_Summary_Coloc_Gene_SNP_Pairs.bed'
# sys.argv[2] = 'results/refs/ensembl/gencode.v19.annotation.bed'
# sys.argv[3] = 'results/main/2021_Nikhil_eQTL/Data/FitHiChIP_Loops/CD4N/FitHiChIP_L/FitHiChIP.interactions_FitHiC_Q0.01.bed'
# sys.argv[4] = 'results/refs/spp/SPP_D-Challenge_networks.xlsx'
# sys.argv[5] = 'results/refs/hg19/hg19.chrom.sizes'
# sys.argv[6] = 'results/main/2021_Nikhil_eQTL/Data/eqtl_sqtl_summ_stats/DICE_eQTL/CD4_NAIVE.txt.gz'
# sys.argv[7] = 'results/main/loop_analysis/washU/'
# In[3]:
# parsing the commandline arguments
coloc_fn = sys.argv[1]
genes_fn = sys.argv[2]
loop_fn = sys.argv[3]
spp_fn = sys.argv[4]
gs_fn = sys.argv[5]
eqtl_fn = sys.argv[6]
outdir = sys.argv[7]
# setting the output file names
os.makedirs(outdir, exist_ok=True)
# ## Load the colocalization data
# In[4]:
# load the colocalization data
coloc = pd.read_table(coloc_fn)
# extract the most significant according the H4
coloc_sig_df = coloc[coloc['pp_H4_Coloc_Summary'] > 0.75]
coloc_sig_df = coloc_sig_df.loc[~coloc_sig_df.duplicated(subset=['rs_id', 'geneName']),]
coloc_sig_full = coloc_sig_df.copy(deep=True)
coloc_sig_df.rename(columns={'pos': 'end'}, inplace=True)
coloc_sig_df.loc[:, 'start'] = coloc_sig_df.loc[:, 'end'] - 1
coloc_sig_df = coloc_sig_df[['chr', 'start', 'end', 'rs_id', 'variant_id']]
coloc_sig_pbt = pbt.BedTool.from_dataframe(coloc_sig_df.iloc[:, 0:4]).sort()
print('There are {} colocalized SNP-gene pairs'.format(coloc_sig_df.shape[0]))
# ## Load the gene data
# In[5]:
# load the gencode coords
cols = ['chrom', 'start', 'end', 'strand', 'type', 'gene_id', 'gname']
gencode = pd.read_table(genes_fn, header=None, names=cols)
# extract just the genes
genes_df = gencode.loc[gencode.type.isin(['gene'])]
genes_df = genes_df.loc[~genes_df.duplicated(subset='gene_id'), :]
genes_df.loc[:, 'chrom'] = genes_df['chrom'].astype(str)
genes_df = genes_df.iloc[:, [0,1,2,6,5,3]]
# create a copy of the original gene bed before coordinate shrinking
orig_genes_df = genes_df.copy()
# convert the start/end position into start/end for the TSS
# if the gene is + then the start is uses as the tss otherwise
# the end is used as the tss
genes_df.loc[(genes_df.strand == '+'), 'end'] = genes_df.loc[(genes_df.strand == '+'), 'start']
genes_df.loc[(genes_df.strand == '+'), 'start'] = genes_df.loc[(genes_df.strand == '+'), 'start'] - 1
genes_df.loc[(genes_df.strand == '-'), 'end'] = genes_df.loc[(genes_df.strand == '-'), 'end']
genes_df.loc[(genes_df.strand == '-'), 'start'] = genes_df.loc[(genes_df.strand == '-'), 'end'] - 1
# make a genes pbt for intersection
genes_pbt = pbt.BedTool.from_dataframe(genes_df).sort()
# In[6]:
print('There are {} genes in this GTF-derived file.'.format(genes_df.shape[0]))
# ## Find all genes +/- 500kb
# In[7]:
# get a list of gene names within +- 500kb of the SNPs
fivekb_genes = coloc_sig_pbt.slop(b=500000, g=gs_fn)
fivekb_genes = fivekb_genes.intersect(genes_pbt, wa=True, wb=True)
fivekb_genes = fivekb_genes.to_dataframe()
fivekb_genes = fivekb_genes.iloc[:, [0,1,2,4,5,6,3,7,8,9]]
# In[8]:
fivekb_genes.columns = bedpe_6cols + ['rs_id', 'gname', 'gid', 'strand2']
fivekb_genes['strand1'] = '+'
fivekb_genes['name'] = fivekb_genes['rs_id'] + '_' + fivekb_genes['gname']
fivekb_genes['score'] = '.'
new_order = bedpe_10cols + ['rs_id', 'gname', 'gid']
fivekb_genes = fivekb_genes[new_order]
fivekb_genes['startA'] += 500000
fivekb_genes['endA'] -= 500000
fivekb_genes['sid'] = fivekb_genes['chrA'].str.replace('chr', '') + ':' + fivekb_genes['endA'].astype(str)
# In[9]:
print('There are {} colocalized snp-gene pairs within +/- 500kb.'.format(fivekb_genes.shape[0]))
# ## Find the closest gene
# In[10]:
closest_gene = coloc_sig_pbt.closest(genes_pbt, d=True)
closest_gene = closest_gene.to_dataframe().iloc[:, [0,1,2,4,5,6,3,7,8,9]]
closest_gene.columns = bedpe_6cols + ['rs_id', 'gname', 'gid', 'dist']
closest_gene['sid'] = closest_gene['chrA'].str.replace('chr', '') + ':' + closest_gene['endA'].astype(str)
closest_gene.set_index(['sid', 'gname'], inplace=True)
# ## Get the loops
# In[11]:
# load the loop data
loops = pd.read_table(loop_fn)
tmp_loops = loops[['chr1', 's1', 'e1', 'chr2', 's2', 'e2']]
tmp_loops.rename(columns={'p': 'score'}, inplace=True)
tmp_loops.loc[:, 'name'] = '.'
tmp_loops.loc[:, 'score'] = loops['p']
tmp_loops.loc[:, 'strand1'] = '.'
tmp_loops.loc[:, 'strand2'] = '.'
loops = pbt.BedTool.from_dataframe(tmp_loops)
print('FitHiChIP found {} significant loops.'.format(tmp_loops.shape[0]))
# ## Find out SNP-Gene pairs with loops
# In[12]:
# re-arranging to fit bedpe format
fivekb_gloops = fivekb_genes.copy()
# loading into pbt
fivekb_gloops = pbt.BedTool.from_dataframe(fivekb_gloops)
fivekb_gloops = fivekb_gloops.pair_to_pair(loops, type='both', slop=7500, **{'is':True})
fivekb_gloops = fivekb_gloops.to_dataframe(disable_auto_names=True, header=None)
fivekb_gloops_set = fivekb_gloops.iloc[:, [13,11]]
fivekb_gloops_uniq = set([tuple(x) for x in fivekb_gloops_set.values.tolist()])
# In[13]:
print('There are {} SNP-Gene pairs with a loop.'.format(len(fivekb_gloops_uniq)))
# ## Construct master table
# In[14]:
# begin making the master
master = fivekb_genes.copy()
master['sid'] = master['chrA'].str.replace('chr', '') + ':' + master['endA'].astype(str)
# In[15]:
print('Master is starting with {} snp-gene pairs.'.format(master.shape[0]))
# #### Add eqtl results
# In[16]:
# get eQTL's
eqtls = pd.read_table(eqtl_fn)
eqtls.columns = ['eqtl_gname', 'nvar', 'shape1', 'shape2', 'dummy',
'sid', 'dist', 'npval', 'slope', 'ppval', 'bpval', 'qval']
print('There are {} eQTLs.'.format(eqtls.shape[0]))
# In[17]:
# need to use outer or else you exclude some eQTL's
master = master.merge(eqtls, left_on=['sid', 'gname'], right_on=['sid', 'eqtl_gname'], how='outer')
# add column to filter on eqtl snp status
master['is_eqtl_pair'] = (~master['ppval'].isna()).astype(int)
# add gene names to entries with a missing name (after adding eQTL info)
master.loc[master.gname.isna(), 'gname'] = master.loc[master.gname.isna(), 'eqtl_gname']
# add missing chrA, chrB, startA and startB data for the eQTL rows
master.loc[master.chrA.isna(), 'chrA'] = 'chr' + master.loc[master.chrA.isna(), 'sid'].str.replace(':[0-9]+', '')
master.loc[master.chrB.isna(), 'chrB'] = 'chr' + master.loc[master.chrB.isna(), 'sid'].str.replace(':[0-9]+', '')
# capture the rows with a missing startA first, otherwise the -1 adjustment
# below would run on an already-filled (hence empty) selection
missing_startA = master.startA.isna()
master.loc[missing_startA, 'startA'] = (master.loc[missing_startA, 'sid'].str.replace('[0-9]+:', '')).astype(int)
master.loc[missing_startA, 'startA'] -= 1
master.loc[master.endA.isna(), 'endA'] = master.loc[master.endA.isna(), 'sid'].str.replace('[0-9]+:', '')
# In[18]:
print('After outer merging with eqtls master has {} snp-gene pairs.'.format(master.shape[0]))
# #### Add gene meta data
# In[19]:
# genes with index as chrom and genename
query_genes = genes_df.sort_values(['chrom', 'gname']).set_index(['chrom', 'gname'])
def get_gene_meta_from_chrom_gname(query_genes, df, col_idxs=None):
# add gene positions (for missing gene meta data mostly)
gene_positions = []
if col_idxs == None:
for i, sr in df.iterrows():
gene_info = query_genes.loc[(sr.chrom, sr.gene_name)]
if len(gene_info) == 0:
print('Houston, where is my coffee?')
break
elif len(gene_info) > 1:
print('Houston, we have a problem.')
break
else:
gene_positions.append(gene_info.values.tolist()[0])
else:
for i, sr in df.iterrows():
gene_info = query_genes.loc[(sr[col_idxs[0]], sr[col_idxs[1]])]
if len(gene_info) == 0:
print(gene_info)
raise Exception('Houston, where is my coffee?')
elif len(gene_info) > 1:
#print('Picked the closest gene to the current SNP.')
dists = np.abs(gene_info['start'].values - sr['startA'])
closest_idx = np.argmin(dists)
gene_positions.append(gene_info.values.tolist()[closest_idx][2])
else:
gene_positions.append(gene_info.values.tolist()[0][2])
return(gene_positions)
# In[20]:
gene_ids = get_gene_meta_from_chrom_gname(query_genes, master, col_idxs=[0, 11])
master.loc[:, 'gid'] = gene_ids
# In[21]:
# add back the original gene start and end
master = master.merge(orig_genes_df[['start', 'end', 'gene_id', 'strand']], left_on='gid', right_on='gene_id')
# convert the startB/endB position into startB/endB for the TSS
# if the gene is + then the startB is uses as the tss otherwise
# the endB is used as the tss
master.loc[(master.strand == '+'), 'endB'] = master.loc[(master.strand == '+'), 'start']
master.loc[(master.strand == '+'), 'startB'] = master.loc[(master.strand == '+'), 'start'] - 1
master.loc[(master.strand == '-'), 'endB'] = master.loc[(master.strand == '-'), 'end']
master.loc[(master.strand == '-'), 'startB'] = master.loc[(master.strand == '-'), 'end'] - 1
# convert the coordinates from floats to ints
master.startA = master.startA.astype(int)
master.startB = master.startB.astype(int)
master.endA = master.endA.astype(int)
master.endB = master.endB.astype(int)
master.rename(columns={'start': 'gene_start', 'end': 'gene_end', 'strand': 'gene_strand'}, inplace=True)
# #### Add info about closests gene
# In[22]:
# check for the closets gene
closets_check = [0] * master.shape[0]
for i, sr in master.iterrows():
# check closest gene
rs_gene = (sr.sid, sr.gname)
if rs_gene in closest_gene.index:
closets_check[i] = 1
master['is_closest_gene'] = closets_check
# #### Add colocalization data
# In[23]:
# add colocalization data for SNP and is_coloc_snp columns
tmp_coloc = coloc_sig_full[[
'pp_H0_Coloc_Summary',
'pp_H1_Coloc_Summary',
'pp_H2_Coloc_Summary',
'pp_H3_Coloc_Summary',
'pp_H4_Coloc_Summary',
'rs_id',
'geneName',
'ref',
'alt',
'AC',
'AF',
'AN',
'slope_gwas',
'slope_se_gwas',
'pval_nominal',
'SampleSize']]
tmp_coloc.rename(columns={'slope_gwas': 'gwas_slope',
'slope_se_gwas': 'gwas_slope_se',
'pval_nominal': 'gwas_pval_nominal',
'geneName': 'gname'}, inplace=True)
master = master.merge(tmp_coloc, on=['rs_id', 'gname'], how='left')
# add column to filter on coloc snp status
master['is_coloc_pair'] = (~master['pp_H4_Coloc_Summary'].isna()).astype(int)
# In[24]:
print('After left merging master with the colocalization table there are {} entries.'.format(master.shape[0]))
# #### Add loop data
# In[25]:
# check for the loop gene
loop_check = [0] * master.shape[0]
for i, sr in master.iterrows():
# check closest gene
rs_gene = (sr.sid, sr.gname)
if rs_gene in fivekb_gloops_uniq:
loop_check[i] = 1
master['has_fithichip_loop'] = loop_check
# In[26]:
print('There are {} SNP-Gene loops.'.format(sum(loop_check)))
# #### Do the final reordering and saving
# In[27]:
master = master[[
'sid',
'rs_id',
'gname',
'gid',
'chrA',
'endA',
'startB',
'endB',
'is_eqtl_pair',
'is_coloc_pair',
'is_closest_gene',
'has_fithichip_loop',
'nvar',
'shape1',
'shape2',
'dist',
'npval',
'slope',
'ppval',
'bpval',
'qval',
'pp_H0_Coloc_Summary',
'pp_H1_Coloc_Summary',
'pp_H2_Coloc_Summary',
'pp_H3_Coloc_Summary',
'pp_H4_Coloc_Summary',
'gene_start',
'gene_end',
'gene_strand',
'ref',
'alt',
'AC',
'AF',
'AN',
'gwas_slope',
'gwas_slope_se',
'gwas_pval_nominal',
'SampleSize']]
# In[28]:
master.rename(columns={'chrA':'chrom', 'endA': 'snp_pos',
'startB': 'tss_start', 'endB': 'tss_end',
'gname': 'gene_name', 'gid': 'gene_id'}, inplace=True)
# In[29]:
master.sort_values(['chrom', 'snp_pos', 'tss_start', 'rs_id'], inplace=True)
master.snp_pos = master.snp_pos.astype(int)
# In[30]:
# write out the master data
fn = os.path.join(outdir, 'master.tsv')
master.to_csv(fn, sep='\t', header=True, index=False)
# In[31]:
fn = os.path.join(outdir, 'master.xlsx')
excel_master = master.sort_values('rs_id').set_index('rs_id')
excel_master.to_excel(fn, na_rep='nan')
# ## Make WashU files
# In[32]:
def bedpe_to_WashU_longrange(fn, df):
"""
Convert from a loop bedpe file into WashU longrange,
includes bgzip and tabix of the fn.
Params
-------
fn: str
path to the longrange output file (without gz)
df: dataframe
columns 1-6 are as expected and column 7 is the p or q-value.
Output
------
gzfn: str
path to the longrange with bgzip compression
tabix_fn: str
path to the index of the longrange file
"""
# parsing the data into WashU longrage format
data = []
for sr in df.values.tolist():
# calculate the -log(FDR)
qval = -np.log(sr[6])
# get the first pair data
second_pair_str = '{}:{}-{},{:.5f}'.format(*sr[3:6], qval)
first_row = sr[0:3] + [second_pair_str]
# get the second pair data
first_pair_str = '{}:{}-{},{:.5f}'.format(*sr[0:3], qval)
second_row = sr[3:6] + [first_pair_str]
# add each data row
data.append(first_row)
data.append(second_row)
data = sorted(data, key=lambda x: (x[0], x[1], x[2]))
# writing out the data
with open(fn, 'w') as f:
for line in data:
info = [str(x) for x in line]
info = '\t'.join(info)
f.write(info + '\n')
# run bgzip
cmd = '{} {}'.format(bgzip, fn)
print(cmd)
job = sp.Popen(cmd, stderr=sp.PIPE,stdout=sp.PIPE, shell=True)
out, err = job.communicate()
print('out:', out.decode())
print('err:', err.decode())
# run tabix
lrange_gzfn = fn + '.gz'
cmd = '{} -f {}'.format(tabix, lrange_gzfn)
print(cmd)
job = sp.Popen(cmd, stderr=sp.PIPE,stdout=sp.PIPE, shell=True)
out, err = job.communicate()
print('out:', out.decode())
print('err:', err.decode())
print('Created the gzfn: {}'.format(fn + '.gz'))
print('Created the tabix: {}'.format(fn + '.gz.tbi'))
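# For reference, each bedpe row is written out as two mirrored longrange records
# of the form "chrom  start  end  partnerChrom:partnerStart-partnerEnd,score",
# where score is -log of the q-value in column 7. Coordinates below are
# illustrative, not taken from the actual data:
#   chr1  10000  15000  chr1:50000-55000,4.60517
#   chr1  50000  55000  chr1:10000-15000,4.60517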
# In[33]:
def bed_WashU_bedgz(fn, df):
"""
    Convert a bed dataframe into a WashU bed track,
    includes bgzip and tabix of the fn.
    Params
    -------
    fn: str
        path to the bed output file (without gz)
    df: dataframe
        columns 1-4 are chrom, start, end and name.
    Output
    ------
    gzfn: str
        path to the bed file with bgzip compression
    tabix_fn: str
        path to the index of the bed file
"""
# parsing the data into WashU longrage format
data = []
for sr in df.values.tolist():
data.append(sr[0:4])
data = sorted(data, key=lambda x: (x[0], x[1], x[2]))
# writing out the data
with open(fn, 'w') as f:
for line in data:
info = [str(x) for x in line]
info = '\t'.join(info)
f.write(info + '\n')
# run bgzip
cmd = '{} {}'.format(bgzip, fn)
print(cmd)
job = sp.Popen(cmd, stderr=sp.PIPE,stdout=sp.PIPE, shell=True)
out, err = job.communicate()
print('out:', out.decode())
print('err:', err.decode())
# run tabix
gzfn = fn + '.gz'
cmd = '{} -f {}'.format(tabix, gzfn)
print(cmd)
job = sp.Popen(cmd, stderr=sp.PIPE,stdout=sp.PIPE, shell=True)
out, err = job.communicate()
print('out:', out.decode())
print('err:', err.decode())
print('Created the gzfn: {}'.format(fn + '.gz'))
print('Created the tabix: {}'.format(fn + '.gz.tbi'))
# In[34]:
def bed_to_WashU_refbed(fn, df):
"""
    Convert a gene dataframe into a WashU refbed track,
    includes bgzip and tabix of the fn.
    Params
    -------
    fn: str
        path to the refbed output file (without gz)
    df: dataframe
        columns follow the WashU refbed layout (chr, transcript_start,
        transcript_stop, translation_start, translation_stop, strand,
        gene_name, transcript_id, type, exon starts, exon stops).
    Output
    ------
    gzfn: str
        path to the refbed with bgzip compression
    tabix_fn: str
        path to the index of the refbed file
"""
# parsing the data into WashU longrage format
data = df.values.tolist()
data = sorted(data, key=lambda x: (x[0], x[1], x[2]))
# writing out the data
with open(fn, 'w') as f:
for line in data:
info = [str(x) for x in line]
info = '\t'.join(info)
f.write(info + '\n')
# run bgzip
cmd = '{} -f {}'.format(bgzip, fn)
print(cmd)
job = sp.Popen(cmd, stderr=sp.PIPE,stdout=sp.PIPE, shell=True)
out, err = job.communicate()
print('out:', out.decode())
print('err:', err.decode())
# run tabix
gzfn = fn + '.gz'
cmd = '{} {}'.format(tabix, gzfn)
print(cmd)
job = sp.Popen(cmd, stderr=sp.PIPE,stdout=sp.PIPE, shell=True)
out, err = job.communicate()
print('out:', out.decode())
print('err:', err.decode())
print('Created the gzfn: {}'.format(fn + '.gz'))
print('Created the tabix: {}'.format(fn + '.gz.tbi'))
# In[35]:
# make the refbed link for genes (status: running)
final_sg_cols = ['chrom', 'gene_start' ,'gene_end', 'gene_name', 'gene_strand']
final_sg_genes = master.loc[(master.has_fithichip_loop == 1), final_sg_cols]
# In[36]:
final_sg_genes.gene_start = final_sg_genes.gene_start.astype(int)
final_sg_genes.gene_end = final_sg_genes.gene_end.astype(int)
final_sg_genes = final_sg_genes.loc[~final_sg_genes.duplicated()]
final_sg_genes['chr'] = final_sg_genes['chrom']
final_sg_genes['transcript_start'] = final_sg_genes['gene_start']
final_sg_genes['transcript_stop'] = final_sg_genes['gene_end']
final_sg_genes['translation_start'] = final_sg_genes['gene_start']
final_sg_genes['translation_stop'] = final_sg_genes['gene_end']
final_sg_genes['strand'] = final_sg_genes['gene_strand']
final_sg_genes['gene_name'] = final_sg_genes['gene_name']
final_sg_genes['transcript_id'] = final_sg_genes['gene_name']
final_sg_genes['type'] = 'coding'
final_sg_genes['exon_gene_start'] = final_sg_genes['gene_start']
final_sg_genes['exon_stops'] = final_sg_genes['gene_end']
refcols = ['chr', 'transcript_start', 'transcript_stop', 'translation_start',
'translation_stop', 'strand', 'gene_name', 'transcript_id',
'type', 'exon_gene_start', 'exon_stops']
final_sg_genes = final_sg_genes.loc[:, refcols]
sg_genes_fn = os.path.join(outdir, 'gs_genes.bed')
bed_to_WashU_refbed(sg_genes_fn, final_sg_genes)
# In[37]:
# make the longrange link for raw fithichip data (status: running)
loop_gz = os.path.abspath(loop_fn.replace('.bed', '_WashU.bed.gz'))
loop_tbi = os.path.abspath(loop_fn.replace('.bed', '_WashU.bed.gz.tbi'))
loop_gz_link = os.path.join(outdir, os.path.basename(loop_gz))
loop_tbi_link = os.path.join(outdir, os.path.basename(loop_tbi))
if not os.path.exists(loop_gz_link):
os.link(loop_gz, loop_gz_link)
os.link(loop_tbi, loop_tbi_link)
# ## All 5kb Washu Files
# In[38]:
# make the longrange link for snp-gene fivekb pairs (status: running)
#fivekb_lrange = fivekb_lrange[fivekb_lrange.rs_id.notna()].reset_index(drop=True)
fivekb_lrange = fivekb_genes.copy()
# convert full for viz
fivekb_lrange = fivekb_lrange[['chrA', 'startA', 'endA', 'chrB', 'startB', 'endB']]
fivekb_lrange.iloc[:, 1] -= 1
fivekb_lrange['score'] = 0.01
# In[39]:
fivekb_snp_gene_pairs_fn = os.path.join(outdir, '5kb.snp_gene_pairs.bed')
bedpe_to_WashU_longrange(fivekb_snp_gene_pairs_fn, fivekb_lrange)
# In[40]:
# plot all snp-gene pairs with a loop
fivekb_gloops_lrange = fivekb_gloops.iloc[:, [11,12,13,14,15,16,18]]
fivekb_snp_gene_loops_fn = os.path.join(outdir, '5kb.snp_gene_loops.bed')
bedpe_to_WashU_longrange(fivekb_snp_gene_loops_fn, fivekb_gloops_lrange)
# ## eQTL WashU Files
# In[41]:
# make the longrange link for snp-gene eQTL pairs (status: running)
eqtl_lrange = master.loc[master.is_eqtl_pair == 1]
eqtl_lrange = eqtl_lrange[eqtl_lrange.rs_id.notna()].reset_index(drop=True)
# In[42]:
# convert full for viz
eqtl_snp_gene_pairs = eqtl_lrange[['chrom', 'snp_pos', 'snp_pos', 'chrom', 'tss_start', 'tss_end', 'gene_name']]
eqtl_snp_gene_pairs.iloc[:, 1] -= 1
eqtl_snp_gene_pairs['score'] = 0.01
# In[43]:
eqtl_snp_gene_pairs_fn = os.path.join(outdir, 'eqtl.snp_gene_pairs.bed')
bedpe_to_WashU_longrange(eqtl_snp_gene_pairs_fn, eqtl_snp_gene_pairs.iloc[:, [0,1,2,3,4,5,7]])
# In[44]:
# make the bed for egenes only (status: running)
eqtl_genes = eqtl_lrange.loc[:, ['chrom', 'gene_start', 'gene_end', 'gene_name']]
eqtl_genes_only_fn = os.path.join(outdir, 'eqtl.genes_only.bed')
bed_WashU_bedgz(eqtl_genes_only_fn, eqtl_genes)
# In[45]:
# make the bed for eSNPs only (status: running)
eqtl_snps = eqtl_lrange.loc[:, ['chrom', 'snp_pos', 'snp_pos', 'rs_id']]
eqtl_snps.iloc[:, 1] -= 1
eqtl_snps = pbt.BedTool.from_dataframe(eqtl_snps)
eqtl_snps = eqtl_snps.slop(b=500, g=gs_fn).to_dataframe()
eqtl_snps_only_fn = os.path.join(outdir, 'eqtl.snps_only.bed')
bed_WashU_bedgz(eqtl_snps_only_fn, eqtl_snps)
# In[46]:
# # make the longrange link for snp-gene eQTL loops (status: didn't add loop coordinates to master.)
# eqtl_loops = master.loc[master.is_eqtl_pair == 1]
# ## Coloc WashU Files
# In[47]:
# make the bed for colocalized SNPs (status: running)
final_snps = coloc_sig_df.copy()
final_snps = final_snps.loc[~final_snps.duplicated()]
final_snps = pbt.BedTool.from_dataframe(final_snps)
final_snps = final_snps.slop(b=500, g=gs_fn)
final_snps = final_snps.to_dataframe()[0:4]
# In[48]:
coloc_snps_only_fn = os.path.join(outdir, 'coloc.snps_only.bed')
bed_WashU_bedgz(coloc_snps_only_fn, final_snps)
# In[49]:
# make the bed for colocalized genes (status: running)
final_coloc_snp_genes_pairs = master[(master.is_coloc_pair == 1)]
final_coloc_genes = final_coloc_snp_genes_pairs[['chrom', 'gene_start' ,'gene_end', 'gene_name']]
coloc_genes_only_fn = os.path.join(outdir, 'coloc.genes_only.bed')
bed_WashU_bedgz(coloc_genes_only_fn, final_coloc_genes)
# In[50]:
# make the longrange for colocalized snp-gene pairs (status: running)
final_coloc_snp_genes_pairs_out = final_coloc_snp_genes_pairs[['chrom', 'snp_pos', 'snp_pos', 'chrom',
'tss_start', 'tss_end', 'gene_name']]
final_coloc_snp_genes_pairs_out.iloc[:, 1] -= 1
final_coloc_snp_genes_pairs_out.iloc[:, 6] = 0.01
coloc_snp_gene_pairs_fn = os.path.join(outdir, 'coloc.snp_gene_pairs.bed')
bedpe_to_WashU_longrange(coloc_snp_gene_pairs_fn, final_coloc_snp_genes_pairs_out)
# In[51]:
# make the longrange for colocalized snp-gene loops (status: running)
final_coloc_snp_genes_loops = fivekb_gloops[(fivekb_gloops[10].isin(final_coloc_snp_genes_pairs['rs_id'])) &
(fivekb_gloops[12].isin(final_coloc_snp_genes_pairs['gene_id']))]
final_coloc_snp_genes_loops_out = final_coloc_snp_genes_loops.iloc[:, [14,15,16,17,18,19,21]]
coloc_snp_gene_loops_fn = os.path.join(outdir, 'coloc.snp_gene_loops.bed')
bedpe_to_WashU_longrange(coloc_snp_gene_loops_fn, final_coloc_snp_genes_loops_out)
# In[52]:
final_coloc_snp_genes_loops_out
# In[53]:
# make the loop anchors as bed files (status: developing)
left = final_coloc_snp_genes_loops[[14,15,16,6]].T.reset_index(drop=True).T
left[3] = 'L-' + left[3]
right = final_coloc_snp_genes_loops[[17,18,19,6]].T.reset_index(drop=True).T
right[3] = 'R-' + right[3]
anchors = pd.concat([left, right], ignore_index=True, axis=0)
# In[54]:
coloc_anchors_fn = os.path.join(outdir, 'coloc.anchors_only.bed')
bed_WashU_bedgz(coloc_anchors_fn, anchors)
# # make the hub json file
# In[55]:
#gwas, cline = coloc_fn.split('/')[5:7]
# In[56]:
print("# make the hub json file")
gene_refbed_json = {'type': 'refbed',
'filename': os.path.basename(sg_genes_fn) + '.gz',
'name': 'Gencode V19',
'showOnHubLoad': True
}
orig_loops_json = {'type': 'longrange',
'filename': os.path.basename(loop_gz_link),
'name': 'Original Loops',
'options': {'displayMode': 'arc', 'color':'red'},
'showOnHubLoad': True
}
# fivekb_snp_gene_pairs_json = {'type': 'longrange',
# 'filename': os.path.basename(fivekb_snp_gene_pairs_fn) + '.gz',
# 'name': '5kb SNP-Gene Pairs',
# 'options': {'displayMode': 'arc', 'color':'purple', 'height': 200},
# 'showOnHubLoad': False
# }
# fivekb_snp_gene_loops_json = {'type': 'longrange',
# 'filename': os.path.basename(fivekb_snp_gene_loops_fn) + '.gz',
# 'name': '5kb SNP-Gene Loops',
# 'options': {'displayMode': 'arc', 'color':'red', 'height': 200},
# 'showOnHubLoad': False
# }
eqtl_snp_gene_pairs_json = {'type': 'longrange',
'filename': os.path.basename(eqtl_snp_gene_pairs_fn) + '.gz',
'name': 'eQTL SNP-Gene Pairs',
'options': {'displayMode': 'arc', 'color':'purple', 'height': 200},
'showOnHubLoad': True
}
eqtl_snps_only_json = {'type': 'bed',
'filename': os.path.basename(eqtl_snps_only_fn) + '.gz',
'name': 'eQTL SNPs only',
'options': {'color':'purple'},
'showOnHubLoad': True
}
eqtl_genes_only_json = {'type': 'bed',
'filename': os.path.basename(eqtl_genes_only_fn) + '.gz',
'name': 'eQTL genes only',
'options': {'color':'purple'},
'showOnHubLoad': True
}
coloc_snp_gene_pairs_json = {'type': 'longrange',
'filename': os.path.basename(coloc_snp_gene_pairs_fn) + '.gz',
'name': 'coloc SNP-Gene Pairs',
'options': {'displayMode': 'arc', 'color':'purple', 'height': 200},
'showOnHubLoad': True
}
coloc_snp_gene_loops_json = {'type': 'longrange',
'filename': os.path.basename(coloc_snp_gene_loops_fn) + '.gz',
'name': 'coloc SNP-Gene Loops',
'options': {'displayMode': 'arc', 'color':'red', 'height': 200},
'showOnHubLoad': True
}
coloc_snps_only_json = {'type': 'bed',
'filename': os.path.basename(coloc_snps_only_fn) + '.gz',
'name': 'coloc SNPs only',
'options': {'color':'purple'},
'showOnHubLoad': False
}
coloc_genes_only_json = {'type': 'bed',
'filename': os.path.basename(coloc_genes_only_fn) + '.gz',
'name': 'coloc genes only',
'options': {'color':'purple'},
'showOnHubLoad': False
}
coloc_anchors_only_json = {'type': 'bed',
'filename': os.path.basename(coloc_anchors_fn) + '.gz',
'name': 'coloc loop anchors only',
'options': {'color':'red'},
'showOnHubLoad': False
}
#hub_json = [orig_loops_json, sg_pairs_json, sg_loops_json, sg_snps_json, sg_genes_json]
hub_json = [gene_refbed_json,
#fivekb_snp_gene_pairs_json,
#fivekb_snp_gene_loops_json,
eqtl_snps_only_json,
eqtl_genes_only_json,
eqtl_snp_gene_pairs_json,
coloc_snps_only_json,
coloc_genes_only_json,
coloc_snp_gene_pairs_json,
coloc_anchors_only_json,
coloc_snp_gene_loops_json,
orig_loops_json,
]
hub_json_fn = os.path.join(outdir, 'hub.config.json')
with open(hub_json_fn, 'w') as f:
f.write(json.dumps(hub_json, indent=4))
# In[ ]:
# In[ ]:
# In[ ]:
``` |
{
"source": "Jorge1o1/ghost",
"score": 4
} |
#### File: ghost/scripts/makecorpusfrombrown.py
```python
MIN_LETTERS = 3
TOP_N = 5000
def get_count_from_line(line):
parts = line.split()
return int(parts[1]) if len(parts) > 1 else 0
def is_longer_than_minimum(line):
word = line.split()[0]
return len(word) > MIN_LETTERS
with open("../data/brown_freq.txt", "r") as infile:
sorted_qty = sorted(infile, key=get_count_from_line, reverse=True)
valid = filter(is_longer_than_minimum, sorted_qty[:TOP_N])
sorted_alpha = [l.split()[0] + "\n" for l in sorted(valid)]
with open("../data/corpus2.txt", "w") as outfile:
outfile.writelines(sorted_alpha)
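# Assumed input format for brown_freq.txt (one "word count" pair per line), e.g.:
#   the 69971
#   of 36412
# corpus2.txt then keeps the words among the TOP_N most frequent that have more
# than MIN_LETTERS letters, one per line, sorted alphabetically.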
``` |
{
"source": "jorge4larcon/fastproject",
"score": 2
} |
#### File: fastproject/fastproject/main.py
```python
import fastapi
from .modules import skills, users
app = fastapi.FastAPI()
app.include_router(users.controller)
app.include_router(skills.router)
@app.get("/")
async def root():
return {"message": "Hello World"}
```
#### File: modules/users/models.py
```python
import datetime
import uuid
from typing import Any, Optional
import pydantic
from . import contypes, password_validators
class PublicUser(pydantic.BaseModel):
"""Represents user data that can be shared with the public."""
user_id: uuid.UUID
username: str
email: str
first_name: str
last_name: str
is_superuser: bool
is_staff: bool
is_active: bool
date_joined: datetime.datetime
last_login: Optional[datetime.datetime]
class PatchableUserData(pydantic.BaseModel):
"""
Represents user data that can be used to partially update a user in the
database.
"""
username: Optional[contypes.Username] = pydantic.Field(None, description="Username")
email: Optional[pydantic.EmailStr] = pydantic.Field(None, description="Email")
first_name: Optional[contypes.FirstName] = pydantic.Field(
None, description="First name"
)
last_name: Optional[contypes.LastName] = pydantic.Field(
None, description="Last name"
)
password: Optional[contypes.Password] = pydantic.Field(None, description="Password")
is_superuser: Optional[bool] = pydantic.Field(None, description="Is superuser?")
is_staff: Optional[bool] = pydantic.Field(None, description="Is staff?")
is_active: Optional[bool] = pydantic.Field(None, description="Is active?")
date_joined: Optional[datetime.datetime] = pydantic.Field(
None, description="Date joined"
)
last_login: Optional[datetime.datetime] = pydantic.Field(
None, description="Last login"
)
class UserRegistrationData(pydantic.BaseModel):
"""
Represents user data that can be used to register a user in the system and
insert that user in the database.
"""
username: contypes.Username = pydantic.Field(None, description="Username")
email: pydantic.EmailStr = pydantic.Field(None, description="Email")
first_name: contypes.FirstName = pydantic.Field(None, description="First name")
last_name: contypes.LastName = pydantic.Field(None, description="Last name")
password: contypes.Password = pydantic.Field(None, description="Password")
@pydantic.validator("password")
def validate_password(cls, value: str, values: dict[str, Any]) -> str:
"""Validates the password."""
user_attributes = {
"username": values["username"],
"email": values["email"],
"first_name": values["first_name"],
"last_name": values["last_name"],
}
return password_validators.validate_password(
value,
contypes.Password.min_length,
contypes.Password.max_length,
user_attributes,
)
```
#### File: modules/users/password_validators.py
```python
import difflib
import gzip
import pathlib
import re
from typing import Optional
from . import exceptions
_PASSWORD_LIST_PATH = (
pathlib.Path(__file__).resolve().parent / "common-passwords.txt.gz"
)
_PASSWORD_LIST = set()
def _load_password_list(password_list_path=_PASSWORD_LIST_PATH) -> None:
"""Loads the password list."""
global _PASSWORD_LIST
try:
with gzip.open(password_list_path, "rt", encoding="utf-8") as file:
_PASSWORD_LIST = {p.strip() for p in file}
except OSError:
with open(password_list_path, "rt", encoding="utf-8") as file:
_PASSWORD_LIST = {p.strip() for p in file}
def validate_password_length(password: str, min_length: int, max_length: int) -> str:
"""Validates that the password has the correct length."""
if not min_length <= len(password) <= max_length:
raise exceptions.InvalidPasswordError(
f"Password can not have less than {min_length} or more "
f"than {max_length} characters."
)
return password
def validate_password_not_numeric(password: str) -> str:
"""Validates that the password is not entirely numeric."""
if password.isdigit():
raise exceptions.InvalidPasswordError("Password can not be entirely numeric.")
return password
def exceeds_maximum_length_ratio(
password: str, max_similarity: float, value: str
) -> float:
"""
Test that value is within a reasonable range of password.
The following ratio calculations are based on testing difflib.SequenceMatcher like
this:
for i in range(0,6):
print(10**i, difflib.SequenceMatcher(a='A', b='A'*(10**i)).quick_ratio())
which yields:
1 1.0
10 0.18181818181818182
100 0.019801980198019802
1000 0.001998001998001998
10000 0.00019998000199980003
100000 1.999980000199998e-05
This means a length_ratio of 10 should never yield a similarity higher than
0.2, for 100 this is down to 0.02 and for 1000 it is 0.002. This can be
calculated via 2 / length_ratio. As a result we avoid the potentially
expensive sequence matching.
"""
pwd_len = len(password)
length_bound_similarity = max_similarity / 2 * pwd_len
value_len = len(value)
return pwd_len >= 10 * value_len and value_len < length_bound_similarity
def validate_password_not_similar_to_user_attributes(
password: str, user_attrs: Optional[dict[str, str]]
) -> str:
"""
Validate that the password is sufficiently different from the user's
attributes.
If no specific attributes are provided, look at a sensible list of
defaults. Attributes that don't exist are ignored. Comparison is made to
not only the full attribute value, but also its components, so that, for
example, a password is validated against either part of an email address,
as well as the full address.
"""
max_similarity = 0.7 # max_similarity must be at least 0.1
    password_lower = password.lower()
    for attr in user_attrs or {}:  # tolerate the None default
attr_value = user_attrs[attr]
if not attr_value or not isinstance(attr_value, str):
continue
attr_value_lower = attr_value.lower()
parts = re.split(r"\W+", attr_value_lower) + [attr_value_lower]
for part in parts:
if exceeds_maximum_length_ratio(password_lower, max_similarity, part):
continue
if (
difflib.SequenceMatcher(a=password_lower, b=part).quick_ratio()
>= max_similarity
):
raise exceptions.InvalidPasswordError(
"The password is very similar to " f"the {attr}"
)
return password
def validate_password_not_common(password: str) -> str:
    """
    Validates that the password does not occur in a list of 20,000 common
    passwords.
"""
if not _PASSWORD_LIST:
_load_password_list()
if password.lower().strip() in _PASSWORD_LIST:
raise exceptions.InvalidPasswordError("Password is too common.")
return password
def validate_password(
password: str,
min_length: int,
max_length: int,
    user_attrs: Optional[dict[str, str]] = None,
) -> str:
"""
Validates the password with all the password validation related
functions in this module.
"""
password = validate_password_length(password, min_length, max_length)
password = validate_password_not_numeric(password)
password = validate_password_not_similar_to_user_attributes(password, user_attrs)
password = validate_password_not_common(password)
return password
```
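A minimal usage sketch of the validators above; the password, length bounds, and user attributes are illustrative assumptions, not project configuration:
```python
# Hypothetical example only: all literal values below are placeholders.
from fastproject.modules.users import exceptions, password_validators

try:
    password_validators.validate_password(
        "correct horse battery staple",
        min_length=8,
        max_length=128,
        user_attrs={"username": "sulyvahn", "email": "user@example.org"},
    )
except exceptions.InvalidPasswordError as error:
    print(f"Rejected: {error}")
```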
#### File: modules/users/service.py
```python
import datetime
import uuid
import zoneinfo
from typing import Any, Optional
from ... import config
from ...utils import encoding
from . import password_hashing, repository
async def create_user(
username: str,
email: str,
first_name: str,
last_name: str,
password: str,
date_joined: Optional[datetime.datetime] = None,
is_superuser=False,
is_staff=False,
is_active=True,
last_login: Optional[datetime.datetime] = None,
) -> repository.User:
"""Inserts a user into the database.
Args:
username: The username of the user.
email: The email of the user.
first_name: The first name of the user.
last_name: The last name of the user.
        password: The password (not hashed) of the user.
date_joined: The datetime the user joined the system.
        is_superuser: A flag that indicates if this user is a superuser.
        is_staff: A flag that indicates if this user is staff.
is_active: A flag that indicates if this user is active.
last_login: The datetime the user last logged in.
Returns:
A repository.User representing the created user.
Raises:
UsernameAlreadyExistsError: If the username already exists.
EmailAlreadyExistsError: If the email already exists.
"""
username = encoding.normalize_str(username)
email = encoding.normalize_str(email)
first_name = encoding.normalize_str(first_name)
last_name = encoding.normalize_str(last_name)
if date_joined is None:
tzinfo = zoneinfo.ZoneInfo(config.settings["APPLICATION"]["timezone"])
date_joined = datetime.datetime.now(tz=tzinfo)
password_hash = password_hashing.make_password(password)
return await repository.insert_user(
username=username,
email=email,
first_name=first_name,
last_name=last_name,
        password=password_hash,
date_joined=date_joined,
is_superuser=is_superuser,
is_staff=is_staff,
is_active=is_active,
last_login=last_login,
)
async def get_user_by_id(user_id: uuid.UUID) -> Optional[repository.User]:
"""Returns the user with the specified user_id from the database.
Args:
user_id: The user_id of the searched user.
Returns:
A repository.User representing the searched user, None if the user was not
found.
"""
return await repository.get_user_by_id(user_id)
async def update_user_by_id(
user_id: uuid.UUID, **kwargs: Any
) -> Optional[repository.User]:
"""Updates the data of the user with the specified user_id in the database.
Args:
user_id: The user_id of the user that will be updated.
**username (str): The username of the user.
**email (str): The email of the user.
**first_name (str): The first name of the user.
**last_name (str): The last name of the user.
**password (str): The password (not hashed) of the user.
        **is_superuser (bool): A flag that indicates if this user is a superuser.
        **is_staff (bool): A flag that indicates if this user is staff.
**is_active (bool): A flag that indicates if this user is active.
**date_joined (datetime.datetime): The datetime the user joined the
system.
**last_login (datetime.datetime): The datetime the user last logged in.
Returns:
A repository.User representing the updated user, None if the user was not
updated.
Raises:
UsernameAlreadyExistsError: If the username already exists.
EmailAlreadyExistsError: If the email already exists.
"""
if "username" in kwargs:
kwargs["username"] = encoding.normalize_str(kwargs["username"])
if "email" in kwargs:
kwargs["email"] = encoding.normalize_str(kwargs["email"])
if "first_name" in kwargs:
kwargs["first_name"] = encoding.normalize_str(kwargs["first_name"])
if "last_name" in kwargs:
kwargs["last_name"] = encoding.normalize_str(kwargs["last_name"])
if "password" in kwargs:
kwargs["password"] = password_hashing.make_password(kwargs["password"])
return await repository.update_user_by_id(user_id, **kwargs)
async def delete_user_by_id(user_id: uuid.UUID) -> Optional[repository.User]:
"""Deletes the user with the specified user_id from the database.
Args:
user_id: The user_id of the user that will be deleted.
Returns:
A repository.User representing the deleted user, None if the user was not
deleted.
"""
return await repository.delete_user_by_id(user_id)
```
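A hedged sketch of calling this service layer from an async entry point; it assumes the `fastproject.modules.users` package layout used in the tests and an already-configured database connection, which this module does not set up itself:
```python
# Illustrative only: settings and the database must already be configured.
import asyncio

from fastproject.modules.users import service


async def main():
    user = await service.create_user(
        username="pontiff_sulyvahn",
        email="user@example.org",
        first_name="Sulyvahn",
        last_name="Pontiff",
        password="an-example-password",
    )
    print(user)


asyncio.run(main())
```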
#### File: tests/db/test_utils.py
```python
import datetime
from fastproject import db
def test_updater_fields():
# Fields and null fields.
fields = ("username", "email", "first_name")
null_fields = ("last_name", "last_login")
new_values = db.updater_fields(
fields,
null_fields,
username="pontiff_sulyvahn",
email="<EMAIL>",
first_name="Sulyvahn",
last_login=datetime.datetime(2018, 12, 15),
)
assert new_values == {
"username": "pontiff_sulyvahn",
"email": "<EMAIL>",
"first_name": "Sulyvahn",
"last_name": None,
"update_last_name": False,
"last_login": datetime.datetime(2018, 12, 15),
"update_last_login": True,
}
# Fields only.
fields = ("username", "email", "first_name", "last_name")
new_values = db.updater_fields(
fields=fields,
username="pontiff_sulyvahn",
email="<EMAIL>",
first_name="Sulyvahn",
)
assert new_values == {
"username": "pontiff_sulyvahn",
"email": "<EMAIL>",
"first_name": "Sulyvahn",
"last_name": None,
}
# Null fields only
null_fields = ("last_login", "birthday")
new_values = db.updater_fields(
null_fields=null_fields,
updater_flag_preffix="upt_",
last_login=datetime.datetime(2018, 12, 15),
birthday=None,
)
assert new_values == {
"last_login": datetime.datetime(2018, 12, 15),
"upt_last_login": True,
"birthday": None,
"upt_birthday": True,
}
```
#### File: modules/users/test_password_validators.py
```python
import pytest
from fastproject.modules.users import exceptions, password_validators
@pytest.mark.parametrize(
"password,min_len,max_len,raises",
[
("d<PASSWORD>", 4, 16, False),
("", 0, 1, False),
("123456", 6, 6, False),
("1234567", 6, 7, False),
("12", 6, 7, True),
],
)
def test_validate_password_length(password, min_len, max_len, raises):
if raises:
with pytest.raises(
exceptions.InvalidPasswordError, match="Password can not have less than"
):
password_validators.validate_password_length(password, min_len, max_len)
else:
password_validators.validate_password_length(password, min_len, max_len)
@pytest.mark.parametrize(
"password,raises", [("<PASSWORD>", True), ("<PASSWORD>", False), ("<PASSWORD>", False)]
)
def test_validate_password_not_numeric(password, raises):
if raises:
with pytest.raises(
exceptions.InvalidPasswordError,
match="Password can not be entirely numeric.",
):
password_validators.validate_password_not_numeric(password)
else:
password_validators.validate_password_not_numeric(password)
``` |
{
"source": "jorge4larcon/watch-github-repo",
"score": 3
} |
#### File: watch-github-repo/watch_github_repo/lambda_function.py
```python
from __future__ import annotations # https://www.python.org/dev/peps/pep-0563/
import json
import os
import urllib.error
import urllib.parse
import urllib.request
from dataclasses import dataclass
from datetime import datetime, timedelta
from typing import List
import boto3
import jinja2
from utils import console_logger, utc2datetime, datetime2utc
LOG_LEVEL = os.getenv('LOG_LEVEL', 'INFO')
TELEGRAM_API_URL = 'https://api.telegram.org/bot'
TELEGRAM_MSG_TEMPLATE_FILE = 'telegram-msg.j2'
logger = console_logger(__name__, LOG_LEVEL)
@dataclass
class Commit:
"""Basic representation of a commit."""
message: str
timestamp: datetime
url: str
@staticmethod
def from_api_dict(api_dict: dict) -> Commit:
"""
Retrieves a 'Commit' object using a dict.
"""
try:
message = api_dict['commit']['message']
timestamp = utc2datetime(api_dict['commit']['committer']['date'])
url = api_dict['html_url']
return Commit(message, timestamp, url)
except KeyError as e:
logger.exception('Seems that the Github API is not using the way '
'to represent commits in JSON format they used '
'to.')
raise e
def get_last_check_date(s3: boto3.session.Session.resource, bucket: str,
key: str) -> datetime:
"""Retrives the last check date from a text file in an S3 bucket."""
try:
s3_obj = s3.Object(bucket, key)
date_str = s3_obj.get()['Body'].read().decode('UTF-8').strip()
return utc2datetime(date_str)
except Exception:
logger.exception('Unable to retrieve the object %s to obtain the last '
                         'check date, using "now" as the last check date.',
f's3://{bucket}/{key}')
return datetime.now()
def write_check_date(check_date: datetime, s3: boto3.session.Session.resource,
bucket: str, key: str):
"""Saves the check date in iso format in a text file in an S3 bucket."""
check_date_str = datetime2utc(check_date)
object_path = f's3://{bucket}/{key}'
try:
s3_obj = s3.Object(bucket, key)
response = s3_obj.put(Body=check_date_str)
response_metadata = response.get('ResponseMetadata')
if response_metadata.get('HTTPStatusCode') == 200:
logger.info('The check date was saved successfully in %s',
object_path)
else:
logger.error('Unable to save the check date in %s', object_path)
except Exception:
logger.exception('Unable to save the check date in %s', object_path)
def get_github_commits(repo_url: str, files_to_watch: List[str],
                       since: datetime) -> List[Commit]:
"""
Retrieves the Github commits that contain the specified files since an
specific date.
"""
query = {'path': files_to_watch, 'since': since.isoformat()}
params = urllib.parse.urlencode(query, doseq=True,
quote_via=urllib.parse.quote)
url = f'{repo_url}?{params}'
commits: List[dict] = []
try:
with urllib.request.urlopen(url) as response:
commits = json.loads(response.read())
except Exception:
logger.exception('Unable to retrieve the Github repository commits.')
commits = list(map(Commit.from_api_dict, commits))
return commits
def make_telegram_msg(commits: List[Commit], watched_files: List[str],
project_name: str, template_file: str) -> str:
"""Creates the text message that will be sent via Telegram."""
template_loader = jinja2.FileSystemLoader(searchpath='.')
template_env = jinja2.Environment(loader=template_loader)
template = template_env.get_template(template_file)
return template.render(commits=commits, watched_files=watched_files,
project_name=project_name)
def send_telegram_msg(msg: str, chat_id: str, token: str):
"""Sends a text message to an specific Telegram chat."""
msg = urllib.parse.urlencode({
'chat_id': chat_id, 'text': msg, 'disable_web_page_preview': True})
msg = msg.encode('ascii')
url = f'{TELEGRAM_API_URL}{token}/sendMessage'
request = urllib.request.Request(url=url, data=msg, method='POST')
try:
logger.info('Notifying the boss via Telegram...')
with urllib.request.urlopen(request) as response:
parsed_response = json.loads(response.read())
logger.info('Telegram response received: %s', parsed_response)
if parsed_response.get('ok'):
logger.info('The boss has been notified via Telegram.')
else:
logger.error('There was a problem notifying the boss via '
'Telegram O_o.')
except urllib.error.URLError:
logger.exception('There was a problem sending the Telegram message!')
def watch_files(s3_bucket: str, s3_obj_key: str, github_repo_api_url: str,
files_to_watch: List[str], project_name: str,
telegram_msg_template: str, telegram_chat_id: str,
telegram_token: str):
"""Orchestrates all the operation of watching files of the repository."""
logger.info('Retrieving the last check date from "%s"...',
f's3://{s3_bucket}/{s3_obj_key}')
s3 = boto3.resource('s3')
last_check_date = get_last_check_date(s3, s3_bucket, s3_obj_key)
logger.info('Retrieving the commits that contain the files %s since: %s',
','.join(files_to_watch),
last_check_date.strftime('%d/%b/%Y, %I:%M %p'))
commits = get_github_commits(github_repo_api_url, files_to_watch,
last_check_date)
if not commits:
        logger.info('There are no recent commits that include the files the '
                    'boss is interested in.')
return
five_min_ago = datetime.now() - timedelta(minutes=5)
write_check_date(five_min_ago, s3, s3_bucket, s3_obj_key)
msg = make_telegram_msg(commits, files_to_watch, project_name,
telegram_msg_template)
logger.info('Notifying about %s commit(s).', len(commits))
send_telegram_msg(msg, telegram_chat_id, telegram_token)
def lambda_handler(event, _context):
"""AWS Lambda funtion handler."""
watch_files(
event['s3_bucket'],
event['check_date_file'],
event['github_repo_api_url'],
event['files_to_watch'],
event['project_name'],
TELEGRAM_MSG_TEMPLATE_FILE,
event['telegram_chat_id'],
event['telegram_bot_token']
)
``` |
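The handler is configured entirely through the invocation event; a hypothetical payload with the keys read in `lambda_handler` (every value is a placeholder assumption) might look like this:
```python
# Example event for lambda_handler; all values are illustrative assumptions.
example_event = {
    "s3_bucket": "my-watcher-bucket",
    "check_date_file": "last-check-date.txt",
    "github_repo_api_url": "https://api.github.com/repos/owner/repo/commits",
    "files_to_watch": ["README.md", "setup.py"],
    "project_name": "example-project",
    "telegram_chat_id": "123456789",
    "telegram_bot_token": "123456:ABC-example-token",
}
# lambda_handler(example_event, None) would then check the repository and,
# if matching commits exist, send a Telegram notification.
```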
{
"source": "Jorge6493/MessageOrientedMiddleware",
"score": 3
} |
#### File: Jorge6493/MessageOrientedMiddleware/server.py
```python
import pika
import sys
import threading
def Send(msg):
# print("msg")
# print(msg)
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.exchange_declare(exchange='logs',
exchange_type='fanout')
message = msg
if msg != '':
channel.basic_publish(exchange='logs',
routing_key='',
body=message)
print("Sent %r" % message)
connection.close()
def Receive():
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.exchange_declare(exchange='logs',
exchange_type='fanout')
result = channel.queue_declare(exclusive=True)
queue_name = result.method.queue
channel.queue_bind(exchange='logs',
queue=queue_name)
print('Waiting for messages')
channel.queue_declare(queue='hello')
# def callback(ch, method, properties, body):
# print(" [x] %r" % body)
def callbackhello(ch, method, properties, body):
Send(body)
# print(" [c] %r" % body)
# channel.basic_consume(callback,
# queue=queue_name,
# no_ack=True)
channel.basic_consume(callbackhello,
queue='hello',
no_ack=True)
channel.start_consuming()
# send_thread = threading.Thread(target=Send(""))
receive_thread = threading.Thread(target=Receive)
# send_thread.start()
receive_thread.start()
# send_thread.join()
receive_thread.join()
``` |
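For context, a companion client would publish into the `hello` queue and listen on the `logs` fanout exchange; the sketch below mirrors the pre-1.0 pika API already used in the server above and is an assumption about how clients connect:
```python
# Hypothetical client sketch using the same pre-1.0 pika API as the server.
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()

# Publish a message that the server will rebroadcast on the 'logs' exchange.
channel.queue_declare(queue='hello')
channel.basic_publish(exchange='', routing_key='hello', body='hello group')

# Subscribe to everything the server fans out.
channel.exchange_declare(exchange='logs', exchange_type='fanout')
result = channel.queue_declare(exclusive=True)
queue_name = result.method.queue
channel.queue_bind(exchange='logs', queue=queue_name)

def callback(ch, method, properties, body):
    print(" [x] %r" % body)

channel.basic_consume(callback, queue=queue_name, no_ack=True)
channel.start_consuming()
```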
{
"source": "Jorge908/G1_LPC",
"score": 2
} |
#### File: G1_LPC/mp_orcamento/views.py
```python
from django.shortcuts import render
from .models import *
def orcamentos_lista(request):
orcamentos = Orcamento.objects.all()
return render(request, 'mp_orcamento/orcamentos.html', {'orcamentos': orcamentos})
def orcamentos_estatisticas(request):
maior_custo = 0
menor_custo = 999999999999
orcamento_maior_custo = None
orcamento_menor_custo = None
orcamentos = Orcamento.objects.all()
somatorio_custo_total = 0
for orcamento in orcamentos:
somatorio = 0
for peca in Peca.objects.filter(orcamento=orcamento):
somatorio += peca.custo_de_producao_ajustado()
orcamento.custo_total = somatorio * 1.25
somatorio_custo_total += orcamento.custo_total
if orcamento.custo_total >= maior_custo:
orcamento_maior_custo = orcamento
maior_custo = orcamento.custo_total
if orcamento.custo_total <= menor_custo:
orcamento_menor_custo = orcamento
menor_custo = orcamento.custo_total
quantidade = Orcamento.objects.count()
media_custo_total = somatorio_custo_total / quantidade
return render(request, 'mp_orcamento/estatisticas.html',
{'quantidade': quantidade,
'orcamento_maior_custo': orcamento_maior_custo,
'orcamento_menor_custo': orcamento_menor_custo,
'media_custo_total': media_custo_total,
})
def orc_cliente(request, codigo):
cliente = Cliente.objects.get(id=codigo)
orcamento = Orcamento.objects.filter(cliente=cliente)
return render(request, 'mp_orcamento/id_cliente.html', {'cliente': cliente, 'orcamento':orcamento})
def cliente_estatisticas(request):
clientes = Cliente.objects.count()
maior_custo = 0
menor_custo = 999999999999
orcamento_maior_custo = None
orcamento_menor_custo = None
orcamentos = Orcamento.objects.all()
somatorio_custo_total = 0
for orcamento in orcamentos:
somatorio = 0
for peca in Peca.objects.filter(orcamento=orcamento):
area_frente = peca.largura * peca.altura
area_lado = peca.altura * peca.profundidade
area_total = area_frente + area_frente + area_lado + area_lado
area_total = area_total / 100
custo_de_producao = 0
if peca.tipo_da_mobilia == 'compartimento de armário':
custo_de_producao += 50 * area_total
else:
custo_de_producao += 75 * area_total
if peca.tipo_do_puxador == 'plástico':
custo_de_producao += 5
else:
custo_de_producao += 8.5
if peca.pintura == 'acabamento PU':
custo_de_producao += 15 * area_total
elif peca.pintura == 'acabamento PU texturizado':
custo_de_producao += 20 * area_total
else:
custo_de_producao += 35 * area_total
custo_de_producao_ajustado = custo_de_producao * 1.75
somatorio += custo_de_producao_ajustado
orcamento.custo_total = somatorio * 1.25
somatorio_custo_total += orcamento.custo_total
if orcamento.custo_total >= maior_custo:
orcamento_maior_custo = orcamento
maior_custo = orcamento.custo_total
if orcamento.custo_total <= menor_custo:
orcamento_menor_custo = orcamento
menor_custo = orcamento.custo_total
return render(request, 'mp_orcamento/cliente_estatisticas.html', {'clientes': clientes, 'orcamento_maior_custo':orcamento_maior_custo, 'orcamento_menor_custo':orcamento_menor_custo})
``` |
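A hypothetical `urls.py` wiring for these views; the route strings are assumptions, only the view signatures come from the module above:
```python
# Illustrative routing sketch (not part of the original project).
from django.urls import path

from mp_orcamento import views

urlpatterns = [
    path("orcamentos/", views.orcamentos_lista),
    path("orcamentos/estatisticas/", views.orcamentos_estatisticas),
    path("clientes/<int:codigo>/", views.orc_cliente),
    path("clientes/estatisticas/", views.cliente_estatisticas),
]
```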
{
"source": "Jorge9314/Distribuidos",
"score": 3
} |
#### File: Proyectos/ManejadorArchivos/Client_Proyect.py
```python
from xmlrpc.server import SimpleXMLRPCServer
from xmlrpc.server import SimpleXMLRPCRequestHandler
import xmlrpc.client
import random
import _thread
import time
import tempfile
import os
import socket
PATH = os.path.abspath("./tmp/")
PATHF = os.path.abspath("./files/")
tmpname = '\esteeselarchivitotemporal.txt'
class RequestHandler(SimpleXMLRPCRequestHandler):
    rpc_paths = ('/RPC2',)
PORT = 8000
BUFF = 1024
HOST = '192.168.9.124'
HOST2 = '192.168.9.182'
def readingfile(clientsock):
#print("*Received request to read file*")
data = clientsock.recv(BUFF).decode("utf-8")
#print(data)
f = open ('files/'+data, 'r')
#print("file opened.")
for line in f:
clientsock.send(line.encode("utf-8"))
#print("End of file")
time.sleep(0.5)
clientsock.send("EOF".encode("utf-8"))
f.close()
#print("File closed.")
return data
def sockets():
sckt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sckt.bind((HOST,PORT))
sckt.listen(5)
print("*Conected to server*")
while True:
clientsock, addr = sckt.accept()
op = clientsock.recv(BUFF).decode("utf-8")
if op == 'r':
data = readingfile(clientsock)
if op == 'w':
data = readingfile(clientsock)
f = open ('files/'+data, 'w')
print('Waiting modifications...')
while True:
stream = clientsock.recv(BUFF).decode("utf-8")
if stream == "EOF":
break
f.write(stream)
print (stream)
f.close()
if op == "d":
print("*Received request to read file MODE EDIT*")
data = clientsock.recv(BUFF).decode("utf-8")
print(data)
os.remove(PATHF+"\\"+data)
# server-side part of the client
class client_functions():
def __init__(self):
self.s = xmlrpc.client.ServerProxy("http://192.168.9.124:9000")
def newClient(self):
listFiles = os.listdir("files")
print (self.s.newClient(listFiles, PORT))
def read(self,file):
filename, portFile = self.s.requestFile(file)
print ("Connecting... to ",portFile)
filesocket = socket.socket()
filesocket.connect((HOST2,portFile))
print ("Conected\n")
filesocket.send("r".encode("utf-8"))
print ("Read request send... ")
print("Sending filename... ")
filesocket.send(filename.encode("utf-8"))
print("OK")
ft = open('tmp/'+tmpname,"w")
while True:
stream = filesocket.recv(BUFF).decode("utf-8")
if stream == "EOF":
break
ft.write(stream)
ft.close()
filesocket.close()
fd = os.open(PATH+tmpname,os.O_RDONLY)
os.system(PATH+tmpname)
os.close(fd)
os.remove(PATH+tmpname)
print(self.s.release(file))
def edit(self,file):
print("Editing... \n")
filename, portFile = self.s.requestFile(file)
print ("Connecting... to ",portFile)
filesocket = socket.socket()
filesocket.connect((HOST2,portFile))
print ("Conected\n")
filesocket.send("w".encode("utf-8"))
print ("Write request send... ")
print("Sending filename... ")
filesocket.send(filename.encode("utf-8"))
print("OK")
ft = open('tmp/'+tmpname,"w")
while True:
stream = filesocket.recv(BUFF).decode("utf-8")
if stream == "EOF":
break
ft.write(stream)
print('Successfully get the file')
print('connection closed')
ft.close()
os.system(PATH+tmpname)
ft = open('tmp/'+tmpname, 'r')
for line in ft:
filesocket.send(line.encode("utf-8"))
time.sleep(0.5)
filesocket.send("EOF".encode("utf-8"))
ft.close()
os.remove(PATH+tmpname)
filesocket.close()
print(self.s.release(file))
def delete(self,file):
print("Delete... \n")
filename, portFile = self.s.requestFile(file)
print(self.s.deleteFile(file))
print ("Connecting... to ",portFile)
filesocket = socket.socket()
filesocket.connect((HOST2,portFile))
print ("Conected\n")
filesocket.send("d".encode("utf-8"))
print("Sending filename... ")
filesocket.send(filename.encode("utf-8"))
filesocket.close()
print("Deleted succesfully...")
def main():
while True:
op=input("What do you want to do?, Select an option with ID: 1.Read 2.Edit 3.Delete 4.List Files\n: ")
if op == '4':
print("Asking for file names...\n")
d = clientObject.s.filesAvailable(PORT)
print("Files available:\n")
for i in d:
tupl = d.get(i)
print('ID: {}\tFilename: {}\tPermissions: {}\n'.format(i, tupl[0], tupl[1]))
else:
fileToOpen=input("Select the ID of file: ")
fileToOpen=int(fileToOpen)
exist, notbusy = clientObject.s.searchFile(fileToOpen)
if exist==True:
if notbusy == True:
                    # check permissions
print('File available...\n')
                    perm = clientObject.s.checkPermissions(fileToOpen,PORT)  # returns the file's permissions.
print (perm)
if op == '1':
if 'r' in perm:
clientObject.read(fileToOpen)
else:
print("You're not allowed to do that.")
if op == '2':
if 'w' in perm:
clientObject.edit(fileToOpen)
else:
print("You're not allowed to do that.")
if op == '3':
if 'd' in perm:
clientObject.delete(fileToOpen)
else:
print("You're not allowed to do that.")
else:
print("The file is already been used.")
else:
print("This file doesn't exist.")
_thread.start_new_thread(sockets,())
clientObject = client_functions()
clientObject.newClient()
main()
```
#### File: Proyectos/ManejadorArchivos/Server_Proyect.py
```python
from xmlrpc.server import SimpleXMLRPCServer
from xmlrpc.server import SimpleXMLRPCRequestHandler
import xmlrpc.client
import random
import _thread
import time
PORT = 9000
perm = ['n','r','rwd']
class nn():
def __init__(self):
self.listFiles=[] #ID - Filename - port client - status
self.clients={}
self.idnum = 0
def newClient(self,listf,port):
self.permissions(port,listf)
return ('Added.')
def permissions(self,port,listf):
if len(self.clients)==0:
tmp = {}
for i in listf:
tmp[str(self.idnum)] = [i,'rwd']
self.listFiles.append([self.idnum, i,port,0])
self.idnum += 1
self.clients[port] = tmp
else:
tmp = {}
tmp2 = {}
for i in self.listFiles:
print(i[0])
tmp[str(i[0])] = [i[1],random.choice(perm)]
self.clients[port] = tmp
for i in listf:
tmp[str(self.idnum)] = [i,'rwd']
self.listFiles.append([self.idnum,i,port,0])
for k in self.clients.keys():
if k != port:
tmp2[str(self.idnum)] = [i,random.choice(perm)]
self.clients[k].update(tmp2)
self.idnum += 1
def filesAvailable(self,port):
print (self.clients[port])
return self.clients[port]
def searchFile(self,id):
for i in self.listFiles:
if id == i[0]:
if i[3] != 1:
i[3] = 1
return (True, True)
else:
return (True, False)
return (False, False)
def checkPermissions(self,file,port):
        return self.clients[port][str(file)][1]  # Returns the permissions
def requestFile(self,file):
for i in self.listFiles:
if file == i[0]:
return (i[1],i[2])
def deleteFile(self,file):
print (file)
for i in self.listFiles:
print(i)
if file == i[0]:
self.listFiles.remove(i)
print (self.listFiles)
for i in self.clients:
print (i)
print (self.clients[i][str(file)])
self.clients[i].pop(str(file))
return 'Deleted from server database'
def release(self,file):
for i in self.listFiles:
if file == i[0]:
i[3] = 0
return "Released."
else:
return "Error"
class RequestHandler(SimpleXMLRPCRequestHandler):
rpc_path = ('RPC2',)
servidor = nn()
server = SimpleXMLRPCServer(("192.168.9.124", PORT), requestHandler=RequestHandler)
server.register_introspection_functions()
server.register_function(servidor.newClient, "newClient")
server.register_function(servidor.permissions, "permissions")
server.register_function(servidor.filesAvailable, "filesAvailable")
server.register_function(servidor.searchFile, "searchFile")
server.register_function(servidor.checkPermissions, "checkPermissions")
server.register_function(servidor.requestFile, "requestFile")
server.register_function(servidor.deleteFile, "deleteFile")
server.register_function(servidor.release, "release")
server.serve_forever()
```
#### File: Talleres/ComunicacionGrupo(Taller 6)/GroupUDP.py
```python
import socket
import _thread
import random
GRPPORT=7000
OTPORT=8000
GRPPORT,OTPORT= input('Ingrese 2 puertos para el proceso: entre 7000-8000 separados por espacio = ').split(' ')
GRPPORT=int(GRPPORT)
OTPORT=int(OTPORT)
UDP_IP = "127.0.0.1"
NPPORT = 5005
BUFF = 1024
ADMPORT = 9999
OPPORT =9000
NCPORT=3000
LTPORT=4000
admin=False
operator=False
process=[]
listsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
class handler():
def __init__(self):
self.process=[]
def run(self):
while True:
newlist, addradmin = adminsock.recvfrom(BUFF)
self.process = newlist.decode('utf-8')[1:-1].split(', ')
hand=handler()
def funciones(p):
while True:
o = input('Seleccione su opcion.\n1. Listar grupo.\n2. Eliminar proceso.\n3. Eliminar Grupo. = ')
o = int(o)
if o == 1:
for enum,i in enumerate(p.process):
print (enum, ': ',i)
if o == 2:
for enum,i in enumerate(p.process):
print (enum, ': ',i)
pos = input('Ingrese la posicion.= ')
pos = int(pos)
if pos == 0:
print('No jodas ome')
else:
del p.process[pos]
listsock.sendto(str(p.process).encode('utf-8'),(UDP_IP,LTPORT))
if o == 3:
p.process = []
listsock.sendto('vaciar'.encode('utf-8'),(UDP_IP,LTPORT))
def suma(op):
r=0
for i in op:
r = r + int(i[1:-1])
return r
newprocesssock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
clientsock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM) # UDP
clientsock.bind((UDP_IP, GRPPORT))
groupsock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM) # UDP
newprocesssock.sendto(str(GRPPORT).encode('utf-8'),(UDP_IP,NPPORT))
serverdata, addrserver = clientsock.recvfrom(BUFF)
serverdata = serverdata.decode('utf-8')
newprocesssock.close()
if serverdata == 'admin':
admin = True
else:
admin = False
if admin:
adminsock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM) # UDP
adminsock.bind((UDP_IP, ADMPORT))
_thread.start_new_thread(hand.run,())
else:
groupsock.bind((UDP_IP,OTPORT))
if admin:
_thread.start_new_thread(funciones, (hand,))
while True:
dataclient, addrclient = clientsock.recvfrom(BUFF)
dataclient = dataclient.decode('utf-8')
if len(process)>1:
n = random.randint(0,len(process)-1)
else:
n = 0
if dataclient == 'suma':
if admin:
if n == 0:
op, opaddr = clientsock.recvfrom(BUFF)
op = op.decode('utf-8')
op = op[1:-1].split(', ')
op = suma(op)
clientsock.sendto(str(op).encode('utf-8'),(UDP_IP,NCPORT))
else:
for i,j in enumerate(process[1:]):
if i == n:
groupsock.sendto('yes'.encode('utf-8'),(UDP_IP,OTPORT))
else:
groupsock.sendto('no'.encode('utf-8'),(UDP_IP,OTPORT))
else:
dataadmin, dataddr = groupsock.recvfrom(BUFF)
dataadmin = dataadmin.decode('utf-8')
if dataadmin == 'yes':
op, opaddr = clientsock.recvfrom(BUFF)
op = op.decode('utf-8')
op = op[1:-1].split(' ')
op = suma(op)
                clientsock.sendto(str(op).encode('utf-8'), (UDP_IP, NCPORT))
```
#### File: ServidorDeNombres(Taller 2)/python/client.py
```python
import socket
class UDPClient:
def __init__(self, ip, puerto):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.serverMainAddr = (ip, puerto)
self.id = "-1"
self.tipo = "cliente"
self.sock.settimeout(60)
def sendMsg(self, message, addres):
if isinstance(message, str):
message = self.id + ":" + self.tipo + ":" + message + ":"
self.sock.sendto(message.encode('utf-8'), addres)
print("mensaje enviado - {}".format(message))
elif isinstance(message, bytes):
message = self.id + ":" + self.tipo + ":" + message.decode('utf-8') + ":"
self.sock.sendto(message, addres)
print("mensaje enviado - {}".format(message.decode('utf-8')))
else:
print("ERROR - Data type to send can't work")
return None
def recieveMsg(self, size):
try:
data, addr = self.sock.recvfrom(size)
print("Mensaje recibido - {}".format(data.decode('utf-8')))
id = None
tipo = None
mensaje = None
temp = ""
for i in data.decode('utf-8'):
if(i!=":"):
temp+=i
elif(id is None and i==":"):
id=temp
temp=""
elif(tipo is None and i==":"):
tipo=temp
temp=""
elif(mensaje is None and i==":"):
mensaje=temp
temp=""
return (id, tipo, mensaje, addr)
except socket.timeout:
print("Time over")
return (None, None, None, None)
except:
return (None, None, None, None)
def comServerName(self):
print("Communicating - Server name")
while True:
MESSAGE=input("> ")
self.sendMsg(MESSAGE, (ipServer, puertoServer))
data = ""
while data != "end" and data is not None:
id, tipo, data, addr=self.recieveMsg(1024)
if (data is not None):
print("Communique - Server name")
print("Recibido dato '{}' de '{}'".format(data, addr))
if(data!="end"):
if("~" in data):
tag = None
temp = ""
for i in data:
if(i != "~"):
temp += i
else:
tag = temp
temp = ""
if(tag == "direccion"):
                                ip, addr = temp[1:-1].split(", ")
ip = ip[1:-1]
addr = int(addr)
return (ip, addr)
def comServerOperation(self, addr):
print("Communicating - Server operation")
self.sendMsg("operar", addr)
id, tipo, data, addr=self.recieveMsg(1024)
data = None
while data != "end":
id, tipo, data, addr=self.recieveMsg(1024)
print("Communique - server operation")
print("To exit writte END")
while True:
MESSAGE = input("> ")
self.sendMsg(MESSAGE, addr)
data = ""
if MESSAGE == "END":
return None
while data != "end" and data is not None:
id, tipo, data, addr = self.recieveMsg(1024)
if (data is not None):
print("Recibido dato '{}' de '{}'".format(data, addr))
def runClient(self):
print("Client running")
while True:
addr = self.comServerName()
if(addr is not None):
print("dir", addr)
self.comServerOperation(addr)
if __name__ == '__main__':
ipServer = input("input server IP: ")
puertoServer = int(input("input server port: "))
print("Server IP: ", ipServer)
print("Server Port: ", puertoServer)
cliente = UDPClient(ipServer, puertoServer)
cliente.runClient()
``` |
{
"source": "jorgeab98/openfda",
"score": 3
} |
#### File: jorgeab98/openfda/web.py
```python
import http.client
import json
import http.server
import socketserver
# Copyright [2017] [<NAME>]
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# Author : <NAME>
class OpenFDAClient():
OPENFDA_API_URL="api.fda.gov"
OPENFDA_API_EVENT="/drug/event.json"
OPENFDA_API_LYRICA="/drug/event.json?search=patient.drug.medicinalproduct:"
OPENFDA_API_COMPANY="/drug/event.json?search=companynumb:"
def get_event(self,LIMIT):
conn=http.client.HTTPSConnection(self.OPENFDA_API_URL)
conn.request("GET",self.OPENFDA_API_EVENT + "?limit=" + LIMIT)
r1 = conn.getresponse()
data = r1.read()
data1=data.decode("utf8")
data2=json.loads(data1)
return data2
def get_event_lyrica(self,LIMIT):
conn=http.client.HTTPSConnection(self.OPENFDA_API_URL)
conn.request("GET",self.OPENFDA_API_LYRICA + LIMIT + "&limit=10")
r1 = conn.getresponse()
data = r1.read()
data1=data.decode("utf8")
data2=json.loads(data1)
return data2
def get_event_company(self,LIMIT):
conn=http.client.HTTPSConnection(self.OPENFDA_API_URL)
conn.request("GET",self.OPENFDA_API_COMPANY + LIMIT + "&limit=10")
r1 = conn.getresponse()
data = r1.read()
data1=data.decode("utf8")
data2=json.loads(data1)
return data2
class OpenFDAParser():
def look_drugs(self,event):
lista_medicamentos=[]
event1=event['results']
for event in event1:
event2=event["patient"]["drug"][0]['medicinalproduct']
event3=json.dumps(event2)
lista_medicamentos+=[event3]
return lista_medicamentos
def look_for_drug(self,busqueda):
lista_lyrica=[]
for event in busqueda['results']:
a=event["companynumb"]
lista_lyrica+=[a]
return lista_lyrica
def look_companies(self,company):
lista_empresas=[]
company1=company['results']
for company in company1:
company2=company['companynumb']
company3=json.dumps(company2)
lista_empresas+=[company3]
return lista_empresas
def look_for_companies(self,busqueda_empresas):
lista_companies=[]
empresa1=busqueda_empresas['results']
for busqueda_empresas in empresa1:
empresa2=busqueda_empresas["patient"]["drug"][0]['medicinalproduct']
lista_companies+=[empresa2]
return lista_companies
def look_gender(self,sex):
lista_sex=[]
sex1=sex['results']
for sex in sex1:
sex2=sex["patient"]["patientsex"]
sex3=json.dumps(sex2)
lista_sex+=[sex3]
return lista_sex
def look_age(self,age):
lista_age=[]
age1=age['results']
for age in age1:
if "patientonsetage" in age["patient"]:
age2=age["patient"]["patientonsetage"]
else:
age2="-"
age3=json.dumps(age2)
lista_age+=[age3]
return lista_age
class OpenFDAHTML():
def get_main_page(self):
html='''
<html>
<head>
<title>OpenFDA Cool App</title>
</head>
<body>
<h1>OpenFDA Client</h1>
<form method="get" action="listDrugs">
<input type="submit" value="Medicamentos">
</input>
Limite:
<input name="drug_limit" type="text" >
</input>
</form>
<form method="get" action="searchDrug">
<input name="drug" type="text" >
</input>
<input type="submit" value="Buscar medicamento">
</input>
</form>
<form method="get" action="listCompanies">
<input type="submit" value="Empresas">
</input>
Limite:
<input name="companies_limit" type="text" >
</input>
</form>
<form method="get" action="searchCompany">
<input name="company" type="text" >
</input>
<input type="submit" value="Buscar empresa">
</input>
</form>
<form method="get" action="listGender">
<input type="submit" value="Sexos">
</input>
Limite:
<input name="sex_limit" type="text" >
</input>
</form>
</body>
</html>
'''
return html
def get_main_page2(self):
html2='''
<html>
<head>
<title>OpenFDA Cool App</title>
</head>
<body>
<form method="get" action="listAge">
<input type="submit" value="Edades">
</input>
Limite:
<input name="age_limit" type="text" >
</input>
</form>
</body>
</html>
'''
return html2
def get_html_event_drug(self,lista_medicamentos):
html_event_drug='''
<html>
<head>
<title>Medicamentos</title>
</head>
<body>
<h1>Medicamentos</h1>
<ol>
'''
for i in lista_medicamentos:
html_event_drug+='<li>'+i+'</li>'
html_event_drug+='''
</ol>
</body>
</html>
'''
return html_event_drug
def get_html_event_company(self,lista_empresas):
html_event_company='''
<html>
<head>
<title>Empresas</title>
</head>
<body>
<h1>Empresas</h1>
<ol>
'''
for i in lista_empresas:
html_event_company+='<li>'+i+'</li>'
html_event_company+='''
</ol>
</body>
</html>
'''
return html_event_company
def get_html_lyrica(self,lista_lyrica):
html_lyrica='''
<html>
<head>
<title>Empresas</title>
</head>
<body>
<h1>Empresas</h1>
<ol>
'''
for i in lista_lyrica:
html_lyrica+='<li>'+i+'</li>'
html_lyrica+='''
</ol>
</body>
</html>
'''
return html_lyrica
def get_html_companies(self,lista_companies):
html_companies='''
<html>
<head>
<title>Medicamentos</title>
</head>
<body>
<h1>Medicamentos</h1>
<ol>
'''
for i in lista_companies:
html_companies+='<li>'+i+'</li>'
html_companies+='''
</ol>
</body>
</html>
'''
return html_companies
def get_html_event_sex(self,lista_sex):
html_event_sex='''
<html>
<head>
<title>Sexos</title>
</head>
<body>
<h1>Sexos</h1>
<ol>
'''
for i in lista_sex:
html_event_sex+='<li>'+i+'</li>'
html_event_sex+='''
</ol>
</body>
</html>
'''
return html_event_sex
def get_html_event_age(self,lista_age):
html_event_age='''
<html>
<head>
<title>Edades</title>
</head>
<body>
<h1>Edades</h1>
<ol>
'''
for i in lista_age:
html_event_age+='<li>'+i+'</li>'
html_event_age+='''
</ol>
</body>
</html>
'''
return html_event_age
def get_html_error(self):
html_event_error='''
<html>
<head>
<title>ERROR 404</title>
</head>
<body>
<h1>ERROR 404
NOT FOUND
</h1>
</body>
</html>
'''
return html_event_error
class testHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
def get_any_drug(self):
drug1=self.path.split("=")[1]
return drug1
def do_GET(self):
html=OpenFDAHTML()
client=OpenFDAClient()
parser=OpenFDAParser()
main_page = False
is_event_drug = False
is_searchDrug = False
is_event_company = False
is_searchCompany = False
is_event_sex = False
is_error = False
is_found = False
is_secret = False
is_redirect = False
is_event_age = False
main_page2 = False
if self.path == '/':
main_page=True
is_found=True
elif "/listDrugs" in self.path:
is_event_drug=True
is_found=True
elif "searchDrug" in self.path:
is_searchDrug=True
is_found=True
elif "/listCompanies" in self.path:
is_event_company=True
is_found=True
elif "searchCompany" in self.path:
is_searchCompany=True
is_found=True
elif "/listGender" in self.path:
is_event_sex=True
is_found=True
elif "/secret" in self.path:
is_secret=True
is_found=True
elif "/redirect" in self.path:
is_redirect=True
is_found=True
elif "/listAge" in self.path:
is_event_age=True
is_found=True
elif "/age" in self.path:
main_page2=True
is_found=True
else:
is_error=True
if is_secret:
self.send_response(401)
self.send_header('WWW-Authenticate','Basic realm="Login required"')
elif is_redirect:
self.send_response(302)
self.send_header('Location','/')
elif is_found:
self.send_response(200)
self.send_header('Content-type','text/html')
else:
self.send_response(404)
self.send_header('Content-type','text/html')
self.end_headers()
html_page = html.get_main_page()
if main_page:
self.wfile.write(bytes(html_page, "utf8"))
elif main_page2:
self.wfile.write(bytes(html.get_main_page2(), "utf8"))
elif is_event_drug:
LIMIT=self.get_any_drug()
event=client.get_event(LIMIT)
medicamentos=parser.look_drugs(event)
html_medicine=html.get_html_event_drug(medicamentos)
self.wfile.write(bytes(html_medicine, "utf8"))
elif is_searchDrug:
LIMIT=self.get_any_drug()
busqueda=client.get_event_lyrica(LIMIT)
drug_searched=parser.look_for_drug(busqueda)
html_lyrica=html.get_html_lyrica(drug_searched)
self.wfile.write(bytes(html_lyrica, "utf8"))
elif is_event_company:
LIMIT=self.get_any_drug()
company=client.get_event(LIMIT)
empresas=parser.look_companies(company)
html_company=html.get_html_event_company(empresas)
self.wfile.write(bytes(html_company, "utf8"))
elif is_searchCompany:
LIMIT=self.get_any_drug()
busqueda_empresas=client.get_event_company(LIMIT)
company_searched=parser.look_for_companies(busqueda_empresas)
html_companies=html.get_html_companies(company_searched)
self.wfile.write(bytes(html_companies, "utf8"))
elif is_event_sex:
LIMIT=self.get_any_drug()
sex=client.get_event(LIMIT)
generos=parser.look_gender(sex)
html_sex=html.get_html_event_sex(generos)
self.wfile.write(bytes(html_sex, "utf8"))
elif is_event_age:
LIMIT=self.get_any_drug()
age=client.get_event(LIMIT)
edades=parser.look_age(age)
html_age=html.get_html_event_age(edades)
self.wfile.write(bytes(html_age, "utf8"))
else:
html_error=html.get_html_error()
self.wfile.write(bytes(html_error, "utf8"))
return
``` |
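The module above defines the request handler but never starts a server; a minimal bootstrap sketch that could be appended at the bottom of web.py (the port number is an assumption) is:
```python
# Hypothetical bootstrap; the original file leaves starting the server to the caller.
if __name__ == "__main__":
    PORT = 8000
    with socketserver.TCPServer(("", PORT), testHTTPRequestHandler) as httpd:
        print("Serving on port", PORT)
        httpd.serve_forever()
```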
{
"source": "jorge-aceves/omegaup",
"score": 2
} |
#### File: stuff/cron/update_ranks.py
```python
import argparse
import datetime
import logging
import os
import sys
from typing import Sequence, NamedTuple
import mysql.connector
import mysql.connector.cursor
sys.path.insert(
0,
os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "."))
import lib.db # pylint: disable=wrong-import-position
import lib.logs # pylint: disable=wrong-import-position
class Cutoff(NamedTuple):
'''Cutoff percentile for user ranking.'''
percentile: float
classname: str
def _default_date() -> datetime.date:
today = datetime.date.today()
return today.replace(day=1)
def _parse_date(s: str) -> datetime.date:
today = datetime.datetime.strptime(s, '%Y-%m-%d').date()
return today.replace(day=1)
def update_problem_accepted_stats(
cur: mysql.connector.cursor.MySQLCursorDict,
) -> None:
'''Updates the problem accepted stats'''
logging.info('Updating accepted stats for problems...')
cur.execute('''
UPDATE
`Problems` AS `p`
SET
`p`.accepted = (
SELECT
COUNT(DISTINCT `s`.`identity_id`)
FROM
`Submissions` AS `s`
INNER JOIN
`Runs` AS `r`
ON
`r`.run_id = `s`.current_run_id
INNER JOIN
`Identities` AS `i`
ON
`i`.`identity_id` = `s`.`identity_id`
WHERE
`s`.`problem_id` = `p`.`problem_id` AND `r`.verdict = 'AC'
AND NOT EXISTS (
SELECT
`pf`.`problem_id`, `pf`.`user_id`
FROM
`Problems_Forfeited` AS `pf`
WHERE
`pf`.`problem_id` = `p`.`problem_id` AND
`pf`.`user_id` = `i`.`user_id`
)
AND NOT EXISTS (
SELECT
`a`.`acl_id`
FROM
`ACLs` AS `a`
WHERE
`a`.`acl_id` = `p`.`acl_id` AND
`a`.`owner_id` = `i`.`user_id`
)
);
''')
def update_user_rank(
cur: mysql.connector.cursor.MySQLCursorDict,
) -> Sequence[float]:
'''Updates the user ranking.'''
cur.execute('DELETE FROM `User_Rank`;')
logging.info('Updating user rank...')
cur.execute('''
SELECT
`i`.`username`,
`i`.`name`,
`i`.`country_id`,
`i`.`state_id`,
`isc`.`school_id`,
`i`.`identity_id`,
`i`.`user_id`,
COUNT(`p`.`problem_id`) AS `problems_solved_count`,
SUM(ROUND(100 / LOG(2, `p`.`accepted` + 1) , 0)) AS `score`
FROM
(
SELECT
`iu`.`user_id`,
`s`.`problem_id`
FROM
`Submissions` AS `s`
INNER JOIN
`Runs` AS `r`
ON
`r`.run_id = `s`.current_run_id
INNER JOIN
`Identities` AS `iu`
ON
`iu`.identity_id = `s`.identity_id
WHERE
`r`.verdict = 'AC' AND
`s`.type = 'normal' AND
`iu`.user_id IS NOT NULL
GROUP BY
`iu`.user_id, `s`.`problem_id`
) AS up
INNER JOIN
`Users` AS `u` ON `u`.`user_id` = `up`.`user_id`
INNER JOIN
`Problems` AS `p`
ON `p`.`problem_id` = up.`problem_id` AND `p`.visibility > 0
INNER JOIN
`Identities` AS `i` ON `i`.`identity_id` = u.`main_identity_id`
LEFT JOIN
`Identities_Schools` AS `isc`
ON
`isc`.`identity_school_id` = `i`.`current_identity_school_id`
WHERE
`u`.`is_private` = 0
AND NOT EXISTS (
SELECT
`pf`.`problem_id`, `pf`.`user_id`
FROM
`Problems_Forfeited` AS `pf`
WHERE
`pf`.`problem_id` = `p`.`problem_id` AND
`pf`.`user_id` = `u`.`user_id`
)
AND NOT EXISTS (
SELECT
`a`.`acl_id`
FROM
`ACLs` AS `a`
WHERE
`a`.`acl_id` = `p`.`acl_id` AND
`a`.`owner_id` = `u`.`user_id`
)
GROUP BY
`identity_id`
ORDER BY
`score` DESC;
''')
prev_score = None
rank = 0
# MySQL has no good way of obtaining percentiles, so we'll store the sorted
# list of scores in order to calculate the cutoff scores later.
scores = []
for index, row in enumerate(cur.fetchall()):
if row['score'] != prev_score:
rank = index + 1
scores.append(row['score'])
prev_score = row['score']
cur.execute('''
INSERT INTO
`User_Rank` (`user_id`, `ranking`,
`problems_solved_count`, `score`,
`username`, `name`, `country_id`,
`state_id`, `school_id`)
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s);''',
(row['user_id'], rank, row['problems_solved_count'],
row['score'], row['username'], row['name'],
row['country_id'], row['state_id'], row['school_id']))
return scores
def update_author_rank(cur: mysql.connector.cursor.MySQLCursorDict) -> None:
'''Updates the author's ranking'''
logging.info('Updating authors ranking...')
cur.execute('''
SELECT
`u`.`user_id`,
`i`.`username`,
`i`.`name`,
`i`.`country_id`,
`i`.`state_id`,
`isc`.`school_id`,
SUM(`p`.`quality`) AS `author_score`
FROM
`Problems` AS `p`
INNER JOIN
`ACLs` AS `a` ON `a`.`acl_id` = `p`.`acl_id`
INNER JOIN
`Users` AS `u` ON `u`.`user_id` = `a`.`owner_id`
INNER JOIN
`Identities` AS `i` ON `i`.`identity_id` = `u`.`main_identity_id`
LEFT JOIN
`Identities_Schools` AS `isc`
ON
`isc`.`identity_school_id` = `i`.`current_identity_school_id`
WHERE
`p`.`quality` IS NOT NULL
GROUP BY
`u`.`user_id`
ORDER BY
`author_score` DESC
''')
prev_score = None
rank = 0
for index, row in enumerate(cur.fetchall()):
if row['author_score'] != prev_score:
rank = index + 1
prev_score = row['author_score']
cur.execute('''
INSERT INTO
`User_Rank` (`user_id`, `username`, `author_score`,
`author_ranking`, `name`, `country_id`,
`state_id`, `school_id`)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
ON DUPLICATE KEY
UPDATE
author_ranking = %s,
author_score = %s;''',
(row['user_id'], row['username'], row['author_score'],
rank, row['name'], row['country_id'], row['state_id'],
row['school_id'], rank, row['author_score']))
def update_user_rank_cutoffs(cur: mysql.connector.cursor.MySQLCursorDict,
scores: Sequence[float]) -> None:
'''Updates the user ranking cutoff table.'''
cur.execute('DELETE FROM `User_Rank_Cutoffs`;')
logging.info('Updating ranking cutoffs...')
cutoffs = [
Cutoff(.01, 'user-rank-international-master'),
Cutoff(.09, 'user-rank-master'),
Cutoff(.15, 'user-rank-expert'),
Cutoff(.35, 'user-rank-specialist'),
Cutoff(.40, 'user-rank-beginner'),
]
if not scores:
return
for cutoff in cutoffs:
# Scores are already in descending order. That will also bias the
# cutoffs towards higher scores.
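        # For example, with 1,000 stored scores the 1% cutoff reads
        # scores[int(1000 * 0.01)] == scores[10], i.e. the 11th highest score.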
cur.execute('''
INSERT INTO
`User_Rank_Cutoffs` (`score`, `percentile`,
`classname`)
VALUES(%s, %s, %s);''',
(scores[int(len(scores) * cutoff.percentile)],
cutoff.percentile, cutoff.classname))
def update_schools_solved_problems(
cur: mysql.connector.cursor.MySQLCursorDict,
) -> None:
'''Updates the solved problems count by each school the last 6 months'''
logging.info('Updating schools solved problems...')
months = 6 # in case this parameter requires adjustments
cur.execute('DELETE FROM `Schools_Problems_Solved_Per_Month`')
cur.execute('''
INSERT INTO
`Schools_Problems_Solved_Per_Month` (
`school_id`,
`time`,
`problems_solved`
)
SELECT
`sc`.`school_id`,
STR_TO_DATE(
CONCAT (
YEAR(`su`.`time`), '-', MONTH(`su`.`time`), '-01'
),
"%Y-%m-%d"
) AS `time`,
COUNT(DISTINCT `su`.`problem_id`) AS `problems_solved`
FROM
`Submissions` AS `su`
INNER JOIN
`Schools` AS `sc` ON `sc`.`school_id` = `su`.`school_id`
INNER JOIN
`Runs` AS `r` ON `r`.`run_id` = `su`.`current_run_id`
INNER JOIN
`Problems` AS `p` ON `p`.`problem_id` = `su`.`problem_id`
WHERE
`su`.`time` >= CURDATE() - INTERVAL %(months)s MONTH
AND `r`.`verdict` = "AC" AND `p`.`visibility` >= 1
AND NOT EXISTS (
SELECT
*
FROM
`Submissions` AS `sub`
INNER JOIN
`Runs` AS `ru` ON `ru`.`run_id` = `sub`.`current_run_id`
WHERE
`sub`.`problem_id` = `su`.`problem_id`
AND `sub`.`identity_id` = `su`.`identity_id`
AND `ru`.`verdict` = "AC"
AND `sub`.`time` < `su`.`time`
)
GROUP BY
`sc`.`school_id`,
`time`
ORDER BY
`time` ASC;
''', {'months': months})
def update_school_rank(cur: mysql.connector.cursor.MySQLCursorDict) -> None:
'''Updates the school rank'''
logging.info('Updating school rank...')
cur.execute('''
SELECT
`s`.`school_id`,
SUM(ROUND(100 / LOG(2, `distinct_school_problems`.accepted+1), 0))
AS `score`
FROM
`Schools` AS `s`
INNER JOIN
(
SELECT
`su`.`school_id`,
`p`.accepted,
MIN(`su`.time)
FROM
`Submissions` AS `su`
INNER JOIN
`Runs` AS `r` ON `r`.run_id = `su`.current_run_id
INNER JOIN
`Problems` AS `p` ON `p`.`problem_id` = `su`.`problem_id`
WHERE
`r`.verdict = "AC"
AND `p`.visibility >= 1
AND `su`.`school_id` IS NOT NULL
GROUP BY
`su`.`school_id`,
`su`.`problem_id`
) AS `distinct_school_problems`
ON
`distinct_school_problems`.`school_id` = `s`.`school_id`
GROUP BY
`s`.`school_id`
ORDER BY
`score` DESC;
''')
prev_score = None
rank = 0
for index, row in enumerate(cur.fetchall()):
if row['score'] != prev_score:
rank = index + 1
prev_score = row['score']
cur.execute('''
UPDATE
`Schools` AS `s`
SET
`s`.`score` = %s,
`s`.`ranking` = %s
WHERE
`s`.`school_id` = %s;
''',
(row['score'], rank, row['school_id']))
def update_school_of_the_month_candidates(
cur: mysql.connector.cursor.MySQLCursorDict,
first_day_of_current_month: datetime.date) -> None:
'''Updates the list of candidates to school of the current month'''
logging.info('Updating the candidates to school of the month...')
if first_day_of_current_month.month == 12:
first_day_of_next_month = datetime.date(
first_day_of_current_month.year + 1,
1,
1)
else:
first_day_of_next_month = datetime.date(
first_day_of_current_month.year,
first_day_of_current_month.month + 1,
1)
# First make sure there are not already selected schools of the month
cur.execute('''
SELECT
COUNT(*) AS `count`
FROM
`School_Of_The_Month`
WHERE
`time` = %s AND
`selected_by` IS NOT NULL;
''',
(first_day_of_next_month,))
for row in cur.fetchall():
if row['count'] > 0:
logging.info('Skipping because already exist selected schools.')
return
cur.execute('''
DELETE FROM
`School_Of_The_Month`
WHERE
`time` = %s;
''',
(first_day_of_next_month,))
cur.execute(
'''
SELECT
`s`.`school_id`,
IFNULL(
SUM(
ROUND(
100 / LOG(2, `distinct_school_problems`.`accepted`+1),
0
)
),
0.0
) AS `score`
FROM
`Schools` AS `s`
INNER JOIN
(
SELECT
`su`.`school_id`,
`p`.`accepted`,
MIN(`su`.`time`) AS `first_ac_time`
FROM
`Submissions` AS `su`
INNER JOIN
`Runs` AS `r` ON `r`.`run_id` = `su`.`current_run_id`
INNER JOIN
`Problems` AS `p` ON `p`.`problem_id` = `su`.`problem_id`
WHERE
`r`.`verdict` = "AC"
AND `p`.`visibility` >= 1
AND `su`.`school_id` IS NOT NULL
GROUP BY
`su`.`school_id`,
`su`.`problem_id`
HAVING
`first_ac_time` BETWEEN %s AND %s
) AS `distinct_school_problems`
ON
`distinct_school_problems`.`school_id` = `s`.`school_id`
WHERE
NOT EXISTS (
SELECT
`sotm`.`school_id`,
MAX(`time`) AS `latest_time`
FROM
`School_Of_The_Month` AS `sotm`
WHERE
`sotm`.`school_id` = `s`.`school_id`
AND (
`sotm`.`selected_by` IS NOT NULL OR
`sotm`.`ranking` = 1
)
GROUP BY
`sotm`.`school_id`
HAVING
DATE_ADD(`latest_time`, INTERVAL 1 YEAR) >= %s
)
GROUP BY
`s`.`school_id`
ORDER BY
`score` DESC
LIMIT 100;
''',
(
first_day_of_current_month,
first_day_of_next_month,
first_day_of_next_month
))
for index, row in enumerate(cur.fetchall()):
cur.execute('''
INSERT INTO
`School_Of_The_Month` (
`school_id`,
`time`,
`ranking`,
`score`
)
VALUES (
%s,
%s,
%s,
%s
);
''',
(
row['school_id'],
first_day_of_next_month,
index + 1,
row['score']
))
def update_coder_of_the_month_candidates(
cur: mysql.connector.cursor.MySQLCursorDict,
first_day_of_current_month: datetime.date,
category: str) -> None:
'''Updates the list of candidates to coder of the current month'''
logging.info('Updating the candidates to coder of the month...')
if first_day_of_current_month.month == 12:
first_day_of_next_month = datetime.date(
first_day_of_current_month.year + 1,
1,
1)
else:
first_day_of_next_month = datetime.date(
first_day_of_current_month.year,
first_day_of_current_month.month + 1,
1)
# First make sure there are not already selected coder of the month
cur.execute('''
SELECT
COUNT(*) AS `count`
FROM
`Coder_Of_The_Month`
WHERE
`time` = %s AND
`selected_by` IS NOT NULL AND
`category` = %s;
''', (first_day_of_next_month, category))
for row in cur.fetchall():
if row['count'] > 0:
logging.info('Skipping because already exist selected coder')
return
cur.execute('''
DELETE FROM
`Coder_Of_The_Month`
WHERE
`time` = %s AND
`category` = %s;
''',
(first_day_of_next_month, category))
if category == 'female':
gender_clause = " AND i.gender = 'female'"
else:
gender_clause = ""
sql = f'''
SELECT DISTINCT
IFNULL(i.user_id, 0) AS user_id,
i.username,
IFNULL(i.country_id, 'xx') AS country_id,
isc.school_id,
COUNT(ps.problem_id) ProblemsSolved,
IFNULL(SUM(ROUND(100 / LOG(2, ps.accepted+1) , 0)), 0) AS score,
IFNULL(
(
SELECT urc.classname FROM
User_Rank_Cutoffs urc
WHERE
urc.score <= (
SELECT
ur.score
FROM
User_Rank ur
WHERE
ur.user_id = i.user_id
)
ORDER BY
urc.percentile ASC
LIMIT
1
),
'user-rank-unranked'
) AS classname
FROM
(
SELECT DISTINCT
s.identity_id, s.problem_id
FROM
Submissions s
INNER JOIN
Runs r
ON
r.run_id = s.current_run_id
WHERE
r.verdict = 'AC' AND s.type= 'normal' AND
s.time >= %s AND s.time <= %s
) AS up
INNER JOIN
Problems ps ON
ps.problem_id = up.problem_id
AND ps.visibility >= 1
AND ps.quality_seal = 1
INNER JOIN
Identities i ON i.identity_id = up.identity_id
LEFT JOIN
Identities_Schools isc ON isc.identity_school_id =
i.current_identity_school_id
LEFT JOIN
(
SELECT
user_id,
MIN(ranking) best_ranking,
time,
selected_by
FROM
Coder_Of_The_Month
WHERE
category = %s
GROUP BY
user_id,
selected_by,
time
HAVING
best_ranking = 1
) AS cm on i.user_id = cm.user_id
WHERE
(cm.user_id IS NULL OR
DATE_ADD(cm.time, INTERVAL 1 YEAR) < %s) AND
i.user_id IS NOT NULL
{gender_clause}
GROUP BY
up.identity_id
ORDER BY
score DESC,
ProblemsSolved DESC
LIMIT 100;
'''
cur.execute(
sql,
(
first_day_of_current_month,
first_day_of_next_month,
category,
first_day_of_next_month,
))
for index, row in enumerate(cur.fetchall()):
cur.execute('''
INSERT INTO
`Coder_Of_The_Month` (
`user_id`,
`time`,
`ranking`,
`school_id`,
`category`,
`score`,
`problems_solved`
)
VALUES (
%s,
%s,
%s,
%s,
%s,
%s,
%s
);
''',
(
row['user_id'],
first_day_of_next_month,
index + 1,
row['school_id'],
category,
row['score'],
row['ProblemsSolved']
))
def update_users_stats(
cur: mysql.connector.cursor.MySQLCursorDict,
dbconn: mysql.connector.MySQLConnection,
date: datetime.date) -> None:
'''Updates all the information and ranks related to users'''
logging.info('Updating users stats...')
try:
try:
scores = update_user_rank(cur)
update_user_rank_cutoffs(cur, scores)
except: # noqa: bare-except
logging.exception('Failed to update user ranking')
raise
try:
update_author_rank(cur)
except: # noqa: bare-except
logging.exception('Failed to update authors ranking')
raise
# We update both the general rank and the author's rank in the same
# transaction since both are stored in the same DB table.
dbconn.commit()
try:
update_coder_of_the_month_candidates(cur, date, 'all')
dbconn.commit()
except: # noqa: bare-except
logging.exception(
'Failed to update candidates to coder of the month')
raise
try:
update_coder_of_the_month_candidates(cur, date, 'female')
dbconn.commit()
except: # noqa: bare-except
logging.exception(
'Failed to update candidates to coder of the month female')
raise
logging.info('Users stats updated')
except: # noqa: bare-except
logging.exception('Failed to update all users stats')
def update_schools_stats(
cur: mysql.connector.cursor.MySQLCursorDict,
dbconn: mysql.connector.MySQLConnection,
date: datetime.date) -> None:
'''Updates all the information and ranks related to schools'''
logging.info('Updating schools stats...')
try:
try:
update_schools_solved_problems(cur)
dbconn.commit()
except: # noqa: bare-except
logging.exception('Failed to update schools solved problems')
raise
try:
update_school_rank(cur)
dbconn.commit()
except: # noqa: bare-except
logging.exception('Failed to update school ranking')
raise
try:
update_school_of_the_month_candidates(cur, date)
dbconn.commit()
except: # noqa: bare-except
logging.exception(
'Failed to update candidates to school of the month')
raise
logging.info('Schools stats updated')
except: # noqa: bare-except
logging.exception('Failed to update all schools stats')
def main() -> None:
'''Main entrypoint.'''
parser = argparse.ArgumentParser(description=__doc__)
lib.db.configure_parser(parser)
lib.logs.configure_parser(parser)
parser.add_argument('--date',
type=_parse_date,
default=_default_date(),
help='The date the command should take as today')
args = parser.parse_args()
lib.logs.init(parser.prog, args)
logging.info('Started')
dbconn = lib.db.connect(args)
try:
with dbconn.cursor(buffered=True, dictionary=True) as cur:
update_problem_accepted_stats(cur)
update_users_stats(cur, dbconn.conn, args.date)
update_schools_stats(cur, dbconn.conn, args.date)
finally:
dbconn.conn.close()
logging.info('Done')
if __name__ == '__main__':
main()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
``` |
{
"source": "JorgeAGR/neuralpick",
"score": 3
} |
#### File: JorgeAGR/neuralpick/neuralpick.py
```python
import os
import argparse
from src.models import PickingModel, CheckingModel
from src.aux_funcs import read_Config
from src.phase_picker import Picker
from src.scanner import Scanner
def pick(model_type, args):
file_dir = args.file_path
if file_dir[-1] != '/':
file_dir += '/'
if model_type == 'p':
model = PickingModel(args.model_name)
Picker(file_dir, model, args.phase, overwrite=args.nooverwrite)
else:
print('ERROR: Invalid model type. Use a picking model.')
return
def check(model_type, args):
file_dir = args.file_path
if file_dir[-1] != '/':
file_dir += '/'
if model_type == 'c':
model = CheckingModel(args.model_name)
print('Coming soon!')
else:
print('ERROR: Invalid model type. Use a checking model.')
return
def scan(model_type, args):
file_dir = args.file_path
if file_dir[-1] != '/':
file_dir += '/'
if model_type == 'p':
model = PickingModel(args.model_name)
Scanner(file_dir, model, args.phase, args.begin, args.end, args.number)
else:
print('ERROR: Invalid model type. Use a picking model.')
return
def train(model_type, args):
if model_type == 'p':
model = PickingModel(args.model_name)
elif model_type == 'c':
model = CheckingModel(args.model_name)
else:
print('ERROR: Invalid model type. Assign a valid model type.')
return
if args.force:
model.trained = False
model.train_Model()
model.save_Model()
if model.trained:
print('Model already exists. Use -f option to force model training and overwrite previous model.')
return
parser = argparse.ArgumentParser(description='Software for training and deploying CNNs for seismic data quality checking and phase identification.')
subparsers = parser.add_subparsers()
parser_pick = subparsers.add_parser('pick',
help='Pick the main arrival of a seismic phase in the file header of seismic data using a trained CNN model.')
parser_pick.add_argument('file_path', help='Path to files to pick for.', type=str)
parser_pick.add_argument('phase', help='Seismic phase to pick for (case sensitive).', type=str)
parser_pick.add_argument('model_name', help='Name of the model (barring the .conf extension).', type=str)
parser_pick.add_argument('-no', '--nooverwrite',
help='Optional argument to prevent program from overwriting the input SAC files.',
action='store_false')
parser_pick.set_defaults(func=pick)
parser_check = subparsers.add_parser('check', help='Quality check seismograms using a CNN model.')
parser_check.add_argument('file_path', help='Path to files to quality check.', type=str)
parser_check.add_argument('phase', help='Seismic phase to quality check around (case sensitive).', type=str)
parser_check.add_argument('model_name', help='Name of the model (barring the .conf extension).', type=str)
parser_check.set_defaults(func=check)
parser_scan = subparsers.add_parser('scan', help='Scan a set time range from a seismic phase to find pre/postcursors.')
parser_scan.add_argument('file_path', help='Path to files to quality check.', type=str)
parser_scan.add_argument('phase', help='Seismic phase to quality check around (case sensitive).', type=str)
parser_scan.add_argument('begin', help='Start time from the main arrival in seconds.', type=float)
parser_scan.add_argument('end', help='End time from the main arrival in seconds.', type=float)
parser_scan.add_argument('model_name', help='Name of the model (barring the .conf extension).', type=str)
parser_scan.add_argument('-n' , '--number', help='Number of relevant predictions to consider', type=float, default=10)
parser_scan.set_defaults(func=scan)
parser_train = subparsers.add_parser('train', help='Train a new picking or checking model using a seismic dataset.')
parser_train.add_argument('model_name', help='Name of the model (barring the .conf extension).', type=str)
parser_train.add_argument('-f', '--force',
help='Optional argument to force the training and overwriting of an existing model.',
action='store_true')
parser_train.set_defaults(func=train)
args = parser.parse_args()
config = read_Config('models/conf/{}.conf'.format(args.model_name))
model_type = config['model_type'].lower()
args.func(model_type, args)
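# Example invocations (illustrative paths and model names; the model type is read from
# models/conf/<model_name>.conf):
#   python neuralpick.py train mymodel -f
#   python neuralpick.py pick /data/sac_files/ P mymodel
#   python neuralpick.py scan /data/sac_files/ P 0 100 mymodel -n 10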
```
#### File: neuralpick/src/models.py
```python
import os
from subprocess import call
import numpy as np
from tensorflow.keras.losses import Huber
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Dense, Flatten, Conv1D, MaxPooling1D, BatchNormalization, Input, UpSampling1D, Reshape
from tensorflow.keras.models import Model, Sequential, load_model
from tensorflow.keras.callbacks import CSVLogger, EarlyStopping, ModelCheckpoint
from tensorflow.keras.utils import Sequence
from time import time as clock
import obspy
from src.aux_funcs import check_String, read_Config
class ModelType:
def __init__(self, model_name):
config = read_Config('models/conf/{}.conf'.format(model_name))
self.model_type = config['model_type']
self.model_name = model_name
self.model_path = 'models/{}/'.format(self.model_name)
self.batch_size = config['batch_size']
self.epochs = config['epochs']
self.model_iters = config['model_iters']
self.test_split = float(config['test_split'])
self.debug = config['debug']
self.files_path = list(map(lambda x: x+'/' if (x[-1] != '/') else x, [config['files_path'],]))[0]
self.sample_rate = config['sample_rate']
self.th_arrival_var = config['theory_arrival_var']
self.arrival_var = config['pick_arrival_var']
self.window_before = config['window_before']
self.window_after = config['window_after']
self.number_shift = config['number_shift']
self.window_shift = config['window_shift']
try:
self.npz_path = config['temp_write_path'] + self.model_name + 'npz/'
except:
self.npz_path = self.model_path + 'npz/'
self.total_points = (self.window_before + self.window_after) * self.sample_rate
if self.model_name not in os.listdir('models/'):
for directory in [self.model_path, self.model_path+'train_logs/']:
os.mkdir(directory)
self.trained = self._check_Model()
return
def _train_Test_Split(self, idnum, seed=None):
npz_files = np.sort(os.listdir(self.npz_path.format(self.model_name)))
cutoff = int(len(npz_files) * (1-self.test_split))
np.random.seed(seed)
np.random.shuffle(npz_files)
train_npz_list = npz_files[:cutoff]
test_npz_list = npz_files[cutoff:]
np.savez(self.model_path+'train_logs/train_test_split{}'.format(idnum),
train=train_npz_list, test=test_npz_list)
return train_npz_list, test_npz_list
def _get_Callbacks(self, epochs):
stopper = EarlyStopping(monitor='val_loss', min_delta=0, #don't want early stop
patience=epochs, restore_best_weights=True)
# Include Checkpoint? CSVLogger?
return [stopper,]
def load_Model(self):
self.model = load_model(self.model_path + self.model_name + '.h5')
return
def save_Model(self):
if not self.trained:
self.model.save(self.model_path + self.model_name + '.h5')
return
def _check_Model(self):
if self.model_name + '.h5' in os.listdir(self.model_path):
return True
else:
return False
class PickingModel(ModelType):
def __init__(self, model_name):
super().__init__(model_name)
return
def __create_Train_Data(self):
'''
Iterates through the seismograms in the data directory, performs preprocessing,
creates a time window around the arrival time and randomly shifts it to augment
the data set. Saves the augmented data set as npz files for quicker loading in
the future. Meant for training/testing data.
'''
try:
os.mkdir(self.npz_path)
except:
pass
files = np.sort(os.listdir(self.files_path))
gen_whitespace = lambda x: ' '*len(x)
for f, file in enumerate(files):
if file+'.npz' in os.listdir(self.npz_path):
continue
else:
file = check_String(file)
print_string = 'File ' + str(f+1) + ' / ' + str(len(files)) + '...'
print('\r'+print_string, end=gen_whitespace(print_string))
try:
seismogram = obspy.read(self.files_path + file)
except:
continue
seismogram = seismogram[0].resample(self.sample_rate)
# Beginning time
b = seismogram.stats.sac['b']
# The beginning time may not be 0, so shift all attributes so that it is
shift = -b
b = b + shift
# End time
e = seismogram.stats.sac['e'] + shift
# Theoretical onset arrival time + shift
if self.th_arrival_var == self.arrival_var:
th_arrival = seismogram.stats.sac[self.arrival_var] + shift - np.random.rand() * 20
else:
th_arrival = seismogram.stats.sac[self.th_arrival_var] + shift
# Picked maximum arrival time + shift
arrival = seismogram.stats.sac[self.arrival_var] + shift
# Theoretical arrival may be something unruly, so assign some random
# shift from the picked arrival
if not (b < th_arrival < e):
th_arrival = arrival - 20 * np.random.rand()
amp = seismogram.data
time = seismogram.times()
# Shifts + 1 because we want a 0th shift + N random ones
rand_window_shifts = 2*np.random.rand(self.number_shift+1) - 1 # [-1, 1] interval
abs_sort = np.argsort(np.abs(rand_window_shifts))
rand_window_shifts = rand_window_shifts[abs_sort]
rand_window_shifts[0] = 0
seis_windows = np.zeros((self.number_shift+1, self.total_points, 1))
arrivals = np.zeros((self.number_shift+1, 1))
cut_time = np.zeros((self.number_shift+1, 1))
for i, n in enumerate(rand_window_shifts):
rand_arrival = th_arrival - n * self.window_shift
init = int(np.round((rand_arrival - self.window_before)*self.sample_rate))
end = init + self.total_points
if not (time[init] < arrival < time[end]):
init = int(np.round((arrival - 15 * np.random.rand() - self.window_before)*self.sample_rate))
end = init + self.total_points
amp_i = amp[init:end]
# Normalize by absolute peak, [-1, 1]
amp_i = amp_i / np.abs(amp_i).max()
seis_windows[i] = amp_i.reshape(self.total_points, 1)
arrivals[i] = arrival - time[init]
cut_time[i] = time[init]
np.savez(self.npz_path+'{}'.format(file),
seis=seis_windows, arrival=arrivals, cut=cut_time)
return
def __load_Data(self, npz_list, single=False, y_only=False):
if y_only:
arr_array = np.zeros((len(npz_list)*(self.number_shift+1)**(not single), 1))
if single:
for i, file in enumerate(npz_list):
npz = np.load(self.npz_path+file)
arr_array[i] = npz['arrival'][0]
else:
for i, file in enumerate(npz_list):
npz = np.load(self.npz_path+file)
arr_array[(self.number_shift+1)*i:(self.number_shift+1)*(i+1)] = npz['arrival']
return arr_array
else:
seis_array = np.zeros((len(npz_list)*(self.number_shift+1)**(not single), self.total_points, 1))
arr_array = np.zeros((len(npz_list)*(self.number_shift+1)**(not single), 1))
if single:
for i, file in enumerate(npz_list):
npz = np.load(self.npz_path+file)
seis_array[i] = npz['seis'][0]
arr_array[i] = npz['arrival'][0]
else:
for i, file in enumerate(npz_list):
npz = np.load(self.npz_path+file)
seis_array[(self.number_shift+1)*i:(self.number_shift+1)*(i+1)] = npz['seis']
arr_array[(self.number_shift+1)*i:(self.number_shift+1)*(i+1)] = npz['arrival']
return seis_array, arr_array
def train_Model(self):
if self.trained:
return
if self.debug:
self.epochs=10
self.model_iters=1
self.__create_Train_Data()
models = []
#models_train_means = np.zeros(self.model_iters)
#models_train_stds = np.zeros(self.model_iters)
models_test_means = np.zeros(self.model_iters)
models_test_stds = np.zeros(self.model_iters)
models_test_final_loss = np.zeros(self.model_iters)
models_train_lpe = np.zeros((self.model_iters, self.epochs))
models_test_lpe = np.zeros((self.model_iters, self.epochs))
tick = clock()
for m in range(self.model_iters):
print('Training arrival prediction model', m+1)
model = self.__rossNet()
callbacks = self._get_Callbacks(self.epochs)
train_files, test_files = self._train_Test_Split(m)
'''
train_x, train_y = self.__load_Data(train_files)
test_x, test_y = self.__load_Data(test_files, single=True)
train_hist = model.fit(train_x, train_y,
validation_data=(test_x, test_y),
batch_size=self.batch_size,
epochs=self.epochs,
verbose=2,
callbacks=callbacks)
'''
train_generator = PickingDataGenerator(self.npz_path, train_files,
self.total_points, self.number_shift,
self.batch_size)
test_generator = PickingDataGenerator(self.npz_path, test_files, self.total_points,
self.number_shift, self.batch_size, single=True)
train_hist = model.fit(train_generator,
validation_data=test_generator,
callbacks=callbacks,
verbose=2,)
#use_multiprocessing=True,
#workers=6,)
total_epochs = len(train_hist.history['loss'])
'''
train_pred = model.predict(train_x)
test_pred = model.predict(test_x)
test_loss = model.evaluate(test_x, test_y,
batch_size=self.batch_size, verbose=0)
'''
test_y = self.__load_Data(test_files, single=True, y_only=True)
test_pred = model.predict(test_generator)
test_loss = model.evaluate(test_generator, verbose=0)
#model_train_diff = np.abs(train_y - train_pred)
model_test_diff = np.abs(test_y - test_pred)
#model_train_mean = np.mean(model_train_diff)
#model_train_std = np.std(model_train_diff)
model_test_mean = np.mean(model_test_diff)
model_test_std = np.std(model_test_diff)
#print('Train Error:{:.3f} +/- {:.3f}'.format(model_train_mean, model_train_std))
print('Test Error: {:.3f} +/- {:.3f}'.format(model_test_mean, model_test_std))
print('Test Loss: {:.3f}'.format(test_loss))
models.append(model)
#models_train_means[m] += model_train_mean
#models_train_stds[m] += model_train_std
models_test_means[m] += model_test_mean
models_test_stds[m] += model_test_std
models_test_final_loss[m] += test_loss
models_train_lpe[m][:total_epochs] = train_hist.history['loss']
models_test_lpe[m][:total_epochs] = train_hist.history['val_loss']
#best_model = np.argmin(models_means)
tock = clock()
train_time = (tock-tick)/3600 # hours
best_model = np.argmin(models_test_final_loss)
with open(self.model_path + 'train_logs/{}_log.txt'.format(self.model_name), 'w+') as log:
print('\nUsing best model: Model {}\n'.format(best_model), file=log)
print('Best Model Results:', file=log)
#print('Training Avg Diff: {:.3f}'.format(models_train_means[best_model]), file=log)
#print('Training Avg Diff Uncertainty: {:.3f}'.format(models_train_stds[best_model]), file=log)
print('Testing Avg Diff: {:.3f}'.format(models_test_means[best_model]), file=log)
print('Testing Avg Diff Uncertainty: {:.3f}'.format(models_test_stds[best_model]), file=log)
print('Test Loss: {:.3f}'.format(models_test_final_loss[best_model]), file=log)
print('Total Training Time: {:.2f} hrs'.format(train_time), file=log)
print('\n')
if self.debug:
print('\nmodel saved at this point in no debug', file=log)
return
self.model = models[best_model]
np.savez(self.model_path + 'train_logs/{}_train_history'.format(self.model_name),
loss=models_train_lpe, val_loss=models_test_lpe, best_model=best_model, train_time=train_time)
call(['rm','-r',self.npz_path])
return
def __rossNet(self):
'''
Notes
------------
Ref: https://doi.org/10.1029/2017JB015251
'''
model = Sequential()
model.add(Conv1D(32, 21, activation='relu',))
model.add(BatchNormalization())
model.add(MaxPooling1D(pool_size=2))
model.add(Conv1D(64, 15, activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling1D(pool_size=2))
model.add(Conv1D(128, 11, activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(512, activation='relu'))
model.add(Dense(1, activation='linear'))
model.compile(loss=Huber(),
optimizer=Adam())
return model
class CheckingModel(ModelType):
def __init__(self, model_name):
super().__init__(model_name)
config = read_Config('models/conf/{}.conf'.format(model_name))
self.compression_size = config['compression_size']
return
def __create_Train_Data(self):
try:
os.mkdir(self.npz_path)
except:
pass
files = np.sort(os.listdir(self.files_path))
gen_whitespace = lambda x: ' '*len(x)
for f, file in enumerate(files):
if file+'.npz' in os.listdir(self.npz_path):
continue
else:
file = check_String(file)
print_string = 'File ' + str(f+1) + ' / ' + str(len(files)) + '...'
print('\r'+print_string, end=gen_whitespace(print_string))
try:
seismogram = obspy.read(self.files_path + file)
except:
continue
seismogram = seismogram[0].resample(self.sample_rate)
# Beginning time
b = seismogram.stats.sac['b']
# The beginning time may not be 0, so shift all attributes so that it is
shift = -b
b = b + shift
# End time
e = seismogram.stats.sac['e'] + shift
# Theoretical onset arrival time + shift
if self.th_arrival_var == self.arrival_var:
th_arrival = seismogram.stats.sac[self.arrival_var] + shift - np.random.rand() * 20
else:
th_arrival = seismogram.stats.sac[self.th_arrival_var] + shift
# Picked maximum arrival time + shift
arrival = seismogram.stats.sac[self.arrival_var] + shift
# Theoretical arrival may be something unruly, so assign some random
# shift from the picked arrival
if not (b < th_arrival < e):
th_arrival = arrival - 20 * np.random.rand()
amp = seismogram.data
time = seismogram.times()
# Shifts + 1 because we want a 0th shift + N random ones
rand_window_shifts = 2*np.random.rand(self.number_shift+1) - 1 # [-1, 1] interval
abs_sort = np.argsort(np.abs(rand_window_shifts))
rand_window_shifts = rand_window_shifts[abs_sort]
rand_window_shifts[0] = 0
seis_windows = np.zeros((self.number_shift+1, self.total_points, 1))
for i, n in enumerate(rand_window_shifts):
rand_arrival = th_arrival - n * self.window_shift
init = int(np.round((rand_arrival - self.window_before)*self.sample_rate))
end = init + self.total_points
if (end-init < self.total_points):
init = init - (self.total_points - (end-init))
# init = int(np.round((arrival - 15 * np.random.rand() - self.window_before)*self.sample_rate))
# end = init + self.total_points
amp_i = amp[init:end]
# Normalize by absolute peak, [-1, 1]
amp_i = amp_i / np.abs(amp_i).max()
seis_windows[i] = amp_i.reshape(self.total_points, 1)
np.savez(self.npz_path+'{}'.format(file), seis=seis_windows)
return
def __load_Data(self, npz_list, single=False):
input_array = np.zeros((len(npz_list)*(self.number_shift+1)**(not single), self.total_points, 1))
output_array = np.zeros((len(npz_list)*(self.number_shift+1)**(not single), self.total_points, 1))
if single:
for i, file in enumerate(npz_list):
npz = np.load(self.npz_path+file)
input_array[i] = npz['seis'][0]
output_array[i] = npz['seis'][0]
else:
for i, file in enumerate(npz_list):
npz = np.load(self.npz_path+file)
input_array[(self.number_shift+1)*i:(self.number_shift+1)*(i+1)] = npz['seis']
output_array[(self.number_shift+1)*i:(self.number_shift+1)*(i+1)] = npz['seis']
return input_array, output_array
def train_Model(self):
if self.trained:
return
if self.debug:
self.epochs=10
self.model_iters=1
self.__create_Train_Data()
models = []
models_test_final_loss = np.zeros(self.model_iters)
models_train_lpe = np.zeros((self.model_iters, self.epochs))
models_test_lpe = np.zeros((self.model_iters, self.epochs))
tick = clock()
for m in range(self.model_iters):
print('Training arrival prediction model', m+1)
model = self.__rossNetAE(self.compression_size)
callbacks = self._get_Callbacks(self.epochs)
train_files, test_files = self._train_Test_Split(m)
'''
train_x, train_y = self.__load_Data(train_files)
test_x, test_y = self.__load_Data(test_files)
train_hist = model.fit(train_x, train_y,
validation_data=(test_x, test_y),
batch_size=self.batch_size,
epochs=self.epochs,
verbose=2,
callbacks=callbacks)
'''
train_generator = CheckingDataGenerator(self.npz_path, train_files,
self.total_points, self.batch_size)
test_generator = CheckingDataGenerator(self.npz_path, test_files, self.total_points,
self.batch_size)
train_hist = model.fit(train_generator,
validation_data=test_generator,
callbacks=callbacks,
verbose=2,)
#use_multiprocessing=True,
#workers=6,)
total_epochs = len(train_hist.history['loss'])
'''
train_pred = model.predict(train_x)
test_pred = model.predict(test_x)
test_loss = model.evaluate(test_x, test_y,
batch_size=self.batch_size, verbose=0)
'''
#train_pred = model.predict(train_generator)
#test_pred = model.predict(test_generator)
test_loss = model.evaluate(test_generator, verbose=0)
#model_train_diff = np.abs(train_y - train_pred)
#model_test_diff = np.abs(test_y - test_pred)
#model_train_mean = np.mean(model_train_diff)
#model_train_std = np.std(model_train_diff)
#model_test_mean = np.mean(model_test_diff)
#model_test_std = np.std(model_test_diff)
#print('Train Error: {:.3f} +/- {:.3f}'.format(model_train_mean, model_train_std))
#print('Test Error: {:.3f} +/- {:.3f}'.format(model_test_mean, model_test_std))
print('Test Loss: {:.3f}'.format(test_loss))
models.append(model)
#models_train_means[m] += model_train_mean
#models_train_stds[m] += model_train_std
#models_test_means[m] += model_test_mean
#models_test_stds[m] += model_test_std
models_test_final_loss[m] += test_loss
models_train_lpe[m][:total_epochs] = train_hist.history['loss']
models_test_lpe[m][:total_epochs] = train_hist.history['val_loss']
#best_model = np.argmin(models_means)
tock = clock()
train_time = (tock-tick)/3600 # hours
best_model = np.argmin(models_test_final_loss)
with open(self.model_path + 'train_logs/{}_log.txt'.format(self.model_name), 'w+') as log:
print('\nUsing best model: Model {}\n'.format(best_model), file=log)
print('Best Model Results:', file=log)
#print('Training Avg Diff: {:.3f}'.format(models_train_means[best_model]), file=log)
#print('Training Avg Diff Uncertainty: {:.3f}'.format(models_train_stds[best_model]), file=log)
#print('Testing Avg Diff: {:.3f}'.format(models_test_means[best_model]), file=log)
#print('Testing Avg Diff Uncertainty: {:.3f}'.format(models_test_stds[best_model]), file=log)
print('Test Loss: {:.3f}'.format(models_test_final_loss[best_model]), file=log)
print('Total Training Time: {:.2f} hrs'.format(train_time), file=log)
print('\n')
if self.debug:
print('\nmodel saved at this point in no debug', file=log)
return
self.model = models[best_model]
np.savez(self.model_path + 'train_logs/{}_train_history'.format(self.model_name),
loss=models_train_lpe, val_loss=models_test_lpe, best_model=best_model, train_time=train_time)
call(['rm','-r',self.npz_path])
return
def __rossNetAE(self, compression_size):
'''
Notes
------------
Main architecture idea:
Ref: https://doi.org/10.1029/2017JB015251
'''
input_seis = Input(shape=(self.total_points, 1))
conv1 = Conv1D(32, kernel_size=21, strides=1,
activation='relu', padding='same')(input_seis)
bn1 = BatchNormalization()(conv1)
max1 = MaxPooling1D(pool_size=2)(bn1)
conv2 = Conv1D(64, kernel_size=15, strides=1,
activation='relu', padding='same')(max1)
bn2 = BatchNormalization()(conv2)
max2 = MaxPooling1D(pool_size=2)(bn2)
conv3 = Conv1D(128, kernel_size=11, strides=1,
activation='relu', padding='same')(max2)
bn3 = BatchNormalization()(conv3)
max3 = MaxPooling1D(pool_size=2)(bn3)
flattened = Flatten()(max3)
encoding = Dense(compression_size, activation='sigmoid')(flattened)
expanded = Dense(max3.shape.as_list()[1] * max3.shape.as_list()[2], activation='relu')(encoding)
reshaped = Reshape(max3.shape.as_list()[1:])(expanded)
up1 = UpSampling1D(size=2)(reshaped)
bn_up1 = BatchNormalization()(up1)
conv_up1 = Conv1D(128, kernel_size=11, strides=1,
activation='relu', padding='same')(bn_up1)
up2 = UpSampling1D(size=2)(conv_up1)
bn_up2 = BatchNormalization()(up2)
conv_up2 = Conv1D(64, kernel_size=15, strides=1,
activation='relu', padding='same')(bn_up2)
up3 = UpSampling1D(size=2)(conv_up2)
bn_up3 = BatchNormalization()(up3)
conv_up3 = Conv1D(32, kernel_size=21, strides=1,
activation='relu', padding='same')(bn_up3)
# sigmoid? or tanh? or maybe something else
decoding = Conv1D(1, kernel_size=21, strides=1,
activation='linear', padding='same')(conv_up3)
model = Model(input_seis, decoding)
model.compile(loss='mean_absolute_error',
optimizer=Adam(1e-4))
return model
class CheckingDataGenerator(Sequence):
'''
Based on an implementation by <NAME>
https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly
'''
def __init__(self, npy_path, list_IDs, seismo_size, batch_size=128, n_channels=1, shuffle=True):
'Initialization'
self.path = npy_path
self.dim = (1, seismo_size, 1)
self.batch_size = batch_size
self.list_IDs = list_IDs
self.n_channels = n_channels
self.shuffle = shuffle
self.on_epoch_end()
self.seismo_size = seismo_size
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.floor(len(self.list_IDs) / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Find list of IDs
list_IDs_temp = [self.list_IDs[k] for k in indexes]
# Generate data
x, y = self.__data_generation(list_IDs_temp)
return x, y
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(len(self.list_IDs))
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, list_IDs_temp):
'Generates data containing batch_size samples'
# Initialization
x = np.zeros((self.batch_size, self.seismo_size, self.n_channels))
# Generate data
for i, ID in enumerate(list_IDs_temp):
x[i,] = np.load(self.path + ID)['seis']
return x, x
class PickingDataGenerator(Sequence):
'''
Based on an implementation by <NAME>
https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly
'''
def __init__(self, npy_path, list_IDs, seismo_size, number_shifts, batch_size, n_channels=1, single=False, shuffle=True):
'Initialization'
self.path = npy_path
self.single = single
#self.dim = (1, seismo_size, 1)
self.batch_size = batch_size
self.list_IDs = list_IDs
self.n_channels = n_channels
self.shuffle = shuffle
self.seismo_size = seismo_size
self.number_shifts = number_shifts
if not self.single:
self.list_IDs = self.gen_Variations()
self.on_epoch_end()
def gen_Variations(self):
list_IDs_temp = []
for i in range(len(self.list_IDs)):
for j in range(self.number_shifts+1):
list_IDs_temp.append(self.list_IDs[i]+str(j))
return list_IDs_temp
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.floor(len(self.list_IDs) / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Find list of IDs
list_IDs_temp = [self.list_IDs[k] for k in indexes]
# Generate data
x, y = self.__data_generation(list_IDs_temp)
return x, y
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(len(self.list_IDs))
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, list_IDs_temp):
'Generates data containing batch_size samples'
# Initialization
x = np.zeros((self.batch_size, self.seismo_size, self.n_channels))
y = np.zeros((self.batch_size, 1))
# Generate data
if not self.single:
for i, ID in enumerate(list_IDs_temp):
x[i,] = np.load(self.path + ID[:-1])['seis'][int(ID[-1])]
y[i,] = np.load(self.path + ID[:-1])['arrival'][int(ID[-1])]
else:
for i, ID in enumerate(list_IDs_temp):
x[i,] = np.load(self.path + ID)['seis'][0]
y[i,] = np.load(self.path + ID)['arrival'][0]
return x, y
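# Minimal usage sketch (assumes a config file models/conf/mypicker.conf with the keys read in
# ModelType.__init__ and SAC files under its 'files_path'):
# model = PickingModel('mypicker')
# model.train_Model()
# model.save_Model()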
``` |
{
"source": "jorgeajimenezl/qvapay-python",
"score": 2
} |
#### File: qvapay/v1/auth.py
```python
from dataclasses import dataclass, field
from os import environ
from dotenv import load_dotenv
from .errors import QvaPayError
load_dotenv()
@dataclass
class QvaPayAuth:
# Use .get so a missing variable falls through to the checks in __post_init__
# instead of raising KeyError at import time.
qvapay_app_id: str = field(default=environ.get("QVAPAY_APP_ID", ""))
qvapay_app_secret: str = field(default=environ.get("QVAPAY_APP_SECRET", ""))
def __post_init__(self):
if not self.qvapay_app_id and not self.qvapay_app_secret:
raise QvaPayError(0, "QVAPAY_APP_ID and QVAPAY_APP_SECRET are not set")
elif not self.qvapay_app_id:
raise QvaPayError(0, "QVAPAY_APP_ID is not set")
elif not self.qvapay_app_secret:
raise QvaPayError(0, "QVAPAY_APP_SECRET is not set")
```
#### File: v1/models/info.py
```python
from dataclasses import dataclass
from datetime import datetime
from typing import Any
from uuid import UUID
from dateutil.parser import parse
@dataclass
class Info:
"""
QvaPay app info
"""
id: UUID # alias: uuid
user_id: int
name: str
url: str # AnyUrl
description: str # alias: desc
callback: str
success_url: str # AnyUrl
cancel_url: str # AnyUrl
logo: str
active: bool
enabled: bool
card: int
created_at: datetime
updated_at: datetime
def __post_init__(self):
self.id = UUID(str(self.id))
self.user_id = int(str(self.user_id))
self.name = str(self.name)
self.url = str(self.url)
self.description = str(self.description)
self.callback = str(self.callback)
self.success_url = str(self.success_url)
self.cancel_url = str(self.cancel_url)
self.logo = str(self.logo)
self.active = bool(str(self.active))
self.enabled = bool(str(self.enabled))
self.card = int(str(self.card))
self.created_at = parse(str(self.created_at))
self.updated_at = parse(str(self.updated_at))
@staticmethod
def from_json(json: Any) -> "Info":
json["id"] = json["uuid"]
json["description"] = json["desc"]
del json["uuid"]
del json["desc"]
return Info(**json)
```
#### File: v1/models/paid_by.py
```python
from dataclasses import dataclass
from typing import Any
@dataclass
class PaidBy:
username: str
name: str
logo: str
def __post_init__(self):
self.username = str(self.username)
self.name = str(self.name)
self.logo = str(self.logo)
@staticmethod
def from_json(json: Any) -> "PaidBy":
return PaidBy(**json)
```
#### File: v1/models/transaction.py
```python
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Optional
from uuid import UUID
from dateutil.parser import parse
@dataclass
class Transaction:
"""
QvaPay transaction
"""
id: UUID # alias: uuid
user_id: int
app_id: int
amount: float
description: str
remote_id: str
status: str
paid_by_user_id: int
created_at: datetime
updated_at: datetime
signed: Optional[int]
def __post_init__(self):
self.id = UUID(str(self.id))
self.user_id = int(str(self.user_id))
self.app_id = int(str(self.app_id))
self.amount = float(str(self.amount))
self.description = str(self.description)
self.remote_id = str(self.remote_id)
self.status = str(self.status)
self.paid_by_user_id = int(str(self.paid_by_user_id))
self.created_at = parse(str(self.created_at))
self.updated_at = parse(str(self.updated_at))
self.signed = int(str(self.signed)) if self.signed is not None else None
@staticmethod
def from_json(json: Any) -> "Transaction":
json["id"] = json["uuid"]
del json["uuid"]
# Pop 'signed' first so it is not passed twice (via **json and as an explicit keyword).
signed = json.pop("signed", None)
return Transaction(**json, signed=signed)
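# Minimal usage sketch (hypothetical payload; keys mirror the dataclass fields above):
# tx = Transaction.from_json({
#     'uuid': '00000000-0000-0000-0000-000000000000', 'user_id': 1, 'app_id': 2,
#     'amount': '25.60', 'description': 'test', 'remote_id': 'order-1', 'status': 'pending',
#     'paid_by_user_id': 0, 'created_at': '2021-01-10T04:35:51Z', 'updated_at': '2021-01-10T04:35:51Z',
# })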
``` |
{
"source": "Jorge-Alda/circlecorr",
"score": 3
} |
#### File: Jorge-Alda/circlecorr/circlecorr.py
```python
import numpy as np
import matplotlib.pyplot as plt
def interp(ni, nj, N):
'''
Returns an interpolation function from bin ni to bin nj (where N is the total number of bins) in polar coordinates
See https://calculus7.org/2013/04/20/peanut-allergy-and-polar-interpolation/
'''
n1 = min(ni, nj)
n2 = max(ni, nj)
Nm = int(N/2-0.0001)
if (n2-n1 < N/2):
r = ((Nm+1-n2+n1)/(Nm+1))**(-2)
delta = np.pi/N*(n2-n1)
theta0 = 2*n1*np.pi/N+delta
elif (n2-n1 > N/2):
r = ((Nm+1+n2-n1-N)/(Nm+1))**(-2)
delta = np.pi/N*(N-n2+n1)
theta0 = 2*n1*np.pi/N - delta
else:
r = 1e10 #yeah, I don't know why this works :O
delta = np.pi/N*(n2-n1)
theta0 = 2*n1*np.pi/N+delta
return lambda x: (r + (1-r)*np.sin(x-theta0)**2/np.sin(delta)**2)**(-0.5)
def cov2corr(cov):
'''
Computes the correlation matrix from the covariance matrix
'''
return np.diag((np.diag(cov))**(-0.5)) @ cov @ np.diag((np.diag(cov))**(-0.5))
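# Worked example (hypothetical 2x2 covariance): for cov = [[4, 2], [2, 9]] the standard
# deviations are 2 and 3, so cov2corr returns [[1, 1/3], [1/3, 1]], since corr_ij = cov_ij/(s_i*s_j).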
def circlecorr(cov, R=10, maxlw=5, labels=None):
'''
Circular plot for the correlation matrix.
The bins represent the variance of each variable,
and the lines the correlation coefficient:
the width of the line corresponds to the absolute value of the correlation coefficient.
Blue lines indicate negative correlation,
Red lines indicate positive correlation.
Function arguments:
- cov: Covariance matrix.
- R: Radius of the circle that contains the base of the bins.
- maxlw: Linewidth of the lines of correlation = +-1
- labels: list of labels for each bin
'''
N = cov.shape[0]
corr = cov2corr(cov)
theta = np.linspace(0, 2*np.pi, N, endpoint=False)
width = 2*np.pi/(N*1.2)
ax = plt.subplot(111, polar= True)
ax.set_xticks(np.linspace(0, 2*np.pi, 6, endpoint=False))
if labels is not None:
ax.set_xticklabels(labels)
ax.set_yticklabels([])
bars = ax.bar(theta, np.diag(cov), width=width, bottom=R)
for i in range(N):
for j in range(i+1, N):
if (j-i <= N/2):
lin =np.linspace( 2*i*np.pi/N, 2*j*np.pi/N, 100 )
else:
lin =np.linspace( 2*j*np.pi/N, 2*(i+N)*np.pi/N, 100 )
int1 = interp(i,j,N)
if corr[i,j] > 0:
c = 'red'
else:
c = 'blue'
plt.plot(lin, R*int1(lin), c=c, lw=maxlw*abs(corr[i,j]))
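# Minimal usage sketch (random positive semi-definite covariance; labels are illustrative):
# A = np.random.randn(6, 6)
# circlecorr(A @ A.T, R=10, maxlw=5, labels=['a', 'b', 'c', 'd', 'e', 'f'])
# plt.show()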
``` |
{
"source": "Jorge-Alda/imagWC",
"score": 2
} |
#### File: imagWC/src/bigfit.py
```python
from math import pi, sqrt
from cmath import phase
from wilson import Wilson
import flavio
import flavio.statistics.fits
from flavio.physics.running.running import get_alpha
from flavio.physics import ckm
from flavio.classes import Observable, Prediction, Measurement, Parameter
from flavio.statistics.probability import NormalDistribution
from flavio.statistics.functions import pull
import yaml
from iminuit import Minuit
import numpy as np
par = flavio.default_parameters.get_central_all()
GF = par['GF']
sq2 = sqrt(2)
DMs_SM = Parameter('Delta M_S')
flavio.default_parameters.set_constraint('Delta M_S', '20.01 ± 1.25')
def myDeltaMS(wc_obj, par):
DMs_SM = par['Delta M_S']
if wc_obj.wc is None:
return DMs_SM
else:
Cbs = - sq2/(4*GF*ckm.xi('t', 'bs')(par)**2)*wc_obj.wc['CVLL_bsbs']
return DMs_SM*abs(1+Cbs/(1.3397e-3))
Observable('DMs')
Prediction('DMs', myDeltaMS)
m = Measurement('DMs exp')
m.add_constraint(['DMs'], NormalDistribution(17.757, 0.021) )
m2 = Measurement('SLAC HFLAV 2018')
m2.add_constraint(['S_psiphi'], NormalDistribution(0.021, 0.030144582822607187))
def wc_Z(lambdaQ, MZp):
"Wilson coefficients as functions of Z' couplings"
if MZp < 100:
return {}
alpha = get_alpha(par, MZp)['alpha_e']
return {
'C9_bsmumu': -pi/(sq2* GF * MZp**2 *alpha) * lambdaQ/ckm.xi('t', 'bs')(par),
'C10_bsmumu': pi/(sq2* GF * MZp**2 *alpha) * lambdaQ/ckm.xi('t', 'bs')(par),
'CVLL_bsbs': - 0.5* (lambdaQ/MZp )**2 #flavio defines the effective Lagrangian as L = CVLL (bbar gamma d)^2, with NO prefactors
}
def wc_LQ(yy, MS):
"Wilson coefficients as functions of Leptoquark couplings"
if MS < 100:
return {}
alpha = get_alpha(par, MS)['alpha_e']
return {
'C9_bsmumu': pi/(sq2* GF * MS**2 *alpha) * yy/ckm.xi('t', 'bs')(par),
'C10_bsmumu': -pi/(sq2* GF * MS**2 *alpha) * yy/ckm.xi('t', 'bs')(par),
'CVLL_bsbs': - 0.5* (yy/MS)**2 * 5 /(64*pi**2) #flavio defines the effective Lagrangian as L = CVLL (bbar gamma d)^2, with NO prefactors
}
def C9mu(cr, ci):
return Wilson({'C9_bsmumu': cr + 1j*ci}, scale=4.2, eft='WET', basis='flavio')
def C10mu(cr, ci):
return Wilson({'C10_bsmumu': cr + 1j*ci}, scale=4.2, eft='WET', basis='flavio')
def C910mu(cr, ci):
return Wilson({'C9_bsmumu': cr + 1j*ci, 'C10_bsmumu': -cr-1j*ci}, scale=4.2, eft='WET', basis='flavio')
def C9pmu(cr, ci):
return Wilson({'C9p_bsmumu': cr + 1j*ci}, scale=4.2, eft='WET', basis='flavio')
def C10pmu(cr, ci):
return Wilson({'C10p_bsmumu': cr + 1j*ci}, scale=4.2, eft='WET', basis='flavio')
def C9e(cr, ci):
return Wilson({'C9_bsee': cr + 1j*ci}, scale=4.2, eft='WET', basis='flavio')
def C10e(cr, ci):
return Wilson({'C10_bsee': cr + 1j*ci}, scale=4.2, eft='WET', basis='flavio')
def C910e(cr, ci):
return Wilson({'C9_bsee': cr + 1j*ci, 'C10_bsee': -cr-1j*ci}, scale=4.2, eft='WET', basis='flavio')
def C9pe(cr, ci):
return Wilson({'C9p_bsee': cr + 1j*ci}, scale=4.2, eft='WET', basis='flavio')
def C10pe(cr, ci):
return Wilson({'C10p_bsee': cr + 1j*ci}, scale=4.2, eft='WET', basis='flavio')
def CRe910(c9, c10):
return {'C9_bsmumu': c9, 'C10_bsmumu':c10}
def CIm910(c9, c10):
return {'C9_bsmumu': c9*1j, 'C10_bsmumu':c10*1j}
def C910Im(c9):
return {'C9_bsmumu': c9*1j, 'C10_bsmumu':c9*1j}
wcs = [C9mu, C10mu, C910mu, C9pmu, C10pmu, C9e, C10e, C910e, C9pe, C10pe]
all_measurements=['LHCb Bs->mumu 2017', 'CMS Bs->mumu 2013', 'LHCb RK* 2017', 'LHCb B->Kee 2014', 'LHCb B->K*mumu 2015 P 1.1-6', 'ATLAS B->K*mumu 2017 P4p', 'ATLAS B->K*mumu 2017 P5p', 'HFAG osc summer 2015', 'CMS B->K*mumu 2017 P5p', 'LHCb B->K*mumu 2015 P 0.1-0.98', 'LHCb B->K*mumu 2015 P 1.1-2.5', 'LHCb B->K*mumu 2015 P 2.5-4', 'LHCb B->K*mumu 2015 P 4-6', 'LHCb B->K*mumu 2015 P 6-8', 'LHCb B->K*mumu 2015 P 11-12.5', 'LHCb B->K*mumu 2015 P 15-17', 'LHCb B->K*mumu 2015 P 17-19', 'LHCb B->K*mumu 2015 P 15-19', 'DMs exp', 'HFAG UT summer 2015', 'SLAC HFLAV 2018']
bins_P4ps_P5ps_LHCb=[( 0.1 , 0.98 ), ( 1.1 , 2.5 ), ( 2.5 , 4. ), ( 4. , 6. ), ( 6. , 8. ), ( 11. , 12.5 ), ( 15. , 17. ), ( 17. , 19. ), ( 1.1 , 6. ), ( 15. , 19. )]
bins_P5ps_CMS=[( 1 , 2), ( 2 , 4.3), ( 4.3 , 6), ( 6 , 8.68), ( 10.09 , 12.86 ), ( 14.18 , 16 ), ( 16, 19 )]
bins_P4ps_P5ps_ATLAS=[( 0.04 , 2), ( 2 , 4), ( 4 , 6), ( 0.04 , 4), ( 1.1 , 6 ), ( 0.04 , 6 )]
bins_P4ps_P5ps_LHCb=[x for x in bins_P4ps_P5ps_LHCb if x[1]<=8.7 or x[0]>=14]
bins_P5ps_CMS=[x for x in bins_P5ps_CMS if x[1]<=8.7 or x[0]>=14]
bins_P4ps_P5ps_ATLAS=[x for x in bins_P4ps_P5ps_ATLAS if x[1]<=8.7 or x[0]>=14]
observables = []
for x in bins_P4ps_P5ps_LHCb + bins_P5ps_CMS + bins_P4ps_P5ps_ATLAS:
observables += [( '<P5p>(B0->K*mumu)', ) + x]
for x in bins_P4ps_P5ps_LHCb + bins_P4ps_P5ps_ATLAS:
observables += [( '<P4p>(B0->K*mumu)', ) + x]
#observables += ['BR(B0->mumu', 'DMs', 'S_psiphi', ('<Rmue>(B0->K*ll)', 0.045, 1.1), ('<Rmue>(B0->K*ll)', 1.1, 6.0), ('<Rmue>(B+->Kll)', 1.0, 6.0) , 'BR(Bs->mumu)' ]
observables += [('<Rmue>(B0->K*ll)', 0.045, 1.1), ('<Rmue>(B0->K*ll)', 1.1, 6.0), ('<Rmue>(B+->Kll)', 1.0, 6.0)]
observables = list(set(observables))
def save_observables(filename):
obslist = []
for j in all_measurements:
meas1=flavio.Measurement[j].get_central_all()
for k in meas1.keys():
if k in observables:
exp = meas1[k]
err_exp = flavio.Measurement[j].get_1d_errors_random()[k]
if isinstance(k, tuple):
err_th = flavio.sm_uncertainty(k[0], q2min=k[1], q2max=k[2])
else:
err_th = flavio.sm_uncertainty(k)
err = sqrt(err_th**2 + err_exp**2)
obslist.append({'obs': k, 'central': exp, 'error': err})
f = open(filename, 'w')
yaml.dump(obslist, f)
f.close()
def read_observables(filename):
global obslist
global nobs
f = open(filename, 'r')
obslist = yaml.load(f)
f.close()
nobs = len(obslist)
def chi2(wc=None):
chi = 0
chiM = 0
chiACP = 0
for i in range(0, nobs):
if isinstance(obslist[i]['obs'], tuple):
if wc is None:
th = flavio.sm_prediction(obslist[i]['obs'][0], q2min=obslist[i]['obs'][1], q2max=obslist[i]['obs'][2])
else:
th = flavio.np_prediction(obslist[i]['obs'][0], wc_obj=wc, q2min=obslist[i]['obs'][1], q2max=obslist[i]['obs'][2])
else:
if wc is None:
th = flavio.sm_prediction(obslist[i]['obs'])
else:
th = flavio.np_prediction(obslist[i]['obs'], wc_obj=wc)
chiterm = (th - obslist[i]['central'])**2/obslist[i]['error']**2
chi += chiterm
if obslist[i]['obs'] == 'DMs':
chiM += chiterm
if obslist[i]['obs'] == 'S_psiphi':
chiACP += chiterm
return (chi-chiM-chiACP, chiM, chiACP, chi)
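# Minimal usage sketch (illustrative couplings): evaluate the chi^2 split for a Z' point.
# read_observables('observables_ZLQ.yaml')
# wcobj = Wilson(wc_Z(1e-3, 5000), scale=5000, eft='WET', basis='flavio')
# chi_rest, chi_dms, chi_acp, chi_total = chi2(wcobj)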
def chi2_budget(wc=None):
chi = []
for i in range(0, nobs):
if isinstance(obslist[i]['obs'], tuple):
if wc is None:
th = flavio.sm_prediction(obslist[i]['obs'][0], q2min=obslist[i]['obs'][1], q2max=obslist[i]['obs'][2])
else:
th = flavio.np_prediction(obslist[i]['obs'][0], wc_obj=wc, q2min=obslist[i]['obs'][1], q2max=obslist[i]['obs'][2])
else:
if wc is None:
th = flavio.sm_prediction(obslist[i]['obs'])
else:
th = flavio.np_prediction(obslist[i]['obs'], wc_obj=wc)
chi.append( (th - obslist[i]['central'])**2/obslist[i]['error']**2)
return chi
def define_fit(wc, M=4.2):
fast_fit = flavio.statistics.fits.FastFit(
name = 'Global fit',
observables = observables,
fit_wc_function = wc,
input_scale = M,
nuisance_parameters = 'all'
)
fast_fit.make_measurement(threads=4)
return fast_fit
def predictions(filename):
f = open(filename, 'at', buffering=1)
chiSM = chi2()[-1]  # chi2() returns a tuple; keep the total chi^2
obscalc = [('<Rmue>(B+->Kll)', 1.0, 6.0), ('<Rmue>(B0->K*ll)', 0.045, 1.1), ('<Rmue>(B0->K*ll)', 1.1, 6.0)]
for w in wcs:
f.write(w.__name__ + '\n=================\n')
chi = lambda cr, ci: chi2(w(cr, ci))[-1]  # minimize the total chi^2 (Minuit needs a scalar)
m = Minuit(chi, cr=0, ci=0, error_cr=0.01, error_ci=0.01, errordef=1, print_level=0)
m.migrad()
f.write('\tBest fit: ' + str(m.values[0]) + ' + ' + str(m.values[1]) + 'i\n')
chibf = m.fval
f.write('\tPull (sqrt): ' + str(sqrt(chiSM-chibf)) + '\n')
f.write('\tPull (sigma): ' + str(pull(chiSM-chibf, 2)) + r' \sigma' + '\n')
f.write('\tChi2/dof: ' + str(chibf/(nobs-2)) + '\n')
#m.minos()
xr_centr = m.values[0]
xi_centr = m.values[1]
wcObj = w(xr_centr, xi_centr)
cont = m.mncontour('cr', 'ci', numpoints=40)[2]
for o in range(0, len(obscalc)):
obs_centr = flavio.np_prediction(obscalc[o][0], wcObj, *obscalc[o][1:])
obs_max = obs_min = obs_centr
for i in range(0, len(cont) ):
wcObj = w(*cont[i])
obs_max = max(obs_max, flavio.np_prediction(obscalc[o][0], wcObj, *obscalc[o][1:]))
obs_min = min(obs_min, flavio.np_prediction(obscalc[o][0], wcObj, *obscalc[o][1:]))
f.write('\t' + str(obscalc[o]) + ': ' + str(obs_centr) + ' + '+ str(obs_max - obs_centr) + ' - ' + str(obs_centr - obs_min) + '\n')
f.write('\n\n')
f.close()
def makefit_complex(wc, rangeR, rangeI, rangeM, filename):
'''
Fit and print results to file
wc: Function that computes Wilson Coefficients from the model parameters (e.g. wc_Z, wc_LQ)
Complex couplings
stepM, maxM in TeV
'''
chi0 = chi2()
for M in rangeM:
f = open(filename, 'at')
#f.write(str(M) + '\t' + '0' + '\t' + '0' + '\t' + str(chi0[0]) + '\t' + str(chi0[1]) + '\t' + str(chi0[2]) + '\t' + str(chi0[3]) + '\n' )
for lR in rangeR:
chi = []
for lI in rangeI:
if (lR==0) and (lI==0):
chitot = chi0
else:
wcobj = Wilson(wc(lR+lI*1j, M*1000), scale=M*1000, eft='WET', basis='flavio')
chitot = chi2(wcobj)
f.write(str(M) + '\t' + str(lR) + '\t' + str(lI) + '\t' + str(chitot[0]) + '\t' + str(chitot[1]) + '\t' + str(chitot[2]) + '\t' + str(chitot[3]) + '\n' )
f.close()
def predmodel(wc, l0, M):
chi = lambda lr, li: chi2(Wilson(wc(lr+li*1j, M*1000), scale=M*1000, eft='WET', basis='flavio'))[-1]
m = Minuit(chi, lr=l0[0], li=l0[1], error_lr=0.001, error_li=0.001, errordef=1, print_level=0)
m.migrad()
cont = [(l0[0]+m.errors[0], l0[1]), (l0[0]-m.errors[0], l0[1]), (l0[0], l0[1]+m.errors[1]), (l0[0], l0[1]-m.errors[1])]
#obs0 = [('<Rmue>(B+->Kll)', 1.0, 6.0), ('<Rmue>(B0->K*ll)', 0.045, 1.1), ('<Rmue>(B0->K*ll)', 1.1, 6.0), ('DMs',), ('S_psiphi',)]
obs0 = [('S_psiphi',)]
for ob in obs0:
lim = []
if isinstance(ob, tuple):
centr = flavio.np_prediction(ob[0], Wilson(wc(l0[0]+l0[1]*1j, M*1000), scale=M*1000, eft='WET', basis='flavio'), *ob[1:])
for p in cont:
lim.append(flavio.np_prediction(ob[0], Wilson(wc(p[0]+p[1]*1j, M*1000), scale=M*1000, eft='WET', basis='flavio'), *ob[1:]))
else:
centr = flavio.np_prediction(ob, Wilson(wc(l0[0]+l0[1]*1j, M*1000), scale=M*1000, eft='WET', basis='flavio'))
for p in cont:
lim.append(flavio.np_prediction(ob, Wilson(wc(p[0]+p[1]*1j, M*1000), scale=M*1000, eft='WET', basis='flavio')))
errorsup = max(lim) - centr
errorinf = centr - min(lim)
print(ob, ':\t', centr, ' + ', errorsup, ' - ', errorinf)
def run():
import numpy as np
read_observables('observables_ZLQ.yaml')
makefit_complex(wc_LQ, np.linspace(-0.5, 0.5, 20), np.linspace(-0.5, 0.5, 30), np.linspace(4, 6, 20), 'bf_ZComp.dat')
def plot(fast_fit, x):
'''
Plots the allowed regions in the C9-C10 plane for imaginary Wilson coefficients
'''
import texfig
import flavio.plots
import matplotlib.pyplot as plt
fig = texfig.figure()
opt = dict(x_min=-2, x_max=2, y_min=-2, y_max=2, n_sigma=(1,2), interpolation_factor=5)
flavio.plots.likelihood_contour(fast_fit.log_likelihood, col=0, **opt, threads=2)
#flavio.plots.flavio_branding(y=0.07, x=0.05) #crashes LaTeX
plt.gca().set_aspect(1)
plt.axhline(0, c='k', lw=0.2)
plt.axvline(0, c='k', lw=0.2)
plt.plot(x[0], x[1], marker='x') #compute best fit first!
plt.xlabel(r'$\mathrm{Im}\ C_9$')
plt.ylabel(r'$\mathrm{Im}\ C_{10}$')
texfig.savefig('fitIm_C9C10')
def chiM(wc, l, M):
wcobj = Wilson(wc(l, M*1000), scale=M*1000, eft='WET', basis='flavio')
chiM = 0
for i in range(0, nobs):
if obslist[i]['obs'] in ['DMs', 'S_psiphi']:
th = flavio.np_prediction(obslist[i]['obs'], wcobj)
chiM += (th - obslist[i]['central'])**2/obslist[i]['error']**2
return chiM
def massdep(wc, rangeM, maxl, fout):
f = open(fout, 'wt', buffering=1)
for M in rangeM:
chi = lambda l: chiM(wc, l*1j, M)
m = Minuit(chi, l=0, error_l=0.001, limit_l=(0, maxl), errordef=1)
m.migrad()
f.write(str(M) + '\t' + str(m.values[0]) + '\n')
f.close()
```
#### File: imagWC/src/Wilsonffit.py
```python
import texfig # download from https://github.com/nilsleiffischer/texfig
import flavio
import flavio.plots
import flavio.statistics.fits
import matplotlib.pyplot as plt
def wc(C9, C10):
'''
Wilson coefficient settings
'''
return {
'C9_bsmumu': C9,
'C10_bsmumu': C10,
}
fast_fit = flavio.statistics.fits.FastFit(
name = "C9-C10 SMEFT fast fit",
observables = [
#'BR(Bs->mumu)', 'BR(B0->mumu)',
('<Rmue>(B0->K*ll)', 0.045, 1.1), ('<Rmue>(B0->K*ll)', 1.1, 6.0), ('<Rmue>(B+->Kll)', 1.0, 6.0), ('<P4p>(B0->K*mumu)', 1.1, 6.0) , ('<P5p>(B0->K*mumu)', 1.1, 6.0) ],
fit_wc_function = wc,
input_scale = 4.8,
include_measurements = ['LHCb Bs->mumu 2017', 'CMS Bs->mumu 2013', 'LHCb RK* 2017', 'LHCb B->Kee 2014', 'LHCb B->K*mumu 2015 P 1.1-6', 'ATLAS B->K*mumu 2017 P4p', 'ATLAS B->K*mumu 2017 P5p'],
)
fast_fit.make_measurement(threads=2)
def plot(x0=None):
'''
Plots the allowed regions in the C9-C10 plane for imaginary Wilson coefficients
'''
fig = texfig.figure()
opt = dict(x_min=-2, x_max=2, y_min=-2, y_max=2, n_sigma=(1,2), interpolation_factor=5)
flavio.plots.likelihood_contour(fast_fit.log_likelihood, col=0, **opt, threads=2)
#flavio.plots.flavio_branding(y=0.07, x=0.05) #crashes LaTeX
plt.gca().set_aspect(1)
plt.axhline(0, c='k', lw=0.2)
plt.axvline(0, c='k', lw=0.2)
if x0 is not None and len(x0) == 2:
plt.plot(x0[0], x0[1], marker='x') #compute best fit first!
plt.xlabel(r'$\mathrm{Re}\ C_9$')
plt.ylabel(r'$\mathrm{Re}\ C_{10}$')
texfig.savefig('fitre')
def best_fit(x=[0.3,0.3]):
'''
Computes the best fit starting at point x0 = [Im C9, Im C10]
'''
bf_global = fast_fit.best_fit(x0=x)
print('Global: C9='+str(bf_global['x'][0])+ 'i\tC10='+str(bf_global['x'][1])+'i\nchi2 = ' + str(bf_global['log_likelihood']))
``` |
{
"source": "Jorge-Alda/pytiming",
"score": 3
} |
#### File: Jorge-Alda/pytiming/pytiming.py
```python
import time
import datetime
from contextlib import contextmanager
@contextmanager
def timing(mode=''):
try:
if mode == 'process':
start = time.process_time()
elif mode == 'thread':
start = time.thread_time()
elif mode == 'perf':
start = time.perf_counter()
else:
start = time.time()
yield start
finally:
if mode == 'process':
end = time.process_time()
elif mode == 'thread':
end = time.thread_time()
elif mode == 'perf':
end = time.perf_counter()
else:
end = time.time()
delta = datetime.timedelta(seconds=end-start)
print(str(delta))
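# Minimal usage sketch: times the enclosed block and prints the elapsed time as a timedelta.
# with timing('perf'):
#     sum(i * i for i in range(1_000_000))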
``` |
{
"source": "Jorge-Alda/SMEFT19",
"score": 2
} |
#### File: SMEFT19/SMEFT19/SMEFTglob.py
```python
from math import isinf
import warnings
from flavio.statistics.functions import pull
import smelli
import yaml
import SMEFT19
gl = smelli.GlobalLikelihood()
def restart_smelli(include_likelihoods=None, add_measurements=None,
remove_measurements=None, custom_likelihoods=None):
'''
Re-starts smelli's Global Likelihood with new parameters.
:Arguments:
- include_likelihoods\: If not None, only the specified likelihoods will be included.
- add_measurements\: Adds more experimental measurements not included by smelli.
- remove_measurements\: Removes more experimental measurements not included by smelli.
- custom_likelihoods\: Adds new likelihoods.
'''
global gl
gl = smelli.GlobalLikelihood(include_likelihoods=include_likelihoods,
add_measurements=add_measurements,
remove_measurements=remove_measurements,
custom_likelihoods=custom_likelihoods)
def likelihood_fits(x, wfun):
'''
Calculates the log-likelihood of a NP hypothesis for several classes of observables.
:Arguments:
- x\: Point in parameter space to be evaluated.
- wfun\: Function that takes a point in parameter space and
returns a dictionary of Wilson coefficients.
:Returns:
- A dictionary of log-likelihoods, for each of the classes of observables defined by `smelli`.
'''
res = dict()
with warnings.catch_warnings():
warnings.simplefilter('ignore')
glpp = gl.parameter_point(wfun(x))
gldict = glpp.log_likelihood_dict()
for f in gldict.keys():
g = gldict[f]
if isinf(g):
if f == 'global':
g = 0
for f2 in list(gldict.keys())[:-1]:
g += res[f2]
else:
g = -68
res[f] = g
return res
def likelihood_global(x, wfun):
'''
Calculates the global log-likelihood of a NP hypothesis.
:Arguments:
- x\: Point in parameter space to be evaluated.
- wfun\: Function that takes a point in parameter space
and returns a dictionary of Wilson coefficients.
:Returns:
- The global log-likelihood.
'''
with warnings.catch_warnings():
warnings.simplefilter('ignore')
glpp = gl.parameter_point(wfun(x))
return glpp.log_likelihood_global()
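# Minimal usage sketch (hypothetical wfun): any callable mapping a parameter point to the
# object accepted by gl.parameter_point (e.g. a wilson.Wilson instance) can be used as wfun.
# from wilson import Wilson
# def wfun(x):
#     return Wilson({'lq1_2223': x[0] * 1e-9}, scale=1000, eft='SMEFT', basis='Warsaw')
# likelihood_global([0.5], wfun)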
def fastmeas(obs):
'''
Checks if the observable is part of a <<fast-measurement>> in smelli.
'''
if obs not in gl.obstable_sm.keys():
raise KeyError(f"The observable '{obs}' is not available in smelli")
obsm = gl.obstable_sm[obs]
lhname = obsm['lh_name']
return lhname[:4] == 'fast'
def prediction(x, obs, wfun):
'''
Interfaces `flavio` to compute the NP prediction of a given observable.
:Arguments:
- x\: Point in parameter space to be evaluated.
- obs\: observable, as defined by flavio, whose prediction will be computed.
If the observable does not depend on any parameter, obs is a string.
If the observable depends on numerical parameters (such as q2), obs is
a list containing a string and one or more floats.
- wfun\: Function that takes a point in parameter space and
returns a dictionary of Wilson coefficients.
:Returns:
- The prediction of the observable.
'''
if obs not in gl.obstable_sm.keys():
raise KeyError(f"The observable '{obs}' is not available in smelli")
obsm = gl.obstable_sm[obs]
lhname = obsm['lh_name']
wc = wfun(x)
if fastmeas(obs):
lh = gl.fast_likelihoods[lhname]
ml = lh.likelihood.measurement_likelihood
pred = ml.get_predictions_par(gl.par_dict_default, wc)
return pred[obs]
else:
lh = gl.likelihoods[lhname]
ml = lh.measurement_likelihood
pred = ml.get_predictions_par(gl.par_dict_default, wc)
return pred[obs]
def pull_obs(x, obs, wfun):
'''
Calculates the pull, in sigmas, of the prediction of a given observable
in NP with respect to its experimental value.
:Arguments:
- x\: Point in parameter space to be evaluated.
- obs\: observable, as defined by `flavio`, whose prediction will be computed.
If the observable does not depend on any parameter, obs is a string.
If the observable depends on numerical parameters (such as q2), obs is
a list containing a string and one or more floats.
- wfun\: Function that takes a point in parameter space and
returns a dictionary of Wilson coefficients.
:Returns:
- The pull of the observable.
'''
if obs not in gl.obstable_sm.keys():
raise KeyError(f"The observable '{obs}' is not available in smelli")
obsm = gl.obstable_sm[obs]
lhname = obsm['lh_name']
pred = prediction(x, obs, wfun)
ll_central = obsm['ll_central']
if fastmeas(obs):
lh = gl.fast_likelihoods[lhname]
m = lh.pseudo_measurement
ll = m.get_logprobability_single(obs, pred)
else:
p_comb = obsm['exp. PDF']
ll = p_comb.logpdf([pred])
return pull(-2*(ll-ll_central), 1)
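# Minimal usage sketch: 'obs' can be any entry of the list returned by loadobslist() below;
# tuple entries carry extra arguments such as the q2 bin edges.
# obscoll = loadobslist()
# prediction(x, obscoll[0], wfun)
# pull_obs(x, obscoll[0], wfun)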
def newlist():
'''
Creates a `.yaml` file with a list of all observables available, ordered by their pull in the SM.
'''
glSM = gl.parameter_point({}, scale=1000)
obsSM = glSM.obstable()
obscoll = list(obsSM['pull exp.'].keys())
# Convert tuples to lists so that the YAML file contains plain sequences.
obscoll = [list(o) if isinstance(o, tuple) else o for o in obscoll]
with open(SMEFT19.__path__[0] + '/observables.yaml', 'wt', encoding='utf-8') as fyaml:
yaml.dump(obscoll, fyaml)
def loadobslist(new=False):
'''
Loads from a `.yaml` file a list of all observables available, ordered by their pull in the SM.
If the file does not exist, this functions creates it.
:Returns:
- A list with all observables available.
'''
if new:
newlist()
else:
try:
with open(SMEFT19.__path__[0] + '/observables.yaml', 'rt', encoding='utf-8') as fyaml:
obscoll = yaml.safe_load(fyaml)
except (OSError, IOError):
newlist()
with open(SMEFT19.__path__[0] + '/observables.yaml', 'rt', encoding='utf-8') as fyaml:
obscoll = yaml.safe_load(fyaml)
# Convert lists back to tuples, matching the format expected by the likelihood functions.
obscoll = [tuple(o) if isinstance(o, list) else o for o in obscoll]
return obscoll
``` |
{
"source": "jorgealexandreb/Covid-19-ML-Project-",
"score": 3
} |
#### File: data_set/tests/test_dgs_data.py
```python
from pathlib import Path
import pytest
import pandas as pd
import datetime
import csv
import numpy as np
# Constants
NULL_PLACEHOLDER_VALUE = "NOO"
# Tests fixture (.csv with DGS data)
@pytest.fixture(scope="module")
def dgs_data():
""" Loads the CSV with the DGS data and applies some processing to it. """
# Loading the CSV
current_dir = Path(__file__).parent.absolute()
csv_filepath = current_dir / ".." / "data.csv"
data = pd.read_csv(
csv_filepath,
parse_dates=[0, 1],
infer_datetime_format=True,
skip_blank_lines=False,
)
# Filling NaNs with a custom value for easier processing
data.fillna(value=NULL_PLACEHOLDER_VALUE, inplace=True)
# Returning
return data
def _check_column_with_empty(val):
""" Guarantees columns with empty values are as expected. """
# Let's check if it's a float
if isinstance(val, float):
# Is it a float representing an integer value?
return val.is_integer()
# If it's a string, it must be equal to the expeceted string
elif isinstance(val, str):
return val == NULL_PLACEHOLDER_VALUE
# Anything else, it's wrong
else:
return False
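# Examples: _check_column_with_empty(3.0) -> True (float with integer value),
# _check_column_with_empty(3.5) -> False, _check_column_with_empty("NOO") -> True (placeholder),
# and any other type (e.g. a plain int) -> False.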
def _check_column_with_empty_sintomas(val):
""" Guarantees symptoms are reported as expected. """
# Let's check if it's a float
if isinstance(val, float):
# Is it <= 1.0?
return val <= 1.0
# If it's a string, it must be equal to the expeceted string
elif isinstance(val, str):
return val == NULL_PLACEHOLDER_VALUE
# Anything else, it's wrong
else:
return False
def _check_datetime_format(date):
# Let's guarantee it makes sense (data starts on 26th February 2020)
if date >= datetime.date(2020, 2, 26):
# Let's guarantee it's in the correct format
datetime.datetime.strptime(str(date), "%d-%m-%Y") # Will fail if not
else:
return False
def _check_datetime_format_publication(date):
# Let's guarantee it makes sense (data starts on 26th February 2020)
if date >= datetime.date(2020, 2, 26):
# Let's guarantee it's in the correct format
datetime.datetime.strptime(str(date), "%d-%m-%Y %H:%M") # Will fail if not
else:
return False
def test_dates():
#TODO: Optimize this.
# Loading the CSV
current_dir = Path(__file__).parent.absolute()
csv_filepath = current_dir / ".." / "data.csv"
data = pd.read_csv(csv_filepath)
# Publication date
data_update = data['data']
# Date the data refers to
data_dados = data['data_dados']
data_update.apply(lambda x: datetime.datetime.strptime(x, '%d-%m-%Y'))
data_dados.apply(lambda x: datetime.datetime.strptime(x, '%d-%m-%Y %H:%M'))
@pytest.mark.parametrize(
"col_name, expected_dtype, extra_check",
[
# Confirmed cases
("confirmados", (int), lambda x: x >= 0),
("confirmados_arsnorte", (int), lambda x: x >= 0),
("confirmados_arscentro", (int), lambda x: x >= 0),
("confirmados_arslvt", (int), lambda x: x >= 0),
("confirmados_arsalentejo", (int), lambda x: x >= 0),
("confirmados_arsalgarve", (int), lambda x: x >= 0),
("confirmados_acores", (int), lambda x: x >= 0),
("confirmados_madeira", (int), lambda x: x >= 0),
("confirmados_estrangeiro", (float, str), _check_column_with_empty),
("confirmados_novos", (int), lambda x: x >= 0),
# Hospitalized
("internados", (float, str), _check_column_with_empty),
("internados_uci", (float, str), _check_column_with_empty),
# Suspected cases
("suspeitos", (float, str), _check_column_with_empty),
# Cases under surveillance
("vigilancia", (float, str), _check_column_with_empty),
# Unconfirmed cases
("n_confirmados", (float, str), _check_column_with_empty),
# Number of transmission chains
("cadeias_transmissao", (float, str), _check_column_with_empty),
# Imported transmission cases
("transmissao_importada", (float, str), _check_column_with_empty),
# Confirmed cases (by age group and gender)
("confirmados_0_9_f", (float, str), _check_column_with_empty),
("confirmados_0_9_m", (float, str), _check_column_with_empty),
("confirmados_10_19_f", (float, str), _check_column_with_empty),
("confirmados_10_19_m", (float, str), _check_column_with_empty),
("confirmados_20_29_f", (float, str), _check_column_with_empty),
("confirmados_20_29_m", (float, str), _check_column_with_empty),
("confirmados_30_39_f", (float, str), _check_column_with_empty),
("confirmados_30_39_m", (float, str), _check_column_with_empty),
("confirmados_40_49_f", (float, str), _check_column_with_empty),
("confirmados_40_49_m", (float, str), _check_column_with_empty),
("confirmados_50_59_f", (float, str), _check_column_with_empty),
("confirmados_50_59_m", (float, str), _check_column_with_empty),
("confirmados_60_69_f", (float, str), _check_column_with_empty),
("confirmados_60_69_m", (float, str), _check_column_with_empty),
("confirmados_70_79_f", (float, str), _check_column_with_empty),
("confirmados_70_79_m", (float, str), _check_column_with_empty),
("confirmados_80_plus_f", (float, str), _check_column_with_empty),
("confirmados_80_plus_m", (float, str), _check_column_with_empty),
("confirmados_f", (float, str), _check_column_with_empty),
("confirmados_m", (float, str), _check_column_with_empty),
# Symptoms
("sintomas_tosse", (float, str), _check_column_with_empty_sintomas),
("sintomas_febre", (float, str), _check_column_with_empty_sintomas),
("sintomas_dificuldade_respiratoria",(float, str),_check_column_with_empty_sintomas),
("sintomas_cefaleia", (float, str), _check_column_with_empty_sintomas),
("sintomas_dores_musculares", (float, str), _check_column_with_empty_sintomas),
("sintomas_fraqueza_generalizada",(float, str),_check_column_with_empty_sintomas),
("sintomas_tosse", (float, str), _check_column_with_empty_sintomas),
        # Deaths
("obitos", (int), lambda x: x >= 0),
("obitos_arsnorte", (int), lambda x: x >= 0),
("obitos_arscentro", (int), lambda x: x >= 0),
("obitos_arslvt", (int), lambda x: x >= 0),
("obitos_arsalentejo", (int), lambda x: x >= 0),
("obitos_arsalgarve", (int), lambda x: x >= 0),
("obitos_acores", (int), lambda x: x >= 0),
("obitos_madeira", (int), lambda x: x >= 0),
("obitos_estrangeiro", (float, str), _check_column_with_empty),
("obitos_0_9_f", (float, str), _check_column_with_empty),
("obitos_0_9_m", (float, str), _check_column_with_empty),
("obitos_10_19_f", (float, str), _check_column_with_empty),
("obitos_10_19_m", (float, str), _check_column_with_empty),
("obitos_20_29_f", (float, str), _check_column_with_empty),
("obitos_20_29_m", (float, str), _check_column_with_empty),
("obitos_30_39_f", (float, str), _check_column_with_empty),
("obitos_30_39_m", (float, str), _check_column_with_empty),
("obitos_40_49_f", (float, str), _check_column_with_empty),
("obitos_40_49_m", (float, str), _check_column_with_empty),
("obitos_50_59_f", (float, str), _check_column_with_empty),
("obitos_50_59_m", (float, str), _check_column_with_empty),
("obitos_60_69_f", (float, str), _check_column_with_empty),
("obitos_60_69_m", (float, str), _check_column_with_empty),
("obitos_70_79_f", (float, str), _check_column_with_empty),
("obitos_70_79_m", (float, str), _check_column_with_empty),
("obitos_80_plus_f", (float, str), _check_column_with_empty),
("obitos_80_plus_m", (float, str), _check_column_with_empty),
("obitos_f", (float, str), _check_column_with_empty),
("obitos_m", (float, str), _check_column_with_empty),
        # Recovered
("recuperados", (int), lambda x: x >= 0),
("recuperados_arsnorte", (float, str), _check_column_with_empty),
("recuperados_arscentro", (float, str), _check_column_with_empty),
("recuperados_arslvt", (float, str), _check_column_with_empty),
("recuperados_arsalentejo", (float, str), _check_column_with_empty),
("recuperados_arsalgarve", (float, str), _check_column_with_empty),
("recuperados_acores", (float, str), _check_column_with_empty),
("recuperados_madeira", (float, str), _check_column_with_empty),
("recuperados_estrangeiro", (float, str), _check_column_with_empty),
("ativos", (float, str), _check_column_with_empty),
],
)
def test_dtype(dgs_data, col_name, expected_dtype, extra_check):
"""
Tests whether a certain column has the expected data types (and other column specific rules).
"""
df_latest_line = dgs_data.tail(1) # Only run for the latest line
for row in df_latest_line.iterrows():
val = row[1][col_name]
# Basic type assertion
assert isinstance(
val, expected_dtype
), "Dia {}: erro na coluna {}, valor {}".format(row[1]["data"], col_name, val)
# Extra verification
if extra_check is not None:
assert extra_check(
val
), "Dia: Coluna {}, valor {} não cumpre as condições específicas".format(
row[1]["data"], col_name, val
)
@pytest.mark.parametrize(
"group, total_col",
[
(
[
"confirmados_arsnorte",
"confirmados_arscentro",
"confirmados_arslvt",
"confirmados_arsalentejo",
"confirmados_arsalgarve",
"confirmados_acores",
"confirmados_madeira"
],
["confirmados"],
),
pytest.param(
[
"confirmados_0_9_f",
"confirmados_0_9_m",
"confirmados_10_19_f",
"confirmados_10_19_m",
"confirmados_20_29_f",
"confirmados_20_29_m",
"confirmados_30_39_f",
"confirmados_30_39_m",
"confirmados_40_49_f",
"confirmados_40_49_m",
"confirmados_50_59_f",
"confirmados_50_59_m",
"confirmados_60_69_f",
"confirmados_60_69_m",
"confirmados_70_79_f",
"confirmados_70_79_m",
"confirmados_80_plus_f",
"confirmados_80_plus_m",
"confirmados_desconhecidos_m",
"confirmados_desconhecidos_f"
],
["confirmados"], marks=pytest.mark.xfail),
(
[
"obitos_0_9_f",
"obitos_0_9_m",
"obitos_10_19_f",
"obitos_10_19_m",
"obitos_20_29_f",
"obitos_20_29_m",
"obitos_30_39_f",
"obitos_30_39_m",
"obitos_40_49_f",
"obitos_40_49_m",
"obitos_50_59_f",
"obitos_50_59_m",
"obitos_60_69_f",
"obitos_60_69_m",
"obitos_70_79_f",
"obitos_70_79_m",
"obitos_80_plus_f",
"obitos_80_plus_m",
],
["obitos_f", "obitos_m"],
),
(["obitos_f", "obitos_m"], ["obitos"]),
pytest.param(["confirmados_f", "confirmados_m", "confirmados_desconhecidos"], ["confirmados"]),
(
[
"obitos_arsnorte",
"obitos_arscentro",
"obitos_arslvt",
"obitos_arsalentejo",
"obitos_arsalgarve",
"obitos_acores",
"obitos_madeira",
],
["obitos"],
),
pytest.param(
[
"recuperados_arsnorte",
"recuperados_arscentro",
"recuperados_arslvt",
"recuperados_arsalentejo",
"recuperados_arsalgarve",
"recuperados_acores",
"recuperados_madeira",
],
["recuperados"], marks=pytest.mark.xfail)
],
)
def test_sums(dgs_data, group, total_col):
df_latest_line = dgs_data.tail(1) # Only run for the latest line
for row in df_latest_line.iterrows():
val = row[1]
assert val[group].sum() == val[total_col].sum(), "Soma difere"
def test_delimiter_comma():
"""
Tests that the delimiter is a comma
"""
current_dir = Path(__file__).parent.absolute()
csv_filepath = current_dir / ".." / "data.csv"
with open(csv_filepath, newline="") as csvfile:
csv.Sniffer().sniff(csvfile.read(1024), delimiters=",")
def test_blank_lines(dgs_data):
"""
Tests if the last row is blank
"""
df_latest_line = dgs_data.tail(1) # Only run for the latest line
for row in df_latest_line.iterrows():
val = row[1]
assert val["data"] != np.nan, "Empty row"
def test_sequentiality_new_cases(dgs_data):
"""
Tests if the number of new cases is correct
"""
today = dgs_data.iloc[-1]
yesterday = dgs_data.iloc[-2]
assert today["confirmados"] - yesterday["confirmados"] == today["confirmados_novos"]
assert today["confirmados"] > yesterday["confirmados"]
def test_sequentiality_dates(dgs_data):
"""
Tests if the sequentiality of dates is correct
"""
today_date = dgs_data.iloc[-1]["data"]
yesterday_date = dgs_data.iloc[-2]["data"]
diff_date = (today_date - yesterday_date).days
assert diff_date == 1
today_date_updates = dgs_data.iloc[-1]["data_dados"]
yesterday_date_updates = dgs_data.iloc[-2]["data_dados"]
diff_date_updates = (today_date_updates - yesterday_date_updates).days
assert diff_date_updates == 1
``` |
{
"source": "jorgearanda/katas",
"score": 3
} |
#### File: katas/ema/ema.py
```python
class ExponentialMovingAverage():
def __init__(self, alpha=0.5):
self.alpha = alpha
self.ema = None
def get(self):
return self.ema
def put(self, input):
if type(input) is int:
input = [input]
for value in input:
self._calc(value)
def _calc(self, value):
if self.ema is None:
self.ema = value
else:
self.ema = value * self.alpha + self.ema * (1.0 - self.alpha)
# Third pass. The result was identical to the second.
```
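A quick usage sketch of the class above (the numbers are only illustrative):
```python
ema = ExponentialMovingAverage(alpha=0.5)
ema.put(10)        # the first value seeds the average
ema.put([20, 30])  # lists are accepted and folded in one value at a time
print(ema.get())   # 22.5 == 0.5*30 + 0.5*(0.5*20 + 0.5*10)
```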
#### File: katas/life/life.py
```python
LIVE = '*'
DEAD = '.'
POTENTIAL_NEIGHBOURS = [
(-1, -1), (-1, 0), (-1, 1),
(0, -1), (0, 1),
(1, -1), (1, 0), (1, 1),
]
def life(input):
return Life(input).next()
class Life():
def __init__(self, input):
self.grid = input
self.max_y = len(input)
def next(self):
self.grid = [self.row_next(y, row) for y, row in enumerate(self.grid)]
return self.grid
def row_next(self, y, row):
return ''.join([self.cell_next(x, y) for x in range(len(row))])
def cell_next(self, x, y):
live_neighbours = 0
for i, j in POTENTIAL_NEIGHBOURS:
if self.is_live(x + i, y + j):
live_neighbours += 1
if self.is_live(x, y):
if live_neighbours in (2, 3):
return LIVE
else:
return DEAD
else:
if live_neighbours == 3:
return LIVE
else:
return DEAD
def is_live(self, x, y):
return self.are_valid_coords(x, y) and self.grid[y][x] == LIVE
def are_valid_coords(self, x, y):
return 0 <= x < len(self.grid[0]) and 0 <= y < len(self.grid)
# Fourth pass. Back to the basics, following TDD. The end result is much
# better! No extraneous abstractions, no overengineering. Short, readable code.
```
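A small illustrative run of the module above: a 2x2 block is a still life, so one generation
leaves it unchanged.
```python
grid = [
    '.**.',
    '.**.',
    '....',
]
print(life(grid))  # ['.**.', '.**.', '....']
```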
#### File: roman/test/test_roman.py
```python
from roman import to_roman, to_arabic
def test_1():
assert "I" == to_roman(1)
def test_2():
assert "II" == to_roman(2)
def test_3():
assert "III" == to_roman(3)
def test_4():
assert "IV" == to_roman(4)
def test_5():
assert "V" == to_roman(5)
def test_6():
assert "VI" == to_roman(6)
def test_9():
assert "IX" == to_roman(9)
def test_27():
assert "XXVII" == to_roman(27)
def test_48():
assert "XLVIII" == to_roman(48)
def test_59():
assert "LIX" == to_roman(59)
def test_93():
assert "XCIII" == to_roman(93)
def test_141():
assert "CXLI" == to_roman(141)
def test_163():
assert "CLXIII" == to_roman(163)
def test_402():
assert "CDII" == to_roman(402)
def test_575():
assert "DLXXV" == to_roman(575)
def test_911():
assert "CMXI" == to_roman(911)
def test_1024():
assert "MXXIV" == to_roman(1024)
def test_3000():
assert "MMM" == to_roman(3000)
def test_I():
assert 1 == to_arabic("I")
def test_II():
assert 2 == to_arabic("II")
def test_all():
for i in range(1, 4000):
assert i == to_arabic(to_roman(i))
```
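The roman.py module itself is not part of this dump; a minimal greedy implementation that
satisfies the tests above could look like this (a sketch, not necessarily the author's code):
```python
NUMERALS = [
    (1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
    (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
    (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I'),
]

def to_roman(n):
    """Convert a positive integer (1-3999) to its Roman numeral."""
    result = ''
    for value, symbol in NUMERALS:
        while n >= value:
            result += symbol
            n -= value
    return result

def to_arabic(roman):
    """Convert a Roman numeral back to an integer."""
    n = 0
    for value, symbol in NUMERALS:
        while roman.startswith(symbol):
            n += value
            roman = roman[len(symbol):]
    return n
```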
#### File: to-words/test/test_to_words.py
```python
from to_words import to_words
def test_1():
assert to_words(1) == 'one'
def test_2():
assert to_words(2) == 'two'
def test_3():
assert to_words(3) == 'three'
def test_4():
assert to_words(4) == 'four'
def test_5():
assert to_words(5) == 'five'
def test_10():
assert to_words(10) == 'ten'
def test_11():
assert to_words(11) == 'eleven'
def test_12():
assert to_words(12) == 'twelve'
def test_13():
assert to_words(13) == 'thirteen'
def test_19():
assert to_words(19) == 'nineteen'
def test_20():
assert to_words(20) == 'twenty'
def test_27():
assert to_words(27) == 'twenty seven'
def test_35():
assert to_words(35) == 'thirty five'
def test_78():
assert to_words(78) == 'seventy eight'
def test_103():
assert to_words(103) == 'one hundred three'
def test_741():
assert to_words(741) == 'seven hundred forty one'
def test_999():
assert to_words(999) == 'nine hundred ninety nine'
def test_1015():
assert to_words(1015) == 'one thousand fifteen'
def test_8765():
assert to_words(8765) == 'eight thousand seven hundred sixty five'
def test_12345():
assert to_words(12345) == 'twelve thousand three hundred forty five'
def test_99999():
assert to_words(99999) == 'ninety nine thousand nine hundred ninety nine'
def test_9999999999():
assert to_words(9999999999) == \
'nine billion nine hundred ninety nine million ' + \
'nine hundred ninety nine thousand nine hundred ninety nine'
```
#### File: katas/to-words/to_words.py
```python
digits = {
0: '',
1: 'one',
2: 'two',
3: 'three',
4: 'four',
5: 'five',
6: 'six',
7: 'seven',
8: 'eight',
9: 'nine',
}
orders_of_magnitude = {
1000000000: 'billion',
100000000: 'hundred',
10000000: 'ten',
1000000: 'million',
100000: 'hundred',
10000: 'ten',
1000: 'thousand',
100: 'hundred',
10: 'ten',
1: '',
}
exceptions = {
'one ten': 'ten',
'two ten': 'twenty',
'three ten': 'thirty',
'four ten': 'forty',
'five ten': 'fifty',
'six ten': 'sixty',
'seven ten': 'seventy',
'eight ten': 'eighty',
'nine ten': 'ninety',
'ten one': 'eleven',
'ten two': 'twelve',
'ten three': 'thirteen',
'ten four': 'fourteen',
'ten five': 'fifteen',
'ten six': 'sixteen',
'ten seven': 'seventeen',
'ten eight': 'eighteen',
'ten nine': 'nineteen',
}
def to_words(n):
long_form = _long_form_number_text(n)
short_form = _eliminate_exceptions(long_form).strip()
return short_form
def _long_form_number_text(n):
words = ''
for order, denomination in orders_of_magnitude.items():
if n >= order:
words += digits[n // order] + ' ' + denomination + ' '
n %= order
return words
def _eliminate_exceptions(words):
for incorrect, correct in exceptions.items():
words = words.replace(incorrect, correct)
words = words.strip()
return words
# Third pass. I think this is a slight improvement over the second.
# It would be hard to make the function cleaner than it is here,
# except perhaps in some of the naming choices.
#
# I decided to add a few more orders of magnitude, up to a billion,
# because it was very easy to do so.
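# Illustrative trace of the two-pass design described above (hypothetical calls):
#   _long_form_number_text(741)  -> 'seven hundred four ten one' (plus padding spaces)
#   _eliminate_exceptions(...)   -> 'seven hundred forty one'
#   to_words(741)                -> 'seven hundred forty one'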
``` |
{
"source": "jorgearojas25/FlaskLogin",
"score": 2
} |
#### File: app/auth/routes.py
```python
from flask import render_template, flash, request, session, make_response, g,copy_current_request_context,redirect
from app.db.dbMySQL import MySQLUser,MySQLToken
from app.models import User,Token
from app.utils.generate_token import generateToken
import threading
from . import auth_bp
import datetime
from threading import Timer,Thread,Event
@auth_bp.route('/sign_in', methods=('GET', 'POST'))
def signIn():
m = MySQLUser()
mT = MySQLToken()
    # check whether the token stored in the cookies is registered in the database and return the corresponding user
user = mT.existToken(request.cookies.get('token'))
print(request.cookies.get('token'))
    if user is not None:
        # rotate the user's token every so often
@copy_current_request_context
def verificate():
mT = MySQLToken()
tokenValue = generateToken()
t = Token(tokenValue,datetime.datetime.now(),user.token)
            # update the token value in the database
mT.update(t)
        # start the endless token-rotation cycle
g.user.setFunction(verificate)
g.user.start()
return render_template('home.html',user=user)
if request.method == 'POST':
        # token generation
tokenValue = generateToken()
t = Token(tokenValue,datetime.datetime.now(),0)
email = request.form['username']
        # look up the user by email (entered earlier)
user = m.getOne(email)
        if user is not None:
            # password verification
if user.password == request.form['password']:
t.id=user.token
mT.update(t)
session['user'] = user.id
response = make_response(render_template('home.html',user=user))
                # cookie expiration (it is not possible to set an expiration shorter than 0.3 days)
expireDate = t.date + datetime.timedelta(days=0.3)
response.set_cookie("token",tokenValue,expires = expireDate)
                # rotate the user's token periodically
@copy_current_request_context
def verificate():
mT = MySQLToken()
tokenValue = generateToken()
t = Token(tokenValue,datetime.datetime.now(),user.token)
mT.update(t)
g.user.setFunction(verificate)
g.user.start()
return response
flash('Datos ingresados incorrectos')
return render_template('sign_in.html')
return render_template('sign_in.html')
@auth_bp.route('/register', methods=('GET', 'POST'))
def register():
m = MySQLUser()
if request.method == 'POST':
        # token generation
tokenValue = generateToken()
t = Token(tokenValue,datetime.datetime.now(),0)
        # user creation
user = User(request.form['username'],request.form['password'],t,'0')
if m.create(user):
session['user'] = user.id
response = make_response(render_template('home.html',user=user))
            # cookie expiration (it is not possible to set an expiration shorter than 0.3 days)
expireDate = t.date + datetime.timedelta(days=0.3)
response.set_cookie("token",tokenValue, expires=expireDate)
#return 'Email: {} \nPassword: {}'.format(user.email,user.password)
return response
flash('Email ya creado')
return render_template('register.html')
@auth_bp.route('/logout')
def logout():
response = make_response(render_template('sign_in.html'))
response.set_cookie("token",'')
return response
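# Hypothetical smoke test of the routes above (the URL prefix depends on how auth_bp
# is registered, so adjust the paths to your deployment):
#   POST /register with form fields 'username' and 'password' -> creates the user and sets a 'token' cookie
#   POST /sign_in  with the same fields                        -> rotates the token and renders home.html
#   GET  /logout                                               -> clears the 'token' cookie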
``` |
{
"source": "jorgearojas25/PythonExamples",
"score": 4
} |
#### File: PythonExamples/Characters/compare.py
```python
def compararPalabras(palabra1, palabra2):
indice = 0
incorrecto = "Las palabras no son iguales"
correcto = "Las palabras son iguales"
if len(palabra1) != len(palabra2):
return incorrecto
while indice < len(palabra1):
letraPalabra1 = palabra1[indice]
letraPalabra2 = palabra2[indice]
if letraPalabra1 != letraPalabra2:
return incorrecto
indice += 1
return correcto
print(compararPalabras(input('Ingrese una palabra: '), input('Ingrese otra palabra: ')))
```
#### File: PythonExamples/Collections/oddList.py
```python
def crearLista():
lista = []
n = 1
while len(lista) < 20:
if n % 2 == 0:
lista.append(n)
n += 1
return lista
def sumarLista(lista):
suma = 0
for n in lista:
suma += n
return suma
print('la suma de los primeros 20 pares es: ' + str(sumarLista(crearLista())))
```
#### File: PythonExamples/Functions/maxmin.py
```python
def leerNumero():
return float(input('Ingrese un numero: '))
def numeroMayor(numero1, numero2):
if numero1 > numero2:
return numero1
return numero2
def numeroMenor( numero1, numero2):
if numero1>numero2:
return numero2
return numero1
if __name__ == "__main__":
valor1 = leerNumero()
valor2 = leerNumero()
print("El numero mayor es: " + str(numeroMayor(valor1, valor2)))
print("El numero menor es: " + str(numeroMenor(valor1, valor2)))
``` |
{
"source": "jorgeartware/key-mon",
"score": 2
} |
#### File: src/keymon/key_mon.py
```python
__author__ = '<NAME> (<EMAIL>)'
__version__ = '1.18'
import locale
import logging
import pygtk
pygtk.require('2.0')
import gettext
import gobject
import gtk
import os
import sys
import time
try:
import xlib
except ImportError:
print 'Error: Missing xlib, run sudo apt-get install python-xlib'
sys.exit(-1)
import options
import lazy_pixbuf_creator
import mod_mapper
import settings
import shaped_window
import two_state_image
from ConfigParser import SafeConfigParser
gettext.install('key-mon', 'locale')
def fix_svg_key_closure(fname, from_tos):
"""Create a closure to modify the key.
Args:
from_tos: list of from, to pairs for search replace.
Returns:
A bound function which returns the file fname with modifications.
"""
def fix_svg_key():
"""Given an SVG file return the SVG text fixed."""
logging.debug('Read file %r', fname)
fin = open(fname)
fbytes = fin.read()
fin.close()
for fin, t in from_tos:
# Quick XML escape fix
      t = t.replace('<', '&lt;')
fbytes = fbytes.replace(fin, t)
return fbytes
return fix_svg_key
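# Illustrative (hypothetical) use: fix_svg_key_closure('one-char-template.svg', [('&amp;', 'A')])
# returns a callable that, when invoked, reads the template and yields its SVG text with the
# '&amp;' placeholder replaced by 'A' (see the KEY_* entries built further below).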
def cstrf(func):
"""Change locale before using str function"""
OLD_CTYPE = locale.getlocale(locale.LC_CTYPE)
locale.setlocale(locale.LC_CTYPE, 'C')
s = func()
locale.setlocale(locale.LC_CTYPE, OLD_CTYPE)
return s
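# Example: cstrf('SHIFT'.lower) returns 'shift' regardless of the active locale, which keeps
# option lookups such as get_option(cstrf(img.lower)) stable (see KeyMon.__init__ below).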
class KeyMon:
"""main KeyMon window class."""
def __init__(self, options):
"""Create the Key Mon window.
Options dict:
scale: float 1.0 is default which means normal size.
meta: boolean show the meta (windows key)
kbd_file: string Use the kbd file given.
emulate_middle: Emulate the middle mouse button.
theme: Name of the theme to use to draw keys
"""
settings.SettingsDialog.register()
self.btns = ['MOUSE', 'BTN_RIGHT', 'BTN_MIDDLE', 'BTN_MIDDLERIGHT',
'BTN_LEFT', 'BTN_LEFTRIGHT', 'BTN_LEFTMIDDLE',
'BTN_LEFTMIDDLERIGHT']
self.options = options
self.pathname = os.path.dirname(os.path.abspath(__file__))
if self.options.scale < 1.0:
self.svg_size = '-small'
else:
self.svg_size = ''
# Make lint happy by defining these.
self.hbox = None
self.window = None
self.event_box = None
self.mouse_indicator_win = None
self.key_image = None
self.buttons = None
self.no_press_timer = None
self.move_dragged = False
self.shape_mask_current = None
self.shape_mask_cache = {}
self.MODS = ['SHIFT', 'CTRL', 'META', 'ALT']
self.IMAGES = ['MOUSE'] + self.MODS
self.images = dict([(img, None) for img in self.IMAGES])
self.enabled = dict([(img, self.get_option(cstrf(img.lower))) for img in self.IMAGES])
self.options.kbd_files = settings.get_kbd_files()
self.modmap = mod_mapper.safely_read_mod_map(self.options.kbd_file, self.options.kbd_files)
self.name_fnames = self.create_names_to_fnames()
self.devices = xlib.XEvents()
self.devices.start()
self.pixbufs = lazy_pixbuf_creator.LazyPixbufCreator(self.name_fnames,
self.options.scale)
self.create_window()
self.reset_no_press_timer()
def get_option(self, attr):
"""Shorthand for getattr(self.options, attr)"""
return getattr(self.options, attr)
def do_screenshot(self):
"""Create a screenshot showing some keys."""
for key in self.options.screenshot.split(','):
try:
if key == 'KEY_EMPTY':
continue
if key.startswith('KEY_'):
key_info = self.modmap.get_from_name(key)
if not key_info:
print 'Key %s not found' % key
self.destroy(None)
return
scancode = key_info[0]
event = xlib.XEvent('EV_KEY', scancode=scancode, code=key, value=1)
elif key.startswith('BTN_'):
event = xlib.XEvent('EV_KEY', scancode=0, code=key, value=1)
self.handle_event(event)
while gtk.events_pending():
gtk.main_iteration(False)
time.sleep(0.1)
except Exception, exp:
print exp
while gtk.events_pending():
gtk.main_iteration(False)
time.sleep(0.1)
win = self.window
x, y = win.get_position()
w, h = win.get_size()
screenshot = gtk.gdk.Pixbuf.get_from_drawable(
gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, True, 8, w, h),
gtk.gdk.get_default_root_window(),
gtk.gdk.colormap_get_system(),
x, y, 0, 0, w, h)
fname = 'screenshot.png'
screenshot.save(fname, 'png')
print 'Saved screenshot %r' % fname
self.destroy(None)
def create_names_to_fnames(self):
"""Give a name to images."""
if self.options.scale < 1.0:
self.svg_size = '-small'
else:
self.svg_size = ''
ftn = {
'MOUSE': [self.svg_name('mouse'),],
'BTN_MIDDLE': [self.svg_name('mouse'), self.svg_name('middle-mouse')],
'SCROLL_UP': [self.svg_name('mouse'), self.svg_name('scroll-up-mouse')],
'SCROLL_DOWN': [self.svg_name('mouse'), self.svg_name('scroll-dn-mouse')],
'REL_LEFT': [self.svg_name('mouse'), self.svg_name('sroll-lft-mouse')],
'REL_RIGHT': [self.svg_name('mouse'), self.svg_name('scroll-rgt-mouse')],
'SHIFT': [self.svg_name('shift')],
'SHIFT_EMPTY': [self.svg_name('shift'), self.svg_name('whiteout-72')],
'CTRL': [self.svg_name('ctrl')],
'CTRL_EMPTY': [self.svg_name('ctrl'), self.svg_name('whiteout-58')],
'META': [self.svg_name('meta'), self.svg_name('meta')],
'META_EMPTY': [self.svg_name('meta'), self.svg_name('whiteout-58')],
'ALT': [self.svg_name('alt')],
'ALT_EMPTY': [self.svg_name('alt'), self.svg_name('whiteout-58')],
'ALTGR': [self.svg_name('altgr')],
'ALTGR_EMPTY': [self.svg_name('altgr'), self.svg_name('whiteout-58')],
'KEY_EMPTY': [
            fix_svg_key_closure(self.svg_name('one-char-template'), [('&amp;', '')]),
self.svg_name('whiteout-48')],
'BTN_LEFTRIGHT': [
self.svg_name('mouse'), self.svg_name('left-mouse'),
self.svg_name('right-mouse')],
'BTN_LEFTMIDDLERIGHT': [
self.svg_name('mouse'), self.svg_name('left-mouse'),
self.svg_name('middle-mouse'), self.svg_name('right-mouse')],
}
if self.options.swap_buttons:
# swap the meaning of left and right
left_str = 'right'
right_str = 'left'
else:
left_str = 'left'
right_str = 'right'
ftn.update({
'BTN_RIGHT': [self.svg_name('mouse'),
self.svg_name('%s-mouse' % right_str)],
'BTN_LEFT': [self.svg_name('mouse'),
self.svg_name('%s-mouse' % left_str)],
'BTN_LEFTMIDDLE': [
self.svg_name('mouse'), self.svg_name('%s-mouse' % left_str),
self.svg_name('middle-mouse')],
'BTN_MIDDLERIGHT': [
self.svg_name('mouse'), self.svg_name('middle-mouse'),
self.svg_name('%s-mouse' % right_str)],
})
if self.options.scale >= 1.0:
ftn.update({
'KEY_SPACE': [
fix_svg_key_closure(self.svg_name('two-line-wide'),
[('TOP', 'Space'), ('BOTTOM', '')])],
'KEY_TAB': [
fix_svg_key_closure(self.svg_name('two-line-wide'),
[('TOP', 'Tab'), ('BOTTOM', u'\u21B9')])],
'KEY_BACKSPACE': [
fix_svg_key_closure(self.svg_name('two-line-wide'),
[('TOP', 'Back'), ('BOTTOM', u'\u21fd')])],
'KEY_RETURN': [
fix_svg_key_closure(self.svg_name('two-line-wide'),
[('TOP', 'Enter'), ('BOTTOM', u'\u23CE')])],
'KEY_CAPS_LOCK': [
fix_svg_key_closure(self.svg_name('two-line-wide'),
[('TOP', 'Capslock'), ('BOTTOM', '')])],
'KEY_MULTI_KEY': [
fix_svg_key_closure(self.svg_name('two-line-wide'),
[('TOP', 'Compose'), ('BOTTOM', '')])],
})
else:
ftn.update({
'KEY_SPACE': [
            fix_svg_key_closure(self.svg_name('one-line-wide'), [('&amp;', 'Space')])],
'KEY_TAB': [
            fix_svg_key_closure(self.svg_name('one-line-wide'), [('&amp;', 'Tab')])],
'KEY_BACKSPACE': [
            fix_svg_key_closure(self.svg_name('one-line-wide'), [('&amp;', 'Back')])],
'KEY_RETURN': [
            fix_svg_key_closure(self.svg_name('one-line-wide'), [('&amp;', 'Enter')])],
'KEY_CAPS_LOCK': [
            fix_svg_key_closure(self.svg_name('one-line-wide'), [('&amp;', 'Capslck')])],
'KEY_MULTI_KEY': [
            fix_svg_key_closure(self.svg_name('one-line-wide'), [('&amp;', 'Compose')])],
})
return ftn
def create_window(self):
"""Create the main window."""
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.set_resizable(False)
self.window.set_title('Keyboard Status Monitor')
width, height = 30 * self.options.scale, 48 * self.options.scale
self.window.set_default_size(int(width), int(height))
self.window.set_decorated(self.options.decorated)
self.mouse_indicator_win = shaped_window.ShapedWindow(
self.svg_name('mouse-indicator'),
timeout=self.options.visible_click_timeout)
self.mouse_follower_win = shaped_window.ShapedWindow(
self.svg_name('mouse-follower'))
if self.options.follow_mouse:
self.mouse_follower_win.show()
self.window.set_opacity(self.options.opacity)
self.window.set_keep_above(True)
self.event_box = gtk.EventBox()
self.window.add(self.event_box)
self.event_box.show()
self.create_images()
self.hbox = gtk.HBox(False, 0)
self.event_box.add(self.hbox)
self.layout_boxes()
self.hbox.show()
self.add_events()
self.set_accept_focus(False)
self.window.set_skip_taskbar_hint(True)
old_x = self.options.x_pos
old_y = self.options.y_pos
if old_x != -1 and old_y != -1 and old_x and old_y:
self.window.move(old_x, old_y)
self.window.show()
def update_shape_mask(self, *unused_args, **kwargs):
if not self.options.backgroundless:
return
force = kwargs.get('force', False)
btns = [btn for btn in self.buttons if btn.get_visible()]
# Generate id to see if current mask needs to be updated, which is a tuple
# of allocation of buttons.
cache_id = tuple(tuple(btn.get_allocation()) for btn in btns)
if cache_id == self.shape_mask_current and not force:
return
# Try to find existing mask in cache
# TODO limit number of cached masks
shape_mask = self.shape_mask_cache.get(cache_id, None)
if shape_mask and not force:
self.window.shape_combine_mask(shape_mask, 0, 0)
self.shape_mask_current = cache_id
return
_, _, width, height = self.window.get_allocation()
masks = [self.pixbufs.get(btn.current).render_pixmap_and_mask()[1] \
for btn in btns]
shape_mask = gtk.gdk.Pixmap(None, width, height, masks[0].get_depth())
gc = gtk.gdk.GC(shape_mask)
# Initialize the mask just in case masks of buttons can't fill the window,
# if that happens, some artifacts will be seen usually at right edge.
gc.set_foreground(
gtk.gdk.Color(pixel=0) if self.options.backgroundless else \
gtk.gdk.Color(pixel=1))
shape_mask.draw_rectangle(gc, True, 0, 0, width, height)
for btn_allocation, mask in zip(cache_id, masks):
# Don't create mask until every image is allocated
if btn_allocation[0] == -1:
return
shape_mask.draw_drawable(gc, mask, 0, 0, *btn_allocation)
self.window.shape_combine_mask(shape_mask, 0, 0)
self.shape_mask_current = cache_id
self.shape_mask_cache[cache_id] = shape_mask
def create_images(self):
self.images['MOUSE'] = two_state_image.TwoStateImage(self.pixbufs, 'MOUSE')
for img in self.MODS:
self.images[img] = two_state_image.TwoStateImage(
self.pixbufs, img + '_EMPTY', self.enabled[img])
self.create_buttons()
def create_buttons(self):
self.buttons = list(self.images[img] for img in self.IMAGES)
for _ in range(self.options.old_keys):
key_image = two_state_image.TwoStateImage(self.pixbufs, 'KEY_EMPTY')
self.buttons.append(key_image)
self.key_image = two_state_image.TwoStateImage(self.pixbufs, 'KEY_EMPTY')
self.buttons.append(self.key_image)
for but in self.buttons:
if but.normal == 'MOUSE':
but.timeout_secs = self.options.mouse_timeout
else:
but.timeout_secs = self.options.key_timeout
but.connect('size_allocate', self.update_shape_mask)
def layout_boxes(self):
for child in self.hbox.get_children():
self.hbox.remove(child)
for img in self.IMAGES:
if not self.enabled[img]:
self.images[img].hide()
self.hbox.pack_start(self.images[img], False, False, 0)
prev_key_image = None
for key_image in self.buttons[-(self.options.old_keys + 1):-1]:
# key_image.hide()
#key_image.timeout_secs = 0.5
key_image.defer_to = prev_key_image
self.hbox.pack_start(key_image, True, True, 0)
prev_key_image = key_image
# This must be after the loop above.
#self.key_image.timeout_secs = 0.5
self.key_image.defer_to = prev_key_image
self.hbox.pack_start(self.key_image, True, True, 0)
def svg_name(self, fname):
"""Return an svg filename given the theme, system."""
themepath = self.options.themes[self.options.theme][1]
fullname = os.path.join(themepath, '%s%s.svg' % (fname, self.svg_size))
if self.svg_size and not os.path.exists(fullname):
# Small not found, defaulting to large size
fullname = os.path.join(themepath, '%s.svg' % fname)
return fullname
def add_events(self):
"""Add events for the window to listen to."""
self.window.connect('destroy', self.destroy)
self.window.connect('button-press-event', self.button_pressed)
self.window.connect('button-release-event', self.button_released)
self.window.connect('leave-notify-event', self.pointer_leave)
self.event_box.connect('button_release_event', self.right_click_handler)
accelgroup = gtk.AccelGroup()
key, modifier = gtk.accelerator_parse('<Control>q')
accelgroup.connect_group(key, modifier, gtk.ACCEL_VISIBLE, self.quit_program)
key, modifier = gtk.accelerator_parse('<Control>s')
accelgroup.connect_group(key, modifier, gtk.ACCEL_VISIBLE, self.show_settings_dlg)
self.window.add_accel_group(accelgroup)
if self.options.screenshot:
gobject.timeout_add(700, self.do_screenshot)
return
gobject.idle_add(self.on_idle)
def button_released(self, unused_widget, evt):
"""A mouse button was released."""
if evt.button == 1:
self.move_dragged = None
return True
def button_pressed(self, widget, evt):
"""A mouse button was pressed."""
self.set_accept_focus(True)
if evt.button == 1:
self.move_dragged = widget.get_pointer()
self.window.set_opacity(self.options.opacity)
# remove no_press_timer
if self.no_press_timer:
gobject.source_remove(self.no_press_timer)
self.no_press_timer = None
return True
def pointer_leave(self, unused_widget, unused_evt):
self.set_accept_focus(False)
def set_accept_focus(self, accept_focus=True):
self.window.set_accept_focus(accept_focus)
if accept_focus:
logging.debug('window now accepts focus')
else:
logging.debug('window now does not accept focus')
def _window_moved(self):
"""The window has moved position, save it."""
if not self.move_dragged:
return
old_p = self.move_dragged
new_p = self.window.get_pointer()
x, y = self.window.get_position()
x, y = x + new_p[0] - old_p[0], y + new_p[1] - old_p[1]
self.window.move(x, y)
logging.info('Moved window to %d, %d' % (x, y))
self.options.x_pos = x
self.options.y_pos = y
def on_idle(self):
"""Check for events on idle."""
event = self.devices.next_event()
try:
if event:
self.handle_event(event)
else:
for button in self.buttons:
button.empty_event()
time.sleep(0.001)
except KeyboardInterrupt:
self.quit_program()
return False
return True # continue calling
def handle_event(self, event):
"""Handle an X event."""
if event.type == 'EV_MOV':
if self.mouse_indicator_win.get_property('visible'):
self.mouse_indicator_win.center_on_cursor(*event.value)
if self.mouse_follower_win.get_property('visible'):
self.mouse_follower_win.center_on_cursor(*event.value)
if self.move_dragged:
self._window_moved()
elif event.type == 'EV_KEY' and event.value in (0, 1):
if type(event.code) == str:
if event.code.startswith('KEY'):
code_num = event.scancode
self.handle_key(code_num, event.code, event.value)
elif event.code.startswith('BTN'):
self.handle_mouse_button(event.code, event.value)
if not self.move_dragged:
self.reset_no_press_timer()
elif event.type.startswith('EV_REL') and event.code == 'REL_WHEEL':
self.handle_mouse_scroll(event.value, event.value)
elif event.code.startswith('REL'):
self.handle_mouse_scroll(event.value, event.value)
def reset_no_press_timer(self):
"""Initialize no_press_timer"""
if not self.options.no_press_fadeout:
return
logging.debug('Resetting no_press_timer')
if not self.window.get_property('visible'):
self.window.move(self.options.x_pos, self.options.y_pos)
self.window.show()
self.window.set_opacity(self.options.opacity)
if self.no_press_timer:
gobject.source_remove(self.no_press_timer)
self.no_press_timer = None
self.no_press_timer = gobject.timeout_add(int(self.options.no_press_fadeout * 1000), self.no_press_fadeout)
def no_press_fadeout(self, begin=True):
"""Fadeout the window in a second
Args:
begin: indicate if this timeout is requested by handle_event.
"""
opacity = self.window.get_opacity() - self.options.opacity / 10.0
if opacity < 0.0:
opacity = 0.0;
logging.debug('Set opacity = %f' % opacity)
self.window.set_opacity(opacity)
if opacity == 0.0:
self.window.hide()
# No need to fade out more
self.no_press_timer = None
return False
if begin:
# Recreate a new timer with 0.1 seccond interval
self.no_press_timer = gobject.timeout_add(100, self.no_press_fadeout)
# The current self.options.no_press_fadeout interval will not be timed
# out again.
return False
def _show_down_key(self, name):
"""Show the down key.
Normally True, unless combo is set.
Args:
name: name of the key being held down.
Returns:
True if the key should be shown
"""
if not self.options.only_combo:
return True
if self.is_shift_code(name):
return True
if (any(self.images[img].is_pressed() for img in self.MODS)):
return True
return False
def _handle_event(self, image, name, code):
"""Handle an event given image and code."""
image.really_pressed = code == 1
if code == 1:
if self._show_down_key(name):
logging.debug('Switch to %s, code %s' % (name, code))
image.switch_to(name)
return
# on key up
if self.is_shift_code(name):
# shift up is always shown
if not self.options.sticky_mode:
image.switch_to_default()
return
else:
for img in self.MODS:
self.images[img].reset_time_if_pressed()
image.switch_to_default()
def is_shift_code(self, code):
if code in ('SHIFT', 'ALT', 'ALTGR', 'CTRL', 'META'):
return True
return False
def handle_key(self, scan_code, xlib_name, value):
"""Handle a keyboard event."""
code, medium_name, short_name = self.modmap.get_and_check(scan_code,
xlib_name)
if not code:
logging.info('No mapping for scan_code %s', scan_code)
return
if self.options.scale < 1.0 and short_name:
medium_name = short_name
logging.debug('Scan code %s, Key %s pressed = %r', scan_code,
code, medium_name)
if code in self.name_fnames:
self._handle_event(self.key_image, code, value)
return
for keysym, img in (('KEY_SHIFT', 'SHIFT'), ('KEY_CONTROL', 'CTRL'),
('KEY_ALT', 'ALT'), ('KEY_ISO_LEVEL3_SHIFT', 'ALT'),
('KEY_SUPER', 'META')):
if code.startswith(keysym):
if self.enabled[img]:
if keysym == 'KEY_ISO_LEVEL3_SHIFT':
self._handle_event(self.images['ALT'], 'ALTGR', value)
else:
self._handle_event(self.images[img], img, value)
return
if code.startswith('KEY_KP'):
letter = medium_name
if code not in self.name_fnames:
template = 'one-char-numpad-template'
self.name_fnames[code] = [
            fix_svg_key_closure(self.svg_name(template), [('&amp;', letter)])]
self._handle_event(self.key_image, code, value)
return
if code.startswith('KEY_'):
letter = medium_name
if code not in self.name_fnames:
logging.debug('code not in %s', code)
if len(letter) == 1:
template = 'one-char-template'
else:
template = 'multi-char-template'
self.name_fnames[code] = [
            fix_svg_key_closure(self.svg_name(template), [('&amp;', letter)])]
else:
logging.debug('code in %s', code)
self._handle_event(self.key_image, code, value)
return
def handle_mouse_button(self, code, value):
"""Handle the mouse button event."""
if self.enabled['MOUSE']:
if code in self.btns:
n_image = 0
n_code = 0
for i, btn in enumerate(self.btns):
if btn == code:
n_code = i
if btn == self.images['MOUSE'].current:
n_image = i
if self.options.emulate_middle and ((self.images['MOUSE'].current == 'BTN_LEFT'
and code == 'BTN_RIGHT') or
(self.images['MOUSE'].current == 'BTN_RIGHT' and code == 'BTN_LEFT')):
code = 'BTN_MIDDLE'
elif value == 0 and n_code != n_image:
code = self.btns[n_image - n_code]
elif value == 1 and n_image:
code = self.btns[n_image | n_code]
elif code not in self.name_fnames:
btn_num = code.replace('BTN_', '')
self.name_fnames[code] = [
fix_svg_key_closure(self.svg_name('mouse'),
                [('>&#8203;', '>' + btn_num)])]
self._handle_event(self.images['MOUSE'], code, value)
if self.options.visible_click:
if value == 1:
self.mouse_indicator_win.center_on_cursor()
self.mouse_indicator_win.maybe_show()
else:
self.mouse_indicator_win.fade_away()
return True
def handle_mouse_scroll(self, direction, unused_value):
"""Handle the mouse scroll button event."""
if not self.enabled['MOUSE']:
return
if direction == 'REL_RIGHT':
self._handle_event(self.images['MOUSE'], 'REL_RIGHT', 1)
elif direction == 'REL_LEFT':
self._handle_event(self.images['MOUSE'], 'REL_LEFT', 1)
elif direction > 0:
self._handle_event(self.images['MOUSE'], 'SCROLL_UP', 1)
elif direction < 0:
self._handle_event(self.images['MOUSE'], 'SCROLL_DOWN', 1)
self.images['MOUSE'].switch_to_default()
return True
def quit_program(self, *unused_args):
"""Quit the program."""
self.devices.stop_listening()
self.destroy(None)
def destroy(self, unused_widget, unused_data=None):
"""Also quit the program."""
self.devices.stop_listening()
self.options.save()
gtk.main_quit()
def right_click_handler(self, unused_widget, event):
"""Handle the right click button and show a menu."""
if event.button != 3:
return
menu = self.create_context_menu()
menu.show()
menu.popup(None, None, None, event.button, event.time)
def create_context_menu(self):
"""Create a context menu on right click."""
menu = gtk.Menu()
toggle_chrome = gtk.CheckMenuItem(_('Window _Chrome'))
toggle_chrome.set_active(self.window.get_decorated())
toggle_chrome.connect_object('activate', self.toggle_chrome,
self.window.get_decorated())
toggle_chrome.show()
menu.append(toggle_chrome)
settings_click = gtk.MenuItem(_('_Settings...\tCtrl-S'))
settings_click.connect_object('activate', self.show_settings_dlg, None)
settings_click.show()
menu.append(settings_click)
about_click = gtk.MenuItem(_('_About...'))
about_click.connect_object('activate', self.show_about_dlg, None)
about_click.show()
menu.append(about_click)
quitcmd = gtk.MenuItem(_('_Quit\tCtrl-Q'))
quitcmd.connect_object('activate', self.destroy, None)
quitcmd.show()
menu.append(quitcmd)
return menu
def toggle_chrome(self, current):
"""Toggle whether the window has chrome or not."""
self.window.set_decorated(not current)
self.options.decorated = not self.options.decorated
def show_settings_dlg(self, *unused_args):
"""Show the settings dialog."""
dlg = settings.SettingsDialog(self.window, self.options)
dlg.connect('settings-changed', self.settings_changed)
dlg.show_all()
dlg.run()
dlg.destroy()
def settings_changed(self, unused_dlg):
"""Event received from the settings dialog."""
for img in self.IMAGES:
self._toggle_a_key(self.images[img], img, self.get_option(cstrf(img.lower)))
self.create_buttons()
self.layout_boxes()
self.mouse_indicator_win.hide()
self.mouse_indicator_win.timeout = self.options.visible_click_timeout
self.window.set_decorated(self.options.decorated)
self.name_fnames = self.create_names_to_fnames()
self.pixbufs.reset_all(self.name_fnames, self.options.scale)
for but in self.buttons:
if but.normal != 'KEY_EMPTY':
but.reset_image(self.enabled[but.normal.replace('_EMPTY', '')])
else:
but.reset_image()
if but.normal == 'MOUSE':
but.timeout_secs = self.options.mouse_timeout
else:
but.timeout_secs = self.options.key_timeout
# all this to get it to resize smaller
x, y = self.window.get_position()
self.hbox.resize_children()
self.window.resize_children()
self.window.reshow_with_initial_size()
self.hbox.resize_children()
self.event_box.resize_children()
self.window.resize_children()
self.window.move(x, y)
self.update_shape_mask(force=True)
# reload keymap
self.modmap = mod_mapper.safely_read_mod_map(
self.options.kbd_file, self.options.kbd_files)
def _toggle_a_key(self, image, name, show):
"""Toggle show/hide a key."""
if self.enabled[name] == show:
return
if show:
image.showit = True
self.enabled[name] = True
image.switch_to_default()
else:
image.showit = False
self.enabled[name] = False
image.hide()
def show_about_dlg(self, *_):
dlg = gtk.AboutDialog()
# Find the logo file
logo_paths = (os.path.join(self.pathname, '../../icons'),)
logo_paths += tuple(logo_path + '/share/pixmaps' for logo_path in (
os.path.expanduser('~'),
'/usr', '/usr/local', '/opt/local',
))
logo_paths = [logo_path + '/key-mon.xpm' for logo_path in logo_paths]
for logo_path in logo_paths:
if os.path.exists(logo_path):
dlg.set_logo(gtk.gdk.pixbuf_new_from_file(logo_path))
break
dlg.set_name('Keyboard Status Monitor')
dlg.set_program_name('key-mon')
dlg.set_website('http://code.google.com/p/key-mon/')
dlg.set_version(__version__)
dlg.set_authors([
__author__,
'<NAME>',
'<NAME>',
'<NAME>',
])
dlg.set_license('''Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.''')
dlg.run()
dlg.destroy()
def show_version():
"""Show the version number and author, used by help2man."""
print _('Keymon version %s.') % __version__
print _('Written by %s') % __author__
def create_options():
opts = options.Options()
opts.add_option(opt_short='-s', opt_long='--smaller', dest='smaller', default=False,
type='bool',
help=_('Make the dialog 25% smaller than normal.'))
opts.add_option(opt_short='-l', opt_long='--larger', dest='larger', default=False,
type='bool',
help=_('Make the dialog 25% larger than normal.'))
opts.add_option(opt_short='-m', opt_long='--meta', dest='meta', type='bool',
ini_group='buttons', ini_name='meta', default=None,
help=_('Show the meta (windows) key.'))
opts.add_option(opt_long='--mouse', dest='mouse', type='bool', default=True,
ini_group='buttons', ini_name='mouse',
help=_('Show the mouse.'))
opts.add_option(opt_long='--shift', dest='shift', type='bool', default=True,
ini_group='buttons', ini_name='shift',
help=_('Show shift key.'))
opts.add_option(opt_long='--ctrl', dest='ctrl', type='bool', default=True,
ini_group='buttons', ini_name='ctrl',
help=_('Show the ctrl key.'))
opts.add_option(opt_long='--alt', dest='alt', type='bool', default=True,
ini_group='buttons', ini_name='alt',
help=_('Show the alt key.'))
opts.add_option(opt_long='--scale', dest='scale', type='float', default=1.0,
ini_group='ui', ini_name='scale',
help=_('Scale the dialog. ex. 2.0 is 2 times larger, 0.5 is '
'half the size. Defaults to %default'))
opts.add_option(opt_long='--key-timeout', dest='key_timeout',
type='float', default=0.5,
ini_group='ui', ini_name='key_timeout',
help=_('Timeout before key returns to unpressed image. '
'Defaults to %default'))
opts.add_option(opt_long='--mouse-timeout', dest='mouse_timeout',
type='float', default=0.2,
ini_group='ui', ini_name='mouse_timeout',
help=_('Timeout before mouse returns to unpressed image. '
'Defaults to %default'))
opts.add_option(opt_long='--visible-click-timeout', dest='visible_click_timeout',
type='float', default=0.2,
ini_group='ui', ini_name='visible_click_timeout',
help=_('Timeout before highly visible click disappears. '
'Defaults to %default'))
opts.add_option(opt_long='--decorated', dest='decorated', type='bool',
ini_group='ui', ini_name='decorated',
default=False,
help=_('Show decoration'))
opts.add_option(opt_long='--backgroundless', dest='backgroundless', type='bool',
ini_group='ui', ini_name='backgroundless',
default=False,
help=_('Show only buttons'))
opts.add_option(opt_long='--no-press-fadeout', dest='no_press_fadeout',
type='float', default=0.0,
ini_group='ui', ini_name='no_press_fadeout',
help=_('Fadeout the window after a period with no key press. '
'Defaults to %default seconds (Experimental)'))
opts.add_option(opt_long='--only_combo', dest='only_combo', type='bool',
ini_group='ui', ini_name='only_combo',
default=False,
help=_('Show only key combos (ex. Control-A)'))
opts.add_option(opt_long='--sticky', dest='sticky_mode', type='bool',
ini_group='ui', ini_name='sticky_mode',
default=False,
help=_('Sticky mode'))
opts.add_option(opt_long='--visible_click', dest='visible_click', type='bool',
ini_group='ui', ini_name='visible-click',
default=False,
help=_('Show where you clicked'))
opts.add_option(opt_long='--follow_mouse', dest='follow_mouse', type='bool',
ini_group='ui', ini_name='follow-mouse',
default=False,
help=_('Show the mouse more visibly'))
opts.add_option(opt_long='--kbdfile', dest='kbd_file',
ini_group='devices', ini_name='map',
default=None,
help=_('Use this kbd filename.'))
opts.add_option(opt_long='--swap', dest='swap_buttons', type='bool',
default=False,
ini_group='devices', ini_name='swap_buttons',
help=_('Swap the mouse buttons.'))
opts.add_option(opt_long='--emulate-middle', dest='emulate_middle', type='bool',
default=False,
ini_group='devices', ini_name='emulate_middle',
help=_('When you press the left, and right mouse buttons at the same time, '
'it displays as a middle mouse button click. '))
opts.add_option(opt_short='-v', opt_long='--version', dest='version', type='bool',
help=_('Show version information and exit.'))
opts.add_option(opt_short='-t', opt_long='--theme', dest='theme', type='str',
ini_group='ui', ini_name='theme', default='classic',
help=_('The theme to use when drawing status images (ex. "-t apple").'))
opts.add_option(opt_long='--list-themes', dest='list_themes', type='bool',
help=_('List available themes'))
opts.add_option(opt_long='--old-keys', dest='old_keys', type='int',
ini_group='buttons', ini_name='old-keys',
help=_('How many historical keypresses to show (defaults to %default)'),
default=0)
opts.add_option(opt_long='--reset', dest='reset', type='bool',
help=_('Reset all options to their defaults.'),
default=None)
opts.add_option(opt_short=None, opt_long='--opacity', type='float',
dest='opacity', default=1.0, help='Opacity of window',
ini_group='ui', ini_name='opacity')
opts.add_option(opt_short=None, opt_long=None, type='int',
dest='x_pos', default=-1, help='Last X Position',
ini_group='position', ini_name='x')
opts.add_option(opt_short=None, opt_long=None, type='int',
dest='y_pos', default=-1, help='Last Y Position',
ini_group='position', ini_name='y')
opts.add_option_group(_('Developer Options'), _('These options are for developers.'))
opts.add_option(opt_long='--loglevel', dest='loglevel', type='str', default='',
help=_('Logging level'))
opts.add_option(opt_short='-d', opt_long='--debug', dest='debug', type='bool',
default=False,
help=_('Output debugging information. '
'Shorthand for --loglevel=debug'))
opts.add_option(opt_long='--screenshot', dest='screenshot', type='str', default='',
help=_('Create a "screenshot.png" and exit. '
'Pass a comma separated list of keys to simulate'
'(ex. "KEY_A,KEY_LEFTCTRL").'))
return opts
def main():
"""Run the program."""
# Check for --loglevel, --debug, we deal with them by ourselves because
# option parser also use logging.
loglevel = None
for idx, arg in enumerate(sys.argv):
if '--loglevel' in arg:
if '=' in arg:
loglevel = arg.split('=')[1]
else:
loglevel = sys.argv[idx + 1]
level = getattr(logging, loglevel.upper(), None)
if level is None:
raise ValueError('Invalid log level: %s' % loglevel)
loglevel = level
else:
if '--debug' in sys.argv or '-d' in sys.argv:
loglevel = logging.DEBUG
logging.basicConfig(
level=loglevel,
format='%(filename)s [%(lineno)d]: %(levelname)s %(message)s')
if loglevel is None:
# Disabling warning, info, debug messages
logging.disable(logging.WARNING)
opts = create_options()
opts.read_ini_file(os.path.join(settings.get_config_dir(), 'config'))
desc = _('Usage: %prog [Options...]')
opts.parse_args(desc, sys.argv)
if opts.version:
show_version()
sys.exit(0)
if opts.smaller:
opts.scale = 0.75
elif opts.larger:
opts.scale = 1.25
opts.themes = settings.get_themes()
if opts.list_themes:
print _('Available themes:')
print
theme_names = sorted(opts.themes)
name_len = max(len(name) for name in theme_names)
for theme in theme_names:
print (' - %%-%ds: %%s' % name_len) % (theme, opts.themes[theme][0])
raise SystemExit()
elif opts.theme and opts.theme not in opts.themes:
print _('Theme %r does not exist') % opts.theme
print
print _('Please make sure %r can be found in '
'one of the following directories:') % opts.theme
print
for theme_dir in settings.get_config_dirs('themes'):
print ' - %s' % theme_dir
sys.exit(-1)
if opts.reset:
print _('Resetting to defaults.')
opts.reset_to_defaults()
opts.save()
keymon = KeyMon(opts)
try:
gtk.main()
except KeyboardInterrupt:
keymon.quit_program()
if __name__ == '__main__':
#import cProfile
#cProfile.run('main()', 'keymonprof')
main()
``` |
{
"source": "jorgeaserrano/SQLAlchemy-Homework---Surfs-Up-",
"score": 2
} |
#### File: jorgeaserrano/SQLAlchemy-Homework---Surfs-Up-/app.py
```python
# import dependencies
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
#################################################
# Database Setup
#################################################
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect = True)
# Save reference to each table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from Python to the DB
session = Session(engine)
#################################################
# Flask Setup
#################################################
app=Flask(__name__)
#################################################
# Flask Routes
#################################################
#Have the home page return the information of the different routes
@app.route("/")
def intro():
"""List all apis"""
return (
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/<start><br/>"
f"/api/v1.0/<start>/<end>"
)
#################################################
@app.route("/api/v1.0/precipitation")
def precipitation():
"""Last Year of Percipitation Data"""
session = Session(engine)
# Find last date in database from Measurements
last_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()
# Convert last date string to date
last_date
# Perform a query to retrieve the data and precipitation scores
one_year_ago = session.query(Measurement.date).order_by(Measurement.date.desc()).first()
one_year_ago = dt.date(2017,8,23) - dt.timedelta(days=365)
# Design a Query to Retrieve the Last 12 Months of Precipitation Data Selecting Only the `date` and `prcp` Values
    prcp_data = session.query(Measurement.date, Measurement.prcp).\
        filter(Measurement.date >= one_year_ago).\
order_by(Measurement.date).all()
# Perform a Query to Retrieve the Data and Precipitation Scores
    all_scores = session.query(Measurement.date, Measurement.prcp).order_by(Measurement.date.desc()).all()
    # Return the results as a JSON object mapping each date to its precipitation value
    return jsonify({date: prcp for date, prcp in prcp_data})
@app.route("/api/v1.0/stations")
def stations():
"""List of Weather Stations"""
session = Session(engine)
# Select station names from stations table
active_stations = session.query(Measurement.station, func.count(Measurement.station)).\
group_by(Measurement.station).\
order_by(func.count(Measurement.station).desc()).all()
# Return JSONIFY List of Stations
return jsonify(active_stations)
@app.route("/api/v1.0/tobs")
def tobs():
"""Temperature Observations for Top Station for Last Year"""
session = Session(engine)
# Find last date in database from Measurements
last_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()
# Calculate date one year after last date using timedelta datetime function
one_year_ago = dt.date(2017,8,23) - dt.timedelta(days=365)
# Design a Query to Retrieve the Last 12 Months of Precipitation Data Selecting Only the `date` and `prcp` Values
prcp_data = session.query(Measurement.date, Measurement.prcp).\
filter(Measurement.date >= one_year_ago).\
order_by(Measurement.date).all()
# Perform a Query to Retrieve the Data and Precipitation Scores
all_scores = session.query(Measurement.date, Measurement.prcp).order_by(Measurement.date.desc()).all()
# Save the Query Results as a Pandas DataFrame and Set the Index to the Date Column & Sort the Dataframe Values by `date`
prcp_df = pd.DataFrame(prcp_data, columns=["Date","Precipitation"])
prcp_df.set_index("Date", inplace=True,)
prcp_df.head()
# Use Pandas to Calculate the Summary of the Precipitation Data
prcp_df.describe()
# Design a Query to show the number of Stations in the Dataset
station_count = session.query(Measurement.station).distinct().count()
station_count
# Design a Query to Find the Most Active Station(s)
# List the Station(s) and Count in Descending Order
# Which Station Had the Highest Number of Observations?
active_stations = session.query(Measurement.station, func.count(Measurement.station)).\
group_by(Measurement.station).\
order_by(func.count(Measurement.station).desc()).all()
# Query for the dates and temperature observations from a year from the last data point.
# Return a JSON list of Temperature Observations (tobs) for the previous year.
tobs_data = session.query(Measurement.tobs).\
filter(Measurement.date >= one_year_ago).\
filter(Measurement.station == "USC00519281").\
        order_by(Measurement.date).all()
    # Return a JSON list of the temperature observations for the most active station
    return jsonify([row[0] for row in tobs_data])
@app.route("/api/v1.0/<start>")
@app.route("/api/v1.0/<start>/<end>")
def stats(start=None, end=None):
session = Session(engine)
### Return TMIN, TAVG, TMAX###
sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
    # Calculate TMIN, TAVG, TMAX for dates from start (and up to end, when given)
    query = session.query(*sel).filter(Measurement.date >= start)
    if end is not None:
        query = query.filter(Measurement.date <= end)
    return jsonify([list(row) for row in query.all()])
# Calculate the total amount of rainfall per weather station for your trip dates using the previous year's matching dates.
# Sort this in descending order by precipitation amount and list the station, name, latitude, longitude, and elevation
query_columns = [Station.station, Station.name, Station.latitude,
Station.longitude, Station.elevation, func.sum(Measurement.prcp)]
results = session.query(*query_columns).\
filter(Measurement.station == Station.station).\
filter(Measurement.date >= first_date).\
filter(Measurement.date <= last_date).\
group_by(Station.name).order_by(func.sum(Measurement.prcp).desc()).all()
# Convert Query object to df
stations_table = pd.DataFrame(np.array(results))
# Rename the columns
stations_table = stations_table.rename(columns={0: "Station", 1: "Location",
    2: "Latitude", 3: "Longitude", 4: "Elevation", 5: "Total Precipitation"})
stations_table
## Set the start and end date for the trip
trip_dates=['08-05','08-06','08-07','08-08','08-09',
'08-10','08-11','08-12','08-13','08-14','08-15']
normal_temps=[]
def daily_normals(date):
sel = [func.min(Measurement.tobs),
func.round(func.avg(Measurement.tobs),2),
func.max(Measurement.tobs)]
return session.query(*sel).filter(func.strftime("%m-%d", Measurement.date) == date).all()
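# e.g. daily_normals("08-05") returns a list with a single (tmin, tavg, tmax) tuple,
# aggregated over every year in the dataset for that month-day (actual values depend on the data).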
# Collect the normals for each trip date using the daily_normals() helper above
for trip_date in trip_dates:
    normal_temps.append(daily_normals(trip_date)[0])
# Set the min_temp, avg_temp, high_temp
daily_normals_df = pd.DataFrame(normal_temps, columns=['min_temp', 'avg_temp', 'high_temp'],
    index=trip_dates)
daily_normals_df.index.name = 'Dates'
daily_normals_df
if __name__ == '__main__':
app.run(debug=True)
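# Hypothetical client-side check once the server is running (default Flask host/port assumed):
#   requests.get('http://127.0.0.1:5000/api/v1.0/precipitation').json()
#   requests.get('http://127.0.0.1:5000/api/v1.0/2016-08-23/2017-08-23').json()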
``` |
{
"source": "J-O-R-G-E/Automated-Lights",
"score": 3
} |
#### File: J-O-R-G-E/Automated-Lights/lights_server.py
```python
import RPi.GPIO as GPIO
import os
from http.server import BaseHTTPRequestHandler, HTTPServer
from time import sleep
host_name = ''
host_port = 8090
class MyServer(BaseHTTPRequestHandler):
""" A special implementation of BaseHTTPRequestHander for reading data from
and control GPIO of a Raspberry Pi
"""
def do_HEAD(self):
""" do_HEAD() can be tested use curl command
'curl -I http://server-ip-address:port'
"""
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self):
""" do_GET() can be tested using curl command
'curl http://server-ip-address:port'
"""
html = ""
self.do_HEAD()
if self.path=='/':
html = '''
<html>
<body style="width:960px; margin: 20px auto;">
<h1>Front Lights Home Page</h1>
<h3>Current GPU temperature is {}</h3>
</body>
</html>
'''
temp = os.popen("/opt/vc/bin/vcgencmd measure_temp").read()
self.wfile.write(html.format(temp[5:]).encode("utf-8"))
#output = subprocess.check_output("/opt/vc/bin/vcgencmd measure_temp", shell=True)
#outpu[5:-1]
return
elif self.path=='/on':
html = '''
<html>
<body style="width:960px; margin: 20px auto;">
<h1>Front Lights Server</h1>
<h3>Turning front lights on... <h3>
</body>
</html>
'''
pinNum = 23
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(pinNum,GPIO.OUT)
GPIO.output(pinNum,0)
#print("Turned On...")
sleep(1)
pinNum = 24
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(pinNum,GPIO.OUT)
GPIO.output(pinNum,0)
#print("Turned On...")
elif self.path=='/off':
            html = '''
            <html>
            <body style="width:960px; margin: 20px auto;">
            <h1>Front Lights Server</h1>
            <h3>Turning front lights off... </h3>
            </body>
            </html>
            '''
pinNum = 23
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(pinNum,GPIO.OUT)
GPIO.output(pinNum,1)
#print("Turned Off...")
sleep(1)
pinNum = 24
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(pinNum,GPIO.OUT)
GPIO.output(pinNum,1)
#print("Turned Off...")
elif self.path=='/status':
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(23, GPIO.OUT)
porch = not GPIO.input(23)
if not porch:
porch = "Off"
else:
porch = "On"
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(24, GPIO.OUT)
outside = not GPIO.input(24)
if not outside:
outside = "Off"
else:
outside = "On"
html = '''
<html>
<body style="width:960px; margin: 20px auto;">
<h1>Front Lights Home Page</h1>
<h3> Porch Lights: {}</h3>
<h3> Outside Lights: {}</h3>
</body>
</html>
'''
self.wfile.write(html.format(porch, outside).encode("utf-8"))
return
elif self.path=='/notify':
os.system("../camara/Notification/./camara_notification.py")
return
else:
            html = '''
            <html>
            <body style="width:960px; margin: 20px auto;">
            <h1>Front Lights Server</h1>
            <h3>ERROR: Invalid path... </h3>
            </body>
            </html>
            '''
#self.wfile.write(html.format(temp[5:], status).encode("utf-8"))
self.wfile.write(html.encode("utf-8"))
if __name__ == '__main__':
http_server = HTTPServer((host_name, host_port), MyServer)
print("Server Starts - %s:%s" % (host_name, host_port))
try:
http_server.serve_forever()
except KeyboardInterrupt:
http_server.server_close()
``` |
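A quick way to exercise the endpoints above from another machine (a sketch: the address below is a placeholder for the Pi's IP; only port 8090 comes from the server code):
```python
# Hypothetical client for the lights server; adjust BASE to your Raspberry Pi's address.
from urllib.request import urlopen

BASE = "http://192.168.1.50:8090"

for path in ("/status", "/on", "/status", "/off"):
    with urlopen(BASE + path) as resp:
        print(path, resp.status)
        print(resp.read().decode("utf-8"))
```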
{
"source": "jorgeavilacartes/dl-forecast",
"score": 3
} |
#### File: dl-forecast/data_selection/prepare_data.py
```python
import os
import time
import datetime
import json
import pandas as pd
import numpy as np
from pathlib import Path
from tqdm import tqdm
from typing import List, Optional
class PrepareData:
    """
    Clean a CSV table and extract its time series
    + Ensure continuous dates, filling unrecorded dates with 0
    + Split the time series according to the chosen identifier (e.g. id_producto, id_producto + cadena)
    + Save every generated time series under the name of its identifier in numpy format. Also save
    a json file with the list of timesteps and the feature names of each time series
    """
    def __init__(self, path_data: str, colname_datetime: str, colname_features: List[str], colname_id_time_series: str = None,):
        """
        + The data is loaded from 'path_data'.
        + 'colname_datetime' is the column that contains the dates.
        + One time series is created per distinct value of the 'colname_id_time_series' column. If it is None,
        the data is treated as a single time series.
        """
self.path_data = path_data
self.colname_datetime = colname_datetime
self.colname_features = colname_features
self.colname_id_time_series = colname_id_time_series
        self.time_series = {}  # Dictionary holding each generated time series by its id
def __call__(self,):
"Cargar los datos y generar las series de tiempo"
self.load_data() # Cargar datos
self.get_id_time_series() # Obtener id de cada serie de tiempo
self.get_timesteps() # Obtener rango de fechas
self.get_minmax() # Obtener minimos y maximos por feature
self.get_mean_std() # Obtener promedio y desv std por feature
print("Generando series de tiempo")
time.sleep(1)
for id_time_serie in tqdm(self.id_time_series):
self.get_time_serie(id_time_serie)
def load_data(self,):
"Cargar datos"
ALLOWED_FILES = [".csv"] # En caso de agregar mas formas de cargar. Ej: xlsx, pickle.
# Extension del archivo proporcionado
extension = os.path.splitext(self.path_data)[-1]
# Verificar si es uno de los archivos que podemos cargar
assert extension in set(ALLOWED_FILES), "Archivo debe ser uno de estos {}. El suyo '{}'".format(ALLOWED_FILES, extension)
# Cargar el archivo
if self._file_exists(filename = self.path_data):
self.data = pd.read_csv(self.path_data)
print("Archivo cargado desde {}".format(self.path_data))
def get_id_time_series(self,):
"Definir el identificador de cada serie de tiempo a generar"
self.colname_id = "ID_ts"
self.data[self.colname_id] = self.data[self.colname_id_time_series].apply(lambda row:
"_".join([ str(c) + "-" + str(r)
for c,r in
zip(self.colname_id_time_series,row) ]), axis=1)
# Total de series de tiempo que se van a extraer
self.id_time_series = list(set(self.data[self.colname_id].tolist()))
total_id = len(self.id_time_series)
print("Se encontraron {} series de tiempo con id {}.".format(total_id, self.colname_id))
def get_time_serie(self, id_time_serie):
"""Obtener serie de tiempo para un id, en el rango total de fechas.
Guardar la serie de tiempo generada en el atributo .time_series
"""
# Extraer datos de la serie de tiempo solicitada
cols = [self.colname_datetime]
cols.extend(self.colname_features)
time_serie = self.data.query("`ID_ts` == '{}'".format(id_time_serie))[cols].copy()
time_serie_by_date = {d.get(self.colname_datetime): [d.get(feature) for feature in self.colname_features] for d in time_serie.to_dict("records")}
# Extraer las fechas
dates_time_serie = list(time_serie_by_date.keys())
# Construir la serie de tiempo en el rango total de fechas
rows = []
for date in self.timesteps:
str_date = self.date_to_str(date)
if str_date in dates_time_serie:
date_values = time_serie_by_date.get(str_date)
#info_date = time_serie_by_date.get(str_date)
#date_values = info_date#[info_date for feature in self.colname_features]
else:
date_values = [0 for _ in self.colname_features]
rows.append(date_values)
self.time_series[id_time_serie] = np.array(rows)
def get_timesteps(self,):
"Obtener rango de fechas"
# Obtener la columna con todas las fechas
dates = self.data[self.colname_datetime].tolist()
# Transformar a datetime
dates = [self.str_to_date(date) for date in dates]
# Calcular fecha minima y maxima
self.min_date = min(dates)
self.max_date = max(dates)
# Obtener el listado de timesteps
n_days = (self.max_date-self.min_date).days + 1 # todos los dias incluidos inicial y final
self.timesteps = [ self.add_days(self.min_date, days) for days in range(n_days)]
print(f"Datos desde {self.date_to_str(self.min_date)} hasta {self.date_to_str(self.max_date)}, ({n_days} dias) ")
def get_minmax(self,):
self.list_min = self.data[self.colname_features].min(axis=0).tolist()
self.list_max = self.data[self.colname_features].max(axis=0).tolist()
def get_mean_std(self,):
self.list_mean = self.data[self.colname_features].mean(axis=0).tolist()
self.list_std = self.data[self.colname_features].std(axis=0).tolist()
def save(self,):
"""Guardar series de tiempo generadas como numpy y un archivo de
configuracion con los timesteps, features y paths a los numpy"""
folder = Path("time_series")
folder.mkdir(exist_ok=True)
folder.joinpath("numpy").mkdir(exist_ok=True)
print("Guardando series de tiempo")
time.sleep(1)
for name_ts, ts_array in tqdm(self.time_series.items()):
path_save = str(folder.joinpath("numpy/{}.npy".format(name_ts)))
np.save(path_save, ts_array)
time_series_config = dict(
features=self.colname_features,
timesteps=[self.date_to_str(ts) for ts in self.timesteps],
id_time_series=list(self.time_series.keys()),
basepath_time_series=str(folder.joinpath("numpy").absolute()),
list_min=self.list_min,
list_max=self.list_max,
list_mean=self.list_mean,
list_std=self.list_std
)
path_save_config = str(folder.joinpath("time_series_config.json"))
with open(path_save_config, "w", encoding="utf8") as fp:
json.dump(time_series_config, fp, ensure_ascii=False, indent=4)
print("Series de tiempo guardadas en {}".format(str(folder.absolute())))
@staticmethod
def _file_exists(filename):
"Verificar si el archivo proporcionado existe en memoria"
if os.path.exists(filename):
return True
else:
print("El archivo no existe. Revise si el directorio '{}' es correcto.".format(filename))
@staticmethod
def str_to_date(date):
"Transformar una fecha en formato str a date. Formato 'YYYY-MM-dd'"
if isinstance(date, str):
return datetime.date.fromisoformat(date)
else:
# TODO Comprobar correcto uso de raise y Exception
raise Exception("'date' debe ser un string de fecha con formato 'YYYY-MM-dd'")
@staticmethod
def date_to_str(date):
"Transformar un string de la forma 'YYYY-MM-dd' a objeto del tipo datetime.date(year, month, day)"
return date.isoformat()
@staticmethod
def add_days(date, days = 1):
"Agregar/quitar dias a una fecha en formato date"
assert isinstance(date, datetime.date), "'date' debe ser un objeto datetime.date"
return date + datetime.timedelta(days)
```
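A usage sketch for the class above (the CSV name and column names are hypothetical; adapt them to your data):
```python
# Build one series per (id_producto, cadena) pair from a hypothetical ventas.csv,
# then persist them as .npy files plus time_series/time_series_config.json.
preparer = PrepareData(
    path_data="ventas.csv",
    colname_datetime="fecha",
    colname_features=["unidades"],
    colname_id_time_series=["id_producto", "cadena"],
)
preparer()        # load the data, build the full date range and generate the series
preparer.save()   # write time_series/numpy/*.npy and the config json
```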
#### File: dl-forecast/model_training/preprocessing.py
```python
import functools
import random
from collections import OrderedDict
import numpy as np
from .pipeline import (
Pipeline,
register_in_pipeline,
)
@register_in_pipeline
def subtract_mean(time_serie, *, axis_signal=0):
    "Subtract the mean from each feature"
ts_mean = time_serie.mean(axis=axis_signal, keepdims=True)
return time_serie-ts_mean
@register_in_pipeline
def minmax(time_serie, *, axis_signal=0, list_min=[], list_max=[]):
ts_shape = time_serie.shape
if axis_signal == 0:
reshape_minmax = (1,ts_shape[1])
else:
reshape_minmax = (ts_shape[1],1)
list_min = np.array(list_min).reshape(reshape_minmax)
list_max = np.array(list_max).reshape(reshape_minmax)
    epsilon = 0.000001  # TODO: check whether this epsilon is actually needed
return (time_serie - list_min + epsilon) / (list_max-list_min)
@register_in_pipeline
def znormalization(time_serie, *, axis_signal=0, list_mean=[], list_std=[]):
ts_shape = time_serie.shape
if axis_signal == 0:
reshape = (1,ts_shape[1])
else:
reshape = (ts_shape[1],1)
list_mean = np.array(list_mean).reshape(reshape)
list_std = np.array(list_std).reshape(reshape)
return (time_serie - list_mean) / list_std
``` |
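A small sketch of the registered functions on a toy array (this assumes the local `pipeline.register_in_pipeline` decorator returns the function it registers, so the names remain directly callable):
```python
import numpy as np

# axis_signal=0: rows are timesteps, columns are features.
ts = np.array([[1.0, 10.0],
               [2.0, 20.0],
               [3.0, 30.0]])

centered = subtract_mean(ts, axis_signal=0)
scaled = minmax(ts, axis_signal=0, list_min=[1.0, 10.0], list_max=[3.0, 30.0])
zscored = znormalization(ts, axis_signal=0, list_mean=[2.0, 20.0], list_std=[1.0, 10.0])
```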
{
"source": "jorgeavilacartes/dl-supervised-learning-geof",
"score": 3
} |
#### File: dl-supervised-learning-geof/src/pipeline.py
```python
import json
from pathlib import Path
from collections import OrderedDict
from typing import List, Tuple, Optional
FUNCTIONS_PIPELINE = OrderedDict()
def register_in_pipeline(func):
    """Collect functions for the pipeline"""
    print(f"{func.__name__} registered in Pipeline")
    if func.__name__ not in FUNCTIONS_PIPELINE:
        FUNCTIONS_PIPELINE[func.__name__] = func
    else:
        raise Exception(f"Duplicated function with name {func.__name__}")
    return func  # return the function so the decorated name still refers to it
class Pipeline:
"""Build a pipeline of functions
Pipeline structure: ("func_name", args, kwargs) or ("func_name", kwargs)
x -> Pipeline(x) -> new_x
"""
VERSION=1
FUNCTIONS_PIPELINE = FUNCTIONS_PIPELINE
def __init__(self, pipeline: Optional[List[Tuple[str, dict]]] = None):
self.pipeline = pipeline if pipeline else []
def __call__(self, x):
"""Apply pipeline to the input 'x'"""
for pipe in self.pipeline:
func_name, *args, kwargs = pipe
assert isinstance(kwargs, dict), f"Wrong declaration in {func_name!r}. Must be (str, dict) or (str, tuple, dict)"
# apply preprocessing
if args:
# args and kwargs provided
x = self.apply(x, func_name, *args, **kwargs)
else:
# only kwargs provided
x = self.apply(x, func_name, **kwargs)
return x
@classmethod
def apply(cls, x, func, *args, **kwargs):
"""Compute func(x, *args, **kwargs)"""
if func in cls.FUNCTIONS_PIPELINE:
return cls.FUNCTIONS_PIPELINE[func](x, *args, **kwargs)
else:
raise TypeError(f"{func} not available")
def __gt__(self, add_pipe):
"""Add a pipe ("func_name", args, kwargs)/("func_name", kwargs) to the current pipeline"""
if self.is_available(add_pipe[0]):
print(f"adding {add_pipe[0]!r} to pipeline")
self.pipeline.append(add_pipe)
return self.__class__(self.pipeline)
else:
raise NotImplementedError(f"{add_pipe[0]!r} not available in Pipeline")
def is_available(self, func_name):
"""Return True if the function 'func_name' is available in Pipeline"""
return True if func_name in self.FUNCTIONS_PIPELINE else False
def asJSON(self, path_save=None):
path_save = Path(path_save) if path_save else Path("pipeline.json")
with open(path_save, "w", encoding="utf8") as fp:
json.dump(self.pipeline, fp, indent=4, ensure_ascii=False)
print(f"Pipeline configuration saved at {str(path_save)!r}")
def fromJSON(self, path_pipeline):
# Read pipeline
path_pipeline = Path(path_pipeline)
with open(path_pipeline, "r", encoding="utf8") as fp:
pipeline = json.load(fp)
# Corrobate that all functions are availables
available_functions = {pipe[0]: self.is_available(pipe[0])
for pipe in pipeline}
if not all(available_functions.values()):
print("Functions not availables:")
functions_not_availables = dict(filter(lambda item: item[0], available_functions.items()))
return [func_name for func_name, available in functions_not_availables.items()
if available is False]
self.pipeline = pipeline
print(f"Pipeline loaded from {str(path_pipeline)!r}")
``` |
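A short sketch of how the operator-overloaded builder is meant to be used (the `scale` function is hypothetical; any function registered with `@register_in_pipeline` works the same way):
```python
@register_in_pipeline
def scale(x, *, factor=1.0):
    return [v * factor for v in x]

pipe = Pipeline()
pipe = pipe > ("scale", {"factor": 2.0})   # each pipe is ("func_name", kwargs)
pipe = pipe > ("scale", {"factor": 0.5})

print(pipe([1, 2, 3]))        # -> [1.0, 2.0, 3.0]
pipe.asJSON("pipeline.json")  # persist the step list; reload later with fromJSON()
```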
{
"source": "jorgebaier/iic1103-s7-2016",
"score": 3
} |
#### File: iic1103-s7-2016/clase1018/genera_numeros.py
```python
import random
def busqueda_binaria(N,L):
inicio = 0
fin = len(L) - 1
while inicio <= fin:
mitad = (inicio+fin)//2
if L[mitad] == N:
return True
elif L[mitad] < N:
inicio = mitad + 1
else:
fin = mitad - 1
return False
def busqueda_lineal(N,L):
# return x in L
encontrado = False
for x in L:
if x==N:
return True
return False
n = 0
print("Generando...")
L = []
for x in range(0,10000000):
L.append(n)
n = n+random.randint(0,500)
print("Buscando lineal...")
N = -10
if busqueda_lineal(N,L):
print("encontrado")
else:
print("no encontrado: ")
print("Buscando binaria...")
if busqueda_binaria(N,L):
print("encontrado")
else:
print("no encontrado")
```
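For comparison, the same membership test can be written with the standard library, giving the same O(log n) behaviour as `busqueda_binaria` on a sorted list (a sketch):
```python
import bisect

def busqueda_bisect(N, L):
    i = bisect.bisect_left(L, N)
    return i < len(L) and L[i] == N
```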
#### File: iic1103-s7-2016/clase1020/pintar_completo.py
```python
def leer_archivo():
nombre = input("Nombre del archivo donde está el mapa: ")
f = open(nombre,"r")
lineas = f.readlines()
f.close()
mapa = []
for linea in lineas:
mapa.append([c for c in linea.rstrip()])
return mapa
def vecinos(mapa,i,j):
    # returns the list of free neighbouring cells of position i,j in mapa
ancho = len(mapa[0])
alto = len(mapa)
V = [[-1,0],[1,0],[0,-1],[0,1]]
return [[i+x,j+y] for [x,y] in V if 0<=i+x<alto and 0<=j+y<ancho and mapa[i+x][j+y]==' ']
def pintar_desde(mapa,i,j,letra,limite):
    # returns a map in which one interior region
    # has been completely painted with letra,
    # using no more than limite letters to paint it
    # if that is not possible, returns an empty list
if mapa[i][j] != ' ':
return []
m = [list(linea) for linea in mapa]
cola = [[i,j]]
pintados = 0
while cola != []:
vecinos_sin_pintar = []
primero = cola.pop(0)
i = primero[0]
j = primero[1]
if m[i][j] != letra:
m[i][j] = letra
pintados += 1
if pintados > limite:
return []
for v in vecinos(m,i,j):
x = v[0] # x,y son las coordenadas del vecino
y = v[1]
if m[x][y] != letra:
cola.append([x,y])
if pintados <= limite:
return m
else:
return []
def pretty_print(mapa):
for linea in mapa:
print("".join(linea))
mapa=leer_archivo()
color = input("Con qué letra pinto? ")
limite = int(input("Cuantas letras tengo? "))
total=0
listo = False
i=0
while i < len(mapa) and not listo:
j=0
while j < len(mapa[0]) and not listo:
coloreado = pintar_desde(mapa,i,j,color,limite)
if coloreado != []:
pretty_print(coloreado)
listo = True
j+=1
i+=1
if not listo:
print("Es imposible pintar el mapa con el limite que me das :(")
```
#### File: iic1103-s7-2016/clase1108/insertion_sort.py
```python
def insertion_sort(L):
i = 1
while i < len(L):
j = i - 1
while j >= 0 and L[j] > L[j+1]:
L[j],L[j+1] = L[j+1],L[j]
j -= 1
i += 1
nombre = input("nombre del archivo: ")
f= open(nombre,"r")
L = [int(x.strip()) for x in f.readlines()]
f.close()
insertion_sort(L)
for x in L:
print(x)
```
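A quick check of `insertion_sort` without the input file (a sketch):
```python
datos = [5, 1, 4, 2, 3]
insertion_sort(datos)   # sorts the list in place
print(datos)            # -> [1, 2, 3, 4, 5]
```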
#### File: iic1103-s7-2016/clase1117/laberinto.py
```python
import os
import time
class Vector:
def __init__(self,x,y):
self.x=x
self.y=y
def __add__(self,v):
return Vector(self.x+v.x,self.y+v.y)
def __eq__(self,v):
return self.x==v.x and self.y==v.y
def __str__(self):
return "(" + str(self.x) + ","+ str(self.y) + ")"
class Laberinto:
def __init__(self, path):
self.leer(path)
self.busca_robot()
self.visitados=[]
def imprimir(self):
for fila in self.lab:
print(''.join(fila))
def leer(self, path):
archivo = open(path,"r")
self.lab = []
for linea in archivo:
lab_linea = []
for c in linea.rstrip('\n'):
lab_linea.append(c)
self.lab.append(lab_linea)
archivo.close()
def busca_robot(self):
for i in range(len(self.lab)):
for j in range(len(self.lab[i])):
if self.lab[i][j] == '+':
self.pos = Vector(i,j)
def movimientos(self):
desp=[Vector(-1,0),Vector(1,0),Vector(0,-1),Vector(0,1)]
movs=[]
for d in desp:
nueva_pos=self.pos+d
if self.lab[nueva_pos.x][nueva_pos.y]!='x' and not nueva_pos in self.visitados:
movs.append(nueva_pos)
return movs
def marcar_ruta(self):
for v in self.visitados:
os.system('clear')
if self.lab[v.x][v.y]!='g':
self.lab[v.x][v.y]='*'
self.imprimir()
time.sleep(0.3)
def resolver(self):
if self.lab[self.pos.x][self.pos.y]=='o':
self.marcar_ruta()
return True
for m in self.movimientos():
if m not in self.visitados:
self.visitados.append(m)
posicion_original = self.pos
self.pos = m
if self.resolver():
return True
self.visitados.pop()
self.pos = posicion_original
return False
def resolver_optimo(self,i,limite):
if self.lab[self.pos.x][self.pos.y]=='o':
self.marcar_ruta()
return True
if i > limite:
return False
for m in self.movimientos():
if m not in self.visitados:
self.visitados.append(m)
posicion_original = self.pos
self.pos = m
if self.resolver_optimo(i+1,limite):
return True
self.visitados.pop()
self.pos = posicion_original
return False
# Main program
lab = Laberinto('maze_3.txt')
lab.imprimir()
#lab.resolver()
for lim in range(10000):
if lab.resolver_optimo(0,lim):
break
``` |
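The solver expects a plain-text maze file; a tiny hypothetical `maze_3.txt` compatible with the code above ('x' = wall, '+' = robot, 'o' = goal, spaces = free cells) could be generated like this:
```python
ejemplo = (
    "xxxxxxx\n"
    "x+   ox\n"
    "x xxx x\n"
    "x     x\n"
    "xxxxxxx\n"
)
with open("maze_3.txt", "w") as f:
    f.write(ejemplo)
```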
{
"source": "jorgebg/cheatsheets",
"score": 3
} |
#### File: cheatsheeter/extensions/embed_img.py
```python
import base64
import mimetypes
import os
from urllib.parse import unquote
import xml.etree.ElementTree as etree
from markdown.inlinepatterns import LinkInlineProcessor
from markdown.extensions import Extension
def remove_namespace(doc, namespace):
"""Remove namespace in the passed document in place."""
ns = u'{%s}' % namespace
nsl = len(ns)
    for elem in doc.iter():  # getiterator() was removed in Python 3.9
if elem.tag.startswith(ns):
elem.tag = elem.tag[nsl:]
class EmbedImageInlineProcessor(LinkInlineProcessor):
""" Return a embed img element from the given match. """
def handleMatch(self, m, data):
from cheatsheeter.__main__ import cheatsheeter
text, index, handled = self.getText(data, m.end(0))
if not handled:
return None, None, None
src, title, index, handled = self.getLink(data, index)
if not handled:
return None, None, None
filename = os.path.join(cheatsheeter.source_path, unquote(src))
if src.endswith('.svg'):
el = etree.parse(filename).getroot()
remove_namespace(el, "http://www.w3.org/2000/svg")
el.attrib.pop('width', None)
el.attrib.pop('height', None)
else:
mime = mimetypes.guess_type(filename)[0]
with open(filename, 'br') as f:
data = base64.b64encode(f.read()).decode('ascii')
src_data = "data:{};base64,{}".format(mime, data)
el = etree.Element("img")
el.set("src", src_data)
if title is not None:
el.set("title", title)
el.set('alt', self.unescape(text))
return el, m.start(0), index
class EmbedImageExtension(Extension):
def extendMarkdown(self, md):
EMBED_IMAGE_LINK_RE = r'\!\!\['
md.inlinePatterns.register(EmbedImageInlineProcessor(EMBED_IMAGE_LINK_RE, md), 'embed_img', 175)
``` |
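A sketch of wiring the extension into Markdown (assumes the standard `markdown` package and the cheatsheeter application context, since the processor resolves paths against `cheatsheeter.source_path`; the `!![alt](path)` syntax comes from `EMBED_IMAGE_LINK_RE` above):
```python
import markdown

html = markdown.markdown(
    "!![logo](logo.svg)",
    extensions=[EmbedImageExtension()],
)
```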
{
"source": "jorgebg/devip",
"score": 3
} |
#### File: devip/devip/lib.py
```python
from netifaces import interfaces, ifaddresses, AF_INET
def get_ip_addresses(loopback=False):
addresses = []
for name in interfaces():
af = ifaddresses(name).get(AF_INET)
if af:
addresses.extend([i.get('addr') for i in ifaddresses(name).get(AF_INET)])
if not loopback and '127.0.0.1' in addresses:
addresses.remove('127.0.0.1')
return addresses
``` |
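Example use (the addresses shown are illustrative):
```python
print(get_ip_addresses())               # e.g. ['192.168.1.23']
print(get_ip_addresses(loopback=True))  # e.g. ['127.0.0.1', '192.168.1.23']
```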
{
"source": "jorgebg/dgt-anki-flashcards",
"score": 3
} |
#### File: dgt_tests/crawlers/base.py
```python
import logging
import os
import requests
from bs4 import BeautifulSoup
from dgt_tests.media import download_image, get_crawl_image_media_path
from dgt_tests.models import Question, Answer
def field(fun):
fun.is_field = True
return fun
def get_fields(obj):
fields = []
for key in dir(obj):
value = getattr(obj, key)
if getattr(value, 'is_field', False):
fields.append(key)
return fields
class BaseItem:
def __init__(self, parent_node):
self.node = parent_node
def to_dict(self):
return {key: getattr(self, key)() for key in get_fields(self)}
@classmethod
def find_items(cls, html):
root = cls(html)
return [cls(node) for node in root.find_nodes()]
def find_nodes(self):
raise NotImplementedError("BaseItem.find_nodes")
class BaseCrawler:
question_exporter = None
answer_exporter = None
def __init__(self):
        self.name = self.__class__.__name__.replace('Crawler', '')  # str.strip('Crawler') would also trim unrelated leading/trailing characters
self.logger = logging.getLogger(__name__)
def get_test_urls(self):
raise NotImplementedError('BaseCrawler.get_test_urls')
def is_test_crawled(self, test_url):
return Question.select().where(Question.test_url==test_url).exists()
def run(self):
self.logger.info("Starting %s crawler", self.name)
for test_url in self.get_test_urls():
if self.is_test_crawled(test_url):
continue
resp = requests.get(test_url)
if resp.status_code == 200:
self.logger.info("Processing test: %s", test_url)
else:
self.logger.info("Test not found: %s", test_url)
continue
html = BeautifulSoup(resp.content, 'html.parser')
for question_item in self.question_exporter.find_items(html):
question = self.get_or_create(Question, crawler=self.name, test_url=test_url, **question_item.to_dict())
for answer_item in self.answer_exporter.find_items(question_item.node):
answer = self.get_or_create(Answer, question=question, **answer_item.to_dict())
download_image(question.image)
def get_or_create(self, model, **attributes):
instance, created = model.get_or_create(**attributes)
if created:
self.logger.info("%s CREATED: %s", model._meta.name, instance)
else:
self.logger.info("%s SKIPPED: %s", model._meta.name, instance)
return instance
``` |
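A hypothetical concrete item subclass, to illustrate how `@field` methods become the keyword arguments passed to `Question.get_or_create` (the CSS selectors are illustrative, not taken from the real test pages):
```python
class ExampleQuestionItem(BaseItem):
    def find_nodes(self):
        # one node per question block in the parsed page
        return self.node.select("div.question")

    @field
    def text(self):
        return self.node.select_one("p.statement").get_text(strip=True)

    @field
    def image(self):
        img = self.node.select_one("img")
        return img["src"] if img else ""
```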
{
"source": "jorgebg/gief",
"score": 3
} |
#### File: gief/gief/gief.py
```python
import os
from flask import Flask, request, redirect, url_for
from werkzeug.utils import secure_filename
app = Flask(__name__)
@app.route('/', methods=['GET'])
def get():
return '''
<!doctype html>
<title>Gimme</title>
<h1>Gimme</h1>
<form action="" method=post enctype=multipart/form-data>
<p><input type=file name=file>
<input type=submit value=Upload>
</form>
'''
@app.route('/', methods=['POST'])
def post():
file = request.files['file']
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['path'], filename))
return filename + ' uploaded'
``` |
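The upload directory is read from `app.config['path']` but never set in this file; a minimal way to run it locally (a sketch) is:
```python
if __name__ == '__main__':
    app.config['path'] = '/tmp/uploads'   # hypothetical destination directory
    app.run(host='0.0.0.0', port=5000)
```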
{
"source": "jorgebg/tictactoe",
"score": 3
} |
#### File: tictactoe/todo/test.py
```python
import random
import unittest
from game import *
class TestGame(unittest.TestCase):
def test_winner(self):
        wins = """
XOX
XXO
XOO
XXX
OXO
OOX
XOX
XXO
OOO
"""
b0, b1, b2, b3 = Board("001001001"), Board("000000111"), Board("000000222"), Board(0)
self.assertEqual(b0.min(), b1.min())
self.assertEqual(b1.min(), b2.min())
self.assertNotEqual(b1.min(False), b3.min(False))
self.assertNotEqual(b2.min(), b3.min())
tree = Tree()
tree.generate()
self.assertEqual(len(tree.states), 5478)
#self.assertEqual(len(tree.unique_states), 765)
#self.assertEqual(len([s for s in tree.unique_states if s.is_over]), 765)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jorgeBIGS/DL-Ideas",
"score": 3
} |
#### File: src/sam/sam_wrapper_net.py
```python
import torch.nn as nn
import torch.optim as optim
from sam.sam import SAM
class SAMWrapperNet(nn.Module):
def __init__(self, device, model, epochs=100, batch_size=500):
super(SAMWrapperNet, self).__init__()
self.model = model
self.criterion = nn.CrossEntropyLoss()
base_optimizer = optim.SGD # define an optimizer for the "sharpness-aware" update
self.optimizer = SAM(self.parameters(), base_optimizer, lr=0.1, momentum=0.9)
self.device = device
self.epochs = epochs
self.batch = batch_size
def forward(self, x):
x = self.model.forward(x)
return x
def train(self, train_loader):
result = []
for epoch in range(2): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(train_loader, 0):
# zero the parameter gradients
self.optimizer.zero_grad()
                for input1, output in data:
                    def closure():
                        # CrossEntropyLoss expects (logits, targets); re-evaluate the model for SAM's second step
                        loss1 = self.criterion(self.model(input1), output)
                        loss1.backward()
                        return loss1
                    loss = self.criterion(self.model(input1), output)
                    loss.backward()
                    self.optimizer.step(closure)
                    self.optimizer.zero_grad()
'''
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
inputs.to(self.device)
labels.to(self.device)
# forward + backward + optimize
outputs = self(inputs)
loss = self.criterion(outputs, labels)
loss.backward()
self.optimizer.step()
# print statistics
running_loss += loss.item()
if i % 2000 == 1999: # print every 2000 mini-batches
result.append('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / 2000))
running_loss = 0.0
'''
return result
``` |
{
"source": "jorgebodega/dotpyle",
"score": 2
} |
#### File: commands/add/__init__.py
```python
import click
from dotpyle.commands.add.dotfile import dotfile
@click.group()
def add():
"""
This command will take KEY and ... DOTFILE
"""
add.add_command(dotfile)
```
#### File: dotpyle/dotpyle/main.py
```python
import click
from dotpyle.commands.init import init
from dotpyle.commands.edit import edit
from dotpyle.commands.add import add
from dotpyle.commands.ls import ls
from dotpyle.commands.config import config
from dotpyle.commands.link import link
from dotpyle.commands.unlink import unlink
from dotpyle.commands.commit import commit
from dotpyle.commands.checkout import checkout
from dotpyle.commands.push import push
from dotpyle.commands.pull import pull
from dotpyle.commands.switch import switch
from dotpyle.commands.script import script
from dotpyle.services.config_checker import ConfigChecker
from dotpyle.services.repo_handler import RepoHandler
from dotpyle.utils import constants
from dotpyle.services.print_handler import error
from dotpyle.exceptions import DotpyleException
@click.group()
@click.version_option()
@click.pass_context
def dotpyle(ctx=None):
"""
Manage your dotfiles, create multiple profiles for different programs, automate task with hooks, etc
"""
ctx.meta[constants.CONFIG_CHECKER_PROVIDER] = ConfigChecker()
ctx.meta[constants.REPO_HANDLER_PROVIDER] = RepoHandler()
# Add commands to group
dotpyle.add_command(link)
dotpyle.add_command(unlink)
dotpyle.add_command(switch)
dotpyle.add_command(init)
dotpyle.add_command(add)
dotpyle.add_command(commit)
dotpyle.add_command(push)
dotpyle.add_command(pull)
dotpyle.add_command(checkout)
dotpyle.add_command(config)
dotpyle.add_command(edit)
dotpyle.add_command(ls)
dotpyle.add_command(script)
def main():
try:
dotpyle()
except DotpyleException as e:
error(e)
exit(e.code)
if __name__ == "__main__":
main()
```
#### File: dotpyle/utils/autocompletion.py
```python
from click.shell_completion import CompletionItem
from dotpyle.services.file_handler import FileHandler, LocalFileHandler
from dotpyle.services.config_handler import ConfigHandler
import os
import sys
def get_names(ctx, param, incomplete):
handler = FileHandler()
parser = ConfigHandler(config=handler.config)
name_profiles = parser.get_names()
items = [(name, '') for name in name_profiles]
out = []
for value, help in items:
if incomplete in value or incomplete in help:
# yield value
out.append(CompletionItem(value, help=help))
return out
# TODO: get profiles given name (previous argument)
def get_profiles(ctx, param, incomplete):
handler = FileHandler()
parser = ConfigHandler(config=handler.config)
# first, *middle, last = ctx.split()
name_profiles = parser.get_name('vim')
# cmd_line = (tok for tok in param + [incomplete])
# last = [p for p in parm]
# cmd_line = " ".join(last + [incomplete])
# f = open("test.txt", "a")
# commandLineArgsAsStr = str(sys.argv)
# numArgs = len(sys.argv)
# f.write(str(incomplete))
# f.write(commandLineArgsAsStr)
# f.write(str(numArgs))
# # f.write(str(ctx.command))
# # f.write(str(ctx.info_name))
# # f.write(str(param))
# f.write('\n')
# f.close()
items = [(name, '') for name in name_profiles]
out = []
for value, help in items:
if incomplete in value or incomplete in help:
out.append(CompletionItem(value, help=help))
return out
```
#### File: unitary/services/test_config_checker.py
```python
import pytest
from tests.utils.mocks.config_valid_cases import valid_cases
from tests.utils.mocks.config_invalid_cases import invalid_cases
from dotpyle.services.config_checker import ConfigChecker
@pytest.mark.parametrize("config", [*valid_cases])
def test_config_checker_valid_configs(config):
"""Test that the config checker returns True when given valid configs."""
checker = ConfigChecker()
errors = checker.check_config(config)
assert len(errors) == 0
@pytest.mark.parametrize("config", [*invalid_cases])
def test_config_checker_invalid_configs(
config,
):
"""Test that the config checker returns errors when given invalid configs."""
checker = ConfigChecker()
errors = checker.check_config(config)
assert len(errors) > 0
```
#### File: unitary/services/test_config_handler.py
```python
import pytest
from yaml import safe_dump
from dotpyle.errors.InvalidConfigFile import InvalidConfigFileError
from dotpyle.services.config_handler import ConfigHandler
from tests.utils.mocks.config_valid_cases import valid_cases
def test_init_no_path(tmpdir, monkeypatch):
tmpdir.mkdir("dotpyle").join("dotpyle.yml").write("")
monkeypatch.setenv("XDG_CONFIG_HOME", str(tmpdir))
handler = ConfigHandler()
assert handler is not None
def test_init_with_file_path(tmpdir):
tmpfile = tmpdir.mkdir("dotpyle").join("dotpyle.yml")
tmpfile.write("")
handler = ConfigHandler(str(tmpfile))
assert handler is not None
def test_init_with_dir_path(tmpdir):
with pytest.raises(InvalidConfigFileError):
assert ConfigHandler(tmpdir)
@pytest.mark.parametrize("config", [*valid_cases])
def test_read(tmpdir, config):
dumped_data = safe_dump(config)
tmpfile = tmpdir.mkdir("dotpyle").join("dotpyle.yml")
tmpfile.write(dumped_data)
handler = ConfigHandler(str(tmpfile))
config_read = handler.read()
assert config_read is not None
assert config_read == config
```
#### File: utils/url/test_get_default_url.py
```python
from os import path
from dotpyle.utils.url import get_default_url
HTTPS_URL = "https://github.com/exampleuser/examplerepo.git"
GIT_URL = "git@github.com:exampleuser/examplerepo.git"
TOKEN = "test<PASSWORD>"
def test_with_https():
result_url = get_default_url(HTTPS_URL, "https")
assert result_url == HTTPS_URL
def test_with_https_with_token():
result_url = get_default_url(HTTPS_URL, "https", TOKEN)
expected_url = "{0}{1}@{2}".format(HTTPS_URL[:8], TOKEN, HTTPS_URL[9:])
assert result_url == expected_url
def test_with_git():
result_url = get_default_url(GIT_URL, "git")
assert result_url == GIT_URL
``` |
{
"source": "jorgebonilla/quickstart-aviatrix-nextgentransithub",
"score": 2
} |
#### File: quickstart-aviatrix-nextgentransithub/scripts/aviatrix_gateway.py
```python
import os, sys, boto3, urllib, ssl, json, logging
from urllib.request import urlopen, URLError
from time import sleep
#Needed to load Aviatrix Python API.
from aviatrix3 import Aviatrix
import cfnresponse
#logging configuration
logger = logging.getLogger()
logger.setLevel(logging.INFO)
#Read environment Variables
controller_ip = os.environ.get("Controller_IP")
username = os.environ.get("Username")
password = os.environ.get("Password")
queue_url = os.environ.get("GatewayQueueURL")
spoketag = os.environ.get("SpokeTag")
OtherAccountRoleApp = os.environ.get("OtherAccountRoleApp")
gatewaytopic = ""
class original_context_class():
def __init__(self,original_context):
self.log_stream_name=original_context
def tag_spoke(ec2,region_spoke,vpcid_spoke,spoketag, tag):
ec2.create_tags(Resources = [ vpcid_spoke ], Tags = [ { 'Key': spoketag, 'Value': tag } ])
def find_other_spokes(vpc_pairs,other_credentials=""):
ec2 = boto3.client('ec2',region_name='us-east-1')
regions=ec2.describe_regions()
existing_spokes=[]
if vpc_pairs:
for region in regions['Regions']:
region_id=region['RegionName']
#Find spokes in primary account
ec2=create_aws_session(region_id)
for vpc_name in vpc_pairs['pair_list']:
vpc_name_temp = {}
vpc_name_temp['vpc_name'] = vpc_name['vpc_name2']
vpc_info=ec2.describe_vpcs(Filters=[
{ 'Name': 'vpc-id', 'Values':[ vpc_name['vpc_name2'][6:] ]}
])
if vpc_info['Vpcs']:
vpc_name_temp['subnet'] = vpc_info['Vpcs'][0]['CidrBlock']
existing_spokes.append(vpc_name_temp)
#Find Spokes in Secondary account if otheraccount=TRUE
if other_credentials != "":
ec2=create_aws_session(region_id,other_credentials)
for vpc_name in vpc_pairs['pair_list']:
vpc_name_temp = {}
vpc_name_temp['vpc_name'] = vpc_name['vpc_name2']
vpc_info=ec2.describe_vpcs(Filters=[
{ 'Name': 'vpc-id', 'Values':[ vpc_name['vpc_name2'][6:] ]}
])
if vpc_info['Vpcs']:
vpc_name_temp['subnet'] = vpc_info['Vpcs'][0]['CidrBlock']
existing_spokes.append(vpc_name_temp)
return existing_spokes
def create_aws_session(region_id,other_credentials=""):
if other_credentials != "":
ec2=boto3.client('ec2',
region_name=region_id,
aws_access_key_id=other_credentials['Credentials']['AccessKeyId'],
aws_secret_access_key=other_credentials['Credentials']['SecretAccessKey'],
aws_session_token=other_credentials['Credentials']['SessionToken'] )
else:
ec2=boto3.client('ec2',region_name=region_id)
return ec2
def get_aws_session(body,region_spoke):
primary_account = body['primary_account']
try:
otheraccount = body['otheraccount']
if primary_account:
awsaccount = "AWSAccount"
other_credentials = ""
else:
awsaccount = "AWSOtherAccount"
other_credentials = get_credentials(OtherAccountRoleApp)
region_id=region_spoke
except KeyError:
otheraccount = False
awsaccount = "AWSAccount"
other_credentials=""
region_id=region_spoke
ec2=create_aws_session(region_id,other_credentials)
return { 'ec2': ec2,
'awsaccount': awsaccount,
'otheraccount': otheraccount,
'primary_account': primary_account
}
def get_credentials(rolearn):
session = boto3.session.Session()
client = session.client('sts')
assume_role_response = client.assume_role(RoleArn=rolearn,
RoleSessionName="aviatrix_poller" )
return assume_role_response
def deploy_hub(controller,body,gatewaytopic):
#Variables
vpcid_hub = body['vpcid_hub']
region_hub = body['region_hub']
gwsize_hub = body['gwsize_hub']
subnet_hub = body['subnet_hub']
subnet_hubHA= body['subnet_hubHA']
original_event = body['original_event']
original_context = body['original_context']
#Processing
try:
#Hub Gateway Creation
logger.info('Creating Gateway: hub-%s', vpcid_hub)
controller.create_gateway("AWSAccount",
"1",
"hub-" + vpcid_hub,
vpcid_hub,
region_hub,
gwsize_hub,
subnet_hub)
logger.info('Done with Hub Gateway Deployment')
#Send message to start HA gateway Deployment
message = {}
message['action'] = 'deployhubha'
message['vpcid_ha'] = 'hub-' + vpcid_hub
message['region_ha'] = region_hub
message['subnet_ha'] = subnet_hubHA
message['subnet_name'] = "Aviatrix VPC-Public Subnet HA"
message['original_event'] = original_event
message['original_context'] = original_context
#Add New Gateway to SNS
logger.info('Sending message to create Hub HA GW')
logger.info('Message sent: %s: ' % json.dumps(message))
sns = boto3.client('sns')
logger.info("Temp: %s" % gatewaytopic)
sns.publish(
TopicArn=gatewaytopic,
Subject='New Hub HA Gateway',
Message=json.dumps(message)
)
return {
'Status' : 'SUCCESS'
}
except URLError:
logger.info('Failed request. Error: %s', controller.results)
responseData = {
"PhysicalResourceId": "arn:aws:fake:myID",
"Cause" : controller.results
}
original_event=eval(original_event)
original_context=original_context_class(original_context)
cfnresponse.send(original_event, original_context, cfnresponse.FAILURE, responseData)
sys.exit(1)
def deploy_hub_ha(controller,body):
#Variables for HA GW
vpcid_ha = body['vpcid_ha']
region_ha = body['region_ha']
subnet_ha = body['subnet_ha']
subnet_name = body['subnet_name']
specific_subnet = subnet_ha + "~~" + region_ha + "~~" + subnet_name
original_event = body['original_event']
original_context = body['original_context']
try:
#Processing
logger.info('Processing HA Gateway %s.', vpcid_ha)
#HA Gateway Creation
logger.info('Creating HA Gateway: %s', vpcid_ha)
controller.enable_vpc_ha(vpcid_ha,specific_subnet)
logger.info('Created HA Gateway: %s', vpcid_ha)
sleep(10)
logger.info('Done with HA Hub Gateway Deployment')
#responseData
logger.info('Sending Message for Cloudformation Custom Resource: CREATE_COMPLETE')
responseData = {
"PhysicalResourceId": "arn:aws:fake:myID"
}
original_event=eval(original_event)
original_context=original_context_class(original_context)
cfnresponse.send(original_event, original_context, cfnresponse.SUCCESS, responseData)
except URLError:
logger.info('Failed request. Error: %s', controller.results)
responseData = {
"PhysicalResourceId": "arn:aws:fake:myID",
"Cause" : controller.results
}
original_event=eval(original_event)
original_context=original_context_class(original_context)
cfnresponse.send(original_event, original_context, cfnresponse.FAILURE, responseData)
sys.exit(1)
def deploy_gw (controller,body,gatewaytopic):
#Variables
subnet_spoke = body['subnet_spoke']
subnet_spoke_ha = body['subnet_spoke_ha']
subnet_spoke_name = body['subnet_spoke_name']
vpcid_spoke = body['vpcid_spoke']
region_spoke = body['region_spoke']
gwsize_spoke = body['gwsize_spoke']
vpcid_hub = body['vpcid_hub']
vpc_cidr_spoke = body['vpc_cidr_spoke']
#Get the right account
result= get_aws_session(body,region_spoke)
ec2=result['ec2']
awsaccount=result['awsaccount']
primary_account = result['primary_account']
#Processing
logger.info('Processing VPC %s. Updating tag:%s to processing' % (vpcid_spoke, spoketag))
tag_spoke(ec2,region_spoke,vpcid_spoke,spoketag,'processing')
try:
#Spoke Gateway Creation
logger.info('Creating Gateway: spoke-%s', vpcid_spoke)
controller.create_gateway(awsaccount,
"1",
"spoke-"+vpcid_spoke,
vpcid_spoke,
region_spoke,
gwsize_spoke,
subnet_spoke)
sleep(20)
logger.info('Created Gateway: spoke-%s', vpcid_spoke)
#Send message to start HA gateway Deployment
message = {}
message['action'] = 'deploygatewayha'
message['vpcid_ha'] = 'spoke-' + vpcid_spoke
message['region_ha'] = region_spoke
message['subnet_ha'] = subnet_spoke_ha
message['subnet_name'] = subnet_spoke_name
message['vpcid_spoke'] = vpcid_spoke
message['vpcid_hub'] = vpcid_hub
message['vpc_cidr_spoke'] = vpc_cidr_spoke
message['primary_account'] = primary_account
if awsaccount == 'AWSOtherAccount':
message['otheraccount'] = True
#Add New Gateway to SNS
sns = boto3.client('sns')
sns.publish(
TopicArn=gatewaytopic,
Subject='New Hub HA Gateway',
Message=json.dumps(message)
)
return {
'Status' : 'SUCCESS'
}
except URLError:
logger.info('Failed request. Error: %s', controller.results)
return {
'Status' : 'FAILURE',
'Error' : controller.results
}
def deploy_gw_ha(controller,body,gatewaytopic):
#Variables for HA GW
vpcid_ha = body['vpcid_ha']
region_ha = body['region_ha']
subnet_ha = body['subnet_ha']
subnet_name = body['subnet_name']
vpcid_spoke = body['vpcid_spoke']
vpcid_hub = body['vpcid_hub']
vpc_cidr_spoke = body['vpc_cidr_spoke']
specific_subnet = subnet_ha + "~~" + region_ha + "~~" + subnet_name
#Get the right account
result= get_aws_session(body,vpcid_spoke)
ec2=result['ec2']
awsaccount=result['awsaccount']
primary_account = result['primary_account']
logger.info('AWS Account: %s' % awsaccount)
#Processing
logger.info('Processing HA Gateway %s.', vpcid_ha)
#HA Gateway Creation
logger.info('Creating HA Gateway: %s', vpcid_ha)
try:
controller.enable_vpc_ha(vpcid_ha,specific_subnet)
logger.info('Created HA Gateway: %s', vpcid_ha)
sleep(10)
#Call to create the peering And routing
message = {}
message['action'] = 'create_peering'
message['vpcid_ha'] = 'spoke-' + vpcid_spoke
message['region_spoke'] = region_ha
message['vpcid_spoke'] = vpcid_spoke
message['vpcid_hub'] = vpcid_hub
message['vpc_cidr_spoke'] = vpc_cidr_spoke
message['primary_account'] = primary_account
if awsaccount == 'AWSOtherAccount':
message['otheraccount'] = True
#Add New Gateway to SNS
sns = boto3.client('sns')
sns.publish(
TopicArn=gatewaytopic,
Subject='Create Peering and Routing for new GW',
Message=json.dumps(message)
)
logger.info('Done with HA Gateway Deployment')
return {
'Status' : 'SUCCESS'
}
except URLError:
logger.info('Failed request. Error: %s', controller.results)
return {
'Status' : 'FAILURE',
'Error' : controller.results
}
def create_peering(controller,body):
#Variables
vpcid_spoke = body['vpcid_spoke']
region_spoke = body['region_spoke']
vpcid_hub = body['vpcid_hub']
vpc_cidr_spoke = body['vpc_cidr_spoke']
#Get the right account
result= get_aws_session(body,region_spoke)
ec2 = result['ec2']
awsaccount = result['awsaccount']
otheraccount = result['otheraccount']
logger.info('AWS Account: %s' % awsaccount)
try:
#Peering Hub/Spoke
logger.info('Peering: hub-%s --> spoke-%s' % (vpcid_hub, vpcid_spoke))
controller.peering("hub-"+vpcid_hub, "spoke-"+vpcid_spoke)
#get the list of existing Spokes
controller.list_peers_vpc_pairs()
found_pairs = controller.results
if OtherAccountRoleApp:
other_credentials = get_credentials(OtherAccountRoleApp)
existing_spokes = find_other_spokes(found_pairs,other_credentials)
else:
existing_spokes = find_other_spokes(found_pairs)
#Creating the transitive connections
logger.info('Creating Transitive routes, Data: %s' % existing_spokes)
if existing_spokes:
for existing_spoke in existing_spokes:
if existing_spoke['vpc_name'] != 'spoke-' + vpcid_spoke:
controller.add_extended_vpc_peer('spoke-' + vpcid_spoke, 'hub-' + vpcid_hub, existing_spoke['subnet'])
controller.add_extended_vpc_peer(existing_spoke['vpc_name'],'hub-' + vpcid_hub, vpc_cidr_spoke)
logger.info('Finished creating Transitive routes')
logger.info('Done Peering %s. Updating tag:%s to peered' % (vpcid_spoke, spoketag))
#reconnect to right Account:
result = get_aws_session(body,region_spoke)
ec2 = result['ec2']
awsaccount = result['awsaccount']
tag_spoke(ec2,region_spoke,vpcid_spoke,spoketag,'peered')
return {
'Status' : 'SUCCESS'
}
except URLError:
logger.info('Failed request. Error: %s', controller.results)
return {
'Status' : 'FAILURE',
'Error' : controller.results
}
def delete_gw(controller,body):
#Variables
region_spoke = body['region_spoke']
vpcid_hub = body['vpcid_hub']
vpcid_spoke = body['vpcid_spoke']
subnet_spoke = body['subnet_spoke']
result = get_aws_session(body,region_spoke)
ec2 = result['ec2']
awsaccount = result['awsaccount']
otheraccount = result['otheraccount']
#Processing
logger.info('Processing unpeer of VPC %s. Updating tag:%s to processing' % (vpcid_spoke,spoketag))
tag_spoke(ec2,region_spoke,vpcid_spoke,spoketag,'processing')
try:
#get the list of existing Spokes
controller.list_peers_vpc_pairs()
found_pairs = controller.results
if OtherAccountRoleApp:
other_credentials = get_credentials(OtherAccountRoleApp)
existing_spokes = find_other_spokes(found_pairs,other_credentials)
else:
existing_spokes = find_other_spokes(found_pairs)
#Delete Transitive routes
if existing_spokes:
for existing_spoke in existing_spokes:
if existing_spoke['vpc_name'] != 'spoke-' + vpcid_spoke:
controller.delete_extended_vpc_peer('spoke-' + vpcid_spoke, 'hub-' + vpcid_hub, existing_spoke['subnet'])
controller.delete_extended_vpc_peer(existing_spoke['vpc_name'],'hub-' + vpcid_hub, subnet_spoke)
#Reconnect with right account:
result = get_aws_session(body,region_spoke)
ec2 = result['ec2']
awsaccount = result['awsaccount']
#Unpeering
logger.info('UnPeering: hub-%s --> spoke-%s' % (vpcid_hub, vpcid_spoke))
tag_spoke(ec2,region_spoke,vpcid_spoke,spoketag,'unpeering')
controller.unpeering("hub-"+vpcid_hub, "spoke-"+vpcid_spoke)
#Spoke Gateway Delete
logger.info('Deleting Gateway: spoke-%s', vpcid_spoke)
controller.delete_gateway("1", "spoke-"+vpcid_spoke)
logger.info('Done unPeering %s. Updating tag:%s to unpeered' % (vpcid_spoke,spoketag))
tag_spoke(ec2,region_spoke,vpcid_spoke,spoketag,'unpeered')
return {
'Status' : 'SUCCESS'
}
except URLError:
logger.info('Failed request. Error: %s', controller.results)
return {
'Status' : 'FAILURE',
'Error' : controller.results
}
def handler(event, context):
#Grab GWtopic from SNS
gatewaytopic = event['Records'][0]['EventSubscriptionArn'][:-37]
# Receive message from SQS queue
#body=read_queue(queue_url)
logger.info('Received Message: %s', event)
body=json.loads(event['Records'][0]['Sns']['Message'])
logger.info('Received Message: %s', body)
try:
#Open connection to controller
controller = Aviatrix(controller_ip)
controller.login(username,password)
#Case Deploy Hub
if body['action'] == 'deployhub':
response = deploy_hub(controller,body,gatewaytopic)
#Case Deploy Hub HA
elif body['action'] == 'deployhubha':
response = deploy_hub_ha(controller,body)
#Case Deploy Gateway
elif body['action'] == 'deploygateway':
response = deploy_gw(controller,body,gatewaytopic)
#Case Deploy Gateway HA
elif body['action'] == 'deploygatewayha':
response = deploy_gw_ha(controller,body,gatewaytopic)
#Case Deploy peering
elif body['action'] == 'create_peering':
response = create_peering(controller,body)
#Case Delete Gateway
elif body['action'] == 'deletegateway':
response = delete_gw(controller,body)
except URLError:
logger.info('Failed request. Error: %s', controller.results)
return {
'Status' : 'FAILURE',
'Error' : controller.results
}
```
#### File: quickstart-aviatrix-nextgentransithub/scripts/aviatrix_poller.py
```python
from __future__ import print_function
import os, boto3, json, logging
lambda_client = boto3.client('lambda')
#logging configuration
logger = logging.getLogger()
logger.setLevel(logging.INFO)
#Read environment Variables
gatewayqueue = os.environ.get("GatewayQueue")
vpcid_hub = os.environ.get("HubVPC")
gwsize_spoke = os.environ.get("SpokeGWSize")
gatewaytopic = os.environ.get("GatewayTopic")
spoketag = os.environ.get("SpokeTag")
OtherAccountRoleApp = os.environ.get("OtherAccountRoleApp")
def find_subnets(ec2,region_id,vpc_id):
subnets_with_igw=ec2.describe_route_tables(Filters=[
{ 'Name': 'vpc-id', 'Values':[ vpc_id ]},
{ 'Name': 'route.gateway-id', 'Values': [ 'igw-*' ] }
])
subnetids=[]
for association in subnets_with_igw['RouteTables'][0]['Associations']:
if 'SubnetId' in association:
subnet_temp = {}
subnet_temp['SubnetId'] = association['SubnetId']
subnetids.append(subnet_temp)
for subnet in subnetids:
subnet_info=ec2.describe_subnets(Filters=[
{ 'Name': 'subnet-id', 'Values': [ subnet['SubnetId'] ] }
])
subnet['CidrBlock'] = subnet_info['Subnets'][0]['CidrBlock']
for tag in subnet_info['Subnets'][0]['Tags']:
if tag['Key'] == 'Name':
subnet['Name'] = tag['Value']
return subnetids
def get_credentials(rolearn):
session = boto3.session.Session()
client = session.client('sts')
assume_role_response = client.assume_role(RoleArn=rolearn,
RoleSessionName="aviatrix_poller" )
return assume_role_response
def handler(event, context):
#Gather all the regions:
ec2=boto3.client('ec2',region_name='us-east-1')
regions=ec2.describe_regions()
#Get Access information for OtherAccountRoleApp
if OtherAccountRoleApp:
logger.info('[Other Account]: Secondary aws account found.')
try:
other_credentials = get_credentials(OtherAccountRoleApp)
except:
logger.warning('!!!you might not have the right permissions!!!. Moving on...')
else:
logger.info('[Other Account]: Secondary aws account NOT found.')
#Findout if controller is busy:
for region in regions['Regions']:
region_id=region['RegionName']
logger.info('Checking region: %s for VPC that are processing or unpeering',region_id)
ec2=boto3.client('ec2',region_name=region_id)
#Find VPCs with Tag:spoketag = processing
#Create Gateway for it and Peer, when done change the Tag:spoketag = peered
vpcs=ec2.describe_vpcs(Filters=[
{ 'Name': 'state', 'Values': [ 'available' ] },
{ 'Name': 'tag:'+spoketag, 'Values': [ 'processing', 'unpeering' ] }
])
#logger.info('vpcs with tag:spoketag is processing or unpeering: %s:' % str(vpcs))
if vpcs['Vpcs']: # ucc is busy now
logger.info('ucc is busy in adding/removing spoke of %s:' % str(vpcs['Vpcs']))
return {
'Status' : 'SUCCESS'
}
#Findout if controller is busy in OtherAccountRoleApp
if OtherAccountRoleApp:
if other_credentials:
for region in regions['Regions']:
region_id=region['RegionName']
logger.info('[Other Account] Checking region: %s for VPC that are processing or unpeering',region_id)
ec2=boto3.client('ec2',
region_name=region_id,
aws_access_key_id=other_credentials['Credentials']['AccessKeyId'],
aws_secret_access_key=other_credentials['Credentials']['SecretAccessKey'],
aws_session_token=other_credentials['Credentials']['SessionToken'] )
#Find VPCs with Tag:spoketag = processing
#Create Gateway for it and Peer, when done change the Tag:spoketag = peered
vpcs=ec2.describe_vpcs(Filters=[
{ 'Name': 'state', 'Values': [ 'available' ] },
{ 'Name': 'tag:'+spoketag, 'Values': [ 'processing', 'unpeering' ] }
])
#logger.info('vpcs with tag:spoketag is processing or unpeering: %s:' % str(vpcs))
if vpcs['Vpcs']: # ucc is busy now
logger.info('[Other Account] ucc is busy in adding/removing spoke of %s:' % str(vpcs['Vpcs']))
return {
'Status' : 'SUCCESS'
}
#Find Spokes waiting to be peered or unpeered
for region in regions['Regions']:
region_id=region['RegionName']
logger.info('Checking region: %s for VPC tagged %s' % (region_id,spoketag))
ec2=boto3.client('ec2',region_name=region_id)
#Find VPCs with Tag:spoketag = true
#Create Gateway for it and Peer, when done change the Tag:spoketag = peered
vpcs=ec2.describe_vpcs(Filters=[
{ 'Name': 'state', 'Values': [ 'available' ] },
{ 'Name': 'tag:'+spoketag, 'Values': [ 'true', 'True', 'TRUE', 'test' ] }
])
for vpc_peering in vpcs['Vpcs']:
message = {}
message['action'] = 'deploygateway'
message['vpcid_spoke'] = vpc_peering['VpcId']
message['region_spoke'] = region_id
message['gwsize_spoke'] = gwsize_spoke
message['vpcid_hub'] = vpcid_hub
message['primary_account'] = True
if OtherAccountRoleApp:
message['otheraccount'] = True
#Finding the Public Subnet
try:
subnets=find_subnets(ec2, message['region_spoke'],message['vpcid_spoke'])
if subnets:
logger.warning('Subnets found: %s ' % (subnets))
message['subnet_spoke'] = subnets[0]['CidrBlock']
message['subnet_spoke_ha'] = subnets[1]['CidrBlock']
message['subnet_spoke_name'] = subnets[1]['Name']
except:
logger.warning('!!!your spoke vpc subnet is not setup correctly!!!')
continue
message['vpc_cidr_spoke'] = vpc_peering['CidrBlock']
logger.info('Found VPC %s waiting to be peered. Sending SQS message to Queue %s' % (message['vpcid_spoke'],gatewayqueue))
#Add New Gateway to SNS
sns = boto3.client('sns')
sns.publish(
TopicArn=gatewaytopic,
Subject='New Spoke Gateway',
Message=json.dumps(message)
)
# only add one spoke at a time, return now
return {
'Status' : 'SUCCESS'
}
vpcs=ec2.describe_vpcs(Filters=[
{ 'Name': 'state', 'Values': [ 'available' ] },
{ 'Name': 'tag:'+spoketag, 'Values': [ 'false', 'False', 'FALSE' ] }
])
for vpc_peering in vpcs['Vpcs']:
message = {}
message['action'] = 'deletegateway'
message['subnet_spoke'] = vpc_peering['CidrBlock']
message['vpcid_spoke'] = vpc_peering['VpcId']
message['region_spoke'] = region_id
message['gwsize_spoke'] = gwsize_spoke
message['vpcid_hub'] = vpcid_hub
message['primary_account'] = True
if OtherAccountRoleApp:
message['otheraccount'] = True
logger.info('Found VPC %s waiting to be unpeered. Sending SQS message to Queue %s' % (message['vpcid_spoke'],gatewayqueue))
#Add New Gateway to SQS
#sqs = boto3.resource('sqs')
sns = boto3.client('sns')
#queue = sqs.get_queue_by_name(QueueName=gatewayqueue)
#response = queue.send_message(MessageBody=json.dumps(message))
sns.publish(
TopicArn=gatewaytopic,
Subject='Delete Spoke Gateway',
Message=json.dumps(message)
)
return {
'Status' : 'SUCCESS'
}
#Find Spokes waiting to be peered or unpeered in OtherAccountRoleApp
if OtherAccountRoleApp:
if other_credentials:
for region in regions['Regions']:
region_id=region['RegionName']
logger.info('[Other Account] Checking region: %s for VPC tagged %s' % (region_id,spoketag))
ec2=boto3.client('ec2',
region_name=region_id,
aws_access_key_id=other_credentials['Credentials']['AccessKeyId'],
aws_secret_access_key=other_credentials['Credentials']['SecretAccessKey'],
aws_session_token=other_credentials['Credentials']['SessionToken'] )
#Find VPCs with Tag:spoketag = true
#Create Gateway for it and Peer, when done change the Tag:spoketag = peered
vpcs=ec2.describe_vpcs(Filters=[
{ 'Name': 'state', 'Values': [ 'available' ] },
{ 'Name': 'tag:'+spoketag, 'Values': [ 'true', 'True', 'TRUE' ] }
])
for vpc_peering in vpcs['Vpcs']:
message = {}
message['action'] = 'deploygateway'
message['vpcid_spoke'] = vpc_peering['VpcId']
message['region_spoke'] = region_id
message['gwsize_spoke'] = gwsize_spoke
message['vpcid_hub'] = vpcid_hub
message['primary_account'] = False
message['otheraccount'] = True
#Finding the Public Subnet
try:
subnets=find_subnets(ec2,message['region_spoke'],message['vpcid_spoke'])
if subnets:
logger.warning('Subnets found: %s ' % (subnets))
message['subnet_spoke'] = subnets[0]['CidrBlock']
message['subnet_spoke_ha'] = subnets[1]['CidrBlock']
message['subnet_spoke_name'] = subnets[1]['Name']
except:
logger.warning('!!!your spoke vpc subnet is not setup correctly!!!')
continue
message['vpc_cidr_spoke'] = vpc_peering['CidrBlock']
logger.info('Found VPC %s waiting to be peered. Sending SQS message to Queue %s' % (message['vpcid_spoke'],gatewayqueue))
#Add New Gateway to SNS
sns = boto3.client('sns')
sns.publish(
TopicArn=gatewaytopic,
Subject='New Spoke Gateway',
Message=json.dumps(message)
)
# only add one spoke at a time, return now
return {
'Status' : 'SUCCESS'
}
vpcs=ec2.describe_vpcs(Filters=[
{ 'Name': 'state', 'Values': [ 'available' ] },
{ 'Name': 'tag:'+spoketag, 'Values': [ 'false', 'False', 'FALSE' ] }
])
for vpc_peering in vpcs['Vpcs']:
message = {}
message['action'] = 'deletegateway'
message['subnet_spoke'] = vpc_peering['CidrBlock']
message['vpcid_spoke'] = vpc_peering['VpcId']
message['region_spoke'] = region_id
message['gwsize_spoke'] = gwsize_spoke
message['vpcid_hub'] = vpcid_hub
message['otheraccount'] = True
message['primary_account'] = False
logger.info('Found VPC %s waiting to be unpeered. Sending SQS message to Queue %s' % (message['vpcid_spoke'],gatewayqueue))
#Add New Gateway to SQS
#sqs = boto3.resource('sqs')
sns = boto3.client('sns')
#queue = sqs.get_queue_by_name(QueueName=gatewayqueue)
#response = queue.send_message(MessageBody=json.dumps(message))
sns.publish(
TopicArn=gatewaytopic,
Subject='Delete Spoke Gateway',
Message=json.dumps(message)
)
return {
'Status' : 'SUCCESS'
}
return {
'Status' : 'SUCCESS'
}
```
#### File: quickstart-aviatrix-nextgentransithub/scripts/json_validator.py
```python
import glob
import json
import unittest
class TestJsonValidator(unittest.TestCase):
TEMPLATES = 'templates/*.template'
def setUp(self):
"""Load the template files."""
self.templates = glob.glob(self.TEMPLATES)
def test_load(self):
"""Test the JSON parse and the existence of particular keys."""
for template in self.templates:
with open(template, 'r') as data_file:
print template
# Parse the template => fails if invalid JSON.
data = json.load(data_file)
# Look for the top level keys.
for key in ['AWSTemplateFormatVersion',
'Parameters',
'Description',
'Resources']:
self.assertTrue(key in data.keys())
# Look for the 'AWSTemplateFormatVersion' key.
self.assertEqual(data['AWSTemplateFormatVersion'], '2010-09-09')
# Examine the CloudFormation 'Outputs' - expect these keys.
if template == 'templates/quickstart-aviatrix-vpc.template':
self.assertItemsEqual(data['Outputs'].keys(), ['VPCID',
'SubnetID',
'SubnetIDHA',
'SubnetCIDR',
'SubnetCIDRHA'])
if template == 'templates/quickstart-aviatrix-iamroles.template':
self.assertItemsEqual(data['Outputs'].keys(), ['AccountId',
'AviatrixRoleAppARN',
'AviatrixRoleEC2ARN',
'AviatrixInstanceProfile'])
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jorgeboucas/fluff",
"score": 2
} |
#### File: jorgeboucas/fluff/setup.py
```python
from setuptools import setup
from setuptools.command.test import test as TestCommand
from fluff.config import FL_VERSION
import sys
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
DESCRIPTION = "fluff : exploratory analysis and visualization of high-throughput sequencing data"
setup(name='biofluff',
version=FL_VERSION,
description=DESCRIPTION,
author='<NAME>',
author_email='<EMAIL>',
url = 'https://github.com/simonvh/fluff/',
license='MIT',
packages=[
'fluff',
'fluff/commands'
],
scripts=[
"scripts/fluff_bandplot.py",
"scripts/fluff_profile.py",
"scripts/fluff_heatmap.py",
"scripts/fluff",
],
data_files=[],
install_requires=["pysam",
"HTSeq",
"numpy",
"scipy",
"matplotlib",
"colorbrewer",
"pybedtools",
"Pycluster",
"pyBigWig",
],
tests_require=['pytest'],
# dependency_links = [
# "http://bonsai.hgc.jp/~mdehoon/software/cluster/Pycluster-1.52.tar.gz",
# ],
cmdclass = {'test': PyTest},
)
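# Added note (not part of the original file): from a source checkout this setup.py is driven
# the usual way -- "pip install ." installs the package plus the scripts listed above, and
# "python setup.py test" runs the suite through pytest via the PyTest cmdclass defined at the top.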
``` |
{
"source": "jorgebrandao/ShorAlgorithm",
"score": 2
} |
#### File: qiskit/mapper/_compiling.py
```python
import math
import scipy
import numpy as np
from ._mappererror import MapperError
def euler_angles_1q(unitary_matrix):
"""Compute Euler angles for a single-qubit gate.
Find angles (theta, phi, lambda) such that
unitary_matrix = phase * Rz(phi) * Ry(theta) * Rz(lambda)
Return (theta, phi, lambda, "U(theta,phi,lambda)"). The last
element of the tuple is the OpenQASM gate name with parameter
values substituted.
"""
small = 1e-10
if unitary_matrix.shape != (2, 2):
raise MapperError("compiling.euler_angles_1q expected 2x2 matrix")
phase = np.linalg.det(unitary_matrix)**(-1.0/2.0)
U = phase * unitary_matrix # U in SU(2)
# OpenQASM SU(2) parameterization:
# U[0, 0] = exp(-i(phi+lambda)/2) * cos(theta/2)
# U[0, 1] = -exp(-i(phi-lambda)/2) * sin(theta/2)
# U[1, 0] = exp(i(phi-lambda)/2) * sin(theta/2)
# U[1, 1] = exp(i(phi+lambda)/2) * cos(theta/2)
# Find theta
if abs(U[0, 0]) > small:
theta = 2 * math.acos(abs(U[0, 0]))
else:
theta = 2 * math.asin(abs(U[1, 0]))
# Find phi and lambda
phase11 = 0.0
phase10 = 0.0
if abs(math.cos(theta/2.0)) > small:
phase11 = U[1, 1] / math.cos(theta/2.0)
if abs(math.sin(theta/2.0)) > small:
phase10 = U[1, 0] / math.sin(theta/2.0)
phiplambda = 2 * math.atan2(np.imag(phase11), np.real(phase11))
phimlambda = 2 * math.atan2(np.imag(phase10), np.real(phase10))
phi = 0.0
if abs(U[0, 0]) > small and abs(U[1, 0]) > small:
phi = (phiplambda + phimlambda) / 2.0
lamb = (phiplambda - phimlambda) / 2.0
else:
if abs(U[0, 0]) < small:
lamb = -phimlambda
else:
lamb = phiplambda
# Check the solution
Rzphi = np.array([[np.exp(-1j*phi/2.0), 0],
[0, np.exp(1j*phi/2.0)]], dtype=complex)
Rytheta = np.array([[np.cos(theta/2.0), -np.sin(theta/2.0)],
[np.sin(theta/2.0), np.cos(theta/2.0)]], dtype=complex)
Rzlambda = np.array([[np.exp(-1j*lamb/2.0), 0],
[0, np.exp(1j*lamb/2.0)]], dtype=complex)
V = np.dot(Rzphi, np.dot(Rytheta, Rzlambda))
if np.linalg.norm(V - U) > small:
raise MapperError("compiling.euler_angles_1q incorrect result")
return theta, phi, lamb, "U(%.15f,%.15f,%.15f)" % (theta, phi, lamb)
def simplify_U(theta, phi, lam):
"""Return the gate u1, u2, or u3 implementing U with the fewest pulses.
U(theta, phi, lam) is the input gate.
The returned gate implements U exactly, not up to a global phase.
Return (gate_string, params, "OpenQASM string") where gate_string is one of
"u1", "u2", "u3", "id" and params is a 3-tuple of parameter values. The
OpenQASM string is the name of the gate with parameters substituted.
"""
epsilon = 1e-13
name = "u3"
params = (theta, phi, lam)
qasm = "u3(%.15f,%.15f,%.15f)" % params
# Y rotation is 0 mod 2*pi, so the gate is a u1
if abs(params[0] % (2.0 * math.pi)) < epsilon:
name = "u1"
params = (0.0, 0.0, params[1] + params[2] + params[0])
qasm = "u1(%.15f)" % params[2]
# Y rotation is pi/2 or -pi/2 mod 2*pi, so the gate is a u2
if name == "u3":
# theta = pi/2 + 2*k*pi
if abs((params[0] - math.pi / 2) % (2.0 * math.pi)) < epsilon:
name = "u2"
params = (math.pi / 2, params[1],
params[2] + (params[0] - math.pi / 2))
qasm = "u2(%.15f,%.15f)" % (params[1], params[2])
# theta = -pi/2 + 2*k*pi
if abs((params[0] + math.pi / 2) % (2.0 * math.pi)) < epsilon:
name = "u2"
params = (math.pi / 2, params[1] + math.pi,
params[2] - math.pi + (params[0] + math.pi / 2))
qasm = "u2(%.15f,%.15f)" % (params[1], params[2])
# u1 and lambda is 0 mod 4*pi so gate is nop
if name == "u1" and abs(params[2] % (4.0 * math.pi)) < epsilon:
name = "id"
params = (0.0, 0.0, 0.0)
qasm = "id"
return name, params, qasm
def rz_array(theta):
"""Return numpy array for Rz(theta).
Rz(theta) = diag(exp(-i*theta/2),exp(i*theta/2))
"""
return np.array([[np.exp(-1j*theta/2.0), 0],
[0, np.exp(1j*theta/2.0)]], dtype=complex)
def ry_array(theta):
"""Return numpy array for Ry(theta).
Ry(theta) = [[cos(theta/2), -sin(theta/2)],
[sin(theta/2), cos(theta/2)]]
"""
return np.array([[math.cos(theta/2.0), -math.sin(theta/2.0)],
[math.sin(theta/2.0), math.cos(theta/2.0)]],
dtype=complex)
def two_qubit_kak(unitary_matrix):
"""Decompose a two-qubit gate over CNOT + SU(2) using the KAK decomposition.
Based on MATLAB implementation by <NAME>.
Computes a sequence of 10 single and two qubit gates, including 3 CNOTs,
which multiply to U, including global phase. Uses Vatan and Williams
optimal two-qubit circuit (quant-ph/0308006v3). The decomposition algorithm
which achieves this is explained well in Drury and Love, 0806.4015.
unitary_matrix = numpy 4x4 unitary matrix
"""
if unitary_matrix.shape != (4, 4):
raise MapperError("compiling.two_qubit_kak expected 4x4 matrix")
phase = np.linalg.det(unitary_matrix)**(-1.0/4.0)
# Make it in SU(4), correct phase at the end
U = phase * unitary_matrix
# B changes to the Bell basis
B = (1.0/math.sqrt(2)) * np.array([[1, 1j, 0, 0],
[0, 0, 1j, 1],
[0, 0, 1j, -1],
[1, -1j, 0, 0]], dtype=complex)
# U' = Bdag . U . B
Uprime = np.dot(np.transpose(B.conjugate()), np.dot(U, B))
# M^2 = trans(U') . U'
M2 = np.dot(np.transpose(Uprime), Uprime)
# Diagonalize M2
# Must use diagonalization routine which finds a real orthogonal matrix P
# when M2 is real.
D, P = np.linalg.eig(M2)
# If det(P) == -1, apply a swap to make P in SO(4)
if abs(np.linalg.det(P)+1) < 1e-5:
swap = np.array([[1, 0, 0, 0],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 0, 1]], dtype=complex)
P = np.dot(P, swap)
D = np.diag(np.dot(swap, np.dot(np.diag(D), swap)))
Q = np.diag(np.sqrt(D)) # array from elementwise sqrt
# Want to take square root so that Q has determinant 1
if abs(np.linalg.det(Q)+1) < 1e-5:
Q[0, 0] = -Q[0, 0]
Kprime = np.dot(Uprime, np.dot(P, np.dot(np.linalg.inv(Q),
np.transpose(P))))
K1 = np.dot(B, np.dot(Kprime, np.dot(P, np.transpose(B.conjugate()))))
A = np.dot(B, np.dot(Q, np.transpose(B.conjugate())))
K2 = np.dot(B, np.dot(np.transpose(P), np.transpose(B.conjugate())))
KAK = np.dot(K1, np.dot(A, K2))
if np.linalg.norm(KAK - U, 2) > 1e-6:
raise MapperError("compiling.two_qubit_kak: " +
"unknown error in KAK decomposition")
# Compute parameters alpha, beta, gamma so that
# A = exp(i * (alpha * XX + beta * YY + gamma * ZZ))
x = np.array([[0, 1], [1, 0]], dtype=complex)
y = np.array([[0, -1j], [1j, 0]], dtype=complex)
z = np.array([[1, 0], [0, -1]], dtype=complex)
xx = np.kron(x, x)
yy = np.kron(y, y)
zz = np.kron(z, z)
alpha = math.atan(np.trace(np.imag(np.dot(A, xx)))/np.trace(np.real(A)))
beta = math.atan(np.trace(np.imag(np.dot(A, yy)))/np.trace(np.real(A)))
gamma = math.atan(np.trace(np.imag(np.dot(A, zz)))/np.trace(np.real(A)))
# K1 = kron(U1, U2) and K2 = kron(V1, V2)
# Find the matrices U1, U2, V1, V2
L = K1[0:2, 0:2]
if np.linalg.norm(L) < 1e-9:
L = K1[0:2, 2:4]
if np.linalg.norm(L) < 1e-9:
L = K1[2:4, 2:4]
Q = np.dot(L, np.transpose(L.conjugate()))
U2 = L / np.sqrt(Q[0, 0])
R = np.dot(K1, np.kron(np.identity(2), np.transpose(U2.conjugate())))
U1 = np.array([[0, 0], [0, 0]], dtype=complex)
U1[0, 0] = R[0, 0]
U1[0, 1] = R[0, 2]
U1[1, 0] = R[2, 0]
U1[1, 1] = R[2, 2]
L = K2[0:2, 0:2]
if np.linalg.norm(L) < 1e-9:
L = K2[0:2, 2:4]
if np.linalg.norm(L) < 1e-9:
L = K2[2:4, 2:4]
Q = np.dot(L, np.transpose(L.conjugate()))
V2 = L / np.sqrt(Q[0, 0])
R = np.dot(K2, np.kron(np.identity(2), np.transpose(V2.conjugate())))
V1 = np.array([[0, 0], [0, 0]], dtype=complex)
V1[0, 0] = R[0, 0]
V1[0, 1] = R[0, 2]
V1[1, 0] = R[2, 0]
V1[1, 1] = R[2, 2]
if np.linalg.norm(np.kron(U1, U2) - K1) > 1e-4 or \
np.linalg.norm(np.kron(V1, V2) - K2) > 1e-4:
raise MapperError("compiling.two_qubit_kak: " +
"error in SU(2) x SU(2) part")
test = scipy.linalg.expm(1j*(alpha * xx + beta * yy + gamma * zz))
if np.linalg.norm(A - test) > 1e-4:
raise MapperError("compiling.two_qubit_kak: " +
"error in A part")
# Circuit that implements K1 * A * K2 (up to phase), using
# Vatan and Williams Fig. 6 of quant-ph/0308006v3
# Include prefix and suffix single-qubit gates into U2, V1 respectively.
V2 = np.dot(np.array([[np.exp(1j*np.pi/4), 0],
[0, np.exp(-1j*np.pi/4)]], dtype=complex), V2)
U1 = np.dot(U1, np.array([[np.exp(-1j*np.pi/4), 0],
[0, np.exp(1j*np.pi/4)]], dtype=complex))
# Corrects global phase: exp(ipi/4)*phase'
U1 = np.dot(U1, np.array([[np.exp(1j*np.pi/4), 0],
[0, np.exp(1j*np.pi/4)]], dtype=complex))
U1 = phase.conjugate() * U1
# Test
g1 = np.kron(V1, V2)
g2 = np.array([[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0]], dtype=complex)
theta = 2*gamma - np.pi/2
Ztheta = np.array([[np.exp(1j*theta/2), 0],
[0, np.exp(-1j*theta/2)]], dtype=complex)
kappa = np.pi/2 - 2*alpha
Ykappa = np.array([[np.cos(kappa/2), np.sin(kappa/2)],
[-np.sin(kappa/2), np.cos(kappa/2)]], dtype=complex)
g3 = np.kron(Ztheta, Ykappa)
g4 = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0]], dtype=complex)
zeta = 2*beta - np.pi/2
Yzeta = np.array([[np.cos(zeta/2), np.sin(zeta/2)],
[-np.sin(zeta/2), np.cos(zeta/2)]], dtype=complex)
g5 = np.kron(np.identity(2), Yzeta)
g6 = g2
g7 = np.kron(U1, U2)
V = np.dot(g2, g1)
V = np.dot(g3, V)
V = np.dot(g4, V)
V = np.dot(g5, V)
V = np.dot(g6, V)
V = np.dot(g7, V)
if np.linalg.norm(V - U*phase.conjugate()) > 1e-6:
raise MapperError("compiling.two_qubit_kak: " +
"sequence incorrect, unknown error")
v1_param = euler_angles_1q(V1)
v2_param = euler_angles_1q(V2)
u1_param = euler_angles_1q(U1)
u2_param = euler_angles_1q(U2)
v1_gate = simplify_U(v1_param[0], v1_param[1], v1_param[2])
v2_gate = simplify_U(v2_param[0], v2_param[1], v2_param[2])
u1_gate = simplify_U(u1_param[0], u1_param[1], u1_param[2])
u2_gate = simplify_U(u2_param[0], u2_param[1], u2_param[2])
return_circuit = []
return_circuit.append({
"name": v1_gate[0],
"args": [0],
"params": v1_gate[1]
})
return_circuit.append({
"name": v2_gate[0],
"args": [1],
"params": v2_gate[1]
})
return_circuit.append({
"name": "cx",
"args": [1, 0],
"params": ()
})
gate = simplify_U(0.0, 0.0, -2.0*gamma + np.pi/2.0)
return_circuit.append({
"name": gate[0],
"args": [0],
"params": gate[1]
})
gate = simplify_U(-np.pi/2.0 + 2.0*alpha, 0.0, 0.0)
return_circuit.append({
"name": gate[0],
"args": [1],
"params": gate[1]
})
return_circuit.append({
"name": "cx",
"args": [0, 1],
"params": ()
})
gate = simplify_U(-2.0*beta + np.pi/2.0, 0.0, 0.0)
return_circuit.append({
"name": gate[0],
"args": [1],
"params": gate[1]
})
return_circuit.append({
"name": "cx",
"args": [1, 0],
"params": ()
})
return_circuit.append({
"name": u1_gate[0],
"args": [0],
"params": u1_gate[1]
})
return_circuit.append({
"name": u2_gate[0],
"args": [1],
"params": u2_gate[1]
})
# Test gate sequence
V = np.identity(4)
cx21 = np.array([[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0]], dtype=complex)
cx12 = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0]], dtype=complex)
for gate in return_circuit:
if gate["name"] == "cx":
if gate["args"] == [0, 1]:
V = np.dot(cx12, V)
else:
V = np.dot(cx21, V)
else:
if gate["args"] == [0]:
V = np.dot(np.kron(rz_array(gate["params"][2]),
np.identity(2)), V)
V = np.dot(np.kron(ry_array(gate["params"][0]),
np.identity(2)), V)
V = np.dot(np.kron(rz_array(gate["params"][1]),
np.identity(2)), V)
else:
V = np.dot(np.kron(np.identity(2),
rz_array(gate["params"][2])), V)
V = np.dot(np.kron(np.identity(2),
ry_array(gate["params"][0])), V)
V = np.dot(np.kron(np.identity(2),
rz_array(gate["params"][1])), V)
# Put V in SU(4) and test up to global phase
V = np.linalg.det(V)**(-1.0/4.0) * V
if np.linalg.norm(V - U) > 1e-6 and \
np.linalg.norm(1j*V - U) > 1e-6 and \
np.linalg.norm(-1*V - U) > 1e-6 and \
np.linalg.norm(-1j*V - U) > 1e-6:
raise MapperError("compiling.two_qubit_kak: " +
"sequence incorrect, unknown error")
return return_circuit
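# Hypothetical usage sketch (added for illustration; not part of the original module).
# euler_angles_1q recovers (theta, phi, lambda) for any 2x2 unitary, and simplify_U picks
# the cheapest of u1/u2/u3 that implements a given rotation exactly.
if __name__ == '__main__':
    # Recover Euler angles for a Hadamard: theta=pi/2, phi=0, lambda=pi (up to float error).
    hadamard = (1.0 / math.sqrt(2)) * np.array([[1, 1], [1, -1]], dtype=complex)
    print(euler_angles_1q(hadamard)[3])
    # With exact angles, simplify_U reports the cheaper two-parameter "u2" form.
    print(simplify_U(math.pi / 2, 0.0, math.pi))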
``` |
{
"source": "jorge-canas/pdfTermExplorer",
"score": 3
} |
#### File: jorge-canas/pdfTermExplorer/utils.py
```python
import re
from cStringIO import StringIO
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
def removePunctuation(text):
return re.sub(r"[^A-Za-záéíóúñÁÉÍÓÚÑüǗ]+", " ", text)
def cleanWords(text):
return removePunctuation(text).lower().strip().split(' ')
def convert(fname, pages=None):
if not pages:
pagenums = set()
else:
pagenums = set(pages)
output = StringIO()
manager = PDFResourceManager()
converter = TextConverter(manager, output, laparams=LAParams())
interpreter = PDFPageInterpreter(manager, converter)
infile = file(fname, 'rb')
for page in PDFPage.get_pages(infile, pagenums):
interpreter.process_page(page)
infile.close()
converter.close()
text = output.getvalue()
output.close()
return text
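# Hypothetical usage (added comment; the file name is illustrative). Python 2 only, because
# of cStringIO and the file() builtin used above:
#   text = convert('paper.pdf', pages=[0, 1])   # extract the first two pages
#   words = cleanWords(text)                    # lowercase tokens with punctuation stripped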
``` |
{
"source": "Jorgecardenas1/mcmc_multiprocessing",
"score": 3
} |
#### File: mcmc_multiprocessing/mc_sim/MCMC.py
```python
from mc_sim.imports import *
import math
class MCMC:
output_data = 0
def __init__(self):
pass
def gaussian_sample(self,params,N):
theta_sampled=[]
for key, value in params.items():
selected_samples = np.random.normal(value["nu"], value["sigma"], N)
theta_sampled.append(selected_samples[0])
return theta_sampled
def mirror_gaussian_sample(self,params,N):
theta_sampled=[]
intermediate_sample=[]
for key, value in params.items():
prior_eval=-np.inf
while prior_eval==-np.inf:
#selected_samples = np.random.uniform(low=value["min"], high=value["max"], size=1)
selected_samples = np.random.normal(value["nu"], value["sigma"], 1*N)
prior_eval = self.prior(selected_samples,value["min"],value["max"])
intermediate_sample.append(selected_samples)
return np.mean(np.array(intermediate_sample).T,axis=0)
#return np.array(intermediate_sample).T
#Assign values based on current state
#Get an array with the evaluation for a specific parameter value in the whole range of X
def prior(self,theta,minm, maxm):
#in this case priors are just the required check of parameter conditions
#it is unknown.
#it must return an array with prior evaluation of every theta
#i evaluate a SINGLE THETA, A SINGLE PARAMETER EVERY TIME
#depending on which conditional probability i am evaluating
#m, b, log_f = theta
if minm < theta < maxm:
return True
#return -np.inf
return True
#just assuming everything is in range
# Metropolis-Hastings
def set_dataframe(self,parameters):
columns=['iteration','walker','accepted','likelihood']
global_simulation_data = pd.DataFrame(columns=columns)
new_dtypes = {"iteration": int,"likelihood":np.float64,"walker":int,"accepted":"bool"}
global_simulation_data[columns] = global_simulation_data[columns].astype(new_dtypes)
for parameter in parameters:
global_simulation_data.insert(len(global_simulation_data.columns), parameter,'' )
new_dtypes = {parameter:np.float64}
global_simulation_data[parameter] = global_simulation_data[parameter].astype(np.float64)
return global_simulation_data
def acceptance(self,new_loglik,old_log_lik):
if (new_loglik > old_log_lik):
return True
else:
u = np.random.uniform(0.0,1.0)
# Since we did a log likelihood, we need to exponentiate in order to compare to the random number
# proposals with a lower likelihood are accepted with probability exp(new_loglik - old_log_lik)
return (u < (np.exp(new_loglik - old_log_lik)))
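# Added illustration (not original code): this is the standard Metropolis-Hastings acceptance
# rule on log-likelihoods -- an improvement is always kept, while a worse proposal survives
# with probability exp(new_loglik - old_log_lik), e.g.
#   MCMC().acceptance(new_loglik=-12.0, old_log_lik=-10.0)   # True about exp(-2) ~ 13.5% of the time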
def thining(self,dataframe,thining,num_samples ):
stack = pd.DataFrame()
walkers = dataframe.walker.unique()
for walker in walkers:
selected = dataframe.loc[dataframe['walker'].isin([walker])]
#selected = selected.sample(frac=0.8)
selected = selected.nsmallest(int(num_samples*0.55),['likelihood']) #Thining
selected = selected[selected.index % thining == 0]
stack = pd.concat([stack,selected],ignore_index=True)
return stack.sort_values(by=['iteration']).reset_index(drop=True)
def MH(self,sky_model,parameters,t_sky_data,sigma_parameter,evaluateLogLikelihood,initial_point,num_walkers,rank,comm,size_cores,num_samples):
accepted = 0.0
row=0
iteration=0
thetas_samples=[]
thetas_samples.append(initial_point)
num_of_params=len(initial_point)
walkers_result=[]
dataframe_array = [self.set_dataframe(parameters) for i in range(size_cores)]
if rank==0:
with tqdm(total=(num_samples)) as pbar:
initial_point = self.gaussian_sample(parameters,1)
for n in range(num_samples):
pbar.update(1)
old_theta = np.array(thetas_samples[len(thetas_samples)-1], copy=True)
old_log_lik = evaluateLogLikelihood(old_theta,t_sky_data.Freq,t_sky_data.t_sky,sigma_parameter)
params = sky_model.update_parameters(old_theta) #this has impact when using gaussian proposed distribution
new_theta = self.mirror_gaussian_sample(params,1)
new_loglik = evaluateLogLikelihood(new_theta,t_sky_data.Freq,t_sky_data.t_sky,sigma_parameter)
# Accept new candidate in Monte-Carlo.
if self.acceptance(new_loglik,old_log_lik):
thetas_samples.append(new_theta)
accepted = accepted + 1.0 # monitor acceptance
data = np.concatenate(([iteration, rank,1, new_loglik],new_theta),axis=0)
dataframe_array[rank].loc[iteration] = data
else:
thetas_samples.append(old_theta)
data = np.concatenate(([iteration, rank, 0, old_log_lik],old_theta),axis=0)
dataframe_array[rank].loc[iteration] = data
for i in range(1, size_cores):
req2 = comm.irecv(source=i, tag=12)
received = req2.wait()
dataframe_array[i].loc[received[0]] = received
iteration += 1
print("accepted"+str(accepted))
return dataframe_array
else:
with tqdm(total=(num_samples)) as pbar:
initial_point = self.gaussian_sample(parameters,1)
for n in range(num_samples):
pbar.update(1)
old_theta = np.array(thetas_samples[len(thetas_samples)-1], copy=True)
old_log_lik = evaluateLogLikelihood(old_theta,t_sky_data.Freq,t_sky_data.t_sky,sigma_parameter)
params = sky_model.update_parameters(old_theta) #this has impact when using gaussian proposed distribution
new_theta = self.mirror_gaussian_sample(params,1)
new_loglik = evaluateLogLikelihood(new_theta,t_sky_data.Freq,t_sky_data.t_sky,sigma_parameter)
# Accept new candidate in Monte-Carlo.
if self.acceptance(new_loglik,old_log_lik):
thetas_samples.append(new_theta)
accepted = accepted + 1.0 # monitor acceptance
data = np.concatenate(([iteration, rank,1, new_loglik],new_theta),axis=0)
req = comm.isend(data, dest=0, tag=12)
req.wait()
else:
thetas_samples.append(old_theta)
data = np.concatenate(([iteration, rank, 0, old_log_lik],old_theta),axis=0)
req = comm.isend(data, dest=0, tag=12)
req.wait()
iteration += 1
print("accepted"+str(accepted))
return np.inf
# accepted = 0.0
# row=0
# iteration=0
# thetas_samples=[]
# thetas_samples.append(initial_point)
# num_of_params=len(initial_point)
# walkers_result=[]
# dataframe = self.set_dataframe(parameters)
# #this division of rank 0 and others can be helpful to
# #establish a main node to manage the others
# num_walkers_min = (int(num_walkers/size_cores) * rank )
# num_walkers_max = num_walkers_min + int(num_walkers/size_cores) -1
# print("\nwalkers to process" + str(num_walkers_max-num_walkers_min+1))
# with tqdm(total=(num_samples*(num_walkers_max+1-num_walkers_min))) as pbar:
# for walker in range(num_walkers_min, num_walkers_max+1):
# iteration = num_samples*walker
# #initial_point = self.gaussian_sample(parameters,1)
# for n in range(num_samples):
# pbar.update(1)
# old_theta = np.array(thetas_samples[len(thetas_samples)-1], copy=True)
# old_log_lik = evaluateLogLikelihood(old_theta,t_sky_data.Freq,t_sky_data.t_sky,sigma_parameter)
# params = sky_model.update_parameters(old_theta) #this has impact when using gaussian proposed distribution
# new_theta = self.mirror_gaussian_sample(params,1)
# new_loglik = evaluateLogLikelihood(new_theta,t_sky_data.Freq,t_sky_data.t_sky,sigma_parameter)
# # Accept new candidate in Monte-Carlo.
# if n>burn_sample:
# if self.acceptance(new_loglik,old_log_lik):
# accepted = accepted + 1.0 # monitor acceptance
# if rank == 0:
# data = np.concatenate(([iteration, walker,1, new_loglik],new_theta),axis=0)
# dataframe.loc[iteration] = data
# for i in range(1, size_cores):
# data_req = np.empty(9, dtype = float)
# #comm.Recv(data_req, source=i, tag=11)
# req = comm.irecv(source=i, tag=11)
# data_req = req.wait()
# dataframe.loc[int(data_req[0])] = data_req
# else:
# data = np.concatenate(([iteration, walker,1, new_loglik],new_theta),axis=0)
# #comm.Send(data, dest=0, tag=11)
# req = comm.isend(data, dest=0, tag=11)
# req.wait()
# else:
# if rank == 0:
# data = np.concatenate(([iteration, walker, 0, old_log_lik],old_theta),axis=0)
# dataframe.loc[iteration] = data
# for i in range(1, size_cores):
# req = comm.irecv(source=i, tag=11)
# data_req = req.wait()
# #data_req = np.empty(9, dtype = float)
# #comm.Recv(data_req, source=i, tag=11)
# dataframe.loc[int(data_req[0])] = data_req
# else:
# data = np.concatenate(([iteration, walker, 0, old_log_lik],old_theta),axis=0)
# #comm.Send(data, dest=0, tag=11)
# req = comm.isend(data, dest=0, tag=11)
# req.wait()
# iteration = iteration + 1
# return dataframe
``` |
{
"source": "jorgecarleitao/materialize",
"score": 2
} |
#### File: materialize/mzcompose/__init__.py
```python
import argparse
import copy
import importlib
import importlib.abc
import importlib.util
import inspect
import ipaddress
import json
import os
import re
import shlex
import subprocess
import sys
from contextlib import contextmanager
from inspect import getmembers, isfunction
from pathlib import Path
from tempfile import TemporaryFile
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Literal,
Optional,
Sequence,
TypedDict,
TypeVar,
Union,
)
import pg8000
import sqlparse
import yaml
from materialize import mzbuild, spawn, ui
from materialize.ui import UIError
T = TypeVar("T")
say = ui.speaker("C> ")
class UnknownCompositionError(UIError):
"""The specified composition was unknown."""
def __init__(self, name: str):
super().__init__(f"unknown composition {name!r}")
class LintError:
def __init__(self, file: Path, message: str):
self.file = file
self.message = message
def __str__(self) -> str:
return f"{os.path.relpath(self.file)}: {self.message}"
def __lt__(self, other: "LintError") -> bool:
return (self.file, self.message) < (other.file, other.message)
def lint_composition(path: Path, composition: Any, errors: List[LintError]) -> None:
if "services" not in composition:
return
for (name, service) in composition["services"].items():
if service.get("mzbuild") == "materialized":
lint_materialized_service(path, name, service, errors)
elif "mzbuild" not in service and "image" in service:
lint_image_name(path, service["image"], errors)
if isinstance(service.get("environment"), dict):
errors.append(
LintError(
path, f"environment for service {name} uses dict instead of list"
)
)
def lint_image_name(path: Path, spec: str, errors: List[LintError]) -> None:
from materialize.mzcompose.services import (
DEFAULT_CONFLUENT_PLATFORM_VERSION,
LINT_DEBEZIUM_VERSIONS,
)
match = re.search(r"((?P<repo>[^/]+)/)?(?P<image>[^:]+)(:(?P<tag>.*))?", spec)
if not match:
errors.append(LintError(path, f"malformatted image specification: {spec}"))
return
(repo, image, tag) = (match.group("repo"), match.group("image"), match.group("tag"))
if not tag:
errors.append(LintError(path, f"image {spec} missing tag"))
elif tag == "latest":
errors.append(LintError(path, f'image {spec} depends on floating "latest" tag'))
if repo == "confluentinc" and image.startswith("cp-"):
# An '$XXX' environment variable may have been used to specify the version
if "$" not in tag and tag != DEFAULT_CONFLUENT_PLATFORM_VERSION:
errors.append(
LintError(
path,
f"image {spec} depends on wrong version of Confluent Platform "
f"(want {DEFAULT_CONFLUENT_PLATFORM_VERSION})",
)
)
if repo == "debezium":
if "$" not in tag and tag not in LINT_DEBEZIUM_VERSIONS:
errors.append(
LintError(
path,
f"image {spec} depends on wrong version of Debezium "
f"(want {LINT_DEBEZIUM_VERSIONS})",
)
)
if not repo and image == "zookeeper":
errors.append(
LintError(
path, f"replace {spec} with official confluentinc/cp-zookeeper image"
)
)
if repo == "wurstmeister" and image == "kafka":
errors.append(
LintError(path, f"replace {spec} with official confluentinc/cp-kafka image")
)
def lint_materialized_service(
path: Path, name: str, service: Any, errors: List[LintError]
) -> None:
# command may be a string that is passed to the shell, or a list of
# arguments.
command = service.get("command", "")
if isinstance(command, str):
command = command.split() # split on whitespace to extract individual arguments
if "--disable-telemetry" not in command:
errors.append(
LintError(
path,
"materialized service command does not include --disable-telemetry",
)
)
env = service.get("environment", [])
if "MZ_DEV=1" not in env:
errors.append(
LintError(
path,
f"materialized service '{name}' does not specify MZ_DEV=1 in its environment: {env}",
)
)
class Composition:
"""A parsed mzcompose.yml with a loaded mzworkflows.py file."""
def __init__(
self, repo: mzbuild.Repository, name: str, preserve_ports: bool = False
):
self.name = name
self.repo = repo
self.images: List[mzbuild.Image] = []
self.workflows: Dict[str, Callable[[Composition], None]] = {}
default_tag = os.getenv(f"MZBUILD_TAG", None)
if name in self.repo.compositions:
self.path = self.repo.compositions[name]
else:
raise UnknownCompositionError(name)
# load the mzcompose.yml file, if one exists
mzcompose_yml = self.path / "mzcompose.yml"
if mzcompose_yml.exists():
with open(mzcompose_yml) as f:
compose = yaml.safe_load(f) or {}
else:
compose = {}
if "version" not in compose:
compose["version"] = "3.7"
if "services" not in compose:
compose["services"] = {}
# Load the mzworkflows.py file, if one exists
mzworkflows_py = self.path / "mzworkflows.py"
if mzworkflows_py.exists():
spec = importlib.util.spec_from_file_location("mzworkflows", mzworkflows_py)
assert spec
module = importlib.util.module_from_spec(spec)
assert isinstance(spec.loader, importlib.abc.Loader)
spec.loader.exec_module(module)
for name, fn in getmembers(module, isfunction):
if name.startswith("workflow_"):
# The name of the workflow is the name of the function
# with the "workflow_" prefix stripped and any underscores
# replaced with dashes.
name = name[len("workflow_") :].replace("_", "-")
self.workflows[name] = fn
for python_service in getattr(module, "services", []):
compose["services"][python_service.name] = python_service.config
# Resolve all services that reference an `mzbuild` image to a specific
# `image` reference.
for name, config in compose["services"].items():
if "mzbuild" in config:
image_name = config["mzbuild"]
if image_name not in self.repo.images:
raise UIError(f"mzcompose: unknown image {image_name}")
image = self.repo.images[image_name]
override_tag = os.getenv(
f"MZBUILD_{image.env_var_name()}_TAG", default_tag
)
if override_tag is not None:
config["image"] = image.docker_name(override_tag)
print(
f"mzcompose: warning: overriding {image_name} image to tag {override_tag}",
file=sys.stderr,
)
del config["mzbuild"]
else:
self.images.append(image)
if "propagate_uid_gid" in config:
config["user"] = f"{os.getuid()}:{os.getgid()}"
del config["propagate_uid_gid"]
ports = config.setdefault("ports", [])
for i, port in enumerate(ports):
if ":" in str(port):
raise UIError(
"programming error: disallowed host port in service {name!r}"
)
if preserve_ports:
# If preserving ports, bind the container port to the same
# host port.
ports[i] = f"{port}:{port}"
if self.repo.rd.coverage:
# Emit coverage information to a file in a directory that is
# bind-mounted to the "coverage" directory on the host. We
# inject the configuration to all services for simplicity, but
# this only has an effect if the service runs instrumented Rust
# binaries.
config.setdefault("environment", []).append(
f"LLVM_PROFILE_FILE=/coverage/{name}-%m.profraw"
)
config.setdefault("volumes", []).append("./coverage:/coverage")
# Add default volumes
compose.setdefault("volumes", {}).update(
{
"mzdata": None,
"tmp": None,
"secrets": None,
}
)
deps = self.repo.resolve_dependencies(self.images)
for config in compose["services"].values():
if "mzbuild" in config:
config["image"] = deps[config["mzbuild"]].spec()
del config["mzbuild"]
self.compose = compose
# Emit the munged configuration to a temporary file so that we can later
# pass it to Docker Compose.
self.file = TemporaryFile()
os.set_inheritable(self.file.fileno(), True)
self._write_compose()
def _write_compose(self) -> None:
self.file.seek(0)
self.file.truncate()
yaml.dump(self.compose, self.file, encoding="utf-8") # type: ignore
self.file.flush()
def get_workflow(self, workflow_name: str) -> "Workflow":
"""Return sub-workflow."""
if workflow_name not in self.workflows:
raise KeyError(f"No workflow called {workflow_name} in {self.name}")
return Workflow(
name=workflow_name,
func=self.workflows[workflow_name],
composition=self,
)
@classmethod
def lint(cls, repo: mzbuild.Repository, name: str) -> List[LintError]:
"""Checks a composition for common errors."""
if not name in repo.compositions:
raise UnknownCompositionError(name)
errs: List[LintError] = []
path = repo.compositions[name] / "mzcompose.yml"
if path.exists():
with open(path) as f:
composition = yaml.safe_load(f) or {}
lint_composition(path, composition, errs)
return errs
def run(
self,
args: List[str],
env: Optional[Dict[str, str]] = None,
capture: bool = False,
capture_combined: bool = False,
check: bool = True,
) -> "subprocess.CompletedProcess[str]":
"""Invokes docker-compose on the composition.
Arguments to specify the files in the composition and the project
directory are added automatically.
Args:
args: Additional arguments to pass to docker-compose.
env: Additional environment variables to set for the child process.
These are merged with the current environment.
capture: Whether to capture the child's stdout and stderr, or
whether to emit directly to the current stdout/stderr streams.
capture_combined: capture stdout and stderr, and direct all output
to the stdout property on the returned object
check: Whether to raise an error if the child process exits with
a failing exit code.
"""
print(f"$ docker-compose {' '.join(args)}", file=sys.stderr)
self.file.seek(0)
if env is not None:
env = dict(os.environ, **env)
stdout = 1
stderr = 2
if capture:
stdout = stderr = subprocess.PIPE
if capture_combined:
stdout = subprocess.PIPE
stderr = subprocess.STDOUT
try:
return subprocess.run(
[
"docker-compose",
f"-f/dev/fd/{self.file.fileno()}",
"--project-directory",
self.path,
*args,
],
env=env,
close_fds=False,
check=check,
stdout=stdout,
stderr=stderr,
encoding="utf-8",
)
except subprocess.CalledProcessError as e:
raise UIError(f"running docker-compose failed (exit status {e.returncode})")
def find_host_ports(self, service: str) -> List[str]:
"""Find all ports open on the host for a given service"""
# Parsing the output of `docker-compose ps` directly is fraught, as the
# output depends on terminal width (!). Using the `-q` flag is safe,
# however, and we can pipe the container IDs into `docker inspect`,
# which supports machine-readable output.
if service not in self.compose["services"]:
raise UIError(f"unknown service {service!r}")
ports = []
for info in self.inspect_service_containers(service):
for (name, port_entry) in info["NetworkSettings"]["Ports"].items():
for p in port_entry or []:
# When IPv6 is enabled, Docker will bind each port twice. Consider
# only IPv4 address to avoid spurious warnings about duplicate
# ports.
if p["HostPort"] not in ports and isinstance(
ipaddress.ip_address(p["HostIp"]), ipaddress.IPv4Address
):
ports.append(p["HostPort"])
return ports
def inspect_service_containers(
self, service: str, include_stopped: bool = False
) -> Iterable[Dict[str, Any]]:
"""
Return the JSON from `docker inspect` for each container in the given compose service
There is no explicit documentation of the structure of the returned
fields, but you can see them in the docker core repo:
https://github.com/moby/moby/blob/91dc595e9648318/api/types/types.go#L345-L379
"""
cmd = ["ps", "-q"]
if include_stopped:
cmd.append("-a")
containers = self.run(cmd, capture=True).stdout.splitlines()
if not containers:
return
metadata = spawn.capture(["docker", "inspect", "-f", "{{json .}}", *containers])
for line in metadata.splitlines():
info = json.loads(line)
labels = info["Config"].get("Labels")
if (
labels is not None
and labels.get("com.docker.compose.service") == service
and labels.get("com.docker.compose.project") == self.name
):
yield info
def service_logs(self, service_name: str, tail: int = 20) -> str:
proc = self.run(
[
"logs",
"--tail",
str(tail),
service_name,
],
check=True,
capture_combined=True,
)
return proc.stdout
def get_container_id(self, service: str, running: bool = False) -> str:
"""Given a service name, tries to find a unique matching container id
If running is True, only return running containers.
"""
try:
if running:
cmd = f"docker ps".split()
else:
cmd = f"docker ps -a".split()
list_containers = spawn.capture(cmd, unicode=True)
pattern = re.compile(f"^(?P<c_id>[^ ]+).*{service}")
matches = []
for line in list_containers.splitlines():
m = pattern.search(line)
if m:
matches.append(m.group("c_id"))
if len(matches) != 1:
raise UIError(
f"failed to get a unique container id for service {service}, found: {matches}"
)
return matches[0]
except subprocess.CalledProcessError as e:
raise UIError(f"failed to get container id for {service}: {e}")
def docker_inspect(self, format: str, container_id: str) -> str:
try:
cmd = f"docker inspect -f '{format}' {container_id}".split()
output = spawn.capture(cmd, unicode=True, stderr_too=True).splitlines()[0]
except subprocess.CalledProcessError as e:
ui.log_in_automation(
"docker inspect ({}): error running {}: {}, stdout:\n{}\nstderr:\n{}".format(
container_id, ui.shell_quote(cmd), e, e.stdout, e.stderr
)
)
raise UIError(f"failed to inspect Docker container: {e}")
else:
return output
def docker_container_is_running(self, container_id: str) -> bool:
return self.docker_inspect("{{.State.Running}}", container_id) == "'true'"
class Workflow:
"""
A workflow is a collection of WorkflowSteps and some context
It is possible to specify additional compose files for specific workflows, and all
their child workflows will have access to services defined in those files.
"""
def __init__(
self,
name: str,
func: Callable,
composition: Composition,
) -> None:
self.name = name
self.func = func
self.composition = composition
self.takes_args = len(inspect.signature(func).parameters) > 1
def run(self, args: List[str]) -> None:
print("Running Python function {}".format(self.name))
if self.takes_args:
self.func(self, args)
else:
# If the workflow doesn't have an `args` parameter, construct
# an empty parser to reject bogus arguments and to handle the
# trivial help message.
parser = WorkflowArgumentParser(self)
parser.parse_args(args)
self.func(self)
@contextmanager
def with_services(self, services: List["Service"]) -> Iterator[None]:
"""Temporarily update the composition with the specified services.
The services must already exist in the composition. They restored to
their old definitions when the `with` block ends. Note that the service
definition is written in its entirety; i.e., the configuration is not
deep merged but replaced wholesale.
Lest you are tempted to change this function to allow dynamically
injecting new services: do not do this! These services will not be
visible to other commands, like `mzcompose run`, `mzcompose logs`, or
`mzcompose down`, which makes debugging or inspecting the composition
challenging.
"""
# Remember the old composition.
old_compose = copy.deepcopy(self.composition.compose)
# Update the composition with the new service definitions.
for service in services:
if service.name not in self.composition.compose["services"]:
raise RuntimeError(
"programming error in call to Workflow.with_services: "
f"{service.name!r} does not exist"
)
self.composition.compose["services"][service.name] = service.config
self.composition._write_compose()
try:
# Run the next composition.
yield
finally:
# Restore the old composition.
self.composition.compose = old_compose
self.composition._write_compose()
def run_compose(
self, args: List[str], capture: bool = False
) -> subprocess.CompletedProcess:
return self.composition.run(args, capture=capture)
def run_sql(self, sql: str) -> None:
"""Run a batch of SQL statements against the materialized service."""
ports = self.composition.find_host_ports("materialized")
conn = pg8000.connect(host="localhost", user="materialize", port=int(ports[0]))
conn.autocommit = True
cursor = conn.cursor()
for statement in sqlparse.split(sql):
cursor.execute(statement)
def start_and_wait_for_tcp(self, services: List[str]) -> None:
"""Sequentially start the named services, waiting for eaach to become
available via TCP before moving on to the next."""
# TODO(benesch): once the workflow API is a proper Python API,
# remove the `type: ignore` comments below.
for service in services:
self.start_services(services=[service])
for port in self.composition.compose["services"][service].get("ports", []):
self.wait_for_tcp(host=service, port=port)
def run_service(
self,
service: str,
command: Optional[Union[str, list]] = None,
*,
env: Dict[str, str] = {},
capture: bool = False,
daemon: bool = False,
entrypoint: Optional[str] = None,
) -> Any:
"""Run a service using `mzcompose run`.
Running a service behaves slightly differently than making it come up, importantly it
is not an _error_ if it ends at all.
Args:
service: (required) the name of the service, from the mzcompose file
entrypoint: Overwrite the entrypoint with this
command: the command to run. These are the arguments to the entrypoint
capture: Capture and return output (default: False)
daemon: run as a daemon (default: False)
"""
cmd = []
if daemon:
cmd.append("-d")
if entrypoint:
cmd.append(f"--entrypoint={entrypoint}")
cmd.append(service)
if isinstance(command, str):
cmd.extend(shlex.split(command))
elif isinstance(command, list):
cmd.extend(command)
return self.run_compose(
args=[
"run",
*(f"-e{k}={v}" for k, v in env.items()),
*cmd,
],
capture=capture,
).stdout
def start_services(self, services: List[str]) -> None:
"""Start a service.
This method delegates to `docker-compose up -d`. See that command's help
for details.
Args:
services: The names of services in the workflow.
"""
self.run_compose(["up", "-d", *services])
def kill_services(self, services: List[str], signal: Optional[str] = None) -> None:
"""Kill a service.
This method delegates to `docker-compose kill`. See that command's help
for details.
Args:
services: The names of services in the workflow.
signal: The signal to send. The default is SIGKILL.
"""
self.run_compose(
[
"kill",
*(["-s", signal] if signal else []),
*services,
]
)
def remove_services(
self, services: List[str], destroy_volumes: bool = False
) -> None:
"""Remove a stopped service.
This method delegates to `docker-compose rm`. See that command's help
for details.
Args:
services: The names of services in the workflow.
destroy_volumes: Whether to destroy any anonymous volumes associated
with the service. Note that named volumes are not removed even
when this option is enabled.
"""
self.run_compose(
[
"rm",
"-f",
"-s",
*(["-v"] if destroy_volumes else []),
*services,
],
)
def remove_volumes(self, volumes: List[str]) -> None:
"""Remove the named volumes.
Args:
volumes: The volumes to remove.
"""
volumes = (f"{self.composition.name}_{v}" for v in volumes)
spawn.runv(["docker", "volume", "rm", *volumes])
def wait_for_tcp(
self,
*,
host: str = "localhost",
port: int,
timeout_secs: int = 240,
) -> None:
ui.progress(f"waiting for {host}:{port}", "C")
for remaining in ui.timeout_loop(timeout_secs):
cmd = f"docker run --rm -t --network {self.composition.name}_default ubuntu:focal-20210723".split()
try:
_check_tcp(cmd[:], host, port, timeout_secs)
except subprocess.CalledProcessError:
ui.progress(" {}".format(int(remaining)))
else:
ui.progress(" success!", finish=True)
return
ui.progress(" error!", finish=True)
try:
logs = self.composition.service_logs(host)
except Exception as e:
logs = f"unable to determine logs: {e}"
raise UIError(f"Unable to connect to {host}:{port}\nService logs:\n{logs}")
def wait_for_postgres(
self,
*,
dbname: str = "postgres",
port: Optional[int] = None,
host: str = "localhost",
timeout_secs: int = 120,
query: str = "SELECT 1",
user: str = "postgres",
password: str = "<PASSWORD>",
expected: Union[Iterable[Any], Literal["any"]] = [[1]],
print_result: bool = False,
service: str = "postgres",
) -> None:
"""Wait for a PostgreSQL service to start.
Args:
dbname: the name of the database to wait for
host: the host postgres is listening on
port: the port postgres is listening on
timeout_secs: How long to wait for postgres to be up before failing (Default: 120)
query: The query to execute to ensure that it is running (Default: "SELECT 1")
user: The chosen user (this is only relevant for postgres)
service: The service that postgres is running as (Default: postgres)
"""
if port is None:
ports = self.composition.find_host_ports(service)
if len(ports) != 1:
logs = self.composition.service_logs(service)
if ports:
msg = (
f"Unable to unambiguously determine port for {service},"
f"found ports: {','.join(ports)}\nService logs:\n{logs}"
)
else:
msg = f"No ports found for {service}\nService logs:\n{logs}"
raise UIError(msg)
port = int(ports[0])
else:
port = port
_wait_for_pg(
dbname=dbname,
host=host,
port=port,
timeout_secs=timeout_secs,
query=query,
user=user,
password=password,
expected=expected,
print_result=print_result,
)
def wait_for_mz(
self,
*,
user: str = "materialize",
dbname: str = "materialize",
host: str = "localhost",
port: Optional[int] = None,
timeout_secs: int = 60,
query: str = "SELECT 1",
expected: Union[Iterable[Any], Literal["any"]] = [[1]],
print_result: bool = False,
service: str = "materialized",
) -> None:
"""Like `Workflow.wait_for_mz`, but with Materialize defaults."""
self.wait_for_postgres(
user=user,
dbname=dbname,
host=host,
port=port,
timeout_secs=timeout_secs,
query=query,
expected=expected,
print_result=print_result,
service=service,
)
class ServiceConfig(TypedDict, total=False):
mzbuild: str
image: str
hostname: str
command: str
ports: Sequence[Union[int, str]]
environment: List[str]
depends_on: List[str]
entrypoint: List[str]
volumes: List[str]
networks: Dict[str, Dict[str, List[str]]]
deploy: Dict[str, Dict[str, Dict[str, str]]]
propagate_uid_gid: bool
init: bool
class Service:
"""
A Service is a service that has been specified in the 'services' variable of mzworkflows.py
"""
def __init__(self, name: str, config: ServiceConfig) -> None:
self.name = name
self.config = config
class WorkflowArgumentParser(argparse.ArgumentParser):
"""An argument parser that takes its name and description from a `Workflow`."""
def __init__(self, w: Workflow):
super().__init__(
prog=f"mzcompose run {w.name}", description=inspect.getdoc(w.func)
)
def _check_tcp(
cmd: List[str], host: str, port: int, timeout_secs: int, kind: str = ""
) -> List[str]:
cmd.extend(
[
"timeout",
str(timeout_secs),
"bash",
"-c",
f"cat < /dev/null > /dev/tcp/{host}/{port}",
]
)
try:
spawn.capture(cmd, unicode=True, stderr_too=True)
except subprocess.CalledProcessError as e:
ui.log_in_automation(
"wait-for-tcp ({}{}:{}): error running {}: {}, stdout:\n{}\nstderr:\n{}".format(
kind, host, port, ui.shell_quote(cmd), e, e.stdout, e.stderr
)
)
raise
return cmd
def _wait_for_pg(
timeout_secs: int,
query: str,
dbname: str,
port: int,
host: str,
user: str,
password: str,
print_result: bool,
expected: Union[Iterable[Any], Literal["any"]],
) -> None:
"""Wait for a pg-compatible database (includes materialized)"""
args = f"dbname={dbname} host={host} port={port} user={user} password={password}"
ui.progress(f"waiting for {args} to handle {query!r}", "C")
error = None
for remaining in ui.timeout_loop(timeout_secs):
try:
conn = pg8000.connect(
database=dbname,
host=host,
port=port,
user=user,
password=password,
timeout=1,
)
# The default (autocommit = false) wraps everything in a transaction.
conn.autocommit = True
cur = conn.cursor()
cur.execute(query)
if expected == "any" and cur.rowcount == -1:
ui.progress("success!", finish=True)
return
result = list(cur.fetchall())
if expected == "any" or result == expected:
if print_result:
say(f"query result: {result}")
else:
ui.progress("success!", finish=True)
return
else:
say(
f"host={host} port={port} did not return rows matching {expected} got: {result}"
)
except Exception as e:
ui.progress(" " + str(int(remaining)))
error = e
ui.progress(finish=True)
raise UIError(f"never got correct result for {args}: {error}")
``` |
{
"source": "jorgecarleitao/pt_law_parser",
"score": 3
} |
#### File: pt_law_parser/pt_law_parser/html.py
```python
import re
from collections import defaultdict
from pdfminer.layout import LTComponent, LTImage
from pt_law_parser.auxiliar import eq, middle_x
class Paragraph(object):
@staticmethod
def sanitize(text):
# hyphens in words are getting a space from PDFminer. Remove it.
return re.sub(ur' (\-\w+?)', ur'\1', text, flags=re.U)
def __init__(self, text):
assert(text[-1] != '\n')
self._text = self.sanitize(text.strip())
def merge(self, other_line):
text = other_line.text
if self.text[-1] == '-':
self._text = self._text[:-1]
# don't merge two lines without a space in between if no hyphen
elif text[0] != ' ' and self._text[-1] != ' ':
text = ' ' + text
self._text += self.sanitize(text)
@property
def text(self):
return self._text
def as_html(self):
return '<p>%s</p>' % self.text
class Header(Paragraph):
def as_html(self):
return '<h1>%s</h1>' % self.text
class Table(LTComponent):
"""
A table has the following interface:
1. receives a network and converts it to a set of cells (__init__)
2. receives items and maps then to the correct cells (add)
3. represents itself in HTML (as_html)
"""
class Element():
"""
Represents an element of an HTML table. It has a colspan and rowspan.
"""
def __init__(self, cell):
self.cell = cell
self.row = None
self.column = None
self.colspan = 0
self.rowspan = 0
self._lines = []
self._min_x = 0
self._min_y = 0
@property
def lines(self):
return self._lines
def add(self, row, column):
if self.row is None:
self.row = row
self.column = column
else:
if self.row == row:
self.colspan += 1
if self.column == column:
self.rowspan += 1
def add_line(self, item, bbox):
"""
Adds a line to the cell assuming a bounding box bbox.
"""
# todo: this code is similar to _parse_line. Common implementation?
def remove_dots(text):
return text.replace(' .', '')
text = remove_dots(item.get_text())
if text == '.':
return
line = Paragraph(text)
if not self._lines:
# cell is empty
self._lines.append(line)
self._min_x = item.x0
else:
middle_x_cell = middle_x(bbox)
middle_x_line = middle_x(item.bbox)
is_centered = eq(middle_x_cell, middle_x_line, 1)
if is_centered:
if self._min_y - item.y1 < 0:
self._lines[-1].merge(line)
else:
self._lines.append(line)
elif eq(self._min_x, item.x0, 1):
self._lines.append(line)
else:
self._lines[-1].merge(line)
self._min_y = item.y0
class EmptyTableError(Exception):
"""
Raised by constructor when construction fails because table has no
cells. This means that the constructed network does not constitute a
table and should be ignored.
"""
pass
def __init__(self, network):
if len(network) <= 2:
raise self.EmptyTableError
# construct rows and columns borders by distinct x and y's.
self._rows_borders = sorted(list(
set(point.y for point in network.points)))
self._columns_borders = sorted(list(
set(point.x for point in network.points)))
LTComponent.__init__(self, (self._columns_borders[0],
self._rows_borders[0],
self._columns_borders[-1],
self._rows_borders[-1]))
self._cells = self._create_cells(network)
self._elements = self._build_elements(self._cells)
@staticmethod
def _create_cells(network):
"""
Creates cells from the network and returns them
as LTComponents.
"""
squares_taken = defaultdict(set)
cells = set()
def city_distance(point, point_prime):
return abs(point.x - point_prime.x) + abs(point.y - point_prime.y)
def is_perpendicular(v1_x, v1_y, v2_x, v2_y):
return v1_x*v2_x + v1_y*v2_y == 0
for point in sorted(network, key=lambda p: (p.x, p.y)):
for l1 in sorted(network.links[point],
key=lambda p: city_distance(p, point)):
valid_links = [
link for link in network.links[point] if link != l1 and
is_perpendicular(link.x - point.x, link.y - point.y,
l1.x - point.x, l1.y - point.y)]
for l2 in sorted(valid_links,
key=lambda p: city_distance(p, point)):
inter = network.links[l2].intersection(network.links[l1])
intersection = list(inter)
# remove initial point
intersection.remove(point)
if len(intersection) == 0:
continue
# sort by areas: smallest area first
area = lambda p: (p.x - point.x)*(p.y - point.y)
intersection.sort(key=area)
# square is formed by [point, l1, l2, last_point], in this
# order.
points = [point, l1, l2, intersection[0]]
# compute middle position of the square
middle_x = sum(point.x for point in points)/4.
middle_y = sum(point.y for point in points)/4.
# check if any point already has one of its squares
# (at most 4) used.
is_taken = False
square = range(4)
for i in range(4):
# compute the position of the point in relation to the
# middle corresponding to one of the following squares
# position: [(1,1), (-1,1), (1,-1), (-1,-1)]
vx = middle_x - points[i].x
vy = middle_y - points[i].y
square[i] = (int(vx/abs(vx)), int(vy/abs(vy)))
belongs = square[i] in squares_taken[points[i]]
is_taken = is_taken or belongs
if not is_taken:
cell = LTComponent((point.x, point.y,
intersection[0].x, intersection[0].y))
cells.add(cell)
for i in range(4):
squares_taken[points[i]].add(square[i])
break
return cells
def _build_elements(self, cells):
"""
Converts the cells into elements.
"""
elements = []
for cell in cells:
elements.append(self.Element(cell))
for row in reversed(self._rows_borders[:-1]):
for column in self._columns_borders[:-1]:
for cell_index, cell in enumerate(cells):
if cell.y0 < row + 0.1 < cell.y1 and\
cell.x0 < column + 0.1 < cell.x1:
elements[cell_index].add(row, column)
return sorted(elements, key=lambda e: (e.cell.x0, e.cell.y0))
@property
def cells(self):
return self._cells
def add(self, item):
"""
Adds a text item to the table, inserting it into the correct cell.
"""
for element in self._elements:
if element.cell.is_hoverlap(item) and element.cell.is_voverlap(item):
element.add_line(item, element.cell.bbox)
break
def as_html(self):
string = ''
for row in reversed(self._rows_borders[:-1]):
string += '<tr>\n'
for column in self._columns_borders[:-1]:
for element in self._elements:
if element.column == column and element.row == row:
lines = element.lines
colspan = element.colspan
rowspan = element.rowspan
text = '\n'.join(line.as_html() for line in lines)
if colspan:
colspan = 'colspan="%d"' % (colspan + 1)
else:
colspan = ''
if rowspan:
rowspan = 'rowspan="%d"' % (rowspan + 1)
else:
rowspan = ''
attributes = ''
if rowspan or colspan:
attributes = ' '
if rowspan and colspan:
attributes += rowspan + ' ' + colspan
else:
attributes += rowspan + colspan
string += '<td%s>%s</td>\n' % (attributes, text)
string += '</tr>\n'
return '<table>\n%s</table>' % string
class BlockquoteStart(object):
def as_html(self):
return '<blockquote>'
class BlockquoteEnd(object):
def as_html(self):
return '</blockquote>'
class SimpleImage(LTImage):
def __init__(self, ltimage):
assert(isinstance(ltimage, LTImage))
LTComponent.__init__(self, ltimage.bbox)
self._name = ltimage.name
self._stream = ltimage.stream
def as_html(self):
return '<p>(Ver imagem no documento original.)</p>'
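# Added illustration of the hyphenation handling in Paragraph.merge (not part of the original
# module): a line ending in "-" is joined to the next without a space, while other lines are
# joined with a separating space when neither side already has one, e.g.
#   p = Paragraph(u'no exercicio das suas compe-')
#   p.merge(Paragraph(u'tencias legais'))
#   p.text   # -> u'no exercicio das suas competencias legais'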
``` |
{
"source": "jorgecarleitao/schemaflow",
"score": 3
} |
#### File: schemaflow/examples/end_to_end_kaggle.py
```python
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LassoCV
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
import sklearn.metrics
import matplotlib.pyplot as plt
from schemaflow import types as sf_types
from schemaflow import ops as sf_ops
from schemaflow.pipe import Pipe
from schemaflow.pipeline import Pipeline
class SplitNumericCategorical(Pipe):
fit_requires = transform_requires = {'x': sf_types.PandasDataFrame(schema={})}
transform_modifies = {'x_categorical': sf_types.PandasDataFrame(schema={}),
'x': sf_types.PandasDataFrame(schema={})}
fitted_parameters = {'numeric_columns': sf_types.List(str)}
def fit(self, data: dict, parameters: dict=None):
self['numeric_columns'] = list(data['x'].select_dtypes(include=[np.number]).columns)
def transform(self, data: dict):
data['x_categorical'] = data['x'].drop(self['numeric_columns'], axis=1)
data['x'] = data['x'].loc[:, self['numeric_columns']]
return data
class FillNaN(Pipe):
fit_requires = transform_modifies = transform_requires = {
'x': sf_types.PandasDataFrame(schema={}),
'x_categorical': sf_types.PandasDataFrame(schema={})}
fitted_parameters = {
'means': sf_types.List(float),
'most_frequent': sf_types.List(str)}
def fit(self, data: dict, parameters: dict=None):
self['means'] = data['x'].mean(axis=0)
self['most_frequent'] = data['x_categorical'].mode(axis=0)
def transform(self, data: dict):
data['x'] = data['x'].fillna(self['means'])
for column in data['x_categorical'].columns:
data['x_categorical'].loc[data['x_categorical'][column].isnull(), column] = self['most_frequent'][column][0]
return data
class JoinCategoricalAsOneHot(Pipe):
fit_requires = {'x_categorical': sf_types.PandasDataFrame(schema={})}
transform_requires = {
'x': sf_types.PandasDataFrame(schema={}),
'x_categorical': sf_types.PandasDataFrame(schema={})
}
transform_modifies = {
'x': sf_types.PandasDataFrame(schema={}),
'x_categorical': sf_ops.Drop(),
}
fitted_parameters = {'label': object, 'transformer': object}
def fit(self, data: dict, parameters: dict=None):
df = data['x_categorical'].copy()
self['label'] = dict((column, LabelEncoder()) for column in df.columns)
self['transformer'] = OneHotEncoder()
for column in self['label']:
df.loc[:, column] = self['label'][column].fit_transform(df.loc[:, column])
self['transformer'].fit(df.values)
def transform(self, data: dict):
index = data['x_categorical'].index
for column in self['label']:
mode = data['x_categorical'].loc[:, column].mode()[0]
def f(x):
if x not in self['label'][column].classes_:
return mode
else:
return x
data['x_categorical'].loc[:, column] = data['x_categorical'].loc[:, column].apply(f)
data['x_categorical'].loc[:, column] = self['label'][column].transform(data['x_categorical'].loc[:, column])
data['x_categorical'] = self['transformer'].transform(data['x_categorical'])
df = pd.DataFrame(data['x_categorical'].toarray(), index=index)
data['x'] = data['x'].join(df)
del data['x_categorical']
return data
class BaselineModel(Pipe):
fit_requires = transform_requires = {'x': sf_types.PandasDataFrame({})}
transform_modifies = {'y_pred_baseline': sf_types.Array(np.float64)}
fitted_parameters = {'mean': np.float64}
def fit(self, data: dict, parameters: dict = None):
self['mean'] = np.mean(data['y'])
def transform(self, data: dict):
data['y_pred_baseline'] = np.full(data['x'].shape[0], self['mean'])
return data
class LogLassoModel(Pipe):
transform_requires = {'x': sf_types.PandasDataFrame(schema={})}
fit_requires = {'x': sf_types.PandasDataFrame(schema={}), 'y': sf_types.Array(float)}
transform_modifies = {
'y_pred': sf_types.Array(np.float64),
'x': sf_ops.Drop()
}
fitted_parameters = {'model': LassoCV}
def fit(self, data: dict, parameters: dict=None):
self['model'] = LassoCV(normalize=True)
self['model'].fit(data['x'], np.log(data['y']))
def transform(self, data: dict):
data['y_pred'] = np.exp(self['model'].predict(data['x']))
del data['x']
return data
def x_y_split(df, target_column):
return df.drop(target_column, axis=1), df.loc[:, target_column]
def analyse_performance(df, target_column, pipeline, parameters: dict=None):
train, test = train_test_split(df, test_size=0.2, random_state=1)
x_train, y_train = x_y_split(train, target_column)
x_test, y_test = x_y_split(test, target_column)
pipeline.logged_fit({'x': x_train, 'y': y_train.values}, parameters)
result = pipeline.logged_transform({'x': x_test})
y_pred = result['y_pred']
y_pred_baseline = result['y_pred_baseline']
def metric(y_true, y_pred):
return sklearn.metrics.mean_squared_error(np.log(y_true), np.log(y_pred))
print(metric(y_test, y_pred))
print(metric(y_test, y_pred_baseline))
plt.plot(range(len(y_test)), y_test, 'o')
plt.plot(range(len(y_test)), y_pred, 'o')
plt.savefig('examples/comparison1.png')
plt.close()
plt.plot(y_test, y_pred, 'o', label='Lasso')
plt.plot(y_test, y_pred_baseline, 'o', label='baseline')
plt.plot(y_test, y_test, '-', label='')
plt.xlabel('truth')
plt.ylabel('pred')
plt.legend()
plt.savefig('examples/pred_vs_truth.png')
plt.close()
def export_predictions(df, target_column, predict_pipeline, parameters: dict=None):
x, y = x_y_split(df, target_column)
predict_pipeline.fit({'x': x, 'y': y}, parameters)
df = pd.read_csv('examples/all/test.csv', index_col='Id')
result = predict_pipeline.transform({'x': df})['y_pred']
pd.Series(result, name=target_column, index=df.index).to_csv('examples/submission.txt', header=True)
if __name__ == '__main__':
predict_pipeline = Pipeline([
SplitNumericCategorical(),
FillNaN(),
JoinCategoricalAsOneHot(),
('baseline', BaselineModel()),
('model', LogLassoModel())
])
import logging
import sys
logger = logging.getLogger('schemaflow')
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
ch.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(ch)
# this pipeline is very generic: it does not make any assumptions about the data's format.
predict_pipeline.check_fit({'x': sf_types.PandasDataFrame({}), 'y': sf_types.Array(np.float64)}, raise_=True)
predict_pipeline.check_transform({'x': sf_types.PandasDataFrame({})}, raise_=True)
print('expected fit schema: ', predict_pipeline.fit_requires)
print('fitted parameters: ', predict_pipeline.fitted_parameters)
print('expected transform schema: ', predict_pipeline.transform_requires)
print('expected transformed schema: ', predict_pipeline.transform_schema(predict_pipeline.transform_requires))
# execution of the pipeline
target_column = 'SalePrice'
df = pd.read_csv('examples/all/train.csv', index_col='Id')
analyse_performance(df.copy(), target_column, predict_pipeline)
export_predictions(df.copy(), target_column, predict_pipeline)
```
#### File: schemaflow/tests/test_pyspark_df.py
```python
import unittest
import datetime
import numpy as np
import pyspark
from pyspark.sql.types import Row
from schemaflow.types import PySparkDataFrame, infer_schema
class PySparkTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
conf = pyspark.SparkConf().setMaster('local[1]').setAppName("testing")
cls.sc = pyspark.SparkContext(conf=conf)
cls.sqlContext = pyspark.SQLContext(cls.sc)
@classmethod
def tearDownClass(cls):
cls.sc.stop()
class TestPySparkDataFrame(PySparkTestCase):
def test_type_check(self):
# ok
type = PySparkDataFrame(schema={'a': float, 'b': np.dtype('O')})
instance = self.sqlContext.createDataFrame(data=[Row(a=1.0, b='s'), Row(a=1.0, b='s')])
self.assertEqual(type.check_schema(instance), [])
# extra column is ok
instance = self.sqlContext.createDataFrame(data=[Row(a=1.0, b='s', c=1.0),
Row(a=1.0, b='s', c=1.0)])
self.assertEqual(type.check_schema(instance), [])
# missing column
instance = self.sqlContext.createDataFrame(data=[Row(a=1.0), Row(a=1.0)])
self.assertEqual(len(type.check_schema(instance)), 1)
# wrong column type
instance = self.sqlContext.createDataFrame(data=[Row(a=1.0, b=1),
Row(a=1.0, b=1)])
self.assertEqual(len(type.check_schema(instance)), 1)
def test_date_time(self):
instance_type = PySparkDataFrame(schema={'a': datetime.datetime})
instance = self.sqlContext.createDataFrame(data=[Row(a=datetime.datetime.now())])
self.assertEqual(len(instance_type.check_schema(instance)), 0)
instance_type = PySparkDataFrame(schema={'a': datetime.date})
instance = self.sqlContext.createDataFrame(data=[Row(a=datetime.datetime.now().date())])
self.assertEqual(len(instance_type.check_schema(instance)), 0)
# wrong types
instance_type = PySparkDataFrame(schema={'a': datetime.datetime})
instance = self.sqlContext.createDataFrame(data=[Row(a=datetime.datetime.now().date())])
self.assertEqual(len(instance_type.check_schema(instance)), 1)
instance_type = PySparkDataFrame(schema={'a': datetime.date})
instance = self.sqlContext.createDataFrame(data=[Row(a=datetime.datetime.now())])
self.assertEqual(len(instance_type.check_schema(instance)), 1)
def test_int_float(self):
instance_type = PySparkDataFrame(schema={'a': float})
instance = self.sqlContext.createDataFrame(data=[Row(a=1.0)])
self.assertEqual(len(instance_type.check_schema(instance)), 0)
instance_type = PySparkDataFrame(schema={'a': int})
instance = self.sqlContext.createDataFrame(data=[Row(a=1)])
self.assertEqual(len(instance_type.check_schema(instance)), 0)
# wrong types
instance_type = PySparkDataFrame(schema={'a': float})
instance = self.sqlContext.createDataFrame(data=[Row(a=1)])
self.assertEqual(len(instance_type.check_schema(instance)), 1)
instance_type = PySparkDataFrame(schema={'a': int})
instance = self.sqlContext.createDataFrame(data=[Row(a=1.0)])
self.assertEqual(len(instance_type.check_schema(instance)), 1)
def test_bool(self):
instance_type = PySparkDataFrame(schema={'a': bool})
instance = self.sqlContext.createDataFrame(data=[Row(a=False)])
self.assertEqual(len(instance_type.check_schema(instance)), 0)
instance = self.sqlContext.createDataFrame(data=[Row(a=1)])
self.assertEqual(len(instance_type.check_schema(instance)), 1)
def test_infer(self):
instance = self.sqlContext.createDataFrame(data=[Row(a=1.0, b='a'), Row(a=1.0, b='b')])
schema = infer_schema({'a': instance})
self.assertEqual(schema, {'a': PySparkDataFrame(schema={'a': float, 'b': np.dtype('O')})})
``` |
{
"source": "jorgecastro05/camel-xml2dsl",
"score": 2
} |
#### File: src/xml2dsl/xml2dsl.py
```python
import configargparse
from lxml import etree, objectify
from rich import console
from rich.console import Console
import importlib.metadata
import re
__version__ = importlib.metadata.version('camel-xml2dsl')
ns = {"camel": "http://camel.apache.org/schema/spring"}
console = Console()
class Converter:
def __init__(self):
self.dsl_route = ''
def xml_to_dsl(self):
p = configargparse.ArgParser(
description="Transforms xml routes to dsl routes " + __version__)
p.add_argument('--xml', metavar='xml', type=str,
help='xml camel context file', required=True, env_var='XML_CTX_INPUT')
p.add_argument('--beans', metavar='beans', type=str,
help='use beans instead of processors', required=False, env_var='USE_BEANS')
args = p.parse_args()
with open(args.xml, "r") as xml_file:
parser = etree.XMLParser(remove_comments=True)
data = objectify.parse(xml_file, parser=parser)
console.log(" XML 2 DSL Utility ", style="bold red")
root = data.getroot()
for camelContext in root.findall('camel:camelContext', ns):
if 'id' in camelContext.attrib:
console.log("processing camel context", camelContext.attrib['id'])
self.get_namespaces(camelContext)
self.dsl_route = self.analyze_node(camelContext)
print("dsl route:\n", self.dsl_route)
def get_namespaces(self, node):
console.log("namespaces:", node.nsmap)
def analyze_node(self, node):
dslText = ""
for child in node:
node_name = child.tag.partition('}')[2] + "_def"
console.log("procesing node", node_name, child.tag, child.sourceline)
dslText += getattr(self, node_name)(child)
return dslText
def analyze_element(self, node):
node_name = node.tag.partition('}')[2] + "_def"
console.log("procesing node", node_name, node.tag, node.sourceline)
return getattr(self, node_name)(node)
def route_def(self, node):
route_def = self.analyze_node(node)
route_def += "\n.end();\n"
return route_def
def propertyPlaceholder_def(self, node):
return ""
def dataFormats_def(self, node):
return '\n//TODO: define dataformat ' + node[0].tag + '\n'
def endpoint_def(self, node):
return ""
def multicast_def(self, node):
multicast_def = "\n.multicast()"
multicast_def += self.analyze_node(node)
multicast_def += "\n.end() //end multicast"
return multicast_def
def bean_def(self, node):
if 'method' in node.attrib:
return '\n.bean("' + node.attrib['ref'] + '","'+ node.attrib['method'] + '")'
elif 'beanType' in node.attrib:
return '\n.bean("' + node.attrib['ref'] + '","'+ node.attrib['beanType'] + '")'
else:
return '\n.bean("' + node.attrib['ref'] + '")'
def aggregator_def(self, node):
return "//TODO: Aggregator"
def recipientList_def(self, node):
recipient_def = "\n.recipientList()."
recipient_def += self.analyze_node(node)
recipient_def += ".end() // end recipientList"
return recipient_def
def errorHandler_def(self, node):
if node.attrib['type'] == "DefaultErrorHandler":
return "\ndefaultErrorHandler().setRedeliveryPolicy(policy);"
def redeliveryPolicyProfile_def(self, node):
policy_def = "\nRedeliveryPolicy policy = new RedeliveryPolicy()"
if "maximumRedeliveries" in node.attrib:
policy_def += ".maximumRedeliveries("+ node.attrib["maximumRedeliveries"]+")"
if "retryAttemptedLogLevel" in node.attrib:
policy_def += ".retryAttemptedLogLevel(LoggingLevel." + node.attrib["retryAttemptedLogLevel"] +")"
if "redeliveryDelay" in node.attrib:
policy_def += ".redeliveryDelay("+ node.attrib["redeliveryDelay"] +")"
if "logRetryAttempted" in node.attrib:
policy_def += ".logRetryAttempted("+node.attrib["logRetryAttempted"] +")"
if "logRetryStackTrace" in node.attrib:
policy_def += ".logRetryStackTrace("+node.attrib["logRetryStackTrace"]+")"
policy_def += ";"
return policy_def
def onException_def(self, node):
exceptions = []
for exception in node.findall("camel:exception", ns):
exceptions.append(exception.text + ".class")
node.remove(exception)
exceptions = ','.join(exceptions)
onException_def = '\nonException(' + exceptions + ')'
handled = node.find("camel:handled", ns)
if handled is not None:
onException_def += '.handled(' + handled[0].text + ')'
node.remove(handled)
redeliveryPolicy = node.find("camel:redeliveryPolicy", ns)
if redeliveryPolicy is not None:
onException_def += '\n.maximumRedeliveries('+redeliveryPolicy.attrib['maximumRedeliveries'] + \
')' if 'maximumRedeliveries' in redeliveryPolicy.attrib else ""
onException_def += '\n.redeliveryDelay('+redeliveryPolicy.attrib['redeliveryDelay'] + \
')' if 'redeliveryDelay' in redeliveryPolicy.attrib else ""
onException_def += '\n.retryAttemptedLogLevel(LoggingLevel.' + \
redeliveryPolicy.attrib['retryAttemptedLogLevel'] + \
')' if 'retryAttemptedLogLevel' in redeliveryPolicy.attrib else ""
onException_def += '\n.retriesExhaustedLogLevel(LoggingLevel.' + \
redeliveryPolicy.attrib['retriesExhaustedLogLevel'] + \
')' if 'retriesExhaustedLogLevel' in redeliveryPolicy.attrib else ""
node.remove(redeliveryPolicy)
if "redeliveryPolicyRef" in node.attrib:
onException_def += ".redeliveryPolicy(policy)"
onException_def += self.analyze_node(node)
onException_def += "\n.end();\n"
return onException_def
def description_def(self, node):
if node.text:
return "//" + node.text + "\n"
else:
return ""
def from_def(self, node):
routeId = node.getparent().attrib['id']
routeFrom = node.attrib['uri']
from_def = '\nfrom("' + routeFrom+'").routeId("' + routeId + '")'
from_def += self.analyze_node(node)
return from_def
def log_def(self, node):
if 'loggingLevel' in node.attrib and node.attrib['loggingLevel'] != 'INFO' :
return '\n.log(LoggingLevel.' + node.attrib['loggingLevel'] + ', "' + self.deprecatedProcessor(node.attrib['message']) + '")'
else:
return '\n.log("' + self.deprecatedProcessor(node.attrib['message']) + '")'
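# Illustrative mapping (assumed input): <log message="got ${body}" loggingLevel="WARN"/>
# would be emitted as .log(LoggingLevel.WARN, "got ${body}"), while the default INFO
# level falls back to the plain .log("...") form.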
def choice_def(self, node):
choice_def = '\n.choice() //' + str(node.sourceline)
choice_def += self.analyze_node(node)
parent = node.getparent()
if parent.tag != '{'+ns['camel']+'}route':
choice_def += "\n.endChoice() //" + str(node.sourceline)
else:
choice_def += "\n.end() //end choice " + str(node.sourceline)
return choice_def
def when_def(self, node):
return '\n.when().' + self.analyze_node(node)
def otherwise_def(self, node):
return '\n.otherwise()' + self.analyze_node(node)
def simple_def(self, node):
simple_def = ""
if node.text is not None:
simple_def = 'simple("' + self.deprecatedProcessor(node.text) + '")'
else:
simple_def = 'simple("")'
if "resultType" in node.attrib:
simple_def += ".resultType("+ node.attrib["resultType"]+".class)"
return simple_def
def constant_def(self, node):
if node.text is not None:
return 'constant("' + node.text + '")'
else:
return 'constant("")'
def groovy_def(self, node):
text = node.text.replace('"','\'')
return 'groovy("' + text + '")'
def xpath_def(self, node):
xpath_def = 'xpath("' + node.text + '")'
if 'resultType' in node.attrib:
xpath_def = 'xpath("' + node.text + '",' + \
node.attrib['resultType']+'.class)'
if 'saxon' in node.attrib:
xpath_def += '.saxon()'
return xpath_def
def jsonpath_def(self, node):
jsonpath_def = 'jsonpath("' + node.text + '")'
if 'resultType' in node.attrib:
jsonpath_def = 'jsonpath("' + node.text + '",' + \
node.attrib['resultType']+'.class)'
return jsonpath_def
def to_def(self, node):
if 'pattern' in node.attrib and 'InOnly' in node.attrib['pattern']:
return '\n.inOnly("' + self.componentOptions(node.attrib['uri']) + '")'
else:
return '\n.to("' + self.componentOptions(node.attrib['uri']) + '")'
def setBody_def(self, node):
setBody_predicate = self.analyze_element(node[0])
return '\n.setBody(' + setBody_predicate + ')'
def convertBodyTo_def(self, node):
return '\n.convertBodyTo('+ node.attrib['type'] + '.class)'
def unmarshal_def(self, node):
if 'ref' in node.attrib:
return '\n.unmarshal("' + node.attrib['ref']+ '") //TODO: define dataformat'
else:
return '\n.unmarshal()' + self.analyze_node(node)
def marshal_def(self, node):
if 'ref' in node.attrib:
return '\n.marshal("' + node.attrib['ref']+ '") //TODO: define dataformat'
else:
return '\n.marshal()' + self.analyze_node(node)
def jaxb_def(self, node):
if 'prettyPrint' in node.attrib:
return '.jaxb("' + node.attrib['contextPath']+'")'
else:
return '.jaxb("' + node.attrib['contextPath']+'")'
def setHeader_def(self, node):
setHeader_predicate = self.analyze_element(node[0])
return '\n.setHeader("'+node.attrib['headerName']+'",' + setHeader_predicate+')'
def setProperty_def(self, node):
setProperty_predicate = self.analyze_element(node[0])
return '\n.setProperty("' + node.attrib['propertyName']+'",' + setProperty_predicate + ')'
def process_def(self, node):
return '\n.process("' + node.attrib["ref"]+'")'
def inOnly_def(self, node):
return '\n.inOnly("' + node.attrib["uri"]+'")'
def split_def(self, node):
split_def = '\n.split().'
split_def += self.analyze_element(node[0])
if 'streaming' in node.attrib:
split_def += '.streaming()'
if 'strategyRef' in node.attrib:
split_def += '.aggregationStrategyRef("' + node.attrib["strategyRef"] + '")'
if 'parallelProcessing' in node.attrib:
split_def += '.parallelProcessing()'
node.remove(node[0]) # remove first child as was processed
split_def += self.analyze_node(node)
split_def += '\n.end() //end split'
return split_def
def removeHeaders_def(self, node):
if 'excludePattern' in node.attrib:
return '\n.removeHeaders("' + node.attrib['pattern']+'", "' + node.attrib['excludePattern']+'")'
else:
return '\n.removeHeaders("' + node.attrib['pattern']+'")'
def removeHeader_def(self, node):
return '\n.removeHeaders("' + node.attrib['headerName']+'")'
def xquery_def(self, node):
return 'xquery("'+ node.text +'") //xquery not finished please review'
def doTry_def(self, node):
doTry_def = "\n.doTry()"
doTry_def += self.analyze_node(node)
return doTry_def
def doCatch_def(self, node):
exceptions = []
for exception in node.findall("camel:exception", ns):
exceptions.append(exception.text + ".class")
node.remove(exception)
exceptions = ','.join(exceptions)
doCatch_def = '\n.endDoTry()'
doCatch_def += '\n.doCatch(' + exceptions + ')'
doCatch_def += self.analyze_node(node)
doCatch_def += "\n.end() //end doCatch"
return doCatch_def
def handled_def(self, node):
return '.handled(' + node[0].text + ')'
def transacted_def(self, node):
return ""
def wireTap_def(self, node):
if 'executorServiceRef' in node.attrib:
return '\n.wireTap("'+ node.attrib['uri'] +'").executorServiceRef("profile")'
else:
return '\n.wireTap("'+ node.attrib['uri'] +'")'
def language_def(self, node):
return 'language("'+ node.attrib['language']+'","'+ node.text +'")'
def threads_def(self, node):
threads_def = None
maxPoolSize = node.attrib['maxPoolSize'] if 'maxPoolSize' in node.attrib else None
poolSize = node.attrib['poolSize'] if 'poolSize' in node.attrib else None
if poolSize is None and maxPoolSize is not None:
poolSize = maxPoolSize
if poolSize is not None and maxPoolSize is None:
maxPoolSize = poolSize
if 'threadName' in node.attrib:
threads_def = '\n.threads('+ poolSize+','+ maxPoolSize+',"'+ node.attrib['threadName']+'")'
else:
threads_def = '\n.threads('+ poolSize+','+ maxPoolSize+')'
threads_def += self.analyze_node(node)
threads_def += "\n.end() //end threads"
return threads_def
def delay_def(self, node):
delay_def = '\n.delay().'
delay_def += self.analyze_node(node)
return delay_def
def javaScript_def(self, node):
return 'new JavaScriptExpression("'+ node.text +'")'
def threadPoolProfile_def(self, node):
profileDef = '\nThreadPoolProfile profile = new ThreadPoolProfile();'
if 'defaultProfile' in node.attrib:
profileDef += '\nprofile.setDefaultProfile('+ node.attrib['defaultProfile']+');'
if 'id' in node.attrib:
profileDef += '\nprofile.setId("'+ node.attrib['id']+'");'
if 'keepAliveTime' in node.attrib:
profileDef += '\nprofile.setKeepAliveTime('+ node.attrib['keepAliveTime']+'L);'
if 'maxPoolSize' in node.attrib:
profileDef += '\nprofile.setMaxPoolSize('+ node.attrib['maxPoolSize'] +');'
if 'maxQueueSize' in node.attrib:
profileDef += '\nprofile.setMaxQueueSize('+ node.attrib['maxQueueSize']+');'
if 'poolSize' in node.attrib:
profileDef += '\nprofile.setPoolSize('+ node.attrib['poolSize']+');'
if 'rejectedPolicy' in node.attrib:
if node.attrib['rejectedPolicy'] == 'Abort':
profileDef += '\nprofile.setRejectedPolicy(ThreadPoolRejectedPolicy.Abort);'
return profileDef
def throwException_def(self, node):
throwException_def = ''
if 'ref' in node.attrib:
throwException_def = '\n.throwException(Exception.class, "' + node.attrib['ref']+ '") //TODO: Please review throwException has changed with java DSL'
else:
throwException_def = '\n.throwException(Exception.class, "") //TODO: Please review throwException has changed with java DSL'
throwException_def += self.analyze_node(node)
return throwException_def
def spel_def(self, node):
return 'SpelExpression.spel("' + node.text + '")'
def loop_def(self, node):
loop_def = '\n.loop().'
loop_def += self.analyze_node(node)
loop_def += '\n.end() // end loop'
return loop_def
# Text deprecated processor for camel deprecated endpoints and features
def deprecatedProcessor(self, text):
text = re.sub(r'\${property\.(\w+\.?\w+)}', r'${exchangeProperty.\1}', text)  # exchange property in simple expressions
text = re.sub('"', "'", text)  # replace all occurrences of " with '
text = re.sub('\n', "", text)  # remove all newlines
return text
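# Illustrative example: deprecatedProcessor('${property.orderId}') returns
# '${exchangeProperty.orderId}'; double quotes become single quotes and embedded
# newlines are stripped.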
# Text processor for apply custom options in to endpoints
def componentOptions(self, text):
if "velocity:" in text:
text += "?contentCache=true"
return text
if __name__ == "__main__":
converter = Converter()
converter.xml_to_dsl()
def main():
converter = Converter()
converter.xml_to_dsl()
```
#### File: camel-xml2dsl/tests/test_xml2dsl.py
```python
import unittest
from xml2dsl.xml2dsl import Converter
from unittest import TestCase, mock
class TestScript(unittest.TestCase):
def test_converter_starts_with_empty_route(self):
# xml_to_dsl is an instance method of Converter (it parses CLI arguments),
# so check instead that a fresh Converter starts with an empty DSL route
converter = Converter()
self.assertEqual(converter.dsl_route, '')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jorge-castro/Hermes",
"score": 3
} |
#### File: jorge-castro/Hermes/bot_commands.py
```python
from json import dumps
import requests
import yfinance as yf
#Greet the user when they interact with the bot for the first time
def start():
return "Hello! I'm Hermes, your new financial assistant."
#Show a help message to the user
def help():
return "I'm a bot that can provide information on different financial " \
"assets, i.e. the price of various cryptocurrencies and stocks.\n" \
"Select a command to see usage examples."
#Queries the coingecko API and returns basic info on the coin(s)
def get_crypto_price(ids, vs_currencies="usd", include_market_cap="false",
include_24hr_vol="false", include_24hr_change="false",
include_last_updated_at="false"):
r = requests.get(f"https://api.coingecko.com/api/v3/simple/price"
f"?ids={ids}"
f"&vs_currencies={vs_currencies}"
f"&include_market_cap={include_market_cap}"
f"&include_24hr_vol={include_24hr_vol}"
f"&include_24hr_change={include_24hr_change}"
f"&include_last_updated_at={include_last_updated_at}")
return dumps(r.json(), indent=4)
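#Illustrative call (network access assumed): get_crypto_price("bitcoin,ethereum")
#returns a pretty-printed JSON string such as '{"bitcoin": {"usd": ...}, ...}'
#with one entry per requested coin id.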
#Takes a string of space-separated tickers and returns stock price(s) in the
#format "{ticker:market_price}"
def get_stock_price(tickers):
stocks = yf.Tickers(tickers).tickers
prices = {ticker: stock.info["regularMarketPrice"]
for ticker, stock in stocks.items()
if stock.info["regularMarketPrice"] != None}
return dumps(prices, indent=4)
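#Illustrative call (network access assumed): get_stock_price("AAPL MSFT") returns
#a JSON string mapping each ticker to its regular market price; tickers without
#a price are dropped by the dict comprehension above.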
``` |
{
"source": "Jorge-C/bipy",
"score": 3
} |
#### File: app/tests/test_parameters.py
```python
from unittest import TestCase, main
from skbio.app.parameters import (FlagParameter, ValuedParameter,
MixedParameter, Parameters, ParameterError,
FilePath)
class FlagParameterTests(TestCase):
""" Tests of the FlagParameter class """
def setUp(self):
"""Setup some variables for the tests to use """
self.p_modify_prefix = [FlagParameter(Name='d', Prefix='-'),
FlagParameter(Name='d', Prefix='--'),
FlagParameter(Name='d', Prefix='')]
self.p_modify_name = [FlagParameter(Name='d', Prefix='-'),
FlagParameter(Name='D', Prefix='-'),
FlagParameter(Name=4, Prefix='-'),
FlagParameter(Name='abcdef', Prefix='-')]
self.p_On = [FlagParameter(Name='d', Prefix='-', Value=True),
FlagParameter(Name='d', Prefix='-', Value=5),
FlagParameter(Name='d', Prefix='-', Value=[1]),
FlagParameter(Name='d', Prefix='-', Value='F')]
self.p_Off = [FlagParameter(Name='d', Prefix='-', Value=False),
FlagParameter(Name='d', Prefix='-', Value=None),
FlagParameter(Name='d', Prefix='-', Value=[]),
FlagParameter(Name='d', Prefix='-', Value=0),
FlagParameter(Name='d', Prefix='-', Value='')]
self.ID_tests = [FlagParameter(Name='d', Prefix='-'),
FlagParameter(Name='d', Prefix=''),
FlagParameter(Name='', Prefix='-'),
FlagParameter(Name=4, Prefix='-'),
FlagParameter(Name=None, Prefix='-'),
FlagParameter(Name=4, Prefix=None),
FlagParameter(Name='abcdef', Prefix='-')]
def test_init(self):
"""FlagParameter: init functions as expected """
param = FlagParameter(Name='a', Prefix='-', Value=42)
self.assertEqual(param.Name, 'a')
self.assertEqual(param.Prefix, '-')
self.assertEqual(param.Value, 42)
self.assertEqual(param.Delimiter, None)
self.assertEqual(param.Quote, None)
self.assertEqual(param.Id, '-a')
def test_init_defaults(self):
"""FlagParameter: init functions as expected with default values"""
p = FlagParameter(Name='a', Prefix='-')
self.assertEqual(p.Name, 'a')
self.assertEqual(p.Prefix, '-')
self.assertEqual(p.Value, False)
self.assertEqual(p.Delimiter, None)
self.assertEqual(p.Quote, None)
self.assertEqual(p.Id, '-a')
def test_get_id(self):
"""FlagParameter: _get_id functions as expected """
expected_results = ['-d', 'd', '-', '-4', '-', '4', '-abcdef']
for param, exp in zip(self.ID_tests, expected_results):
self.assertEqual(param._get_id(), exp)
def test_eq(self):
"""FlagParameter: eq functions as expected """
p1 = FlagParameter(Name='a', Prefix='-', Value=True)
p2 = FlagParameter(Name='a', Prefix='-', Value=True)
p3 = FlagParameter(Name='a', Prefix='-')
p4 = FlagParameter(Name='i', Prefix='-', Value=True)
p5 = FlagParameter(Name='a', Prefix='--', Value=True)
assert p1 == p2
assert not p1 == p3
assert not p1 == p4
assert not p1 == p5
assert not p3 == p4
assert not p3 == p5
assert not p4 == p5
def test_ne(self):
"""FlagParameter: ne functions as expected """
p1 = FlagParameter(Name='a', Prefix='-', Value=True)
p2 = FlagParameter(Name='a', Prefix='-', Value=True)
p3 = FlagParameter(Name='a', Prefix='-')
p4 = FlagParameter(Name='i', Prefix='-', Value=True)
p5 = FlagParameter(Name='a', Prefix='--', Value=True)
assert not p1 != p2
assert p1 != p3
assert p1 != p4
assert p1 != p5
assert p3 != p4
assert p3 != p5
assert p4 != p5
def test_isOn_True(self):
"""FlagParameter: isOn functions as expected with True Values """
for param in self.p_On:
assert param.isOn()
def test_isOn_False(self):
"""FlagParameter: isOn functions as expected with False Values """
for param in self.p_Off:
assert not param.isOn()
def test_isOff_True(self):
"""FlagParameter: isOff functions as expected with True values """
for param in self.p_Off:
assert param.isOff()
def test_isOff_False(self):
"""FlagParameter: isOff functions as expected with False values """
for param in self.p_On:
assert not param.isOff()
def test_on(self):
"""FlagParameter: on functions as expected """
for param in self.p_On + self.p_Off:
param.on()
assert param.isOn()
def test_off(self):
"""FlagParameter: off functions as expected """
for param in self.p_On + self.p_Off:
param.off()
assert param.isOff()
def test_str_modify_prefix(self):
"""FlagParameter: str functions as expected with different prefixes """
expected_results = ['-d', '--d', 'd']
for param, exp in zip(self.p_modify_prefix, expected_results):
param.on()
self.assertEqual(str(param), exp)
def test_str_modify_name(self):
"""FlagParameter: str functions as expected with different names """
expected_results = ['-d', '-D', '-4', '-abcdef']
for param, exp in zip(self.p_modify_name, expected_results):
param.on()
self.assertEqual(str(param), exp)
class ValuedParameterTests(TestCase):
""" Tests of the ValuedParameter class """
constructor = ValuedParameter
s = 'Valued'
def setUp(self):
"""Setup some variables for the tests to use """
self.p_modify_prefix = [self.constructor(Name='d', Prefix='-'),
self.constructor(Name='d', Prefix='--'),
self.constructor(Name='d', Prefix='')]
self.p_modify_name = [self.constructor(Name='d', Prefix='-'),
self.constructor(Name='D', Prefix='-'),
self.constructor(Name=4, Prefix='-'),
self.constructor(Name='abcdef', Prefix='-')]
self.p_On = [self.constructor(Name='d', Prefix='-', Value=True),
self.constructor(Name='d', Prefix='-', Value=5),
self.constructor(Name='d', Prefix='-', Value=[1]),
self.constructor(Name='d', Prefix='-', Value=False),
self.constructor(Name='d', Prefix='-', Value='F')]
self.p_Off = [self.constructor(Name='d', Prefix='-', Value=None)]
self.p_full = [self.constructor(Name='a', Prefix='-',
Value=42, Delimiter=' ', Quote='\'')]
self.p_default = [self.constructor(Name='a', Prefix='-')]
self.p_modified_prefix = [self.constructor(Name='d', Prefix='-'),
self.constructor(Name='d', Prefix='--'),
self.constructor(Name='d', Prefix='')]
self.p_modified_name = [self.constructor(Name='d', Prefix='-'),
self.constructor(Name='D', Prefix='-'),
self.constructor(Name=4, Prefix='-'),
self.constructor(Name='abcdef', Prefix='-')]
self.p_modified_delimiter =\
[self.constructor(Name='d', Prefix='-', Value=42),
self.constructor(Name='d', Prefix='-', Value=42, Delimiter=''),
self.constructor(Name='d', Prefix='-', Value=42, Delimiter=' '),
self.constructor(Name='d', Prefix='-', Value=42, Delimiter=9),
self.constructor(Name='d', Prefix='-', Value=42, Delimiter='=')]
self.p_modified_value =\
[self.constructor(Name='d', Prefix='-', Value=42, Delimiter=' '),
self.constructor(
Name='d',
Prefix='-',
Value='pbl',
Delimiter=' '),
self.constructor(
Name='d',
Prefix='-',
Value='2-2',
Delimiter=' '),
self.constructor(Name='d', Prefix='-', Value='evo/t.txt',
Delimiter=' '),
self.constructor(Name='d', Prefix='-', Value='\'',
Delimiter=' ')]
self.p_modified_quote =\
[self.constructor(Name='d', Prefix='-', Value=42, Quote=''),
self.constructor(Name='d', Prefix='-', Value=42),
self.constructor(Name='d', Prefix='-', Value=42, Quote=' '),
self.constructor(Name='d', Prefix='-', Value=42, Quote='\''),
self.constructor(Name='d', Prefix='-', Value=42, Quote='\"'),
self.constructor(Name='d', Prefix='-', Value=42, Quote='x')]
self.ID_tests = [self.constructor(Name='d', Prefix='-'),
self.constructor(Name='d', Prefix=''),
self.constructor(Name='', Prefix='-'),
self.constructor(Name=4, Prefix='-'),
self.constructor(Name=None, Prefix='-'),
self.constructor(Name=4, Prefix=None),
self.constructor(Name='abcdef', Prefix='-')]
self.p_modified_is_path =\
[self.constructor(Name='d', Prefix='-', Delimiter=' ',
Value='test.txt', IsPath=True),
self.constructor(Name='d', Prefix='-', Delimiter=' ',
Value='test.txt', IsPath=False),
self.constructor(Name='d', Prefix='-', Delimiter=' ',
Value='test.txt', Quote='"', IsPath=True)]
def test_init(self):
"""Parameter: init functions as expected """
for param in self.p_full:
self.assertEqual(param.Name, 'a')
self.assertEqual(param.Prefix, '-')
self.assertEqual(param.Value, 42)
self.assertEqual(param.Delimiter, ' ')
self.assertEqual(param.Quote, '\'')
self.assertEqual(param.Id, '-a')
def test_init_defaults(self):
"""Parameter: init functions as expected with default values"""
for p in self.p_default:
self.assertEqual(p.Name, 'a')
self.assertEqual(p.Prefix, '-')
self.assertEqual(p.Value, None)
self.assertEqual(p.Delimiter, None)
self.assertEqual(p.Quote, None)
self.assertEqual(p.Id, '-a')
def test_get_id(self):
"""Parameter: _get_id functions as expected """
expected_results = ['-d', 'd', '-', '-4', '-', '4', '-abcdef']
for param, exp in zip(self.ID_tests, expected_results):
self.assertEqual(param._get_id(), exp)
def test_eq(self):
"""Parameter: eq functions as expected """
p1 = self.constructor(Name='a', Prefix='-', Value=42, Quote='\'',
Delimiter='=')
p2 = self.constructor(Name='a', Prefix='-', Value=42, Quote='\'',
Delimiter='=')
p3 = self.constructor(Name='dsf', Prefix='-', Value=42, Quote='\'',
Delimiter='=')
p4 = self.constructor(Name='a', Prefix='--', Value=42, Quote='\'',
Delimiter='=')
p5 = self.constructor(Name='a', Prefix='-', Value=942, Quote='\'',
Delimiter='=')
p6 = self.constructor(Name='a', Prefix='-', Value=42, Quote='\"',
Delimiter='=')
p7 = self.constructor(Name='a', Prefix='-', Value=42, Quote='\'',
Delimiter='!!!')
p8 = self.constructor(Name='wwwww', Prefix='-------')
p9 = self.constructor(Name='a', Prefix='-', Value=42, Quote='\'',
Delimiter='=', IsPath=True)
assert p1 == p2
assert not p1 == p3
assert not p1 == p4
assert not p1 == p5
assert not p1 == p6
assert not p1 == p7
assert not p1 == p8
assert not p1 == p9
# test default setting
p5.Value = 42
assert not p1 == p5
def test_ne(self):
"""Parameter: ne functions as expected """
p1 = self.constructor(Name='a', Prefix='-', Value=42, Quote='\'',
Delimiter='=')
p2 = self.constructor(Name='a', Prefix='-', Value=42, Quote='\'',
Delimiter='=')
p3 = self.constructor(Name='dsf', Prefix='-', Value=42, Quote='\'',
Delimiter='=')
p4 = self.constructor(Name='a', Prefix='--', Value=42, Quote='\'',
Delimiter='=')
p5 = self.constructor(Name='a', Prefix='-', Value=942, Quote='\'',
Delimiter='=')
p6 = self.constructor(Name='a', Prefix='-', Value=42, Quote='\"',
Delimiter='=')
p7 = self.constructor(Name='a', Prefix='-', Value=42, Quote='\'',
Delimiter='!!!')
p8 = self.constructor(Name='wwwww', Prefix='-------')
p9 = self.constructor(Name='a', Prefix='-', Value=42, Quote='\'',
Delimiter='=', IsPath=True)
assert not p1 != p2
assert p1 != p3
assert p1 != p4
assert p1 != p5
assert p1 != p6
assert p1 != p7
assert p1 != p8
assert p1 != p9
# test default setting
p5.Value = 42
assert p1 != p5
def test_get_default(self):
"""Parameter: default behaves as expected """
p1 = self.constructor(Name='a', Prefix='-', Value=42, Quote='\'',
Delimiter='=')
self.assertEqual(p1._get_default(), 42)
p1.Value = 43
self.assertEqual(p1._get_default(), 42)
def test_get_default_w_IsPath(self):
"""Parameter: default is a FilePath object when IsPath is set """
p = self.constructor(
Name='a', Prefix='-', Value='test.txt', Quote='\'',
Delimiter='=', IsPath=True)
self.assertEqual(p._get_default(), 'test.txt')
self.assertEqual(p.Default, 'test.txt')
p.Value = 'test2.txt'
self.assertEqual(p._get_default(), 'test.txt')
self.assertEqual(p.Default, 'test.txt')
assert isinstance(p._get_default(), FilePath)
assert isinstance(p.Default, FilePath)
def test_reset(self):
"""Parameter: reset correctly set Value to _default """
p1 = self.constructor(Name='a', Prefix='-', Value=42, Quote='\'',
Delimiter='=')
p1.Value = 43
self.assertNotEqual(p1.Default, p1.Value)
p1.reset()
self.assertEqual(p1.Default, p1.Value)
def test_isOn_True(self):
"""Parameter: isOn functions as expected with True Values """
for param in self.p_On:
assert param.isOn()
def test_isOn_False(self):
"""Parameter: isOn functions as expected with False Values """
for param in self.p_Off:
assert not param.isOn()
def test_isOff_True(self):
"""Parameter: isOff functions as expected with True values """
for param in self.p_Off:
assert param.isOff()
def test_isOff_False(self):
"""Parameter: isOff functions as expected with False values """
for param in self.p_On:
assert not param.isOff()
def test_on(self):
"""Parameter: on functions as expected """
for param in self.p_On + self.p_Off:
param.on('a')
assert param.isOn()
p = self.p_On[0]
self.assertRaises(ParameterError, p.on, None)
def test_off(self):
"""Parameter: off functions as expected """
for param in self.p_On + self.p_Off:
param.off()
assert param.isOff()
def test_str_off(self):
"""Parameter: str() prints empty string when off """
for p in self.p_Off:
self.assertEqual(str(p), '')
def test_str_modify_prefix(self):
"""Parameter: str functions as expected with different prefixes """
expected_results = ['-d', '--d', 'd']
for param, exp in zip(self.p_modified_prefix, expected_results):
param.on('')
self.assertEqual(str(param), exp)
def test_str_modify_name(self):
"""Parameter: str functions as expected with different names """
expected_results = ['-d', '-D', '-4', '-abcdef']
for param, exp in zip(self.p_modified_name, expected_results):
param.on('')
self.assertEqual(str(param), exp)
def test_str_modify_delimiter(self):
"""Parameter: str functions as expected with different delimiter """
expected_results = ['-d42', '-d42', '-d 42', '-d942', '-d=42']
for param, exp in zip(self.p_modified_delimiter, expected_results):
self.assertEqual(str(param), exp)
def test_str_modify_values(self):
"""Parameter: str functions as expected with different values """
expected_results = ['-d 42',
'-d pbl', '-d 2-2', '-d evo/t.txt', '-d \'']
for param, exp in zip(self.p_modified_value, expected_results):
self.assertEqual(str(param), exp)
def test_str_modify_quotes(self):
"""Parameter: str functions as expected with different quotes """
expected_results = ['-d42', '-d42', '-d 42 ', '-d\'42\'',
'-d\"42\"', '-dx42x']
for param, exp in zip(self.p_modified_quote, expected_results):
self.assertEqual(str(param), exp)
def test_str_modify_is_path(self):
"""Parameter: str functions as expected with different IsPath """
expected_results = ['-d "test.txt"', '-d test.txt', '-d "test.txt"']
for param, exp in zip(self.p_modified_is_path, expected_results):
self.assertEqual(str(param), exp)
def test_str_full(self):
"""Parameter: str functions as expected with all values non-default """
for p in self.p_full:
self.assertEqual(str(p), '-a \'42\'')
class MixedParameterTests(ValuedParameterTests):
""" Tests of the MixedParameter class """
constructor = MixedParameter
def setUp(self):
"""Setup some variables for the tests to use """
super(MixedParameterTests, self).setUp()
self.p_On = [self.constructor(Name='d', Prefix='-', Value=True),
self.constructor(Name='d', Prefix='-', Value=5),
self.constructor(Name='d', Prefix='-', Value=[1]),
self.constructor(Name='d', Prefix='-', Value=None),
self.constructor(Name='d', Prefix='-', Value='F')]
self.p_Off = [self.constructor(Name='d', Prefix='-', Value=False)]
# This is different from the superclass variable b/c we need to make
# sure that specifying IsPath with Value=None functions as expected
self.p_modified_is_path =\
[self.constructor(Name='d', Prefix='-', Delimiter=' ',
Value='test.txt', IsPath=True),
self.constructor(Name='d', Prefix='-', Delimiter=' ',
Value='test.txt', Quote='"', IsPath=True),
self.constructor(Name='d', Prefix='-', Delimiter=' ',
Value='test.txt', IsPath=False),
self.constructor(Name='d', Prefix='-', Delimiter=' ',
Value=None, IsPath=True),
self.constructor(Name='d', Prefix='-', Delimiter=' ',
Value=None, IsPath=False)]
def test_on(self):
"""Parameter: on functions as expected """
for param in self.p_On + self.p_Off:
param.on('a')
assert param.isOn()
p = self.p_On[0]
self.assertRaises(ParameterError, p.on, False)
def test_init_defaults(self):
"""MixedParameter: init functions as expected with default values"""
for p in self.p_default:
self.assertEqual(p.Name, 'a')
self.assertEqual(p.Prefix, '-')
self.assertEqual(p.Value, False)
self.assertEqual(p.Delimiter, None)
self.assertEqual(p.Quote, None)
self.assertEqual(p.Id, '-a')
self.assertEqual(p.IsPath, False)
def test_str_all_modes(self):
"""MixedParameter: str() functions in various modes """
p = MixedParameter(Prefix='-', Name='d', Delimiter='=', Quote=']')
self.assertEqual(str(p), '')
p.on()
self.assertEqual(str(p), '-d')
p.on('a')
self.assertEqual(str(p), '-d=]a]')
def test_str_modify_is_path(self):
"""MixedParameter: str functions as expected with different IsPath """
# This is different from the superclass test b/c we need to make
# sure that specifying IsPath with Value=None functions as expected
expected_results = ['-d "test.txt"', '-d "test.txt"',
'-d test.txt', '-d', '-d']
for param, exp in zip(self.p_modified_is_path, expected_results):
self.assertEqual(str(param), exp)
class ParametersTests(TestCase):
"""Tests of the Parameters class"""
def setUp(self):
self.fp = FlagParameter(Prefix='-', Name='d')
self.vp = ValuedParameter(Name='p', Prefix='-', Value=[1])
self.mp = MixedParameter(Prefix='--', Name='k', Delimiter=' ')
self.all_params = {self.fp.Id: self.fp, self.vp.Id: self.vp,
self.mp.Id: self.mp}
self.p1 = Parameters()
self.p2 = Parameters(self.all_params)
self._synonyms = {'Pino': '-p', 'K': 'k'}
self.p3 = Parameters(self.all_params, self._synonyms)
def test_init(self):
"""Parameters: init functions as expected"""
self.assertEqual(self.p1, {})
self.assertEqual(self.p2, self.all_params)
self.assertEqual(self.p3, self.all_params)
def test_lookup(self):
"""Parameters: test ability to lookup """
self.assertEqual(self.p2['-p'], self.vp)
self.assertEqual(self.p3['Pino'], self.vp)
def test_immutability(self):
"""Parameters: attempt to modify object raises error """
try:
self.p2['-p'] = 42
except TypeError:
pass
else:
raise AttributeError("Parameters shouldn't support assignment.")
try:
del self.p2['-p']
except TypeError:
pass
else:
raise AttributeError("Parameters shouldn't support deletion.")
def test_all_off(self):
"""Parameters: all_off() should turn all parameters off"""
p = self.p2
# turn everything on
for v in p.values():
try:
v.on(3)
except TypeError:
v.on()
self.assertTrue(v.isOn())
# turn everything off
p.all_off()
for v in p.values():
self.assertTrue(v.isOff())
class FilePathTests(TestCase):
""" Tests of the FilePath class """
def setUp(self):
""" Initialize variables to be used by tests """
self.filename = 'filename.txt'
self.relative_dir_path = 'a/relative/path/'
self.relative_dir_path_no_trailing_slash = 'a/relative/path'
self.relative_file_path = 'a/relative/filepath.txt'
self.absolute_dir_path = '/absolute/path/'
self.absolute_file_path = '/absolute/filepath.txt'
self.all_paths = [self.filename, self.relative_dir_path,
self.relative_file_path, self.absolute_dir_path,
self.absolute_file_path]
def test_init(self):
"""FilePath: initialization returns w/o error """
for p in self.all_paths:
self.assertEqual(FilePath(p), p)
self.assertEqual(FilePath(''), '')
def test_str(self):
"""FilePath: str wraps path in quotes """
# Do one explicit test (for sanity), then automatically run
# through the examples
self.assertEqual(str(FilePath(self.filename)), '"filename.txt"')
for p in self.all_paths:
self.assertEqual(str(FilePath(p)), '"' + p + '"')
def test_str_path_is_None(self):
"""FilePath: str return empty string when path is None """
self.assertEqual(str(FilePath(None)), '')
def test_add(self):
"""FilePath: add (or joining of paths) functions as expected """
actual = FilePath(self.relative_dir_path) + FilePath(self.filename)
expected = FilePath('a/relative/path/filename.txt')
self.assertEqual(actual, expected)
# result is a FilePath
assert isinstance(actual, FilePath)
# appending a string to a FilePath results in a FilePath
actual = FilePath(self.relative_dir_path) + 'filename.txt'
expected = FilePath('a/relative/path/filename.txt')
self.assertEqual(actual, expected)
# result is a FilePath
assert isinstance(actual, FilePath)
def test_FilePath_identity_preserved(self):
"""FilePath: trivial actions on FilePaths yeild original FilePath
"""
p = FilePath(self.filename)
# Creating FilePath from FilePath results in FilePath
# equal to original
self.assertEqual(FilePath(p), p)
for p in self.all_paths:
self.assertEqual(FilePath(p), p)
# Appending an empty FilePath to a FilePath results in FilePath
# equal to original
self.assertEqual(p + FilePath(''), p)
if __name__ == '__main__':
main()
```
#### File: skbio/app/util.py
```python
import os
from os import remove, system, mkdir, getcwd
from os.path import isabs, exists
from random import choice
from tempfile import gettempdir
from copy import deepcopy
from itertools import product
from skbio.app.parameters import Parameters, FilePath
# the following are used to create temp file names
from string import ascii_letters, digits
_all_chars = ascii_letters + digits
def which(executable_name, env_var='PATH'):
"""Equivalent to ``which executable_name`` in a *nix environment.
Will return ``None`` if ``executable_name`` cannot be found in ``env_var``
or if ``env_var`` is not set. Otherwise will return the first match in
``env_var``.
Note: this function will likely not work on Windows.
Code taken and modified from:
http://www.velocityreviews.com/forums/
t689526-python-library-call-equivalent-to-which-command.html
"""
exec_fp = None
if env_var in os.environ:
paths = os.environ[env_var]
for path in paths.split(os.pathsep):
curr_exec_fp = os.path.join(path, executable_name)
if os.access(curr_exec_fp, os.X_OK):
exec_fp = curr_exec_fp
break
return exec_fp
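# Illustrative usage: which('ls') typically returns something like '/bin/ls' on a
# *nix system, while which('no-such-program') returns None.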
class ApplicationError(OSError):
pass
class ApplicationNotFoundError(ApplicationError):
pass
class ResultPath(object):
""" Hold a file path a boolean value specifying whether file was written
"""
def __init__(self, Path, IsWritten=True):
""" Initialize the ResultPath object
Path: a string representing the absolute or relative path where
the file can be found
IsWritten: a boolean specifying whether the file has been written,
default = True
"""
self.Path = FilePath(Path)
self.IsWritten = IsWritten
class CommandLineAppResult(dict):
""" Class for holding the result of a CommandLineApplication run """
def __init__(self, out, err, exit_status, result_paths):
"""Initialization of CommandLineAppResult
out: a file handler to the file containing the stdout
err: a file handler to the file containing the stderr
exit_status: the exit status of the program, 0 if run ok, 1 else.
result_paths: dictionary containing ResultPath objects for each
output file that could be written
"""
self['StdOut'] = out
self['StdErr'] = err
self['ExitStatus'] = exit_status
self.file_keys = result_paths.keys()
for key, value in result_paths.items():
if value.IsWritten:
try:
self[key] = open(value.Path)
except IOError:
raise ApplicationError('Could not open %s' % value.Path)
else:
self[key] = None
def cleanUp(self):
""" Delete files that are written by CommandLineApplication from disk
WARNING: after cleanUp() you may still have access to part of
your result data, but you should be aware that if the file
size exceeds the size of the buffer you will only have part
of the file. To be safe, you should not use cleanUp() until
you are done with the file or have copied it to a different
location.
"""
file_keys = self.file_keys
for item in file_keys:
if self[item] is not None:
self[item].close()
remove(self[item].name)
# remove input handler temp files
if hasattr(self, "_input_filename"):
remove(self._input_filename)
def __del__(self):
""" Delete temporary files created by the CommandLineApplication
"""
if self['StdOut'] is not None:
remove(self['StdOut'].name)
if self['StdErr'] is not None:
remove(self['StdErr'].name)
class Application(object):
""" Generic Class for controlling an application """
_command = None
_command_delimiter = ' '
_parameters = {}
_synonyms = {}
def __init__(self, params=None):
"""
params: a dict of parameters which should be turned on where the
key is either the parameter id or a synonym for the parameter
and the value is either the value for the parameter or None
"""
self.Parameters = Parameters(self._parameters, self._synonyms)
if params:
for key, v in params.items():
try:
self.Parameters[key].on(v)
except TypeError:
self.Parameters[key].on()
class CommandLineApplication(Application):
""" Generic class for controlling command line applications
"""
_input_handler = '_input_as_string'
_suppress_stderr = False
_suppress_stdout = False
_working_dir = None
def __init__(self, params=None, InputHandler=None, SuppressStderr=None,
SuppressStdout=None, WorkingDir=None, TmpDir='/tmp',
TmpNameLen=20, HALT_EXEC=False):
""" Initialize the CommandLineApplication object
params: a dictionary mapping the Parameter id or synonym to its
value (or None for FlagParameters or MixedParameters in flag
mode) for Parameters that should be turned on
InputHandler: this is the method to be run on data when it is
passed into call. This should be a string containing the
method name. The default is _input_as_string which casts data
to a string before appending it to the command line argument
SuppressStderr: if set to True, will route standard error to
/dev/null, False by default
SuppressStdout: if set to True, will route standard out to
/dev/null, False by default
WorkingDir: the directory where you want the application to run,
default is the current working directory, but is useful to
change in cases where the program being run creates output
to its current working directory and you either don't want
it to end up where you are running the program, or the user
running the script doesn't have write access to the current
working directory
WARNING: WorkingDir MUST be an absolute path!
TmpDir: the directory where temp files will be created, /tmp
by default
TmpNameLen: the length of the temp file name
HALT_EXEC: if True, raises exception w/ command output just
before execution, doesn't clean up temp files. Default False.
"""
# Determine if the application is installed, and raise an error if not
self._error_on_missing_application(params)
# set attributes to parameter that was passed in or class default
if InputHandler is not None:
self.InputHandler = InputHandler
else:
self.InputHandler = self._input_handler
if SuppressStderr is not None:
self.SuppressStderr = SuppressStderr
else:
self.SuppressStderr = self._suppress_stderr
if SuppressStdout is not None:
self.SuppressStdout = SuppressStdout
else:
self.SuppressStdout = self._suppress_stdout
if WorkingDir is not None:
working_dir = WorkingDir
else:
working_dir = self._working_dir or getcwd()
self.WorkingDir = FilePath(working_dir)
self.TmpDir = FilePath(TmpDir)
self.TmpNameLen = TmpNameLen
self.HaltExec = HALT_EXEC
# create a variable to hold the name of the file being used as
# input to the application. this is important especially when
# you are using an input handler which creates a temporary file
# and the output filenames are based on the input filenames
self._input_filename = None
super(CommandLineApplication, self).__init__(params=params)
def __call__(self, data=None, remove_tmp=True):
"""Run the application with the specified kwargs on data
data: anything that can be cast into a string or written out to
a file. Usually either a list of things or a single string or
number. input_handler will be called on this data before it
is passed as part of the command-line argument, so by creating
your own input handlers you can customize what kind of data
you want your application to accept
remove_tmp: if True, removes tmp files
"""
input_handler = self.InputHandler
suppress_stdout = self.SuppressStdout
suppress_stderr = self.SuppressStderr
if suppress_stdout:
outfile = FilePath('/dev/null')
else:
outfile = self.getTmpFilename(self.TmpDir)
if suppress_stderr:
errfile = FilePath('/dev/null')
else:
errfile = FilePath(self.getTmpFilename(self.TmpDir))
if data is None:
input_arg = ''
else:
input_arg = getattr(self, input_handler)(data)
# Build up the command, consisting of a BaseCommand followed by
# input and output (file) specifications
command = self._command_delimiter.join(filter(None,
[self.BaseCommand,
str(input_arg),
'>', str(outfile),
'2>', str(errfile)]))
if self.HaltExec:
raise AssertionError("Halted exec with command:\n" + command)
# The return value of system is a 16-bit number containing the signal
# number that killed the process, and then the exit status.
# We only want to keep the exit status so do a right bitwise shift to
# get rid of the signal number byte
exit_status = system(command) >> 8
# Determine if error should be raised due to exit status of
# application
if not self._accept_exit_status(exit_status):
raise ApplicationError('Unacceptable application exit ' +
'status: %s\n' % str(exit_status) +
'Command:\n%s\n' % command +
'StdOut:\n%s\n' % open(outfile).read() +
'StdErr:\n%s\n' % open(errfile).read())
# open the stdout and stderr if not being suppressed
out = None
if not suppress_stdout:
out = open(outfile, "r")
err = None
if not suppress_stderr:
err = open(errfile, "r")
result_paths = self._get_result_paths(data)
try:
result = \
CommandLineAppResult(out, err, exit_status,
result_paths=result_paths)
except ApplicationError:
result = \
self._handle_app_result_build_failure(out, err, exit_status,
result_paths)
# Clean up the input file if one was created
if remove_tmp:
if self._input_filename:
remove(self._input_filename)
self._input_filename = None
return result
def _handle_app_result_build_failure(
self,
out,
err,
exit_status,
result_paths):
"""Called if ApplicationError raised on building CommandLineAppResult
This is useful for checking log files or other special handling
in cases when expected files aren't present.
"""
raise ApplicationError("Error constructing CommandLineAppResult.")
def _input_as_string(self, data):
""" Return data as a string """
return str(data)
def _input_as_multiline_string(self, data):
"""Write a multiline string to a temp file and return the filename.
data: a multiline string to be written to a file.
* Note: the result will be the filename as a FilePath object
(which is a string subclass).
"""
filename = self._input_filename = \
FilePath(self.getTmpFilename(self.TmpDir))
data_file = open(filename, 'w')
data_file.write(data)
data_file.close()
return filename
def _input_as_lines(self, data):
""" Write a seq of lines to a temp file and return the filename string
data: a sequence to be written to a file, each element of the
sequence will compose a line in the file
* Note: the result will be the filename as a FilePath object
(which is a string subclass).
* Note: '\n' will be stripped off the end of each sequence element
before writing to a file in order to avoid multiple new lines
accidentally be written to a file
"""
filename = self._input_filename = \
FilePath(self.getTmpFilename(self.TmpDir))
filename = FilePath(filename)
data_file = open(filename, 'w')
data_to_file = '\n'.join([str(d).strip('\n') for d in data])
data_file.write(data_to_file)
data_file.close()
return filename
def _input_as_path(self, data):
""" Return data as string with the path wrapped in quotes
data: path or filename, most likely as a string
* Note: the result will be the filename as a FilePath object
(which is a string subclass).
"""
return FilePath(data)
def _input_as_paths(self, data):
""" Return data as a space delimited string with each path quoted
data: paths or filenames, most likely as a list of
strings
"""
return self._command_delimiter.join(
map(str, map(self._input_as_path, data)))
def _absolute(self, path):
""" Convert a filename to an absolute path """
path = FilePath(path)
if isabs(path):
return path
else:
# these are both Path objects, so joining with + is acceptable
return self.WorkingDir + path
def _get_base_command(self):
""" Returns the full command string
input_arg: the argument to the command which represents the input
to the program, this will be a string, either
representing input or a filename to get input from
tI"""
command_parts = []
# Append a change directory to the beginning of the command to change
# to self.WorkingDir before running the command
# WorkingDir should be in quotes -- filenames might contain spaces
cd_command = ''.join(['cd ', str(self.WorkingDir), ';'])
if self._command is None:
raise ApplicationError('_command has not been set.')
command = self._command
parameters = self.Parameters
command_parts.append(cd_command)
command_parts.append(command)
command_parts.append(self._command_delimiter.join(filter(
None, (map(str, parameters.values())))))
return self._command_delimiter.join(command_parts).strip()
BaseCommand = property(_get_base_command)
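# Illustrative sketch (hypothetical subclass): with _command = 'prog' and a single
# turned-on flag '-a', BaseCommand evaluates to a string of the form
# 'cd "<working dir>/"; prog -a'.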
def _get_WorkingDir(self):
"""Gets the working directory"""
return self._curr_working_dir
def _set_WorkingDir(self, path):
"""Sets the working directory
Appends a slash to the end of path
The reasoning behind this is that the user may or may not pass
in a path with a '/' at the end. Since having multiple
'/' at the end doesn't hurt anything, it's convenient to
be able to rely on it, and not have to check for it
"""
self._curr_working_dir = FilePath(path) + '/'
try:
mkdir(self.WorkingDir)
except OSError:
# Directory already exists
pass
WorkingDir = property(_get_WorkingDir, _set_WorkingDir)
def _error_on_missing_application(self, params):
""" Raise an ApplicationNotFoundError if the app is not accessible
This method checks in the system path (usually $PATH) or for
the existence of self._command. If self._command is not found
in either place, an ApplicationNotFoundError is raised to
inform the user that the application they are trying to access is
not available.
This method should be overwritten when self._command does not
represent the relevant executable (e.g., self._command = 'prog -a')
or in more complex cases where the file to be executed may be
passed as a parameter (e.g., with java jar files, where the
jar file is passed to java via '-jar'). It can also be overwritten
to by-pass testing for application presence by never raising an
error.
"""
command = self._command
# strip off " characters, in case we got a FilePath object
found_in_path = which(command.strip('"')) is not None
if not (exists(command) or found_in_path):
raise ApplicationNotFoundError("Cannot find %s. Is it installed? "
"Is it in your path?" % command)
def _accept_exit_status(self, exit_status):
""" Return False to raise an error due to exit_status of applciation
This method should be overwritten if you'd like to raise an error
based on certain exit statuses of the application that was run. The
default is that no value of exit_status will raise an error.
"""
return True
def _get_result_paths(self, data):
""" Return dict of ResultPath objects representing all possible output
This method should be overwritten if the application creates
output other than stdout and stderr. This dictionary will have
keys based on the name that you'd like to access the file by in
the CommandLineAppResult object that will be created, and the
values which are ResultPath objects. For an example of how this
should be written see the rnaview or vienna_package classes.
WARNING: be sure that the path that you give a file is accurate
from any directory where the program could be running. For
that reason, absolute paths are very good. Relative paths
can also be used as long as you are careful. For cases where
the application leaves files in the current working directory,
you should append self.WorkingDir to the beginning of the file
name. It would be a very bad idea to just use a file name as
the path, in some cases that you might not be testing for.
"""
return {}
def getTmpFilename(self, tmp_dir="/tmp", prefix='tmp', suffix='.txt',
include_class_id=False, result_constructor=FilePath):
""" Return a temp filename
tmp_dir: path for temp file
prefix: text to append to start of file name
suffix: text to append to end of file name
include_class_id: if True, will append a class identifier (built
from the class name) to the filename following prefix. This is
False by default b/c there is some string processing overhead
in getting the class name. This will probably be most useful for
testing: if temp files are being left behind by tests, you can
turn this on in here (temporarily) to find out which tests are
leaving the temp files.
result_constructor: the constructor used to build the result
(default: cogent.app.parameters.FilePath). Note that joining
FilePath objects with one another or with strings, you must use
the + operator. If this causes trouble, you can pass str as the
the result_constructor.
"""
# check not none
if not tmp_dir:
tmp_dir = self.TmpDir
# if not current directory, append "/" if not already on path
elif not tmp_dir.endswith("/"):
tmp_dir += "/"
if include_class_id:
# Append the classname to the prefix from the class name
# so any problematic temp files can be associated with
# the class that created them. This should be especially
# useful for testing, but is turned off by default to
# avoid the string-parsing overhead.
class_id = str(self.__class__())
prefix = ''.join([prefix,
class_id[class_id.rindex('.') + 1:
class_id.index(' ')]])
try:
mkdir(tmp_dir)
except OSError:
# Directory already exists
pass
# note: it is OK to join FilePath objects with +
return result_constructor(tmp_dir) + result_constructor(prefix) + \
result_constructor(''.join([choice(_all_chars)
for i in range(self.TmpNameLen)])) +\
result_constructor(suffix)
class ParameterIterBase:
"""Base class for parameter iteration objects
This class provides base functionality for parameter iteration objects.
A parameter iteration object acts like a generator and returns
parameter dicts of varying values. The specific keys and ranges of values
can be specified. Subclasses of this object implement the way in which
the parameter values are chosen."""
def __init__(self, Application, Parameters, AlwaysOn=None):
"""Initialize the ParameterIterBase
Application : A CommandLineApplication subclass
Parameters : A dict keyed by the application parameter, valued by
the range of parameter values to enumerate over. For
FlagParameters, unless specified in AlwaysOn, the value
will cycle between True/False (on/off). For
MixedParameters, include [None] specifically to utilize
flag functionality.
AlwaysOn : List of parameters that will always be on
Parameters is checked against the application's known parameters, but
the check is only superficial: only keys are validated. AlwaysOn
values must have entries within Parameters.
NOTE: If the parameter is not specified in AlwaysOn, a False value
is appended so that the parameter can be turned off. Multiple False
states for a parameter will result if False is specified without
adding the parameter to AlwaysOn. If a parameter has a default value,
then that parameter is implicitly always on.
"""
self.AppParams = Application._parameters
# Validate Parameters
param_set = set(Parameters.keys())
app_param_set = set(self.AppParams.keys())
if not param_set.issubset(app_param_set):
not_present = str(param_set.difference(app_param_set))
raise ValueError(
"Parameter(s) %s not present in app" %
not_present)
# Validate AlwaysOn (it defaults to None, so guard against that)
alwayson_set = set(AlwaysOn) if AlwaysOn else set()
if not alwayson_set.issubset(param_set):
not_present = str(alwayson_set.difference(param_set))
raise ValueError("AlwaysOn value(s) %s not in Parameters" %
not_present)
# Make sure all values are lists
for k, v in Parameters.items():
if not isinstance(v, list):
Parameters[k] = [v]
_my_params = Parameters
# Append "off states" to relevant parameters
for k in param_set.difference(alwayson_set):
_my_params[k].append(False)
# Create separate key/value lists preserving index relation
self._keys, self._values = zip(*sorted(_my_params.items()))
# Construct generator
self._generator = self._init_generator()
def _init_generator(self):
"""Must be implemented in the subclass"""
pass
def _make_app_params(self, values):
"""Returns app's param dict with values set as described by values
"""
# A deep copy is necessary. Otherwise the dict values refer to
# the same object.
app_params = deepcopy(self.AppParams)
for key, value in zip(self._keys, values):
if value is False:
app_params[key].off()
elif value is True:
app_params[key].on()
else:
app_params[key].on(value)
return app_params
def __iter__(self):
return self
def next(self):
return self._generator.next()
def reset(self):
self._generator = self._init_generator()
class ParameterCombinations(ParameterIterBase):
"""Iterates over all combinations of parameters lexiographically"""
def _init_generator(self):
"""Iterates over all possible combinations of parameters
This method iterates over the cartesian product of parameter values
"""
for vals in product(*self._values):
yield self._make_app_params(vals)
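# Illustrative sketch of ParameterCombinations usage (MyApp, '-n' and '-v'
# are hypothetical; any CommandLineApplication subclass with those
# parameters would do):
#     param_iter = ParameterCombinations(MyApp,
#                                        Parameters={'-n': [1, 2], '-v': [True]},
#                                        AlwaysOn=['-n'])
#     for params in param_iter:
#         pass  # each `params` is a deep copy of MyApp._parameters with one
#               # combination of the requested values switched on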
def cmdline_generator(param_iter, PathToBin=None, PathToCmd=None,
PathsToInputs=None, PathToOutput=None,
PathToStderr='/dev/null', PathToStdout='/dev/null',
UniqueOutputs=False, InputParam=None,
OutputParam=None):
"""Generates command lines that can be used in a cluster environment
param_iter : ParameterIterBase subclass instance
PathToBin : Absolute location of the primary command (e.g., Python)
PathToCmd : Absolute location of the command
PathsToInputs : Absolute location(s) of input file(s)
PathToOutput : Absolute location of output file
PathToStderr : Path to stderr
PathToStdout : Path to stdout
UniqueOutputs : Generate unique tags for output files
InputParam : Application input parameter (if not specified, assumes
stdin is to be used)
OutputParam : Application output parameter (if not specified, assumes
stdout is to be used)
"""
# Make sure we have input(s) and output
if PathsToInputs is None:
raise ValueError("No inputfile specified")
if PathToOutput is None:
raise ValueError("No outputfile specified")
if not isinstance(PathsToInputs, list):
PathsToInputs = [PathsToInputs]
# PathToBin and PathToCmd can be blank
if PathToBin is None:
PathToBin = ''
if PathToCmd is None:
PathToCmd = ''
# stdout_ and stderr_ do not have to be redirected
if PathToStdout is None:
stdout_ = ''
else:
stdout_ = '> "%s"' % PathToStdout
if PathToStderr is None:
stderr_ = ''
else:
stderr_ = '2> "%s"' % PathToStderr
# Output can be redirected to stdout or specified output argument
if OutputParam is None:
output = '> "%s"' % PathToOutput
stdout_ = ''
else:
output_param = param_iter.AppParams[OutputParam]
output_param.on('"%s"' % PathToOutput)
output = str(output_param)
output_param.off()
output_count = 0
base_command = ' '.join([PathToBin, PathToCmd])
for params in param_iter:
# Support for multiple input files
for inputfile in PathsToInputs:
cmdline = [base_command]
cmdline.extend(sorted(filter(None, map(str, params.values()))))
# Input can come from stdin or specified input argument
if InputParam is None:
input = '< "%s"' % inputfile
else:
input_param = params[InputParam]
input_param.on('"%s"' % inputfile)
input = str(input_param)
input_param.off()
cmdline.append(input)
if UniqueOutputs:
cmdline.append(''.join([output, str(output_count)]))
output_count += 1
else:
cmdline.append(output)
cmdline.append(stdout_)
cmdline.append(stderr_)
yield ' '.join(cmdline)
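# Illustrative sketch (all paths and the param_iter object are hypothetical):
#     cmdlines = cmdline_generator(param_iter,
#                                  PathToBin='/usr/bin/python',
#                                  PathToCmd='/opt/myapp/run.py',
#                                  PathsToInputs='/data/in.txt',
#                                  PathToOutput='/data/out.txt')
# yields one string per parameter combination (and per input file), each
# resembling:
#     /usr/bin/python /opt/myapp/run.py -n 1 < "/data/in.txt" > "/data/out.txt" 2> "/dev/null"
# with the exact parameter formatting determined by the application's
# Parameter objects.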
def get_tmp_filename(tmp_dir=gettempdir(), prefix="tmp", suffix=".txt",
result_constructor=FilePath):
""" Generate a temporary filename and return as a FilePath object
tmp_dir: the directory to house the tmp_filename (default: the system
temp directory, as returned by tempfile.gettempdir())
prefix: string to append to beginning of filename (default: 'tmp')
Note: It is very useful to have prefix be descriptive of the
process which is creating the temporary file. For example, if
your temp file will be used to build a temporary blast database,
you might pass prefix='TempBlastDB'
suffix: the suffix to be appended to the temp filename
(default '.txt')
result_constructor: the constructor used to build the result filename
(default: cogent.app.parameters.FilePath). Note that when joining
FilePath objects with one another or with strings, you must use
the + operator. If this causes trouble, you can pass str as
the result_constructor.
"""
# check not none
if not tmp_dir:
tmp_dir = ""
# if not current directory, append "/" if not already on path
elif not tmp_dir.endswith("/"):
tmp_dir += "/"
chars = "abcdefghigklmnopqrstuvwxyz"
picks = chars + chars.upper() + "0123456790"
return result_constructor(tmp_dir) + result_constructor(prefix) +\
result_constructor("%s%s" %
(''.join([choice(picks) for i in range(20)]),
suffix))
def guess_input_handler(seqs, add_seq_names=False):
"""Returns the name of the input handler for seqs."""
if isinstance(seqs, str):
if '\n' in seqs: # can't be a filename...
return '_input_as_multiline_string'
else: # assume it was a filename
return '_input_as_string'
if isinstance(seqs, list) and len(seqs) and isinstance(seqs[0], tuple):
return '_input_as_seq_id_seq_pairs'
if add_seq_names:
return '_input_as_seqs'
return '_input_as_lines'
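# Illustrative sketch of the dispatch performed above:
#     guess_input_handler('seqs.fasta')       -> '_input_as_string'
#     guess_input_handler('>a\nACGT\n')       -> '_input_as_multiline_string'
#     guess_input_handler([('id1', 'ACGT')])  -> '_input_as_seq_id_seq_pairs'
#     guess_input_handler(['ACGT'], True)     -> '_input_as_seqs'
#     guess_input_handler(['ACGT'])           -> '_input_as_lines'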
```
#### File: skbio/core/distance.py
```python
r"""
Dissimilarity and distance matrices (:mod:`skbio.core.distance`)
================================================================
.. currentmodule:: skbio.core.distance
This module provides functionality for serializing, deserializing, and
manipulating dissimilarity and distance matrices in memory. There are two
matrix classes available, `DissimilarityMatrix` and `DistanceMatrix`.
Both classes can store measures of difference/distinction between objects. A
dissimilarity/distance matrix includes both a matrix of
dissimilarities/distances (floats) between objects, as well as unique IDs
(object labels; strings) identifying each object in the matrix.
`DissimilarityMatrix` can be used to store measures of dissimilarity between
objects, and does not require that the dissimilarities are symmetric (e.g.,
dissimilarities obtained using the *Gain in PD* measure [1]_).
`DissimilarityMatrix` is a more general container to store differences than
`DistanceMatrix`.
`DistanceMatrix` has the additional requirement that the differences it
stores are symmetric (e.g., Euclidean or Hamming distances).
.. note:: `DissimilarityMatrix` can be used to store distances, but it is
recommended to use `DistanceMatrix` to store this type of data as it
provides an additional check for symmetry. A distance matrix is a
dissimilarity matrix; this is modeled in the class design by having
`DistanceMatrix` as a subclass of `DissimilarityMatrix`.
Classes
-------
.. autosummary::
:toctree: generated/
DissimilarityMatrix
DistanceMatrix
Functions
---------
.. autosummary::
:toctree: generated/
randdm
References
----------
.. [1] <NAME>. (1992). "Conservation evaluation and phylogenetic
diversity".
Examples
--------
Assume we have the following delimited text file storing distances between
three objects with IDs ``a``, ``b``, and ``c``::
\ta\tb\tc
a\t0.0\t0.5\t1.0
b\t0.5\t0.0\t0.75
c\t1.0\t0.75\t0.0
Load a distance matrix from the file:
>>> from StringIO import StringIO
>>> from skbio.core.distance import DistanceMatrix
>>> dm_f = StringIO("\ta\tb\tc\n"
... "a\t0.0\t0.5\t1.0\n"
... "b\t0.5\t0.0\t0.75\n"
... "c\t1.0\t0.75\t0.0\n")
>>> dm = DistanceMatrix.from_file(dm_f)
>>> print dm
3x3 distance matrix
IDs:
a, b, c
Data:
[[ 0. 0.5 1. ]
[ 0.5 0. 0.75]
[ 1. 0.75 0. ]]
Access the distance (scalar) between objects ``'a'`` and ``'c'``:
>>> dm['a', 'c']
1.0
Get a row vector of distances between object ``'b'`` and all other objects:
>>> dm['b']
array([ 0.5 , 0. , 0.75])
numpy indexing/slicing also works as expected. Extract the third column:
>>> dm[:, 2]
array([ 1. , 0.75, 0. ])
Serialize the distance matrix to delimited text file:
>>> out_f = StringIO()
>>> dm.to_file(out_f)
>>> out_f.getvalue()
'\ta\tb\tc\na\t0.0\t0.5\t1.0\nb\t0.5\t0.0\t0.75\nc\t1.0\t0.75\t0.0\n'
>>> out_f.getvalue() == dm_f.getvalue()
True
A distance matrix object can also be created from an existing ``numpy.array``
(or an array-like object, such as a nested Python list):
>>> import numpy as np
>>> data = np.array([[0.0, 0.5, 1.0],
... [0.5, 0.0, 0.75],
... [1.0, 0.75, 0.0]])
>>> ids = ["a", "b", "c"]
>>> dm_from_np = DistanceMatrix(data, ids)
>>> print dm_from_np
3x3 distance matrix
IDs:
a, b, c
Data:
[[ 0. 0.5 1. ]
[ 0.5 0. 0.75]
[ 1. 0.75 0. ]]
>>> dm_from_np == dm
True
"""
from __future__ import division
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from copy import deepcopy
from itertools import izip
from os.path import exists
import numpy as np
from scipy.spatial.distance import squareform
from skbio.core.exception import (DissimilarityMatrixError,
DissimilarityMatrixFormatError,
DistanceMatrixError, MissingIDError)
class DissimilarityMatrix(object):
"""Store dissimilarities between objects.
A `DissimilarityMatrix` instance stores a square, hollow, two-dimensional
matrix of dissimilarities between objects. Objects could be, for example,
samples or DNA sequences. A sequence of IDs accompanies the
dissimilarities.
Methods are provided to load and save dissimilarity matrices from/to disk,
as well as perform common operations such as extracting dissimilarities
based on object ID.
Parameters
----------
data : array_like or DissimilarityMatrix
Square, hollow, two-dimensional ``numpy.ndarray`` of dissimilarities
(floats), or a structure that can be converted to a ``numpy.ndarray``
using ``numpy.asarray``. Can instead be a `DissimilarityMatrix` (or
subclass) instance, in which case the instance's data will be used.
Data will be converted to a float ``dtype`` if necessary. A copy will
*not* be made if already a ``numpy.ndarray`` with a float ``dtype``.
ids : sequence of str
Sequence of strings to be used as object IDs. Must match the number of
rows/cols in `data`.
Attributes
----------
data
ids
dtype
shape
size
T
See Also
--------
DistanceMatrix
Notes
-----
The dissimilarities are stored in redundant (square-form) format [1]_.
The data are not checked for symmetry, nor guaranteed/assumed to be
symmetric.
References
----------
.. [1] http://docs.scipy.org/doc/scipy/reference/spatial.distance.html
"""
# Used in __str__
_matrix_element_name = 'dissimilarity'
@classmethod
def from_file(cls, dm_f, delimiter='\t'):
"""Load dissimilarity matrix from a delimited text file or file path.
Creates a `DissimilarityMatrix` instance from a serialized
dissimilarity matrix stored as delimited text.
`dm_f` can be a file-like or a file path object containing delimited
text. The first line (header) must contain the IDs of each object. The
subsequent lines must contain an ID followed by each dissimilarity
(float) between the current object and all other objects, where the
order of objects is determined by the header line. For example, a 2x2
dissimilarity matrix with IDs ``'a'`` and ``'b'`` might look like::
<del>a<del>b
a<del>0.0<del>1.0
b<del>1.0<del>0.0
where ``<del>`` is the delimiter between elements.
Parameters
----------
dm_f : iterable of str or str
Iterable of strings (e.g., open file handle, file-like object, list
of strings, etc.) or a file path (a string) containing a serialized
dissimilarity matrix.
delimiter : str, optional
String delimiting elements in `dm_f`.
Returns
-------
DissimilarityMatrix
Instance of type `cls` containing the parsed contents of `dm_f`.
Notes
-----
Whitespace-only lines can occur anywhere throughout the "file" and are
ignored. Lines starting with ``#`` are treated as comments and ignored.
These comments can only occur *before* the ID header.
IDs will have any leading/trailing whitespace removed when they are
parsed.
.. note::
File-like objects passed to this method will not be closed upon
completion of the parsing; it is the responsibility of the owner of
the object to close it.
"""
# We aren't using np.loadtxt because it uses *way* too much memory
# (e.g., a 2GB matrix eats up 10GB, which then isn't freed after parsing
# has finished). See:
# http://mail.scipy.org/pipermail/numpy-tickets/2012-August/006749.html
fd = None
# We use iter() as we want to take a single pass over the iterable and
# maintain our current position after finding the header (mainly
# necessary for something like a list of strings).
if isinstance(dm_f, str) and exists(dm_f):
# check if it's a valid path, if so read the contents
fd = open(dm_f, 'U')
dm_f = iter(fd)
else:
dm_f = iter(dm_f)
# Strategy:
# - find the header
# - initialize an empty ndarray
# - for each row of data in the input file:
# - populate the corresponding row in the ndarray with floats
ids = cls._parse_ids(dm_f, delimiter)
num_ids = len(ids)
data = np.empty((num_ids, num_ids), dtype='float')
# curr_row_idx keeps track of the row index within the data matrix.
# We're not using enumerate() because there may be
# empty/whitespace-only lines throughout the data matrix. We want to
# ignore those and only count the actual rows of data.
curr_row_idx = 0
for line in dm_f:
line = line.strip()
if not line:
continue
elif curr_row_idx >= num_ids:
# We've hit a nonempty line after we already filled the data
# matrix. Raise an error because we shouldn't ignore extra
# data.
raise DissimilarityMatrixFormatError(
"Encountered extra rows without corresponding IDs in the "
"header.")
tokens = line.split(delimiter)
# -1 because the first element contains the current ID.
if len(tokens) - 1 != num_ids:
raise DissimilarityMatrixFormatError(
"There are %d values in row number %d, which is not equal "
"to the number of IDs in the header (%d)."
% (len(tokens) - 1, curr_row_idx + 1, num_ids))
curr_id = tokens[0].strip()
expected_id = ids[curr_row_idx]
if curr_id == expected_id:
data[curr_row_idx, :] = np.asarray(tokens[1:], dtype='float')
else:
raise DissimilarityMatrixFormatError(
"Encountered mismatched IDs while parsing the "
"dissimilarity matrix file. Found '%s' but expected '%s'. "
"Please ensure that the IDs match between the "
"dissimilarity matrix header (first row) and the row "
"labels (first column)." % (curr_id, expected_id))
curr_row_idx += 1
if curr_row_idx != num_ids:
raise DissimilarityMatrixFormatError(
"Expected %d row(s) of data, but found %d." % (num_ids,
curr_row_idx))
# if the input was a file path close the file
if fd is not None:
fd.close()
return cls(data, ids)
def __init__(self, data, ids):
if isinstance(data, DissimilarityMatrix):
data = data.data
data = np.asarray(data, dtype='float')
ids = tuple(ids)
self._validate(data, ids)
self._data = data
self._ids = ids
self._id_index = self._index_list(self._ids)
@property
def data(self):
"""Array of dissimilarities.
A square, hollow, two-dimensional ``numpy.ndarray`` of dissimilarities
(floats). A copy is *not* returned.
Notes
-----
This property is not writeable.
"""
return self._data
@property
def ids(self):
"""Tuple of object IDs.
A tuple of strings, one for each object in the dissimilarity matrix.
Notes
-----
This property is writeable, but the number of new IDs must match the
number of objects in `data`.
"""
return self._ids
@ids.setter
def ids(self, ids_):
ids_ = tuple(ids_)
self._validate(self.data, ids_)
self._ids = ids_
self._id_index = self._index_list(self._ids)
@property
def dtype(self):
"""Data type of the dissimilarities."""
return self.data.dtype
@property
def shape(self):
"""Two-element tuple containing the dissimilarity matrix dimensions.
Notes
-----
As the dissimilarity matrix is guaranteed to be square, both tuple
entries will always be equal.
"""
return self.data.shape
@property
def size(self):
"""Total number of elements in the dissimilarity matrix.
Notes
-----
Equivalent to ``self.shape[0] * self.shape[1]``.
"""
return self.data.size
@property
def T(self):
"""Transpose of the dissimilarity matrix.
See Also
--------
transpose
"""
return self.transpose()
def transpose(self):
"""Return the transpose of the dissimilarity matrix.
Notes
-----
A deep copy is returned.
Returns
-------
DissimilarityMatrix
Transpose of the dissimilarity matrix. Will be the same type as
`self`.
"""
return self.__class__(self.data.T.copy(), deepcopy(self.ids))
def redundant_form(self):
"""Return an array of dissimilarities in redundant format.
As this is the native format that the dissimilarities are stored in,
this is simply an alias for `data`.
Returns
-------
ndarray
Two-dimensional ``numpy.ndarray`` of dissimilarities in redundant
format.
Notes
-----
Redundant format is described in [1]_.
Does *not* return a copy of the data.
References
----------
.. [1] http://docs.scipy.org/doc/scipy/reference/spatial.distance.html
"""
return self.data
def copy(self):
"""Return a deep copy of the dissimilarity matrix.
Returns
-------
DissimilarityMatrix
Deep copy of the dissimilarity matrix. Will be the same type as
`self`.
"""
# We deepcopy IDs in case the tuple contains mutable objects at some
# point in the future.
return self.__class__(self.data.copy(), deepcopy(self.ids))
def __str__(self):
"""Return a string representation of the dissimilarity matrix.
Summary includes matrix dimensions, a (truncated) list of IDs, and
(truncated) array of dissimilarities.
Returns
-------
str
String representation of the dissimilarity matrix.
.. shownumpydoc
"""
return '%dx%d %s matrix\nIDs:\n%s\nData:\n' % (
self.shape[0], self.shape[1], self._matrix_element_name,
self._pprint_ids()) + str(self.data)
def __eq__(self, other):
"""Compare this dissimilarity matrix to another for equality.
Two dissimilarity matrices are equal if they have the same shape, IDs
(in the same order!), and have data arrays that are equal.
Checks are *not* performed to ensure that `other` is a
`DissimilarityMatrix` instance.
Parameters
----------
other : DissimilarityMatrix
Dissimilarity matrix to compare to for equality.
Returns
-------
bool
``True`` if `self` is equal to `other`, ``False`` otherwise.
.. shownumpydoc
"""
equal = True
# The order these checks are performed in is important to be as
# efficient as possible. The check for shape equality is not strictly
# necessary as it should be taken care of in np.array_equal, but I'd
# rather explicitly bail before comparing IDs or data. Use array_equal
# instead of (a == b).all() because of this issue:
# http://stackoverflow.com/a/10582030
try:
if self.shape != other.shape:
equal = False
elif self.ids != other.ids:
equal = False
elif not np.array_equal(self.data, other.data):
equal = False
except AttributeError:
equal = False
return equal
def __ne__(self, other):
"""Determine whether two dissimilarity matrices are not equal.
Parameters
----------
other : DissimilarityMatrix
Dissimilarity matrix to compare to.
Returns
-------
bool
``True`` if `self` is not equal to `other`, ``False`` otherwise.
See Also
--------
__eq__
.. shownumpydoc
"""
return not self == other
def __getitem__(self, index):
"""Slice into dissimilarity data by object ID or numpy indexing.
Extracts data from the dissimilarity matrix by object ID, a pair of
IDs, or numpy indexing/slicing.
Parameters
----------
index : str, two-tuple of str, or numpy index
`index` can be one of the following forms: an ID, a pair of IDs, or
a numpy index.
If `index` is a string, it is assumed to be an ID and a
``numpy.ndarray`` row vector is returned for the corresponding ID.
Note that the ID's row of dissimilarities is returned, *not* its
column. If the matrix is symmetric, the two will be identical, but
this makes a difference if the matrix is asymmetric.
If `index` is a two-tuple of strings, each string is assumed to be
an ID and the corresponding matrix element is returned that
represents the dissimilarity between the two IDs. Note that the
order of lookup by ID pair matters if the matrix is asymmetric: the
first ID will be used to look up the row, and the second ID will be
used to look up the column. Thus, ``dm['a', 'b']`` may not be the
same as ``dm['b', 'a']`` if the matrix is asymmetric.
Otherwise, `index` will be passed through to
``DissimilarityMatrix.data.__getitem__``, allowing for standard
indexing of a ``numpy.ndarray`` (e.g., slicing).
Returns
-------
ndarray or scalar
Indexed data, where return type depends on the form of `index` (see
description of `index` for more details).
Raises
------
MissingIDError
If the ID(s) specified in `index` are not in the dissimilarity
matrix.
Notes
-----
The lookup based on ID(s) is quick: IDs are mapped to row/column
indices via a dict, so the lookup itself is constant time.
.. shownumpydoc
"""
if isinstance(index, basestring):
if index in self._id_index:
return self.data[self._id_index[index]]
else:
raise MissingIDError(index)
elif self._is_id_pair(index):
for id_ in index:
if id_ not in self._id_index:
raise MissingIDError(id_)
return self.data[self._id_index[index[0]],
self._id_index[index[1]]]
else:
return self.data.__getitem__(index)
def to_file(self, out_f, delimiter='\t'):
"""Save the dissimilarity matrix to file in delimited text format.
See Also
--------
from_file
Parameters
----------
out_f : file-like object
File-like object to write serialized data to. Must have a ``write``
method. It is the caller's responsibility to close `out_f` when
done (if necessary).
delimiter : str, optional
Delimiter used to separate elements in output format.
"""
formatted_ids = self._format_ids(delimiter)
out_f.write(formatted_ids)
out_f.write('\n')
for id_, vals in izip(self.ids, self.data):
out_f.write(id_)
out_f.write(delimiter)
out_f.write(delimiter.join(np.asarray(vals, dtype=np.str)))
out_f.write('\n')
@staticmethod
def _parse_ids(dm_f, delimiter):
header_line = None
for line in dm_f:
line = line.strip()
if line and not line.startswith('#'):
header_line = line
break
if header_line is None:
raise DissimilarityMatrixFormatError(
"Could not find a header line containing IDs in the "
"dissimilarity matrix file. Please verify that the file is "
"not empty.")
else:
return map(lambda e: e.strip(), header_line.split(delimiter))
def _validate(self, data, ids):
"""Validate the data array and IDs.
Checks that the data is at least 1x1 in size, 2D, square, hollow, and
contains only floats. Also checks that IDs are unique and that the
number of IDs matches the number of rows/cols in the data array.
Subclasses can override this method to perform different/more specific
validation (e.g., see `DistanceMatrix`).
Notes
-----
Accepts arguments instead of inspecting instance attributes to avoid
creating an invalid dissimilarity matrix before raising an error.
Otherwise, the invalid dissimilarity matrix could be used after the
exception is caught and handled.
"""
num_ids = len(ids)
if 0 in data.shape:
raise DissimilarityMatrixError("Data must be at least 1x1 in "
"size.")
elif len(data.shape) != 2:
raise DissimilarityMatrixError("Data must have exactly two "
"dimensions.")
elif data.shape[0] != data.shape[1]:
raise DissimilarityMatrixError("Data must be square (i.e., have "
"the same number of rows and "
"columns).")
elif data.dtype != np.double:
raise DissimilarityMatrixError("Data must contain only floating "
"point values.")
elif np.trace(data) != 0:
raise DissimilarityMatrixError("Data must be hollow (i.e., the "
"diagonal can only contain zeros).")
elif num_ids != len(set(ids)):
raise DissimilarityMatrixError("IDs must be unique.")
elif num_ids != data.shape[0]:
raise DissimilarityMatrixError("The number of IDs must match the "
"number of rows/columns in the "
"data.")
def _index_list(self, list_):
return {id_: idx for idx, id_ in enumerate(list_)}
def _is_id_pair(self, index):
return (isinstance(index, tuple) and
len(index) == 2 and
all(map(lambda e: isinstance(e, basestring), index)))
def _format_ids(self, delimiter):
return delimiter.join([''] + list(self.ids))
def _pprint_ids(self, max_chars=80, delimiter=', ', suffix='...'):
# Adapted from http://stackoverflow.com/a/250373
ids_str = delimiter.join(self.ids)
if len(ids_str) > max_chars:
truncated = ids_str[:max_chars + 1].split(delimiter)[0:-1]
ids_str = delimiter.join(truncated) + delimiter + suffix
return ids_str
class DistanceMatrix(DissimilarityMatrix):
"""Store distances between objects.
A `DistanceMatrix` is a `DissimilarityMatrix` with the additional
requirement that the matrix data is symmetric. There are additional methods
made available that take advantage of this symmetry.
See Also
--------
DissimilarityMatrix
Notes
-----
The distances are stored in redundant (square-form) format [1]_. To
facilitate use with other scientific Python routines (e.g., scipy), the
distances can be retrieved in condensed (vector-form) format using
`condensed_form`.
`DistanceMatrix` only requires that the distances it stores are symmetric.
Checks are *not* performed to ensure the other three metric properties
hold (non-negativity, identity of indiscernibles, and triangle inequality)
[2]_. Thus, a `DistanceMatrix` instance can store distances that are not
metric.
References
----------
.. [1] http://docs.scipy.org/doc/scipy/reference/spatial.distance.html
.. [2] http://planetmath.org/metricspace
"""
# Override here, used in superclass __str__
_matrix_element_name = 'distance'
def condensed_form(self):
"""Return an array of distances in condensed format.
Returns
-------
ndarray
One-dimensional ``numpy.ndarray`` of distances in condensed format.
Notes
-----
Condensed format is described in [1]_.
The conversion is not a constant-time operation, though it should be
relatively quick to perform.
References
----------
.. [1] http://docs.scipy.org/doc/scipy/reference/spatial.distance.html
"""
return squareform(self.data, force='tovector')
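# Illustrative sketch (uses the 3x3 matrix from the module docstring, with
# distances a-b=0.5, a-c=1.0, b-c=0.75):
#     dm.redundant_form().shape  -> (3, 3)
#     dm.condensed_form()        -> array([ 0.5 ,  1.  ,  0.75])
# i.e., the upper triangle read row by row, as expected by scipy's
# squareform-based routines.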
def _validate(self, data, ids):
"""Validate the data array and IDs.
Overrides the superclass `_validate`. Performs a check for symmetry in
addition to the checks performed in the superclass.
"""
super(DistanceMatrix, self)._validate(data, ids)
if (data.T != data).any():
raise DistanceMatrixError("Data must be symmetric.")
def randdm(num_objects, ids=None, constructor=None, random_fn=None):
"""Generate a distance matrix populated with random distances.
Using the default `random_fn`, distances are randomly drawn from a uniform
distribution over ``[0, 1)``.
Regardless of `random_fn`, the resulting distance matrix is guaranteed to
be symmetric and hollow.
Parameters
----------
num_objects : int
The number of objects in the resulting distance matrix. For example, if
`num_objects` is 3, a 3x3 distance matrix will be returned.
ids : sequence of str or None, optional
A sequence of strings to be used as IDs. ``len(ids)`` must be equal to
`num_objects`. If not provided, IDs will be monotonically-increasing
integers cast as strings (numbering starts at 1). For example,
``('1', '2', '3')``.
constructor : type, optional
`DissimilarityMatrix` or subclass constructor to use when creating the
random distance matrix. The returned distance matrix will be of this
type. If ``None`` (the default), a `DistanceMatrix` instance will be
returned.
random_fn : function, optional
Function to generate random values. `random_fn` must accept two
arguments (number of rows and number of columns) and return a 2D
``numpy.ndarray`` of floats (or something that can be cast to float).
If ``None`` (the default), ``numpy.random.rand`` will be used.
Returns
-------
DissimilarityMatrix
`DissimilarityMatrix` (or subclass) instance of random distances. Type
depends on `constructor`.
See Also
--------
numpy.random.rand
"""
if constructor is None:
constructor = DistanceMatrix
if random_fn is None:
random_fn = np.random.rand
data = np.tril(random_fn(num_objects, num_objects), -1)
data += data.T
if not ids:
ids = map(str, range(1, num_objects + 1))
return constructor(data, ids)
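# Illustrative sketch of randdm (values are random, so only structural
# properties are shown):
#     dm = randdm(3, ids=['a', 'b', 'c'])
#     dm.shape                      -> (3, 3)
#     (dm.data == dm.data.T).all()  -> True   # symmetric
#     dm.data.diagonal().sum()      -> 0.0    # hollow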
```
#### File: skbio/core/exception.py
```python
from __future__ import division
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
class FileFormatError(Exception):
"""Exception raised when a file can not be parsed."""
pass
class RecordError(FileFormatError):
"""Exception raised when a record is bad."""
pass
class FieldError(RecordError):
"""Exception raised when a field within a record is bad."""
pass
class BiologicalSequenceError(Exception):
"""General error for biological sequence validation failures."""
pass
class SequenceCollectionError(Exception):
"""General error for sequence collection validation failures."""
pass
class DissimilarityMatrixError(Exception):
"""General error for dissimilarity matrix validation failures."""
pass
class DistanceMatrixError(DissimilarityMatrixError):
"""General error for distance matrix validation failures."""
pass
class MissingIDError(DissimilarityMatrixError):
"""Error for ID lookup that doesn't exist in the dissimilarity matrix."""
def __init__(self, missing_id):
super(MissingIDError, self).__init__()
self.args = ("The ID '%s' is not in the dissimilarity matrix." %
missing_id,)
class DissimilarityMatrixFormatError(DissimilarityMatrixError):
"""Error for reporting issues in dissimilarity matrix file format.
Typically used during parsing.
"""
pass
class TreeError(Exception):
"""General tree error"""
pass
class NoLengthError(TreeError):
"""Missing length when expected"""
pass
class DuplicateNodeError(TreeError):
"""Duplicate nodes with identical names"""
pass
class MissingNodeError(TreeError):
"""Expecting a node"""
pass
class NoParentError(MissingNodeError):
"""Missing a parent"""
pass
class FastqParseError(FileFormatError):
pass
```
#### File: core/tests/test_tree.py
```python
import numpy as np
import numpy.testing as nptest
from unittest import TestCase, main
from skbio.core.tree import TreeNode, _dnd_tokenizer
from skbio.core.exception import (NoLengthError, TreeError, RecordError,
MissingNodeError)
from skbio.maths.stats.test import correlation_t
class TreeTests(TestCase):
def setUp(self):
"""Prep the self"""
self.simple_t = TreeNode.from_newick("((a,b)i1,(c,d)i2)root;")
nodes = dict([(x, TreeNode(x)) for x in 'abcdefgh'])
nodes['a'].append(nodes['b'])
nodes['b'].append(nodes['c'])
nodes['c'].append(nodes['d'])
nodes['c'].append(nodes['e'])
nodes['c'].append(nodes['f'])
nodes['f'].append(nodes['g'])
nodes['a'].append(nodes['h'])
self.TreeNode = nodes
self.TreeRoot = nodes['a']
def test_copy(self):
"""copy a tree"""
self.simple_t.children[0].length = 1.2
self.simple_t.children[1].children[0].length = 0.5
cp = self.simple_t.copy()
gen = zip(cp.traverse(include_self=True),
self.simple_t.traverse(include_self=True))
for a, b in gen:
self.assertIsNot(a, b)
self.assertEqual(a.name, b.name)
self.assertEqual(a.length, b.length)
def test_append(self):
"""Append a node to a tree"""
second_tree = TreeNode.from_newick("(x,y)z;")
self.simple_t.append(second_tree)
self.assertEqual(self.simple_t.children[0].name, 'i1')
self.assertEqual(self.simple_t.children[1].name, 'i2')
self.assertEqual(self.simple_t.children[2].name, 'z')
self.assertEqual(len(self.simple_t.children), 3)
self.assertEqual(self.simple_t.children[2].children[0].name, 'x')
self.assertEqual(self.simple_t.children[2].children[1].name, 'y')
self.assertEqual(second_tree.parent, self.simple_t)
def test_extend(self):
"""Extend a few nodes"""
second_tree = TreeNode.from_newick("(x1,y1)z1;")
third_tree = TreeNode.from_newick("(x2,y2)z2;")
self.simple_t.extend([second_tree, third_tree])
self.assertEqual(self.simple_t.children[0].name, 'i1')
self.assertEqual(self.simple_t.children[1].name, 'i2')
self.assertEqual(self.simple_t.children[2].name, 'z1')
self.assertEqual(self.simple_t.children[3].name, 'z2')
self.assertEqual(len(self.simple_t.children), 4)
self.assertEqual(self.simple_t.children[2].children[0].name, 'x1')
self.assertEqual(self.simple_t.children[2].children[1].name, 'y1')
self.assertEqual(self.simple_t.children[3].children[0].name, 'x2')
self.assertEqual(self.simple_t.children[3].children[1].name, 'y2')
self.assertIs(second_tree.parent, self.simple_t)
self.assertIs(third_tree.parent, self.simple_t)
def test_extend_empty(self):
"""Extend on the empty case should work"""
self.simple_t.extend([])
self.assertEqual(self.simple_t.children[0].name, 'i1')
self.assertEqual(self.simple_t.children[1].name, 'i2')
self.assertEqual(len(self.simple_t.children), 2)
def test_gops(self):
"""Basic TreeNode operations should work as expected"""
p = TreeNode()
self.assertEqual(str(p), ';')
p.name = 'abc'
self.assertEqual(str(p), 'abc;')
p.length = 3
self.assertEqual(str(p), 'abc:3;') # don't suppress branch from root
q = TreeNode()
p.append(q)
self.assertEqual(str(p), '()abc:3;')
r = TreeNode()
q.append(r)
self.assertEqual(str(p), '(())abc:3;')
r.name = 'xyz'
self.assertEqual(str(p), '((xyz))abc:3;')
q.length = 2
self.assertEqual(str(p), '((xyz):2)abc:3;')
def test_pop(self):
"""Pop off a node"""
second_tree = TreeNode.from_newick("(x1,y1)z1;")
third_tree = TreeNode.from_newick("(x2,y2)z2;")
self.simple_t.extend([second_tree, third_tree])
i1 = self.simple_t.pop(0)
z2 = self.simple_t.pop()
self.assertEqual(i1.name, 'i1')
self.assertEqual(z2.name, 'z2')
self.assertEqual(i1.children[0].name, 'a')
self.assertEqual(i1.children[1].name, 'b')
self.assertEqual(z2.children[0].name, 'x2')
self.assertEqual(z2.children[1].name, 'y2')
self.assertEqual(self.simple_t.children[0].name, 'i2')
self.assertEqual(self.simple_t.children[1].name, 'z1')
self.assertEqual(len(self.simple_t.children), 2)
def test_remove(self):
"""Remove nodes"""
self.assertTrue(self.simple_t.remove(self.simple_t.children[0]))
self.assertEqual(len(self.simple_t.children), 1)
n = TreeNode()
self.assertFalse(self.simple_t.remove(n))
def test_adopt(self):
"""Adopt a node!"""
n1 = TreeNode(name='n1')
n2 = TreeNode(name='n2')
n3 = TreeNode(name='n3')
self.simple_t._adopt(n1)
self.simple_t.children[-1]._adopt(n2)
n2._adopt(n3)
# adopt doesn't update .children
self.assertEqual(len(self.simple_t.children), 2)
self.assertIs(n1.parent, self.simple_t)
self.assertIs(n2.parent, self.simple_t.children[-1])
self.assertIs(n3.parent, n2)
def test_remove_node(self):
"""Remove a node by index"""
n = self.simple_t._remove_node(-1)
self.assertEqual(n.parent, None)
self.assertEqual(len(self.simple_t.children), 1)
self.assertEqual(len(n.children), 2)
self.assertNotIn(n, self.simple_t.children)
def test_prune(self):
"""Collapse single descendent nodes"""
# check the identity case
cp = self.simple_t.copy()
self.simple_t.prune()
gen = zip(cp.traverse(include_self=True),
self.simple_t.traverse(include_self=True))
for a, b in gen:
self.assertIsNot(a, b)
self.assertEqual(a.name, b.name)
self.assertEqual(a.length, b.length)
# create a single descendent by removing tip 'a'
n = self.simple_t.children[0]
n.remove(n.children[0])
self.simple_t.prune()
self.assertEqual(len(self.simple_t.children), 2)
self.assertEqual(self.simple_t.children[0].name, 'i2')
self.assertEqual(self.simple_t.children[1].name, 'b')
def test_subset(self):
"""subset should return set of leaves that descends from node"""
t = self.simple_t
self.assertEqual(t.subset(), frozenset('abcd'))
c = t.children[0]
self.assertEqual(c.subset(), frozenset('ab'))
leaf = c.children[1]
self.assertEqual(leaf.subset(), frozenset(''))
def test_subsets(self):
"""subsets should return all subsets descending from a set"""
t = self.simple_t
self.assertEqual(t.subsets(), frozenset(
[frozenset('ab'), frozenset('cd')]))
def test_is_tip(self):
"""see if we're a tip or not"""
self.assertFalse(self.simple_t.is_tip())
self.assertFalse(self.simple_t.children[0].is_tip())
self.assertTrue(self.simple_t.children[0].children[0].is_tip())
def test_is_root(self):
"""see if we're at the root or not"""
self.assertTrue(self.simple_t.is_root())
self.assertFalse(self.simple_t.children[0].is_root())
self.assertFalse(self.simple_t.children[0].children[0].is_root())
def test_root(self):
"""Get the root!"""
root = self.simple_t
self.assertIs(root, self.simple_t.root())
self.assertIs(root, self.simple_t.children[0].root())
self.assertIs(root, self.simple_t.children[1].children[1].root())
def test_find(self):
"""Find a node in a tree"""
t = TreeNode.from_newick("((a,b)c,(d,e)f);")
exp = t.children[0]
obs = t.find('c')
self.assertEqual(obs, exp)
exp = t.children[0].children[1]
obs = t.find('b')
self.assertEqual(obs, exp)
with self.assertRaises(MissingNodeError):
_ = t.find('does not exist')
def test_find_cache_bug(self):
"""First implementation did not force the cache to be at the root"""
t = TreeNode.from_newick("((a,b)c,(d,e)f);")
tip_a = t.children[0].children[0]
tip_a.create_node_cache()
tip_e = tip_a.find('e')
self.assertEqual(tip_a._node_cache, {})
self.assertEqual(sorted(t._node_cache.keys()), ['a', 'b', 'c',
'd', 'e', 'f'])
def test_find_by_id(self):
"""Find a node by id"""
t1 = TreeNode.from_newick("((,),(,,));")
t2 = TreeNode.from_newick("((,),(,,));")
exp = t1.children[1]
obs = t1.find_by_id(6) # right inner node with 3 children
self.assertEqual(obs, exp)
exp = t2.children[1]
obs = t2.find_by_id(6) # right inner node with 3 children
self.assertEqual(obs, exp)
with self.assertRaises(MissingNodeError):
_ = t1.find_by_id(100)
def test_find_by_func(self):
"""Find nodes by a function"""
t = TreeNode.from_newick("((a,b)c,(d,e)f);")
func = lambda x: x.parent == t.find('c')
exp = ['a', 'b']
obs = [n.name for n in t.find_by_func(func)]
self.assertEqual(obs, exp)
def test_ancestors(self):
"""Get all the ancestors"""
exp = ['i1', 'root']
obs = self.simple_t.children[0].children[0].ancestors()
self.assertEqual([o.name for o in obs], exp)
exp = ['root']
obs = self.simple_t.children[0].ancestors()
self.assertEqual([o.name for o in obs], exp)
exp = []
obs = self.simple_t.ancestors()
self.assertEqual([o.name for o in obs], exp)
def test_siblings(self):
"""Get the siblings"""
exp = []
obs = self.simple_t.siblings()
self.assertEqual(obs, exp)
exp = ['i2']
obs = self.simple_t.children[0].siblings()
self.assertEqual([o.name for o in obs], exp)
exp = ['c']
obs = self.simple_t.children[1].children[1].siblings()
self.assertEqual([o.name for o in obs], exp)
self.simple_t.append(TreeNode(name="foo"))
self.simple_t.append(TreeNode(name="bar"))
exp = ['i1', 'foo', 'bar']
obs = self.simple_t.children[1].siblings()
self.assertEqual([o.name for o in obs], exp)
def test_ascii_art(self):
"""Make some ascii trees"""
# unlabeled internal node
tr = TreeNode.from_newick("(B:0.2,(C:0.3,D:0.4):0.6)F;")
obs = tr.ascii_art(show_internal=True, compact=False)
exp = " /-B\n-F-------|\n | /-C\n "\
" \\--------|\n \\-D"
self.assertEqual(obs, exp)
obs = tr.ascii_art(show_internal=True, compact=True)
exp = "-F------- /-B\n \-------- /-C\n \-D"
self.assertEqual(obs, exp)
obs = tr.ascii_art(show_internal=False, compact=False)
exp = " /-B\n---------|\n | /-C\n "\
" \\--------|\n \\-D"
self.assertEqual(obs, exp)
def test_accumulate_to_ancestor(self):
"""Get the distance from a node to its ancestor"""
t = TreeNode.from_newick("((a:0.1,b:0.2)c:0.3,(d:0.4,e)f:0.5)root;")
a = t.find('a')
exp_to_root = 0.1 + 0.3
obs_to_root = a.accumulate_to_ancestor(t)
self.assertEqual(obs_to_root, exp_to_root)
def test_distance(self):
"""Get the distance between two nodes"""
t = TreeNode.from_newick("((a:0.1,b:0.2)c:0.3,(d:0.4,e)f:0.5)root;")
tips = sorted([n for n in t.tips()], key=lambda x: x.name)
nptest.assert_almost_equal(tips[0].distance(tips[0]), 0.0)
nptest.assert_almost_equal(tips[0].distance(tips[1]), 0.3)
nptest.assert_almost_equal(tips[0].distance(tips[2]), 1.3)
with self.assertRaises(NoLengthError):
_ = tips[0].distance(tips[3])
nptest.assert_almost_equal(tips[1].distance(tips[0]), 0.3)
nptest.assert_almost_equal(tips[1].distance(tips[1]), 0.0)
nptest.assert_almost_equal(tips[1].distance(tips[2]), 1.4)
with self.assertRaises(NoLengthError):
_ = tips[1].distance(tips[3])
self.assertEqual(tips[2].distance(tips[0]), 1.3)
self.assertEqual(tips[2].distance(tips[1]), 1.4)
self.assertEqual(tips[2].distance(tips[2]), 0.0)
with self.assertRaises(NoLengthError):
_ = tips[2].distance(tips[3])
def test_lowest_common_ancestor(self):
"""TreeNode lowestCommonAncestor should return LCA for set of tips"""
t1 = TreeNode.from_newick("((a,(b,c)d)e,f,(g,h)i)j;")
t2 = t1.copy()
t3 = t1.copy()
t4 = t1.copy()
input1 = ['a'] # return self
input2 = ['a', 'b'] # return e
input3 = ['b', 'c'] # return d
input4 = ['a', 'h', 'g'] # return j
exp1 = t1.find('a')
exp2 = t2.find('e')
exp3 = t3.find('d')
exp4 = t4
obs1 = t1.lowest_common_ancestor(input1)
obs2 = t2.lowest_common_ancestor(input2)
obs3 = t3.lowest_common_ancestor(input3)
obs4 = t4.lowest_common_ancestor(input4)
self.assertEqual(obs1, exp1)
self.assertEqual(obs2, exp2)
self.assertEqual(obs3, exp3)
self.assertEqual(obs4, exp4)
# verify multiple calls work
t_mul = t1.copy()
exp_1 = t_mul.find('d')
exp_2 = t_mul.find('i')
obs_1 = t_mul.lowest_common_ancestor(['b', 'c'])
obs_2 = t_mul.lowest_common_ancestor(['g', 'h'])
self.assertEqual(obs_1, exp_1)
self.assertEqual(obs_2, exp_2)
def test_get_max_distance(self):
"""get_max_distance should get max tip distance across tree"""
tree = TreeNode.from_newick(
"((a:0.1,b:0.2)c:0.3,(d:0.4,e:0.5)f:0.6)root;")
dist, nodes = tree.get_max_distance()
nptest.assert_almost_equal(dist, 1.6)
self.assertEqual(sorted([n.name for n in nodes]), ['b', 'e'])
def test_set_max_distance(self):
"""set_max_distance sets MaxDistTips across tree"""
tree = TreeNode.from_newick(
"((a:0.1,b:0.2)c:0.3,(d:0.4,e:0.5)f:0.6)root;")
tree._set_max_distance()
tip_a, tip_b = tree.MaxDistTips
self.assertEqual(tip_a[0] + tip_b[0], 1.6)
self.assertEqual(sorted([tip_a[1].name, tip_b[1].name]), ['b', 'e'])
def test_compare_tip_distances(self):
t = TreeNode.from_newick('((H:1,G:1):2,(R:0.5,M:0.7):3);')
t2 = TreeNode.from_newick('(((H:1,G:1,O:1):2,R:3):1,X:4);')
obs = t.compare_tip_distances(t2)
# note: common taxa are H, G, R (only)
m1 = np.array([[0, 2, 6.5], [2, 0, 6.5], [6.5, 6.5, 0]])
m2 = np.array([[0, 2, 6], [2, 0, 6], [6, 6, 0]])
r = correlation_t(m1.flat, m2.flat)[0]
self.assertEqual(obs, (1 - r) / 2)
def test_compare_tip_distances_sample(self):
t = TreeNode.from_newick('((H:1,G:1):2,(R:0.5,M:0.7):3);')
t2 = TreeNode.from_newick('(((H:1,G:1,O:1):2,R:3):1,X:4);')
obs = t.compare_tip_distances(t2, sample=3, shuffle_f=sorted)
# note: common taxa are H, G, R (only)
m1 = np.array([[0, 2, 6.5], [2, 0, 6.5], [6.5, 6.5, 0]])
m2 = np.array([[0, 2, 6], [2, 0, 6], [6, 6, 0]])
r = correlation_t(m1.flat, m2.flat)[0]
self.assertEqual(obs, (1 - r) / 2)
# 4 common taxa, still picking H, G, R
s = '((H:1,G:1):2,(R:0.5,M:0.7,Q:5):3);'
t = TreeNode.from_newick(s, TreeNode)
s3 = '(((H:1,G:1,O:1):2,R:3,Q:10):1,X:4);'
t3 = TreeNode.from_newick(s3, TreeNode)
obs = t.compare_tip_distances(t3, sample=3, shuffle_f=sorted)
def test_tip_tip_distances_endpoints(self):
"""Test getting specifc tip distances with tipToTipDistances"""
t = TreeNode.from_newick('((H:1,G:1):2,(R:0.5,M:0.7):3);')
nodes = [t.find('H'), t.find('G'), t.find('M')]
names = ['H', 'G', 'M']
exp = np.array([[0, 2.0, 6.7], [2.0, 0, 6.7], [6.7, 6.7, 0.0]])
exp_order = nodes
obs, obs_order = t.tip_tip_distances(endpoints=names)
nptest.assert_almost_equal(obs, exp)
self.assertEqual(obs_order, exp_order)
obs, obs_order = t.tip_tip_distances(endpoints=nodes)
nptest.assert_almost_equal(obs, exp)
self.assertEqual(obs_order, exp_order)
def test_neighbors(self):
"""Get neighbors of a node"""
t = TreeNode.from_newick("((a,b)c,(d,e)f);")
exp = t.children
obs = t.neighbors()
self.assertEqual(obs, exp)
exp = t.children[0].children + [t]
obs = t.children[0].neighbors()
self.assertEqual(obs, exp)
exp = [t.children[0].children[0]] + [t]
obs = t.children[0].neighbors(ignore=t.children[0].children[1])
self.assertEqual(obs, exp)
exp = [t.children[0]]
obs = t.children[0].children[0].neighbors()
self.assertEqual(obs, exp)
def test_has_children(self):
"""Test if has children"""
t = TreeNode.from_newick("((a,b)c,(d,e)f);")
self.assertTrue(t.has_children())
self.assertTrue(t.children[0].has_children())
self.assertTrue(t.children[1].has_children())
self.assertFalse(t.children[0].children[0].has_children())
self.assertFalse(t.children[0].children[1].has_children())
self.assertFalse(t.children[1].children[0].has_children())
self.assertFalse(t.children[1].children[1].has_children())
def test_index_tree(self):
"""index_tree should produce correct index and node map"""
# test for first tree: contains singleton outgroup
t1 = TreeNode.from_newick('(((a,b),c),(d,e))')
t2 = TreeNode.from_newick('(((a,b),(c,d)),(e,f))')
t3 = TreeNode.from_newick('(((a,b,c),(d)),(e,f))')
id_1, child_1 = t1.index_tree()
nodes_1 = [n._leaf_index for n in t1.traverse(self_before=False,
self_after=True)]
self.assertEqual(nodes_1, [0, 1, 2, 3, 6, 4, 5, 7, 8])
self.assertEqual(child_1, [(2, 0, 1), (6, 2, 3), (7, 4, 5), (8, 6, 7)])
# test for second tree: strictly bifurcating
id_2, child_2 = t2.index_tree()
nodes_2 = [n._leaf_index for n in t2.traverse(self_before=False,
self_after=True)]
self.assertEqual(nodes_2, [0, 1, 4, 2, 3, 5, 8, 6, 7, 9, 10])
self.assertEqual(child_2, [(4, 0, 1), (5, 2, 3), (8, 4, 5), (9, 6, 7),
(10, 8, 9)])
# test for third tree: contains trifurcation and single-child parent
id_3, child_3 = t3.index_tree()
nodes_3 = [n._leaf_index for n in t3.traverse(self_before=False,
self_after=True)]
self.assertEqual(nodes_3, [0, 1, 2, 4, 3, 5, 8, 6, 7, 9, 10])
self.assertEqual(child_3, [(4, 0, 2), (5, 3, 3), (8, 4, 5), (9, 6, 7),
(10, 8, 9)])
def test_root_at(self):
"""Form a new root"""
t = TreeNode.from_newick("(((a,b)c,(d,e)f)g,h)i;")
with self.assertRaises(TreeError):
_ = t.root_at(t.find('h'))
exp = "(a,b,((d,e)f,(h)g)c)root;"
rooted = t.root_at('c')
obs = str(rooted)
self.assertEqual(obs, exp)
def test_root_at_midpoint(self):
"""Root at the midpoint"""
nodes, tree1 = self.TreeNode, self.TreeRoot
for n in tree1.traverse():
n.length = 1
result = tree1.root_at_midpoint()
self.assertEqual(result.distance(result.find('e')), 1.5)
self.assertEqual(result.distance(result.find('g')), 2.5)
exp_dist, exp_order = tree1.tip_tip_distances()
obs_dist, obs_order = result.tip_tip_distances()
nptest.assert_almost_equal(obs_dist, exp_dist)
self.assertEqual([n.name for n in obs_order],
[n.name for n in exp_order])
def test_compare_subsets(self):
"""compare_subsets should return the fraction of shared subsets"""
t = TreeNode.from_newick('((H,G),(R,M));')
t2 = TreeNode.from_newick('(((H,G),R),M);')
t4 = TreeNode.from_newick('(((H,G),(O,R)),X);')
result = t.compare_subsets(t)
self.assertEqual(result, 0)
result = t2.compare_subsets(t2)
self.assertEqual(result, 0)
result = t.compare_subsets(t2)
self.assertEqual(result, 0.5)
result = t.compare_subsets(t4)
self.assertEqual(result, 1 - 2. / 5)
result = t.compare_subsets(t4, exclude_absent_taxa=True)
self.assertEqual(result, 1 - 2. / 3)
result = t.compare_subsets(self.TreeRoot, exclude_absent_taxa=True)
self.assertEqual(result, 1)
result = t.compare_subsets(self.TreeRoot)
self.assertEqual(result, 1)
def test_assign_ids(self):
"""Assign IDs to the tree"""
t1 = TreeNode.from_newick("(((a,b),c),(e,f),(g));")
t2 = TreeNode.from_newick("(((a,b),c),(e,f),(g));")
t3 = TreeNode.from_newick("((g),(e,f),(c,(a,b)));")
t1_copy = t1.copy()
t1.assign_ids()
t2.assign_ids()
t3.assign_ids()
t1_copy.assign_ids()
self.assertEqual([(n.name, n.id) for n in t1.traverse()],
[(n.name, n.id) for n in t2.traverse()])
self.assertEqual([(n.name, n.id) for n in t1.traverse()],
[(n.name, n.id) for n in t1_copy.traverse()])
self.assertNotEqual([(n.name, n.id) for n in t1.traverse()],
[(n.name, n.id) for n in t3.traverse()])
def test_unrooted_deepcopy(self):
"""Do an unrooted_copy"""
t = TreeNode.from_newick("((a,(b,c)d)e,(f,g)h)i;")
exp = "(b,c,(a,((f,g)h)e)d)root;"
obs = t.find('d').unrooted_deepcopy()
self.assertEqual(str(obs), exp)
t_ids = {id(n) for n in t.traverse()}
obs_ids = {id(n) for n in obs.traverse()}
self.assertEqual(t_ids.intersection(obs_ids), set())
class DndTokenizerTests(TestCase):
"""Tests of the DndTokenizer factory function."""
def test_gdata(self):
"""DndTokenizer should work as expected on real data"""
exp = \
['(', '(', 'xyz', ':', '0.28124', ',', '(', 'def', ':', '0.24498',
',', 'mno', ':', '0.03627', ')', ':', '0.17710', ')', ':',
'0.04870', ',', 'abc', ':', '0.05925', ',', '(', 'ghi', ':',
'0.06914', ',', 'jkl', ':', '0.13776', ')', ':', '0.09853', ')',
';']
# split it up for debugging on an item-by-item basis
obs = list(_dnd_tokenizer(sample))
self.assertEqual(len(obs), len(exp))
for i, j in zip(obs, exp):
self.assertEqual(i, j)
# try it all in one go
self.assertEqual(list(_dnd_tokenizer(sample)), exp)
def test_nonames(self):
"""DndTokenizer should work as expected on trees with no names"""
exp = ['(', '(', ',', ')', ',', '(', ',', ')', ')', ';']
obs = list(_dnd_tokenizer(no_names))
self.assertEqual(obs, exp)
def test_missing_tip_name(self):
"""DndTokenizer should work as expected on trees with a missing name"""
exp = ['(', '(', 'a', ',', 'b', ')', ',', '(', 'c', ',', ')', ')', ';']
obs = list(_dnd_tokenizer(missing_tip_name))
self.assertEqual(obs, exp)
def test_minimal(self):
"""DndTokenizer should work as expected a minimal tree without names"""
exp = ['(', ')', ';']
obs = list(_dnd_tokenizer(minimal))
self.assertEqual(obs, exp)
class DndParserTests(TestCase):
"""Tests of the DndParser factory function."""
def test_nonames(self):
"""DndParser should produce the correct tree when there are no names"""
obs = TreeNode.from_newick(no_names)
exp = TreeNode()
exp.append(TreeNode())
exp.append(TreeNode())
exp.children[0].append(TreeNode())
exp.children[0].append(TreeNode())
exp.children[1].append(TreeNode())
exp.children[1].append(TreeNode())
self.assertEqual(str(obs), str(exp))
def test_minimal(self):
"""DndParser should produce the correct minimal tree"""
obs = TreeNode.from_newick(minimal)
exp = TreeNode()
exp.append(TreeNode())
self.assertEqual(str(obs), str(exp))
def test_missing_tip_name(self):
"""DndParser should produce the correct tree when missing a name"""
obs = TreeNode.from_newick(missing_tip_name)
exp = TreeNode()
exp.append(TreeNode())
exp.append(TreeNode())
exp.children[0].append(TreeNode(name='a'))
exp.children[0].append(TreeNode(name='b'))
exp.children[1].append(TreeNode(name='c'))
exp.children[1].append(TreeNode())
self.assertEqual(str(obs), str(exp))
def test_gsingle(self):
"""DndParser should produce a single-child TreeNode on minimal data"""
t = TreeNode.from_newick(single)
self.assertEqual(len(t), 1)
child = t[0]
self.assertEqual(child.name, 'abc')
self.assertEqual(child.length, 3)
self.assertEqual(str(t), '(abc:3.0);')
def test_gdouble(self):
"""DndParser should produce a double-child TreeNode from data"""
t = TreeNode.from_newick(double)
self.assertEqual(len(t), 2)
self.assertEqual(str(t), '(abc:3.0,def:4.0);')
def test_gonenest(self):
"""DndParser should work correctly with nested data"""
t = TreeNode.from_newick(onenest)
self.assertEqual(len(t), 2)
self.assertEqual(len(t[0]), 0) # first child is terminal
self.assertEqual(len(t[1]), 2) # second child has two children
self.assertEqual(str(t), '(abc:3.0,(def:4.0,ghi:5.0):6.0);')
def test_gnodedata(self):
"""DndParser should assign name to internal nodes correctly"""
t = TreeNode.from_newick(nodedata)
self.assertEqual(len(t), 2)
self.assertEqual(len(t[0]), 0) # first child is terminal
self.assertEqual(len(t[1]), 2) # second child has two children
self.assertEqual(str(t), '(abc:3.0,(def:4.0,ghi:5.0)jkl:6.0);')
info_dict = {}
for node in t.traverse():
info_dict[node.name] = node.length
self.assertEqual(info_dict['abc'], 3.0)
self.assertEqual(info_dict['def'], 4.0)
self.assertEqual(info_dict['ghi'], 5.0)
self.assertEqual(info_dict['jkl'], 6.0)
def test_data(self):
"""DndParser should work as expected on real data"""
t = TreeNode.from_newick(sample)
self.assertEqual(
str(t), '((xyz:0.28124,(def:0.24498,mno:0.03627):0.1771):0.0487,'
'abc:0.05925,(ghi:0.06914,jkl:0.13776):0.09853);')
tdata = TreeNode.from_newick(node_data_sample, unescape_name=True)
self.assertEqual(
str(tdata), "((xyz:0.28124,(def:0.24498,mno:0.03627)A:0.1771)"
"B:0.0487,abc:0.05925,(ghi:0.06914,jkl:0.13776)"
"C:0.09853);")
def test_gbad(self):
"""DndParser should fail if parens unbalanced"""
left = '((abc:3)'
right = '(abc:3))'
self.assertRaises(RecordError, TreeNode.from_newick, left)
self.assertRaises(RecordError, TreeNode.from_newick, right)
def test_DndParser(self):
"""DndParser tests"""
t_str = "(A_a,(B:1.0,C),'D_e':0.5)E;"
tree_unesc = TreeNode.from_newick(t_str, unescape_name=True)
tree_esc = TreeNode.from_newick(t_str, unescape_name=False)
self.assertEqual(tree_unesc.name, 'E')
self.assertEqual(tree_unesc.children[0].name, 'A a')
self.assertEqual(tree_unesc.children[1].children[0].name, 'B')
self.assertEqual(tree_unesc.children[1].children[0].length, 1.0)
self.assertEqual(tree_unesc.children[1].children[1].name, 'C')
self.assertEqual(tree_unesc.children[2].name, 'D_e')
self.assertEqual(tree_unesc.children[2].length, 0.5)
self.assertEqual(tree_esc.name, 'E')
self.assertEqual(tree_esc.children[0].name, 'A_a')
self.assertEqual(tree_esc.children[1].children[0].name, 'B')
self.assertEqual(tree_esc.children[1].children[0].length, 1.0)
self.assertEqual(tree_esc.children[1].children[1].name, 'C')
self.assertEqual(tree_esc.children[2].name, "'D_e'")
self.assertEqual(tree_esc.children[2].length, 0.5)
reload_test = tree_esc.to_newick(with_distances=True,
escape_name=False)
obs = TreeNode.from_newick(reload_test, unescape_name=False)
self.assertEqual(obs.to_newick(with_distances=True),
tree_esc.to_newick(with_distances=True))
reload_test = tree_unesc.to_newick(with_distances=True,
escape_name=False)
obs = TreeNode.from_newick(reload_test, unescape_name=False)
self.assertEqual(obs.to_newick(with_distances=True),
tree_unesc.to_newick(with_distances=True))
sample = """
(
(
xyz:0.28124,
(
def:0.24498,
mno:0.03627)
:0.17710)
:0.04870,
abc:0.05925,
(
ghi:0.06914,
jkl:0.13776)
:0.09853);
"""
node_data_sample = """
(
(
xyz:0.28124,
(
def:0.24498,
mno:0.03627)
'A':0.17710)
B:0.04870,
abc:0.05925,
(
ghi:0.06914,
jkl:0.13776)
C:0.09853);
"""
minimal = "();"
no_names = "((,),(,));"
missing_tip_name = "((a,b),(c,));"
empty = '();'
single = '(abc:3);'
double = '(abc:3, def:4);'
onenest = '(abc:3, (def:4, ghi:5):6 );'
nodedata = '(abc:3, (def:4, ghi:5)jkl:6 );'
if __name__ == '__main__':
main()
```
#### File: skbio/draw/distributions.py
```python
from __future__ import division
from itertools import cycle
import warnings
import numpy as np
from matplotlib import use
use('Agg', warn=False)
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.patches import Polygon, Rectangle
def boxplots(distributions, x_values=None, x_tick_labels=None, title=None,
x_label=None, y_label=None, x_tick_labels_orientation='vertical',
y_min=None, y_max=None, whisker_length=1.5, box_width=0.5,
box_colors=None, figure_width=None, figure_height=None,
legend=None):
"""Generate a figure with a boxplot for each distribution.
Parameters
----------
    distributions : list of lists
List of distributions.
x_values : list of numbers, optional
List indicating where each boxplot should be placed. Must be the same
length as `distributions` if provided.
x_tick_labels : list of str, optional
List of x-axis tick labels.
title : str, optional
Title of the plot.
x_label : str, optional
x-axis label.
y_label : str, optional
y-axis label.
x_tick_labels_orientation : {'vertical', 'horizontal'}
Orientation of the x-axis labels.
y_min : scalar, optional
Minimum value of the y-axis. If ``None``, uses matplotlib's autoscale.
y_max : scalar, optional
Maximum value of the y-axis. If ``None``, uses matplotlib's autoscale.
whisker_length : scalar, optional
Length of the whiskers as a function of the IQR. For example, if 1.5,
the whiskers extend to ``1.5 * IQR``. Anything outside of that range is
treated as an outlier.
box_width : scalar, optional
Width of each box in plot units.
box_colors : str, tuple, or list of colors, optional
Either a matplotlib-compatible string or tuple that indicates the color
to be used for every boxplot, or a list of colors to color each boxplot
individually. If ``None``, boxes will be the same color as the plot
background. If a list of colors is provided, a color must be provided
for each boxplot. Can also supply ``None`` instead of a color, which
will color the box the same color as the plot background.
figure_width : scalar, optional
Width of the plot figure in inches. If not provided, will default to
matplotlib's default figure width.
figure_height : scalar, optional
Height of the plot figure in inches. If not provided, will default to
matplotlib's default figure height.
legend : tuple or list, optional
Two-element tuple or list that contains a list of valid matplotlib
colors as the first element and a list of labels (strings) as the
second element. The lengths of the first and second elements must be
the same. If ``None``, a legend will not be plotted.
Returns
-------
matplotlib.figure.Figure
Figure containing a boxplot for each distribution.
See Also
--------
matplotlib.pyplot.boxplot
scipy.stats.ttest_ind
Notes
-----
This is a convenience wrapper around matplotlib's ``boxplot`` function that
allows for coloring of boxplots and legend generation.
Examples
--------
Create a plot with two boxplots:
.. plot::
>>> from skbio.draw.distributions import boxplots
>>> fig = boxplots([[2, 2, 1, 3, 4, 4.2, 7], [0, -1, 4, 5, 6, 7]])
Plot three distributions with custom colors and labels:
.. plot::
>>> from skbio.draw.distributions import boxplots
>>> fig = boxplots(
... [[2, 2, 1, 3], [0, -1, 0, 0.1, 0.3], [4, 5, 6, 3]],
... x_tick_labels=('Control', 'Treatment 1', 'Treatment 2'),
... box_colors=('green', 'blue', 'red'))
"""
# Make sure our input makes sense.
for distribution in distributions:
try:
list(map(float, distribution))
except:
raise ValueError("Each value in each distribution must be a "
"number.")
_validate_x_values(x_values, x_tick_labels, len(distributions))
# Create a new figure to plot our data on, and then plot the distributions.
result, plot_axes = plt.subplots()
box_plot = plt.boxplot(distributions, positions=x_values,
whis=whisker_length, widths=box_width)
if box_colors is not None:
if _is_single_matplotlib_color(box_colors):
box_colors = [box_colors] * len(box_plot['boxes'])
else:
# We check against the number of input distributions because mpl
# will only return non-empty boxplots from the boxplot() call
# above.
if len(box_colors) != len(distributions):
raise ValueError("Not enough colors were supplied to color "
"each boxplot.")
# Filter out colors corresponding to empty distributions.
box_colors = [color for distribution, color in zip(distributions,
box_colors)
if distribution]
_color_box_plot(plot_axes, box_plot, box_colors)
# Set up the various plotting options, such as x- and y-axis labels, plot
# title, and x-axis values if they have been supplied.
_set_axes_options(plot_axes, title, x_label, y_label,
x_tick_labels=x_tick_labels,
x_tick_labels_orientation=x_tick_labels_orientation,
y_min=y_min, y_max=y_max)
if legend is not None:
if len(legend) != 2:
raise ValueError("Invalid legend was provided. The legend must be "
"a two-element tuple/list where the first "
"element is a list of colors and the second "
"element is a list of labels.")
_create_legend(plot_axes, legend[0], legend[1], 'colors')
_set_figure_size(result, figure_width, figure_height)
return result
def grouped_distributions(plot_type, data, x_values=None,
data_point_labels=None, distribution_labels=None,
distribution_markers=None, x_label=None,
y_label=None, title=None,
x_tick_labels_orientation='vertical', y_min=None,
y_max=None, whisker_length=1.5,
error_bar_type='stdv', distribution_width=None,
figure_width=None, figure_height=None):
"""Generate a figure with distributions grouped at points along the x-axis.
Parameters
----------
plot_type : {'bar', 'scatter', 'box'}
Type of plot to visualize distributions with.
data : list of lists of lists
Each inner list represents a data point along the x-axis. Each data
point contains lists of data for each distribution in the group at that
point. This nesting allows for the grouping of distributions at each
data point.
x_values : list of scalars, optional
Spacing of data points along the x-axis. Must be the same length as the
number of data points and be in ascending sorted order. If not
provided, plots will be spaced evenly.
data_point_labels : list of str, optional
Labels for data points.
distribution_labels : list of str, optional
Labels for each distribution in a data point grouping.
distribution_markers : list of str or list of tuple, optional
Matplotlib-compatible strings or tuples that indicate the color or
symbol to be used to distinguish each distribution in a data point
grouping. Colors will be used for bar charts or box plots, while
symbols will be used for scatter plots.
x_label : str, optional
x-axis label.
y_label : str, optional
y-axis label.
title : str, optional
Plot title.
x_tick_labels_orientation : {'vertical', 'horizontal'}
Orientation of x-axis labels.
y_min : scalar, optional
Minimum value of the y-axis. If ``None``, uses matplotlib's autoscale.
y_max : scalar, optional
Maximum value of the y-axis. If ``None``, uses matplotlib's autoscale.
whisker_length : scalar, optional
If `plot_type` is ``'box'``, determines the length of the whiskers as a
function of the IQR. For example, if 1.5, the whiskers extend to
``1.5 * IQR``. Anything outside of that range is seen as an outlier.
If `plot_type` is not ``'box'``, this parameter is ignored.
error_bar_type : {'stdv', 'sem'}
        Type of error bars to use if `plot_type` is ``'bar'``. Can be either
        ``'stdv'`` (for the standard deviation) or ``'sem'`` (for the standard
        error of the mean). If `plot_type` is not ``'bar'``, this parameter is
        ignored.
distribution_width : scalar, optional
Width in plot units of each individual distribution (e.g. each bar if
the plot type is a bar chart, or the width of each box if the plot type
is a boxplot). If None, will be automatically determined.
figure_width : scalar, optional
Width of the plot figure in inches. If not provided, will default to
matplotlib's default figure width.
figure_height : scalar, optional
Height of the plot figure in inches. If not provided, will default to
matplotlib's default figure height.
Returns
-------
matplotlib.figure.Figure
Figure containing distributions grouped at points along the x-axis.
Examples
--------
Create a plot with two distributions grouped at three points:
.. plot::
>>> from skbio.draw.distributions import grouped_distributions
>>> fig = grouped_distributions('bar',
... [[[2, 2, 1,], [0, 1, 4]],
... [[1, 1, 1], [4, 4.5]],
... [[2.2, 2.4, 2.7, 1.0], [0, 0.2]]],
... distribution_labels=['Treatment 1',
... 'Treatment 2'])
"""
# Set up different behavior based on the plot type.
if plot_type == 'bar':
plotting_function = _plot_bar_data
distribution_centered = False
marker_type = 'colors'
elif plot_type == 'scatter':
plotting_function = _plot_scatter_data
distribution_centered = True
marker_type = 'symbols'
elif plot_type == 'box':
plotting_function = _plot_box_data
distribution_centered = True
marker_type = 'colors'
else:
raise ValueError("Invalid plot type '%s'. Supported plot types are "
"'bar', 'scatter', or 'box'." % plot_type)
num_points, num_distributions = _validate_input(data, x_values,
data_point_labels,
distribution_labels)
# Create a list of matplotlib markers (colors or symbols) that can be used
# to distinguish each of the distributions. If the user provided a list of
# markers, use it and loop around to the beginning if there aren't enough
# markers. If they didn't provide a list, or it was empty, use our own
# predefined list of markers (again, loop around to the beginning if we
# need more markers).
distribution_markers = _get_distribution_markers(marker_type,
distribution_markers,
num_distributions)
# Now calculate where each of the data points will start on the x-axis.
x_locations = _calc_data_point_locations(num_points, x_values)
assert (len(x_locations) == num_points), "The number of x_locations " +\
"does not match the number of data points."
if distribution_width is None:
# Find the smallest gap between consecutive data points and divide this
# by the number of distributions + 1 for some extra spacing between
# data points.
min_gap = max(x_locations)
for i in range(len(x_locations) - 1):
curr_gap = x_locations[i + 1] - x_locations[i]
if curr_gap < min_gap:
min_gap = curr_gap
distribution_width = min_gap / float(num_distributions + 1)
else:
if distribution_width <= 0:
raise ValueError("The width of a distribution cannot be less than "
"or equal to zero.")
result, plot_axes = plt.subplots()
# Iterate over each data point, and plot each of the distributions at that
# data point. Increase the offset after each distribution is plotted,
# so that the grouped distributions don't overlap.
for point, x_pos in zip(data, x_locations):
dist_offset = 0
for dist_index, dist, dist_marker in zip(range(num_distributions),
point, distribution_markers):
dist_location = x_pos + dist_offset
distribution_plot_result = plotting_function(plot_axes, dist,
dist_marker,
distribution_width,
dist_location,
whisker_length,
error_bar_type)
dist_offset += distribution_width
# Set up various plot options that are best set after the plotting is done.
# The x-axis tick marks (one per data point) are centered on each group of
# distributions.
plot_axes.set_xticks(_calc_data_point_ticks(x_locations,
num_distributions,
distribution_width,
distribution_centered))
_set_axes_options(plot_axes, title, x_label, y_label, x_values,
data_point_labels, x_tick_labels_orientation, y_min,
y_max)
if distribution_labels is not None:
_create_legend(plot_axes, distribution_markers, distribution_labels,
marker_type)
_set_figure_size(result, figure_width, figure_height)
# matplotlib seems to sometimes plot points on the rightmost edge of the
# plot without adding padding, so we need to add our own to both sides of
# the plot. For some reason this has to go after the call to draw(),
# otherwise matplotlib throws an exception saying it doesn't have a
# renderer. Boxplots need extra padding on the left.
if plot_type == 'box':
left_pad = 2 * distribution_width
else:
left_pad = distribution_width
plot_axes.set_xlim(plot_axes.get_xlim()[0] - left_pad,
plot_axes.get_xlim()[1] + distribution_width)
return result
def _validate_input(data, x_values, data_point_labels, distribution_labels):
"""Returns a tuple containing the number of data points and distributions
in the data.
Validates plotting options to make sure they are valid with the supplied
data.
"""
if data is None or not data or isinstance(data, basestring):
raise ValueError("The data must be a list type, and it cannot be "
"None or empty.")
num_points = len(data)
num_distributions = len(data[0])
empty_data_error_msg = ("The data must contain at least one data "
"point, and each data point must contain at "
"least one distribution to plot.")
if num_points == 0 or num_distributions == 0:
raise ValueError(empty_data_error_msg)
for point in data:
if len(point) == 0:
raise ValueError(empty_data_error_msg)
if len(point) != num_distributions:
raise ValueError("The number of distributions in each data point "
"grouping must be the same for all data points.")
# Make sure we have the right number of x values (one for each data point),
# and make sure they are numbers.
_validate_x_values(x_values, data_point_labels, num_points)
if (distribution_labels is not None and
len(distribution_labels) != num_distributions):
raise ValueError("The number of distribution labels must be equal "
"to the number of distributions.")
return num_points, num_distributions
def _validate_x_values(x_values, x_tick_labels, num_expected_values):
"""Validates the x values provided by the user, making sure they are the
correct length and are all numbers.
Also validates the number of x-axis tick labels.
Raises a ValueError if these conditions are not met.
"""
if x_values is not None:
if len(x_values) != num_expected_values:
raise ValueError("The number of x values must match the number "
"of data points.")
try:
list(map(float, x_values))
except:
raise ValueError("Each x value must be a number.")
if x_tick_labels is not None:
if len(x_tick_labels) != num_expected_values:
raise ValueError("The number of x-axis tick labels must match the "
"number of data points.")
def _get_distribution_markers(marker_type, marker_choices, num_markers):
"""Returns a list of length num_markers of valid matplotlib colors or
symbols.
The markers will be comprised of those found in marker_choices (if not None
and not empty) or a list of predefined markers (determined by marker_type,
which can be either 'colors' or 'symbols'). If there are not enough
markers, the list of markers will be reused from the beginning again (as
many times as are necessary).
"""
if num_markers < 0:
raise ValueError("num_markers must be greater than or equal to zero.")
if marker_choices is None or len(marker_choices) == 0:
if marker_type == 'colors':
marker_choices = ['b', 'g', 'r', 'c', 'm', 'y', 'w']
elif marker_type == 'symbols':
marker_choices = \
['s', 'o', '^', '>', 'v', '<', 'd', 'p', 'h', '8', '+', 'x']
else:
raise ValueError("Invalid marker_type: '%s'. marker_type must be "
"either 'colors' or 'symbols'." % marker_type)
if len(marker_choices) < num_markers:
# We don't have enough markers to represent each distribution uniquely,
# so let the user know. We'll add as many markers (starting from the
# beginning of the list again) until we have enough, but the user
# should still know because they may want to provide a new list of
# markers.
warnings.warn(
"There are not enough markers to uniquely represent each "
"distribution in your dataset. You may want to provide a list "
"of markers that is at least as large as the number of "
"distributions in your dataset.",
RuntimeWarning)
marker_cycle = cycle(marker_choices[:])
while len(marker_choices) < num_markers:
marker_choices.append(marker_cycle.next())
return marker_choices[:num_markers]
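# Illustrative example of the cycling behaviour above (hypothetical arguments):
# _get_distribution_markers('colors', ['r', 'g'], 5) warns that the markers
# are not unique and returns ['r', 'g', 'r', 'g', 'r'].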
def _calc_data_point_locations(num_points, x_values=None):
"""Returns the x-axis location for each of the data points to start at.
Note: A numpy array is returned so that the overloaded "+" operator can be
used on the array.
The x-axis locations are scaled by x_values if it is provided, or else the
x-axis locations are evenly spaced. In either case, the x-axis locations
will always be in the range [1, num_points].
"""
if x_values is None:
# Evenly space the x-axis locations.
x_locs = np.arange(1, num_points + 1)
else:
if len(x_values) != num_points:
raise ValueError("The number of x-axis values must match the "
"number of data points.")
# Scale to the range [1, num_points]. Taken from
# http://www.heatonresearch.com/wiki/Range_Normalization
x_min = min(x_values)
x_max = max(x_values)
x_range = x_max - x_min
n_range = num_points - 1
x_locs = np.array([(((x_val - x_min) * n_range) / float(x_range)) + 1
for x_val in x_values])
return x_locs
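# Worked example of the scaling above (hypothetical values): x_values of
# [0, 5, 20] with num_points=3 are mapped to [1.0, 1.5, 3.0], i.e. linearly
# rescaled into the range [1, num_points].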
def _calc_data_point_ticks(x_locations, num_distributions, distribution_width,
distribution_centered):
"""Returns a 1D numpy array of x-axis tick positions.
These positions will be centered on each data point.
Set distribution_centered to True for scatter and box plots because their
plot types naturally center over a given horizontal position. Bar charts
should use distribution_centered = False because the leftmost edge of a bar
starts at a given horizontal position and extends to the right for the
width of the bar.
"""
dist_size = num_distributions - 1 if distribution_centered else\
num_distributions
return x_locations + ((dist_size * distribution_width) / 2)
def _plot_bar_data(plot_axes, distribution, distribution_color,
distribution_width, x_position, whisker_length,
error_bar_type):
"""Returns the result of plotting a single bar in matplotlib."""
result = None
# We do not want to plot empty distributions because matplotlib will not be
# able to render them as PDFs.
if len(distribution) > 0:
avg = np.mean(distribution)
if error_bar_type == 'stdv':
error_bar = np.std(distribution)
elif error_bar_type == 'sem':
error_bar = np.std(distribution) / np.sqrt(len(distribution))
else:
raise ValueError(
"Invalid error bar type '%s'. Supported error bar types are "
"'stdv' and 'sem'." % error_bar_type)
result = plot_axes.bar(x_position, avg, distribution_width,
yerr=error_bar, ecolor='black',
facecolor=distribution_color)
return result
def _plot_scatter_data(plot_axes, distribution, distribution_symbol,
distribution_width, x_position, whisker_length,
error_bar_type):
"""Returns the result of plotting a single scatterplot in matplotlib."""
result = None
x_vals = [x_position] * len(distribution)
# matplotlib's scatter function doesn't like plotting empty data.
if len(x_vals) > 0 and len(distribution) > 0:
result = plot_axes.scatter(x_vals, distribution,
marker=distribution_symbol, c='k')
return result
def _plot_box_data(plot_axes, distribution, distribution_color,
distribution_width, x_position, whisker_length,
error_bar_type):
"""Returns the result of plotting a single boxplot in matplotlib."""
result = None
if len(distribution) > 0:
result = plot_axes.boxplot([distribution], positions=[x_position],
widths=distribution_width,
whis=whisker_length)
_color_box_plot(plot_axes, result, [distribution_color])
return result
def _is_single_matplotlib_color(color):
"""Returns True if color is a single (not a list) mpl color."""
single_color = False
if (isinstance(color, str)):
single_color = True
elif len(color) == 3 or len(color) == 4:
single_color = True
for e in color:
if not (isinstance(e, float) or isinstance(e, int)):
single_color = False
return single_color
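# For example, 'r' and (0.5, 0.5, 0.5) are treated as single colors here,
# while ['r', 'g', 'b'] is treated as a list of colors.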
def _color_box_plot(plot_axes, box_plot, colors):
"""Color boxes in the box plot with the specified colors.
If any of the colors are None, the box will not be colored.
The box_plot argument must be the dictionary returned by the call to
matplotlib's boxplot function, and the colors argument must consist of
valid matplotlib colors.
"""
# Note: the following code is largely taken from this matplotlib boxplot
# example:
# http://matplotlib.sourceforge.net/examples/pylab_examples/
# boxplot_demo2.html
if len(colors) != len(box_plot['boxes']):
raise ValueError("Not enough colors were supplied to color each "
"boxplot.")
for box, median, color in zip(box_plot['boxes'],
box_plot['medians'],
colors):
if color is not None:
box_x = []
box_y = []
# There are five points in the box. The first is the same as
# the last.
for i in range(5):
box_x.append(box.get_xdata()[i])
box_y.append(box.get_ydata()[i])
box_coords = zip(box_x, box_y)
box_polygon = Polygon(box_coords, facecolor=color)
plot_axes.add_patch(box_polygon)
# Draw the median lines back over what we just filled in with
# color.
median_x = []
median_y = []
for i in range(2):
median_x.append(median.get_xdata()[i])
median_y.append(median.get_ydata()[i])
plot_axes.plot(median_x, median_y, 'black')
def _set_axes_options(plot_axes, title=None, x_label=None, y_label=None,
x_values=None, x_tick_labels=None,
x_tick_labels_orientation='vertical', y_min=None,
y_max=None):
"""Applies various labelling options to the plot axes."""
if title is not None:
plot_axes.set_title(title)
if x_label is not None:
plot_axes.set_xlabel(x_label)
if y_label is not None:
plot_axes.set_ylabel(y_label)
if (x_tick_labels_orientation != 'vertical' and
x_tick_labels_orientation != 'horizontal'):
raise ValueError("Invalid orientation for x-axis tick labels: %s. "
"Valid orientations are 'vertical' or 'horizontal'."
% x_tick_labels_orientation)
# If labels are provided, always use them. If they aren't, use the x_values
# that denote the spacing between data points as labels. If that isn't
# available, simply label the data points in an incremental fashion,
# i.e. 1, 2, 3, ..., n, where n is the number of data points on the plot.
if x_tick_labels is not None:
labels = plot_axes.set_xticklabels(x_tick_labels,
rotation=x_tick_labels_orientation)
elif x_tick_labels is None and x_values is not None:
labels = plot_axes.set_xticklabels(x_values,
rotation=x_tick_labels_orientation)
else:
labels = plot_axes.set_xticklabels(
range(1, len(plot_axes.get_xticklabels()) + 1),
rotation=x_tick_labels_orientation)
# Set the y-axis range if specified.
if y_min is not None:
plot_axes.set_ylim(bottom=float(y_min))
if y_max is not None:
plot_axes.set_ylim(top=float(y_max))
def _create_legend(plot_axes, distribution_markers, distribution_labels,
marker_type):
"""Creates a legend on the supplied axes."""
# We have to use a proxy artist for the legend because box plots currently
# don't have a very useful legend in matplotlib, and using the default
# legend for bar/scatterplots chokes on empty/null distributions.
#
# Note: This code is based on the following examples:
# http://matplotlib.sourceforge.net/users/legend_guide.html
# http://stackoverflow.com/a/11423554
if len(distribution_markers) != len(distribution_labels):
raise ValueError("The number of distribution markers does not match "
"the number of distribution labels.")
if marker_type == 'colors':
legend_proxy = [Rectangle((0, 0), 1, 1, fc=marker)
for marker in distribution_markers]
plot_axes.legend(legend_proxy, distribution_labels, loc='best')
elif marker_type == 'symbols':
legend_proxy = [Line2D(range(1), range(1), color='white',
markerfacecolor='black', marker=marker)
for marker in distribution_markers]
plot_axes.legend(legend_proxy, distribution_labels, numpoints=3,
scatterpoints=3, loc='best')
else:
raise ValueError("Invalid marker_type: '%s'. marker_type must be "
"either 'colors' or 'symbols'." % marker_type)
def _set_figure_size(fig, width=None, height=None):
"""Sets the plot figure size and makes room for axis labels, titles, etc.
If both width and height are not provided, will use matplotlib defaults.
Making room for labels will not always work, and if it fails, the user will
be warned that their plot may have cut-off labels.
"""
# Set the size of the plot figure, then make room for the labels so they
# don't get cut off. Must be done in this order.
if width is not None and height is not None and width > 0 and height > 0:
fig.set_size_inches(width, height)
try:
fig.tight_layout()
except ValueError:
warnings.warn(
"Could not automatically resize plot to make room for "
"axes labels and plot title. This can happen if the labels or "
"title are extremely long and the plot size is too small. Your "
"plot may have its labels and/or title cut-off. To fix this, "
"try increasing the plot's size (in inches) and try again.",
RuntimeWarning)
```
#### File: distance/tests/test_base.py
```python
from __future__ import division
from unittest import TestCase, main
from skbio.core.distance import DissimilarityMatrix, DistanceMatrix
from skbio.maths.stats.distance.base import (CategoricalStats,
CategoricalStatsResults)
class CategoricalStatsTests(TestCase):
def setUp(self):
self.dm = DistanceMatrix([[0.0, 1.0, 2.0], [1.0, 0.0, 3.0],
[2.0, 3.0, 0.0]], ['a', 'b', 'c'])
self.categorical_stats = CategoricalStats(self.dm, [1, 2, 1])
def test_init_invalid_input(self):
# Requires a DistanceMatrix.
with self.assertRaises(TypeError):
_ = CategoricalStats(DissimilarityMatrix([[0, 2], [3, 0]],
['a', 'b']), [1, 2])
# Grouping vector length must match number of objects in dm.
with self.assertRaises(ValueError):
_ = CategoricalStats(self.dm, [1, 2])
# Grouping vector cannot have only unique values.
with self.assertRaises(ValueError):
_ = CategoricalStats(self.dm, [1, 2, 3])
# Grouping vector cannot have only a single group.
with self.assertRaises(ValueError):
_ = CategoricalStats(self.dm, [1, 1, 1])
def test_call(self):
with self.assertRaises(NotImplementedError):
_ = self.categorical_stats()
def test_call_invalid_permutations(self):
with self.assertRaises(ValueError):
_ = self.categorical_stats(-1)
class CategoricalStatsResultsTests(TestCase):
def setUp(self):
self.results = CategoricalStatsResults('foo', 'Foo', 'my stat', 42,
['a', 'b', 'c', 'd'],
0.01234567890, 0.1151111, 99)
self.p_value = 0.119123123123
def test_str(self):
exp = ('Method name Sample size Number of groups my stat '
'p-value Number of permutations\n foo 42'
' 4 0.0123456789 0.12'
' 99\n')
obs = str(self.results)
self.assertEqual(obs, exp)
def test_summary(self):
exp = ('Method name\tSample size\tNumber of groups\tmy stat\tp-value\t'
'Number of permutations\nfoo\t42\t4\t0.0123456789\t0.12\t99\n')
obs = self.results.summary()
self.assertEqual(obs, exp)
def test_format_p_value(self):
obs = self.results._format_p_value(self.p_value, 100)
self.assertEqual(obs, '0.12')
obs = self.results._format_p_value(self.p_value, 250)
self.assertEqual(obs, '0.12')
obs = self.results._format_p_value(self.p_value, 1000)
self.assertEqual(obs, '0.119')
def test_format_p_value_few_perms(self):
obs = self.results._format_p_value(self.p_value, 9)
self.assertEqual(obs, 'Too few permutations to compute p-value '
'(permutations = 9)')
obs = self.results._format_p_value(self.p_value, 1)
self.assertEqual(obs, 'Too few permutations to compute p-value '
'(permutations = 1)')
obs = self.results._format_p_value(self.p_value, 0)
self.assertEqual(obs, 'Too few permutations to compute p-value '
'(permutations = 0)')
def test_format_p_value_none(self):
obs = self.results._format_p_value(None, 0)
self.assertEqual(obs, 'N/A')
if __name__ == '__main__':
main()
```
#### File: stats/ordination/base.py
```python
from __future__ import print_function, absolute_import
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from collections import namedtuple
class OrdinationResults(namedtuple('OrdinationResults',
('eigvals', 'species', 'site', 'biplot',
'site_constraints', 'proportion_explained',
'ids'))):
    # An empty __slots__ avoids creating a per-instance __dict__, keeping
    # instances as lightweight as a plain namedtuple:
__slots__ = ()
def __new__(cls, eigvals, species, site=None, biplot=None,
site_constraints=None, proportion_explained=None, ids=None):
return super(OrdinationResults, cls).__new__(cls, eigvals, species,
site, biplot,
site_constraints,
proportion_explained,
ids)
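    # Example (illustrative): OrdinationResults(eigvals=evals, species=spp)
    # fills only the first two fields; site, biplot, site_constraints,
    # proportion_explained and ids all default to None.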
class Ordination(object):
short_method_name = 'Overwrite in subclass!'
long_method_name = 'Overwrite in subclass!'
```
#### File: stats/tests/test_distribution.py
```python
from unittest import TestCase, main
import numpy as np
from skbio.maths.stats.distribution import (chi_high, z_high, zprob, f_high,
binomial_high, bdtrc, stdtr)
class DistributionsTests(TestCase):
"""Tests of particular statistical distributions."""
def setUp(self):
self.values = [0, 0.01, 0.1, 0.5, 1, 2, 5, 10, 20, 30, 50, 200]
self.negvalues = [-i for i in self.values]
self.df = [1, 10, 100]
def test_z_high(self):
"""z_high should match R's pnorm(lower.tail=FALSE) function"""
negprobs = [
0.5000000, 0.5039894, 0.5398278, 0.6914625, 0.8413447,
0.9772499, 0.9999997, 1.0000000, 1.0000000, 1.0000000,
1.0000000, 1.0000000,
]
probs = [
5.000000e-01, 4.960106e-01, 4.601722e-01, 3.085375e-01,
1.586553e-01, 2.275013e-02, 2.866516e-07, 7.619853e-24,
2.753624e-89, 4.906714e-198, 0.000000e+00, 0.000000e+00]
for z, p in zip(self.values, probs):
np.testing.assert_allclose(z_high(z), p, atol=10e-7)
for z, p in zip(self.negvalues, negprobs):
np.testing.assert_allclose(z_high(z), p)
def test_zprob(self):
"""zprob should match twice the z_high probability for abs(z)"""
probs = [2 * i for i in [
5.000000e-01, 4.960106e-01, 4.601722e-01, 3.085375e-01,
1.586553e-01, 2.275013e-02, 2.866516e-07, 7.619853e-24,
2.753624e-89, 4.906714e-198, 0.000000e+00, 0.000000e+00]]
for z, p in zip(self.values, probs):
np.testing.assert_allclose(zprob(z), p, atol=10e-7)
for z, p in zip(self.negvalues, probs):
np.testing.assert_allclose(zprob(z), p, atol=10e-7)
def test_chi_high(self):
"""chi_high should match R's pchisq(lower.tail=FALSE) function"""
probs = {
1: [1.000000e+00, 9.203443e-01, 7.518296e-01, 4.795001e-01,
3.173105e-01, 1.572992e-01, 2.534732e-02, 1.565402e-03,
7.744216e-06, 4.320463e-08, 1.537460e-12, 2.088488e-45,
],
10: [1.000000e+00, 1.000000e-00, 1.000000e-00, 9.999934e-01,
9.998279e-01, 9.963402e-01, 8.911780e-01, 4.404933e-01,
2.925269e-02, 8.566412e-04, 2.669083e-07, 1.613931e-37,
],
100: [1.00000e+00, 1.00000e+00, 1.00000e+00, 1.00000e+00,
1.00000e+00, 1.00000e+00, 1.00000e+00, 1.00000e+00,
1.00000e+00, 1.00000e+00, 9.99993e-01, 1.17845e-08,
],
}
for df in self.df:
for x, p in zip(self.values, probs[df]):
np.testing.assert_allclose(chi_high(x, df), p, atol=10e-7)
def test_binomial_high(self):
"""Binomial high should match values from R for integer successes"""
expected = {
(0, 1, 0.5): 0.5,
(1, 1, 0.5): 0,
(1, 1, 0.0000001): 0,
(1, 1, 0.9999999): 0,
(3, 5, 0.75): 0.6328125,
(0, 60, 0.5): 1,
(129, 130, 0.5): 7.34684e-40,
(299, 300, 0.099): 4.904089e-302,
(9, 27, 0.0003): 4.958496e-29,
(1032, 2050, 0.5): 0.3702155,
(-1, 3, 0.1): 1, # if successes less than 0, return 1
(-0.5, 3, 0.1): 1,
}
for (key, value) in expected.items():
np.testing.assert_allclose(binomial_high(*key), value, 1e-4)
# should reject if successes > trials or successes < -1
self.assertRaises(ValueError, binomial_high, 7, 5, 0.5)
def test_f_high(self):
"""F high should match values from R for integer successes"""
expected = {
(1, 1, 0): 1,
(1, 1, 1): 0.5,
(1, 1, 20): 0.1400487,
(1, 1, 1000000): 0.0006366196,
(1, 10, 0): 1,
(1, 10, 5): 0.0493322,
(1, 10, 20): 0.001193467,
(10, 1, 0): 1,
(10, 10, 14.7): 0.0001062585,
# test non-integer degrees of freedom
(13.7, 11.9, 3.8): 0.01340347,
# used following series to track down a bug after a failed test
# case
(28, 29, 2): 0.03424088,
(28, 29, 10): 1.053019e-08,
(28, 29, 20): 1.628245e-12,
(28, 29, 300): 5.038791e-29,
(28, 35, 1): 0.4946777,
(28, 37, 1): 0.4934486,
(28, 38, 1): 0.4928721,
(28, 38.001, 1): 0.4928716,
(28, 38.5, 1): 0.4925927,
(28, 39, 1): 0.492319,
(28, 39, 10): 1.431901e-10,
(28, 39, 20): 1.432014e-15,
(28, 39, 30): 1.059964e-18,
(28, 39, 50): 8.846678e-23,
(28, 39, 10): 1.431901e-10,
(28, 39, 300): 1.226935e-37,
(28, 39, 50): 8.846678e-23,
(28, 39, 304.7): 9.08154e-38,
(28.4, 39.2, 304.7): 5.573927e-38,
(1032, 2050, 0): 1,
(1032, 2050, 4.15): 1.23535e-165,
(1032, 2050, 0.5): 1,
(1032, 2050, 0.1): 1,
}
e = sorted(expected.items())
for (key, value) in e:
np.testing.assert_allclose(f_high(*key), value, atol=10e-7)
def test_bdtrc(self):
"""bdtrc should give same results as cephes"""
k_s = [0, 1, 2, 3, 5]
n_s = [5, 10, 1000]
p_s = [1e-10, .1, .5, .9, .999999]
exp = [
4.999999999e-10,
0.40951,
0.96875,
0.99999,
1.0,
9.9999999955e-10,
0.6513215599,
0.9990234375,
0.9999999999,
1.0,
9.9999995005e-08,
1.0,
1.0,
1.0,
1.0,
9.999999998e-20,
0.08146,
0.8125,
0.99954,
1.0,
4.4999999976e-19,
0.2639010709,
0.9892578125,
0.9999999909,
1.0,
4.99499966766e-15,
1.0,
1.0,
1.0,
1.0,
9.9999999985e-30,
0.00856,
0.5,
0.99144,
1.0,
1.19999999937e-28,
0.0701908264,
0.9453125,
0.9999996264,
1.0,
1.66166987575e-22,
1.0,
1.0,
1.0,
1.0,
4.9999999996e-40,
0.00046,
0.1875,
0.91854,
0.99999999999,
2.09999999899e-38,
0.0127951984,
0.828125,
0.9999908784,
1.0,
4.14171214499e-30,
1.0,
1.0,
1.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
2.09999999928e-58,
0.0001469026,
0.376953125,
0.9983650626,
1.0,
1.36817318242e-45,
1.0,
1.0,
1.0,
1.0,
]
index = 0
for k in k_s:
for n in n_s:
for p in p_s:
np.testing.assert_allclose(bdtrc(k, n, p), exp[index])
index += 1
def test_stdtr(self):
"""stdtr should match cephes results"""
t = [-10, -3.1, -0.5, -0.01, 0, 1, 0.5, 10]
k = [2, 10, 100]
exp = [
0.00492622851166,
7.94776587798e-07,
4.9508444923e-17,
0.0451003650651,
0.00562532860804,
0.00125696358826,
0.333333333333,
0.313946802871,
0.309086782915,
0.496464554479,
0.496108987495,
0.496020605117,
0.5,
0.5,
0.5,
0.788675134595,
0.829553433849,
0.840137922108,
0.666666666667,
0.686053197129,
0.690913217085,
0.995073771488,
0.999999205223,
1.0,
]
index = 0
for i in t:
for j in k:
np.testing.assert_allclose(stdtr(j, i), exp[index])
index += 1
if __name__ == "__main__":
main()
```
#### File: sequences/tests/test_fastq.py
```python
from __future__ import division
from skbio.parse.sequences import parse_fastq
from skbio.core.exception import FastqParseError
from unittest import TestCase, main
class ParseFastqTests(TestCase):
def setUp(self):
""" Initialize variables to be used by the tests """
self.FASTQ_EXAMPLE = FASTQ_EXAMPLE.split('\n')
self.FASTQ_EXAMPLE_2 = FASTQ_EXAMPLE_2.split('\n')
def test_parse(self):
"""sequence and info objects should correctly match"""
for label, seq, qual in parse_fastq(self.FASTQ_EXAMPLE):
self.assertTrue(label in DATA)
self.assertEqual(seq, DATA[label]["seq"])
self.assertEqual(qual, DATA[label]["qual"])
def test_parse_error(self):
"""Does this raise a FastqParseError with incorrect input?"""
with self.assertRaises(FastqParseError):
list(parse_fastq(self.FASTQ_EXAMPLE_2, strict=True))
DATA = {
"GAPC_0015:6:1:1259:10413#0/1":
dict(seq='AACACCAAACTTCTCCACCACGTGAGCTACAAAAG',
qual=r'````Y^T]`]c^cabcacc`^Lb^ccYT\T\Y\WF'),
"GAPC_0015:6:1:1283:11957#0/1":
dict(seq='TATGTATATATAACATATACATATATACATACATA',
qual=r']KZ[PY]_[YY^```ac^\\`bT``c`\aT``bbb'),
"GAPC_0015:6:1:1284:10484#0/1":
dict(seq='TCAGTTTTCCTCGCCATATTTCACGTCCTAAAGCG',
qual=r'UM_]]U_]Z_Y^\^^``Y]`^SZ]\Ybb`^_LbL_'),
"GAPC_0015:6:1:1287:17135#0/1":
dict(seq='TGTGCCTATGGAAGCAGTTCTAGGATCCCCTAGAA',
qual=r'^aacccL\ccc\c\cTKTS]KZ\]]I\[Wa^T`^K'),
"GAPC_0015:6:1:1293:3171#0/1":
dict(seq="AAAGAAAGGAAGAAAAGAAAAAGAAACCCGAGTTA",
qual=r"b`bbbU_[YYcadcda_LbaaabWbaacYcc`a^c"),
"GAPC_0015:6:1:1297:10729#0/1":
dict(seq="TAATGCCAAAGAAATATTTCCAAACTACATGCTTA",
qual=r"T\ccLbb``bacc]_cacccccLccc\ccTccYL^"),
"GAPC_0015:6:1:1299:5940#0/1":
dict(seq="AATCAAGAAATGAAGATTTATGTATGTGAAGAATA",
qual=r"dcddbcfffdfffd`dd`^`c`Oc`Ybb`^eecde"),
"GAPC_0015:6:1:1308:6996#0/1":
dict(seq="TGGGACACATGTCCATGCTGTGGTTTTAACCGGCA",
qual=r"a]`aLY`Y^^ccYa`^^TccK_X]\c\c`caTTTc"),
"GAPC_0015:6:1:1314:13295#0/1":
dict(seq="AATATTGCTTTGTCTGAACGATAGTGCTCTTTGAT",
qual=r"cLcc\\dddddaaYd`T```bLYT\`a```bZccc"),
"GAPC_0015:6:1:1317:3403#0/1":
dict(seq="TTGTTTCCACTTGGTTGATTTCACCCCTGAGTTTG",
# had to add space in qual line
qual=r"\\\ZTYTSaLbb``\_UZ_bbcc`cc^[ac\a\Tc ".strip())
}
FASTQ_EXAMPLE = r"""@GAPC_0015:6:1:1259:10413#0/1
AACACCAAACTTCTCCACCACGTGAGCTACAAAAG
+GAPC_0015:6:1:1259:10413#0/1
````Y^T]`]c^cabcacc`^Lb^ccYT\T\Y\WF
@GAPC_0015:6:1:1283:11957#0/1
TATGTATATATAACATATACATATATACATACATA
+GAPC_0015:6:1:1283:11957#0/1
]KZ[PY]_[YY^```ac^\\`bT``c`\aT``bbb
@GAPC_0015:6:1:1284:10484#0/1
TCAGTTTTCCTCGCCATATTTCACGTCCTAAAGCG
+GAPC_0015:6:1:1284:10484#0/1
UM_]]U_]Z_Y^\^^``Y]`^SZ]\Ybb`^_LbL_
@GAPC_0015:6:1:1287:17135#0/1
TGTGCCTATGGAAGCAGTTCTAGGATCCCCTAGAA
+GAPC_0015:6:1:1287:17135#0/1
^aacccL\ccc\c\cTKTS]KZ\]]I\[Wa^T`^K
@GAPC_0015:6:1:1293:3171#0/1
AAAGAAAGGAAGAAAAGAAAAAGAAACCCGAGTTA
+GAPC_0015:6:1:1293:3171#0/1
b`bbbU_[YYcadcda_LbaaabWbaacYcc`a^c
@GAPC_0015:6:1:1297:10729#0/1
TAATGCCAAAGAAATATTTCCAAACTACATGCTTA
+GAPC_0015:6:1:1297:10729#0/1
T\ccLbb``bacc]_cacccccLccc\ccTccYL^
@GAPC_0015:6:1:1299:5940#0/1
AATCAAGAAATGAAGATTTATGTATGTGAAGAATA
+GAPC_0015:6:1:1299:5940#0/1
dcddbcfffdfffd`dd`^`c`Oc`Ybb`^eecde
@GAPC_0015:6:1:1308:6996#0/1
TGGGACACATGTCCATGCTGTGGTTTTAACCGGCA
+GAPC_0015:6:1:1308:6996#0/1
a]`aLY`Y^^ccYa`^^TccK_X]\c\c`caTTTc
@GAPC_0015:6:1:1314:13295#0/1
AATATTGCTTTGTCTGAACGATAGTGCTCTTTGAT
+GAPC_0015:6:1:1314:13295#0/1
cLcc\\dddddaaYd`T```bLYT\`a```bZccc
@GAPC_0015:6:1:1317:3403#0/1
TTGTTTCCACTTGGTTGATTTCACCCCTGAGTTTG
+GAPC_0015:6:1:1317:3403#0/1
\\\ZTYTSaLbb``\_UZ_bbcc`cc^[ac\a\Tc"""
FASTQ_EXAMPLE_2 = r"""@GAPC_0017:6:1:1259:10413#0/1
AACACCAAACTTCTCCACCACGTGAGCTACAAAAG
+GAPC_0015:6:1:1259:10413#0/1
````Y^T]`]c^cabcacc`^Lb^ccYT\T\Y\WF
@GAPC_0015:6:1:1283:11957#0/1
TATGTATATATAACATATACATATATACATACATA
+GAPC_0015:6:1:1283:11957#0/1
]KZ[PY]_[YY^```ac^\\`bT``c`\aT``bbb
@GAPC_0015:6:1:1284:10484#0/1
"""
if __name__ == "__main__":
main()
``` |
{
"source": "jorgechp/etsiit_bot",
"score": 2
} |
#### File: etsiit_bot/etsiit_bot/__main__.py
```python
try:
from etsiit_bot.bot import run_bot
except ModuleNotFoundError:
from sys import path as syspath
from pathlib import Path
syspath.append(str(Path(__file__).parents[1].resolve()))
from etsiit_bot.bot import run_bot
def main():
"""Start the bot."""
run_bot()
if __name__ == "__main__":
main()
```
#### File: jorgechp/etsiit_bot/noxfile.py
```python
from typing import List
import nox
requirements: List[str] = ["-r", "requirements.txt"]
test_requirements: List[str] = [
*requirements,
"pytest==5.4.3",
"pytest-cov==2.10.0",
]
format_requirements: List[str] = ["black==19.10b0", "isort==4.3.21"]
lint_requirements: List[str] = [
*requirements,
*format_requirements,
"pylint==2.5.3",
"mypy==0.782",
"flake8==3.8.3",
"pycodestyle==2.6.0",
]
python_target_files = ["etsiit_bot/", "tests/"]
python = ["3.6", "3.7", "3.8"]
nox.options.reuse_existing_virtualenvs = True
nox.options.stop_on_first_error = False
###############################################################################
# Linting
###############################################################################
@nox.session(name="lintpy")
def lint_python(session):
"""Lint Python source code."""
session.log("# Linting Python files...")
session.install(*lint_requirements)
session.run("pylint", *python_target_files)
session.run("mypy", *python_target_files)
session.run("flake8", *python_target_files)
session.run("pycodestyle", *python_target_files)
session.run("black", "-l", "79", "--check", "--diff", *python_target_files)
session.run("isort", "-rc", "--check-only", "--diff", *python_target_files)
@nox.session(name="lintmd")
def lint_markdown(session):
"""Lint Markdown files."""
session.log("# Linting Markdown files...")
session.run("mdl", "--style", ".mdl.rb", ".", external=True)
###############################################################################
# Formatting
###############################################################################
@nox.session(name="format")
def python_format(session):
"""Format Python source code."""
session.log("# Formating Python files...")
session.install(*format_requirements)
session.run("black", "-l", "79", *python_target_files)
session.run("isort", *python_target_files)
###############################################################################
# Testing
###############################################################################
@nox.session(python=python)
def tests(session):
"""Run python tests."""
session.log("# Running tests...")
session.install(*test_requirements)
session.run(
"pytest",
env={
"REPO_ROOT": "REPO_ROOT_dummy",
"TELEGRAM_TOKEN": "TELEGRAM_TOKEN_dummy",
"PROJECT_NAME": "PROJECT_NAME_dummy",
"PORT": "123",
},
)
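# Typical invocations (illustrative): "nox -s lintpy" runs the Python linters,
# "nox -s format" applies black and isort, and "nox -s tests" runs the pytest
# suite against each configured Python version.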
```
#### File: etsiit_bot/tests/test_bot.py
```python
import unittest
from unittest import TestCase, mock
try:
from etsiit_bot import bot
except ModuleNotFoundError:
from sys import path as syspath
from pathlib import Path
syspath.append(str(Path(__file__).parents[1].resolve()))
from etsiit_bot import bot
class TestBot(TestCase):
"""Test the bot commands."""
@staticmethod
@mock.patch("etsiit_bot.bot.logger")
def test_log_context(mock_logger):
"""Test the log_context util function."""
mock_context = mock.Mock()
mock_context.user_data = {"user_id": 123}
mock_context.chat_data = {"chat_id": 456}
bot.log_context(mock_context)
mock_logger.debug.assert_called_with(
"Called by %s in %s chat.",
mock_context.user_data["user_id"],
mock_context.chat_data["chat_id"],
)
@staticmethod
@mock.patch("etsiit_bot.bot.log_context")
def test_start(mock_log_context):
"""Test the start function."""
mock_update = mock.Mock()
mock_context = mock.Mock()
bot.start(mock_update, mock_context)
mock_update.message.reply_text.assert_called_once()
mock_log_context.assert_called_with(mock_context)
@staticmethod
@mock.patch("etsiit_bot.bot.log_context")
def test_show_help(mock_log_context):
"""Test the show_help function."""
mock_update = mock.Mock()
mock_context = mock.Mock()
bot.show_help(mock_update, mock_context)
mock_update.message.reply_text.assert_called_once()
mock_log_context.assert_called_with(mock_context)
@staticmethod
@mock.patch("etsiit_bot.bot.log_context")
def test_echo(mock_log_context):
"""Test the echo function."""
mock_update = mock.Mock()
mock_context = mock.Mock()
bot.echo(mock_update, mock_context)
mock_update.message.reply_text.assert_called_with(
mock_update.message.text
)
mock_log_context.assert_called_with(mock_context)
@staticmethod
@mock.patch("etsiit_bot.bot.logger")
def test_error(mock_logger):
"""Test the error function."""
mock_update = mock.Mock()
mock_context = mock.Mock()
bot.error(mock_update, mock_context)
mock_logger.warning.assert_called_with(
'Update "%s" caused error "%s"', mock_update, mock_context.error
)
@mock.patch("etsiit_bot.bot.Updater")
def test_run_bot(self, mock_updater):
"""Test the bot initialization funtion."""
bot.CommandHandler = mock.Mock()
bot.Filters = mock.Mock()
bot.MessageHandler = mock.Mock()
bot.run_bot()
mock_updater_object = mock_updater.return_value
mock_updater.assert_called_with(
"TELEGRAM_TOKEN_dummy", use_context=True
)
mock_updater_object.start_webhook.assert_called_with(
listen="0.0.0.0", port=123, url_path="TELEGRAM_TOKEN_dummy"
)
mock_updater_object.bot.set_webhook.assert_called_with(
"https://PROJECT_NAME_dummy.glitch.me/TELEGRAM_TOKEN_dummy"
)
mock_dp = mock_updater_object.dispatcher
self.assertEqual(mock_dp.add_handler.call_count, 3)
mock_dp.add_error_handler.assert_called_with(bot.error)
mock_updater_object.idle.assert_called_once()
self.assertEqual(mock_updater_object, mock_updater())
if __name__ == "__main__":
unittest.main()
```
#### File: etsiit_bot/tests/test__main__.py
```python
import unittest
from unittest import TestCase, mock
from etsiit_bot import __main__ as main
class TestMain(TestCase):
"""Test the main entrypoint."""
@staticmethod
@mock.patch("etsiit_bot.__main__.run_bot")
def test_main(mock_run_bot):
"""Test the main function."""
main.main()
mock_run_bot.assert_called_once()
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jorgecnavarrom/fetch_context_gbk",
"score": 3
} |
#### File: jorgecnavarrom/fetch_context_gbk/fetch_context_gbk.py
```python
from pathlib import Path
import sys
import os
import argparse
from Bio import SeqIO
__author__ = "<NAME>"
__version__ = "1.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
def parameter_parser():
def_extra = 20000
def_o = Path("./output")
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", help="GenBank file", type=Path, required=True)
parser.add_argument("-l", "--label", help="Target label (gene_id, locus_tag, etc.)",
type=str, required=True)
parser.add_argument("-e", "--extra", help=f"Number of bps at either side of \
the target gene/protein/locus_tag to extract. Default={def_extra}", \
default=def_extra, type=int)
parser.add_argument("-u", "--upstream", help="Override extension upstream of target \
with desired bps", type=int)
parser.add_argument("-d", "--downstream", help="Override extension downstream of \
target by desired bps", type=int)
parser.add_argument("-o", "--outputfolder", help=f"Where to put retrieved \
files. Default={def_o}", type=Path, default=def_o)
return parser.parse_args()
def validate_input(args):
extra_down = args.extra
extra_up = args.extra
if args.extra < 0:
sys.exit("Error: invalid argument for parameter '--extra'")
if not args.downstream is None:
extra_down = args.downstream
if args.downstream < 0:
sys.exit("Error: invalid argument for parameter '--downstream'")
if not args.upstream is None:
extra_up = args.upstream
if args.upstream < 0:
sys.exit("Error: invalid argument for parameter '--upstream'")
if not args.input.is_file():
sys.exit("Error: argument for parameter '--input' is not a valid file")
return extra_up, extra_down
# Add any additional label qualifiers to check here as needed
def find_label(qualifiers, target):
if "gene" in qualifiers:
if qualifiers["gene"][0] == target:
return True
if "protein_id" in qualifiers:
if qualifiers["protein_id"][0] == target:
return True
if "proteinId" in qualifiers:
if qualifiers["proteinId"][0] == target:
return True
if "locus_tag" in qualifiers:
if qualifiers["locus_tag"][0] == target:
return True
if "name" in qualifiers:
if qualifiers["name"][0] == target:
return True
return False
# Simple linear scan to nudge the extraction borders so they do not cut
# through an annotated feature
def check_borders_left(spans, pos):
for span in spans:
if pos in span:
return span[0] - 10
# if pos is in the middle of two feature spans, return orig. pos
elif span[0] > pos:
return pos
return pos
def check_borders_right(spans, pos):
    for span in spans:
        if pos in span:
            return span[-1] + 10
        # if the next feature span starts after pos, pos lies in the gap
        # between two features, so keep the original position
        elif span[0] > pos:
            return pos
    # reached the end and there were no annotations. Return orig pos
    return pos
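# For example (illustrative coordinates): if an extraction border falls inside
# a CDS spanning positions 1000-2500, it is pushed roughly 10 bp beyond that
# feature's boundary so the feature is not truncated; borders that fall
# between features are left unchanged.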
def scan_and_extract(gbk: Path, target: str, extra_up: int, extra_down: int, o: Path) -> None:
num_extraction = 1 # just in case there is more than 1 feature with target label
out_filename_base = f"{gbk.stem}_{target}_"
target_found = False
try:
records = list(SeqIO.parse(str(gbk), "genbank"))
except ValueError as e:
print("Error, not able to parse file {}: {}".format(str(gbk), str(e)))
else:
for record in records:
features = [f for f in record.features if f.type == "CDS"]
spans = [range(f.location.start, f.location.end) for f in features]
for feature in features:
if feature.type == "CDS":
if find_label(feature.qualifiers, target):
target_found = True
start_feature = int(feature.location.start)
end_feature = int(feature.location.end)
# Extend at both sides of target
if feature.location.strand == 1:
start_extraction = max(0, start_feature - extra_up)
end_extraction = min(end_feature + extra_down, len(record))
else:
start_extraction = max(0, start_feature - extra_down)
end_extraction = min(end_feature + extra_up, len(record))
# check if we have features at borders. If so, include them
start_extraction = check_borders_left(spans, start_extraction)
end_extraction = check_borders_right(spans, end_extraction)
# If target in the reverse strand, reverse-complement the whole extract
extraction = record[start_extraction:end_extraction]
rc = ""
if feature.location.strand != 1:
# extract and reverse
rc = "_rc"
extraction = extraction.reverse_complement(id=f"{extraction.id}{rc}", annotations=True)
with open(o / f"{out_filename_base}{num_extraction}{rc}.gbk", "w") as ef:
SeqIO.write(extraction, ef, "genbank")
num_extraction += 1
if not target_found:
print("Finished, but nothing found with target label...")
else:
print(f"Finished, {num_extraction-1} extraction(s) done")
return
if __name__ == '__main__':
args = parameter_parser()
if not args.input.is_file():
sys.exit(f"Error, {args.input} not a file")
o = args.outputfolder
if not o.is_dir():
os.makedirs(o, exist_ok=True)
extra_up, extra_down = validate_input(args)
print(f"Attempting to extract locus around feature with label '{args.label}'")
scan_and_extract(args.input, args.label, extra_up, extra_down, o)
``` |
{
"source": "jorgeCollinet/keras-image-room-clasification",
"score": 3
} |
#### File: keras-image-room-clasification/src/game.py
```python
import sys, pygame, time, os
from pygame.locals import *
# Constants
WIDTH = 840
HEIGHT = 580
black = (0,0,0)
white = (255,255,255)
red = (255,0,0)
UNLABELED_DIR = 'unlabelled'
LABELED_DIR = 'labelled'
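# Workflow sketch: images placed in UNLABELED_DIR are displayed one at a time;
# clicking a category button moves the current file into the matching
# subfolder of LABELED_DIR (the subfolders are assumed to exist beforehand).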
# ---------------------------------------------------------------------
def load_image(filename, transparent=False):
try:
image = pygame.image.load(filename)
image = pygame.transform.scale(image, (WIDTH, HEIGHT))
except pygame.error as message:
print("Error in file: " + filename)
raise message
image = image.convert()
if transparent:
color = image.get_at((0,0))
image.set_colorkey(color, RLEACCEL)
return image
def text_objects(text, text_color=black):
font = pygame.font.Font("freesansbold.ttf",20)
largeText = pygame.font.Font('freesansbold.ttf',115)
textSurface = font.render(text, True, text_color)
return textSurface, textSurface.get_rect()
def button(screen, msg, x, y, w, h, ic, ac, destination_folder):
global background_image
mouse = pygame.mouse.get_pos()
click = pygame.mouse.get_pressed()
if x+w > mouse[0] > x and y+h > mouse[1] > y:
pygame.draw.rect(screen, ac,(x,y,w,h))
textSurf, textRect = text_objects(msg, white)
if click[0]==1:
org = get_first_file(UNLABELED_DIR)
dest = destination_folder + '/' + org.split('/')[-1]
move_file(org, dest)
background_image = get_first_unlabeled_photo(UNLABELED_DIR)
screen.blit(background_image, (0, 0))
print("apreto -> " + destination_folder)
time.sleep(0.5)
else:
pygame.draw.rect(screen, ic,(x,y,w,h))
textSurf, textRect = text_objects(msg, black)
textRect.center = ( (x+(w/2)), (y+(h/2)) )
screen.blit(textSurf, textRect)
def get_first_file(path):
return path +'/'+ os.listdir(path)[0]
def get_first_unlabeled_photo(path):
    try:
return load_image(get_first_file(path))
except:
os.remove(get_first_file(path))
return load_image(get_first_file(path))
def move_file(org, dest):
os.rename(org, dest)
# ---------------------------------------------------------------------
def main():
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("BrotherEngine with Pygame")
background_image = get_first_unlabeled_photo(UNLABELED_DIR)
screen.blit(background_image, (0, 0))
while True:
for eventos in pygame.event.get():
if eventos.type == QUIT:
sys.exit(0)
button(screen, msg='bathroom', x=40, y=500, w=90, h=30, ic=red, ac=20, destination_folder=LABELED_DIR + '/bathroom')
button(screen, msg='living', x=140, y=500, w=90, h=30, ic=red, ac=20, destination_folder=LABELED_DIR + '/living')
button(screen, msg='exterior', x=240, y=500, w=90, h=30, ic=red, ac=20, destination_folder=LABELED_DIR + '/exterior')
button(screen, msg='kitchen', x=340, y=500, w=90, h=30, ic=red, ac=20, destination_folder=LABELED_DIR + '/kitchen')
button(screen, msg='bedroom', x=440, y=500, w=90, h=30, ic=red, ac=20, destination_folder=LABELED_DIR + '/bedroom')
button(screen, msg='plane', x=540, y=500, w=90, h=30, ic=red, ac=20, destination_folder=LABELED_DIR + '/plane')
button(screen, msg='publicity', x=640, y=500, w=90, h=30, ic=red, ac=20, destination_folder=LABELED_DIR + '/publicity')
        button(screen, msg='other', x=740, y=500, w=90, h=30, ic=red, ac=20, destination_folder='./other') # This won't be classified
pygame.display.flip()
return 0
if __name__ == '__main__':
pygame.init()
main()
``` |
{
"source": "jorgecorrea/google-play-scraper",
"score": 3
} |
#### File: tests/e2e_tests/test_reviews_all.py
```python
from unittest import TestCase
from unittest.mock import patch
from google_play_scraper.features.reviews import reviews_all, reviews
class TestReviewsAll(TestCase):
def test_request_once(self):
with patch(
"google_play_scraper.features.reviews.reviews", wraps=reviews
) as mock_reviews:
result = reviews_all("co.kr.uaram.userdeliver_")
self.assertEqual(1, mock_reviews.call_count)
result_of_reviews, _ = reviews("co.kr.uaram.userdeliver_", count=10000)
self.assertTrue(0 < len(result) < 10)
self.assertEqual(len(result), len(result_of_reviews))
def test_request_multiple_times(self):
with patch(
"google_play_scraper.features.reviews.reviews", wraps=reviews
) as mock_reviews:
result = reviews_all("co.kr.uaram.userdeliver_", lang="ko", country="kr")
self.assertEqual(2, mock_reviews.call_count)
result_of_reviews, _ = reviews(
"co.kr.uaram.userdeliver_", lang="ko", country="kr", count=10000
)
self.assertTrue(300 < len(result) < 500)
self.assertEqual(len(result), len(result_of_reviews))
def test_no_reviews(self):
result = reviews_all("com.spotify.music", lang="sw", country="it")
self.assertListEqual([], result)
``` |
{
"source": "jorgec/PayMayaPythonSDK--Wheel",
"score": 2
} |
#### File: paymaya_sdk/api/checkout_api.py
```python
import json
from typing import Dict
import requests
from paymaya_sdk.core.checkout_api_manager import CheckoutAPIManager
from paymaya_sdk.core.constants import CHECKOUTS_URL, WEBHOOKS_URL, CUSTOMIZATIONS_URL, REDIRECT_URLS
from paymaya_sdk.core.http_config import HTTP_PUT, HTTP_DELETE, HTTP_POST
from paymaya_sdk.models.checkout_customization_models import CheckoutCustomizationModel
from paymaya_sdk.models.checkout_data_models import CheckoutDataModel
class CheckoutAPI:
checkout_data: CheckoutDataModel
public_api_key: str = None
secret_api_key: str = None
environment: str = "SANDBOX"
encoded_key: str = None
last_url: str = None
redirect_urls: Dict = None
request_reference_number: str = None
manager: CheckoutAPIManager
def __init__(self, *args, **kwargs):
self.public_api_key = kwargs.get("public_api_key")
self.secret_api_key = kwargs.get("secret_api_key")
self.environment = kwargs.get("environment")
self.encoded_key = kwargs.get("encoded_key", None)
self.init_manager()
def init_manager(self):
manager_data = {
"public_api_key": self.public_api_key,
"secret_api_key": self.secret_api_key,
"environment": self.environment,
}
if self.encoded_key:
manager_data["encoded_key"] = self.encoded_key
self.manager = CheckoutAPIManager(**manager_data)
def initiate(self, checkout_data: CheckoutDataModel) -> None:
"""
Placeholder method in case we need to do some more pre-processing later
:param checkout_data:
:return:
"""
self.checkout_data = checkout_data
def execute(self, key: str = "secret", method: str = HTTP_POST) -> requests.Response:
if not self.checkout_data:
raise ValueError("No Checkout Data")
url = f"{self.manager.get_base_url()}{CHECKOUTS_URL}"
self.last_url = url
if self.request_reference_number:
self.checkout_data['requestReferenceNumber'] = self.request_reference_number
return self.manager.execute(url=url, payload=self.checkout_data.serialize(), key=key, method=method)
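    # Typical flow (sketch; construction of CheckoutDataModel is assumed to
    # follow the models module of this SDK):
    #   api = CheckoutAPI(public_api_key=pk, secret_api_key=sk,
    #                     environment="SANDBOX")
    #   api.initiate(checkout_data)
    #   response = api.execute()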
# The Webhooks API seems to be borked ¯\_(ツ)_/¯
# TODO: Tests
def register_webhook(self, name: str, callback_url: str) -> requests.Response:
payload = json.dumps({"name": name, "callbackUrl": callback_url})
url = f"{self.manager.get_base_url()}{WEBHOOKS_URL}"
return self.manager.execute(url=url, payload=payload)
def get_webhooks(self) -> requests.Response:
url = f"{self.manager.get_base_url()}{WEBHOOKS_URL}"
return self.manager.query(url=url)
def update_webhook(self, webhook_id: str, fields: Dict) -> requests.Response:
url = f"{self.manager.get_base_url()}{WEBHOOKS_URL}/{webhook_id}"
payload = json.dumps(fields)
return self.manager.execute(url=url, payload=payload, method=HTTP_PUT)
def delete_webhook(self, webhook_id: str):
url = f"{self.manager.get_base_url()}{WEBHOOKS_URL}/{webhook_id}"
return self.manager.execute(url=url, method=HTTP_DELETE)
def register_customization(self, customization: CheckoutCustomizationModel):
url = f"{self.manager.get_base_url()}{CUSTOMIZATIONS_URL}"
payload = customization.serialize()
return self.manager.execute(url=url, payload=payload)
def get_customizations(self):
url = f"{self.manager.get_base_url()}{CUSTOMIZATIONS_URL}"
return self.manager.query(url=url)
def delete_customizations(self):
url = f"{self.manager.get_base_url()}{CUSTOMIZATIONS_URL}"
return self.manager.execute(url=url, method=HTTP_DELETE)
```
#### File: paymaya_sdk/core/http_config.py
```python
from typing import Dict
HEADER_SEPARATOR = ";"
HTTP_GET = "GET"
HTTP_POST = "POST"
HTTP_PUT = "PUT"
HTTP_DELETE = "DELETE"
class HTTPConfig:
url: str = ""
headers: Dict = {"Content-Type": "application/json"}
method: str = ""
    def __init__(
        self, *, url: str = None, method: str = HTTP_POST, headers: Dict = None
    ):
        self.url = url
        self.method = method
        # merge any per-request headers over the defaults; tolerate headers=None
        self.headers = {**self.headers, **(headers or {})}
```
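Since `__init__` merges the per-instance headers over the class-level default, a caller only supplies what differs from `Content-Type: application/json`. A minimal sketch (URL and Authorization value are placeholders):
```python
from paymaya_sdk.core.http_config import HTTPConfig, HTTP_PUT

cfg = HTTPConfig(
    url="https://example.com/checkout/v1/webhooks/123",       # placeholder URL
    method=HTTP_PUT,
    headers={"Authorization": "Basic placeholder-credentials"},  # placeholder credential
)
# The default Content-Type is preserved alongside the supplied header
print(cfg.method, cfg.headers)
```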
#### File: paymaya_sdk/core/paymaya_direct_api_manager.py
```python
from typing import List
from paymaya_sdk.core.api_manager import APIManager
from paymaya_sdk.core.constants import PRODUCTION, DIRECT_PRODUCTION_URL, DIRECT_SANDBOX_URL
class PayMayaDirectAPIManager(APIManager):
base_url: str = None
def __init__(self, *args, **kwargs):
self.base_url = self.get_base_url()
super().__init__(*args, **kwargs)
def get_base_url(self) -> str:
if self.environment == PRODUCTION:
url = DIRECT_PRODUCTION_URL
else:
url = DIRECT_SANDBOX_URL
return url
```
#### File: PayMayaPythonSDK--Wheel/tests/merchants.py
```python
class Merchant:
name: str
secret_key: str
public_key: str
def __init__(self, name: str, secret: str, public: str):
self.name = name
self.secret_key = secret
self.public_key = public
m1 = Merchant(
"Sandbox Party 1",
"<KEY>",
"<KEY>",
)
m2 = Merchant(
"Sandbox Party 2",
"<KEY>",
"<KEY>",
)
```
#### File: PayMayaPythonSDK--Wheel/tests/test_payments.py
```python
import decimal
import random
import unittest
from faker import Faker
from paymaya_sdk.models.amount_models import AmountModel
from paymaya_sdk.models.buyer_models import BuyerModel
from paymaya_sdk.sdk import PayMayaSDK
from .cards import ms_2
from .merchants import m1
fake = Faker()
class PaymentTests(unittest.TestCase):
def test_token(self):
paymaya = PayMayaSDK()
paymaya.set_keys(public_api_key=m1.public_key, secret_api_key=m1.secret_key)
payment = paymaya.payment()
payment.card = ms_2
token_result = payment.create_token()
assert token_result, print(token_result.json())
def test_payment(self):
paymaya = PayMayaSDK()
paymaya.set_keys(public_api_key=m1.public_key, secret_api_key=m1.secret_key)
payment = paymaya.payment()
amt = decimal.Decimal(random.uniform(100, 10000))
amount = AmountModel(total=amt, currency_code="PHP")
profile = fake.profile()
buyer = BuyerModel(
first_name=profile.get("name").split(" ")[0],
last_name=profile.get("name").split(" ")[-1],
)
payment.buyer = buyer
payment.amount = amount
payment.card = ms_2
payment.create_token()
payment_result = payment.execute_payment()
assert payment_result.status_code == 200, print(payment_result.json(), amount.serialize())
payment_id = payment_result.json().get("id", None)
assert payment_id is not None, print(payment_result.json())
def test_customer(self):
paymaya = PayMayaSDK()
paymaya.set_keys(public_api_key=m1.public_key, secret_api_key=m1.secret_key)
payment = paymaya.payment()
profile = fake.profile()
payment.buyer = BuyerModel(
first_name=profile.get("name").split(" ")[0],
last_name=profile.get("name").split(" ")[-1],
)
customer_result = payment.register_customer()
assert customer_result.status_code == 200, customer_result.json()
customer_id = customer_result.json().get("id", None)
assert customer_id is not None, print(customer_result.json())
query_customer = payment.get_customer(customer_id=customer_id)
assert query_customer.status_code == 200, print(query_customer.json())
update_customer = payment.update_customer(
customer_id=customer_id, fields={"firstName": "Macaluluoy"}
)
assert update_customer.status_code == 200, print(update_customer.json())
query_customer = payment.get_customer(customer_id=customer_id)
assert query_customer.json().get("firstName") == "Macaluluoy", print(
query_customer.json()
)
payment.card = ms_2
card_vault = payment.save_card_to_vault()
assert card_vault.status_code == 200, print(card_vault.json())
cards_in_vault = payment.get_cards_in_vault()
assert cards_in_vault.status_code == 200, print(cards_in_vault.json())
assert len(cards_in_vault.json()) == 1, print(cards_in_vault.json())
assert cards_in_vault.json()[0].get("cardTokenId") == payment.token, print(
cards_in_vault.json()
)
card_in_vault = payment.get_card_in_vault(card_token=payment.token)
assert card_in_vault.status_code == 200, print(card_in_vault.json())
assert card_in_vault.json().get("cardTokenId") == payment.token, print(
card_in_vault.json()
)
# Updates keep failing with error: PY0026 Failed to update card details
# Generic error for failed update of card. PayMaya issue?
# update_card = payment.update_card_in_vault(card_token=payment.token, fields={"isDefault": False})
# assert update_card.status_code == 200, print(update_card.json())
#
# card_in_vault = payment.get_card_in_vault(card_token=payment.token)
# assert card_in_vault.status_code == 200, print(card_in_vault.json())
# assert card_in_vault.json().get('isDefault') is False, print(card_in_vault.json())
delete_card = payment.delete_card_in_vault(card_token=payment.token)
assert delete_card.status_code == 200, print(delete_card.json())
cards_in_vault = payment.get_cards_in_vault()
# Should return "No card found for customer"
assert cards_in_vault.status_code == 400, print(cards_in_vault.json())
delete_customer = payment.delete_customer(customer_id)
assert delete_customer.status_code == 200, print(delete_customer.json())
```
#### File: PayMayaPythonSDK--Wheel/tests/wallets.py
```python
from paymaya_sdk.models.user_account_models import PaymayaUserAccountModel
class Wallet:
name: str
is_pf: bool
secret_key: str
public_key: str
def __init__(self, name: str, secret: str, public: str, is_pf: bool = True):
self.name = name
self.secret_key = secret
self.public_key = public
self.is_pf = is_pf
w1 = Wallet(
"Payment Facilitator",
"<KEY>",
"<KEY>"
)
w2 = Wallet(
"Non-Payment Facilitator",
"<KEY>",
"<KEY>",
is_pf=False
)
wu = PaymayaUserAccountModel(
"09193890579",
"<PASSWORD>"
)
``` |
{
"source": "Jorge-C/qiita",
"score": 3
} |
#### File: qiita/qiita_core/environment_manager.py
```python
from os import fork
from sys import exit
from IPython.parallel.apps.ipclusterapp import IPClusterStart, IPClusterStop
def start_cluster(profile, n):
"""Start a cluster"""
me = fork()
if me == 0:
c = IPClusterStart(profile=profile, log_level=0, daemonize=True)
c.n = n
c.initialize(argv=[])
c.start()
def stop_cluster(profile):
"""Stop a cluster"""
me = fork()
if me == 0:
c = IPClusterStop(profile=profile, log_level=0)
c.initialize(argv=[])
c.start()
exit(0)
```
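A minimal sketch of driving these helpers; the profile name and engine count are placeholders, and both calls fork, so the parent process returns immediately:
```python
from qiita_core.environment_manager import start_cluster, stop_cluster

# Spin up an IPython cluster with 4 engines under a named profile,
# do some work, then tear it down. Values are illustrative only.
start_cluster('qiita_general', 4)
# ... submit jobs to the 'qiita_general' profile here ...
stop_cluster('qiita_general')
```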
#### File: qiita/qiita_db/environment_manager.py
```python
from os.path import abspath, dirname, join
from functools import partial
from os import remove, close
from tempfile import mkstemp
from ftplib import FTP
import gzip
from future import standard_library
with standard_library.hooks():
from urllib.request import urlretrieve
from psycopg2 import connect
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from qiita_core.exceptions import QiitaEnvironmentError
from qiita_db.util import get_db_files_base_dir
get_support_file = partial(join, join(dirname(abspath(__file__)),
'support_files'))
DFLT_BASE_WORK_FOLDER = get_support_file('work_data')
SETTINGS_FP = get_support_file('qiita-db-settings.sql')
LAYOUT_FP = get_support_file('qiita-db.sql')
INITIALIZE_FP = get_support_file('initialize.sql')
POPULATE_FP = get_support_file('populate_test_db.sql')
ENVIRONMENTS = {'demo': 'qiita_demo', 'test': 'qiita_test'}
CLUSTERS = ['demo', 'reserved', 'general']
def _check_db_exists(db, cursor):
r"""Checks if the database db exists on the postgres server
Parameters
----------
db : str
The database
cursor : psycopg2.cursor
The cursor connected to the database
"""
cursor.execute('SELECT datname FROM pg_database')
# It's a list of tuples, so just create the tuple to check if exists
return (db,) in cursor.fetchall()
def make_environment(env, base_data_dir, base_work_dir, user, password, host,
                     load_ontologies):
    r"""Creates the new environment `env`
    Parameters
    ----------
    env : {demo, test}
        The environment to create
    base_data_dir : str
        The base directory for the database data files
    base_work_dir : str
        The base directory for the working files
    user : str
        The postgres user to connect to the server
    password : str
        The password of the user
    host : str
        The host where the postgres server is running
    load_ontologies : bool
        Whether to download and load the ontology data
    Raises
    ------
    ValueError
        If `env` is not recognized
    """
if env not in ENVIRONMENTS:
raise ValueError("Environment %s not recognized. Available "
"environments are %s" % (env, ENVIRONMENTS.keys()))
# Connect to the postgres server
conn = connect(user=user, host=host, password=password)
    # Set the isolation level to AUTOCOMMIT so we can execute a
    # CREATE DATABASE SQL query
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
# Get the cursor
cur = conn.cursor()
    # Check that it does not already exist
if _check_db_exists(ENVIRONMENTS[env], cur):
        print("Environment {0} already present on the system. You can drop "
              "it by running `qiita_env drop_env --env {0}`".format(env))
else:
# Create the database
print('Creating database')
cur.execute('CREATE DATABASE %s' % ENVIRONMENTS[env])
cur.close()
conn.close()
# Connect to the postgres server, but this time to the just created db
conn = connect(user=user, host=host, password=password,
database=ENVIRONMENTS[env])
cur = conn.cursor()
print('Inserting database metadata')
# Build the SQL layout into the database
with open(SETTINGS_FP, 'U') as f:
cur.execute(f.read())
# Insert the settings values to the database
cur.execute("INSERT INTO settings (test, base_data_dir, base_work_dir)"
" VALUES (TRUE, '%s', '%s')"
% (base_data_dir, base_work_dir))
if env == 'demo':
# Create the schema
print('Building SQL layout')
with open(LAYOUT_FP, 'U') as f:
cur.execute(f.read())
print('Initializing database')
# Initialize the database
with open(INITIALIZE_FP, 'U') as f:
cur.execute(f.read())
if load_ontologies:
print ('Loading Ontology Data')
ontos_fp, f = download_and_unzip_file(
host='thebeast.colorado.edu',
filename='/pub/qiita/qiita_ontoandvocab.sql.gz')
cur.execute(f.read())
f.close()
remove(ontos_fp)
# Commit all the changes and close the connections
print('Populating database with demo data')
cur.execute(
"INSERT INTO qiita.qiita_user (email, user_level_id, password,"
" name, affiliation, address, phone) VALUES "
"('<EMAIL>', 4, "
"'$2a$12$gnUi8Qg.0tvW243v889BhOBhWLIHyIJjjgaG6dxuRJkUM8nXG9Efe"
"', 'Demo', 'Qitta Dev', '1345 Colorado Avenue', "
"'303-492-1984');")
conn.commit()
cur.close()
conn.close()
print('Downloading test files')
# Download tree file
url = ("https://raw.githubusercontent.com/biocore/Evident/master"
"/data/gg_97_otus_4feb2011.tre")
try:
urlretrieve(url, join(base_data_dir, "reference",
"gg_97_otus_4feb2011.tre"))
            except Exception:
raise IOError("Error: DOWNLOAD FAILED")
print('Demo environment successfully created')
elif env == "test":
# Create the schema
print('Create schema in test database')
with open(LAYOUT_FP, 'U') as f:
cur.execute(f.read())
print('Populate the test database')
# Initialize the database
with open(INITIALIZE_FP, 'U') as f:
cur.execute(f.read())
# Populate the database
with open(POPULATE_FP, 'U') as f:
cur.execute(f.read())
conn.commit()
cur.close()
conn.close()
print('Test environment successfully created')
else:
# Commit all the changes and close the connections
conn.commit()
cur.close()
conn.close()
def drop_environment(env, user, password, host):
r"""Drops the `env` environment.
Parameters
----------
env : {demo, test}
The environment to create
user : str
The postgres user to connect to the server
password : str
The password of the user
host : str
The host where the postgres server is running
"""
# Connect to the postgres server
conn = connect(user=user, host=host, password=password)
# Set the isolation level to AUTOCOMMIT so we can execute a
# drop database sql query
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
# Drop the database
cur = conn.cursor()
if not _check_db_exists(ENVIRONMENTS[env], cur):
raise QiitaEnvironmentError(
"Test environment not present on the system. You can create it "
"by running 'qiita_env make_test_env'")
if env == 'demo':
        # wipe the overwritten test files so they are empty, as in the repo
base = get_db_files_base_dir()
with open(join(base, "reference",
"gg_97_otus_4feb2011.tre"), 'w') as f:
f.write('\n')
cur.execute('DROP DATABASE %s' % ENVIRONMENTS[env])
# Close cursor and connection
cur.close()
conn.close()
def clean_test_environment(user, password, host):
r"""Cleans the test database environment.
    If the test database is dirty (i.e. the 'qiita' schema is
    present), this cleans it up by dropping the 'qiita' schema so that it can
    be re-populated.
Parameters
----------
user : str
The postgres user to connect to the server
password : str
        The password of the user
host : str
The host where the postgres server is running
"""
# Connect to the postgres server
conn = connect(user=user, host=host, password=password,
database='qiita_test')
# Get the cursor
cur = conn.cursor()
# Drop the qiita schema
cur.execute("DROP SCHEMA qiita CASCADE")
# Commit the changes
conn.commit()
# Close cursor and connections
cur.close()
conn.close()
def download_and_unzip_file(host, filename):
    """Downloads a file through FTP and unzips it
    Parameters
    ----------
    host : str
        the location of the ftp server that is hosting the file
    filename : str
        the location of the file on the ftp server to download
    Returns
    -------
    tuple of (str, file object)
        The path of the downloaded temporary file and an open gzip handle to
        its uncompressed contents
    """
handl, tmpfile = mkstemp()
close(handl)
ftp = FTP(host)
ftp.login()
cmd = 'RETR %s' % filename
ftp.retrbinary(cmd, open(tmpfile, 'wb').write)
f = gzip.open(tmpfile, 'rb')
return tmpfile, f
```
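For reference, `download_and_unzip_file` hands back both the temporary file path and an open gzip handle, so the caller is responsible for closing and removing them, exactly as `make_environment` does with the ontology dump. A minimal sketch with placeholder host and filename:
```python
from os import remove
from qiita_db.environment_manager import download_and_unzip_file

# Placeholder FTP host and path; not a guaranteed live resource
tmp_fp, fh = download_and_unzip_file(host='ftp.example.org',
                                     filename='/pub/example_dump.sql.gz')
try:
    sql_text = fh.read()          # uncompressed contents of the .gz file
finally:
    fh.close()
    remove(tmp_fp)                # clean up the temporary download
```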
#### File: qiita/qiita_db/exceptions.py
```python
from __future__ import division
from qiita_core.exceptions import QiitaError
class QiitaDBError(QiitaError):
"""Base class for all qiita_db exceptions"""
pass
class QiitaDBNotImplementedError(QiitaDBError):
    """Exception for functionality that has not been implemented yet"""
    pass
class QiitaDBExecutionError(QiitaDBError):
"""Exception for error when executing SQL queries"""
pass
class QiitaDBConnectionError(QiitaDBError):
"""Exception for error when connecting to the db"""
pass
class QiitaDBColumnError(QiitaDBError):
"""Exception when missing table information or excess information passed"""
pass
class QiitaDBDuplicateError(QiitaDBError):
"""Exception when duplicating something in the database"""
def __init__(self, obj_name, attributes):
super(QiitaDBDuplicateError, self).__init__()
self.args = ("The '%s' object with attributes (%s) already exists."
% (obj_name, attributes),)
class QiitaDBStatusError(QiitaDBError):
"""Exception when editing is done with an unallowed status"""
pass
class QiitaDBUnknownIDError(QiitaDBError):
    """Exception for error when an object does not exist in the DB"""
def __init__(self, missing_id, table):
super(QiitaDBUnknownIDError, self).__init__()
        self.args = ("The object with ID '%s' does not exist in table '%s'"
% (missing_id, table),)
class QiitaDBDuplicateHeaderError(QiitaDBError):
"""Exception for error when a MetadataTemplate has duplicate columns"""
def __init__(self):
super(QiitaDBDuplicateHeaderError, self).__init__()
self.args = ("Duplicate headers found in MetadataTemplate. Note "
"that the headers are not case-sensitive",)
class QiitaDBIncompatibleDatatypeError(QiitaDBError):
"""When arguments are used with incompatible operators in a query"""
def __init__(self, operator, argument_type):
super(QiitaDBIncompatibleDatatypeError, self).__init__()
        self.args = ("The %s operator is not for use with data of type %s" %
                     (operator, str(argument_type)),)
```
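Because the specialized constructors above assemble their own messages, callers only pass the identifying pieces; everything still derives from `QiitaDBError`, so errors can be caught at whatever granularity is convenient. A short illustrative sketch (table names and IDs are made up):
```python
from qiita_db.exceptions import (QiitaDBError, QiitaDBDuplicateError,
                                 QiitaDBUnknownIDError)

try:
    raise QiitaDBUnknownIDError(42, 'qiita.study')
except QiitaDBError as e:
    # Catches any qiita_db exception; the message was built by the constructor
    print(e)

try:
    raise QiitaDBDuplicateError('Job', 'command: pick_otus, datatype: 16S')
except QiitaDBDuplicateError as e:
    print(e)
```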
#### File: qiita/qiita_db/job.py
```python
r"""
Job objects (:mod: `qiita_db.job`)
==================================
..currentmodule:: qiita_db.job
This module provides functionality for creating, running, and storing results
of jobs in an analysis. It also provides the ability to query what commands
are available for jobs, as well as the options for these commands.
Classes
-------
..autosummary::
:toctree: generated/
Job
Command
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import division
from json import loads
from os.path import join, relpath
from os import remove
from glob import glob
from shutil import rmtree
from functools import partial
from collections import defaultdict
from .base import QiitaStatusObject
from .util import (insert_filepaths, convert_to_id, get_db_files_base_dir,
params_dict_to_json)
from .sql_connection import SQLConnectionHandler
from .logger import LogEntry
from .exceptions import QiitaDBStatusError, QiitaDBDuplicateError
class Job(QiitaStatusObject):
"""
Job object to access to the Qiita Job information
Attributes
----------
datatype
command
options
results
error
Methods
-------
set_error
add_results
"""
_table = "job"
    def _lock_job(self, conn_handler):
        """Raises QiitaDBStatusError if the job has already completed or errored"""
if self.check_status(("completed", "error")):
raise QiitaDBStatusError("Can't change status of finished job!")
    def _status_setter_checks(self, conn_handler):
        r"""Check that the status is not being changed once the job has
        completed or errored
"""
self._lock_job(conn_handler)
    @staticmethod
    def get_commands():
        """Returns the available commands along with their options
Returns
-------
list of command objects
"""
return Command.create_list()
@classmethod
def exists(cls, datatype, command, options, analysis,
return_existing=False):
"""Checks if the given job already exists
Parameters
----------
datatype : str
Datatype the job is operating on
command : str
The name of the command run on the data
options : dict
Options for the command in the format {option: value}
analysis : Analysis object
The analysis the job will be attached to on creation
return_existing : bool, optional
            If True, function will return the instantiated Job object for the
matching job. Default False
Returns
-------
bool
Whether the job exists or not
Job or None, optional
If return_existing is True, the Job object of the matching job or
None if none exists
"""
conn_handler = SQLConnectionHandler()
# check passed arguments and grab analyses for matching jobs
datatype_id = convert_to_id(datatype, "data_type", conn_handler)
sql = "SELECT command_id FROM qiita.command WHERE name = %s"
command_id = conn_handler.execute_fetchone(sql, (command, ))[0]
opts_json = params_dict_to_json(options)
sql = ("SELECT DISTINCT aj.analysis_id, aj.job_id FROM "
"qiita.analysis_job aj JOIN qiita.{0} j ON aj.job_id = j.job_id"
" WHERE j.data_type_id = %s AND j.command_id = %s "
"AND j.options = %s".format(cls._table))
analyses = conn_handler.execute_fetchall(
sql, (datatype_id, command_id, opts_json))
if not analyses and return_existing:
# stop looking since we have no possible matches
return False, None
elif not analyses:
return False
# build the samples dict as list of samples keyed to their proc_data_id
sql = ("SELECT processed_data_id, array_agg(sample_id ORDER BY "
"sample_id) FROM qiita.analysis_sample WHERE analysis_id = %s "
"GROUP BY processed_data_id")
samples = dict(conn_handler.execute_fetchall(sql, [analysis.id]))
# check passed analyses' samples dict against all found analyses
matched_job = None
for aid, jid in analyses:
# build the samples dict for a found analysis
comp_samples = dict(conn_handler.execute_fetchall(sql, [aid]))
# compare samples and stop checking if a match is found
matched_samples = True if samples == comp_samples else False
if matched_samples:
matched_job = jid
break
if return_existing:
return matched_samples, (cls(matched_job) if matched_job else None)
return matched_samples
@classmethod
def delete(cls, jobid):
"""Removes a job and all files attached to it
Parameters
----------
jobid : int
ID of the job to delete
Notes
-----
This function will remove a job from all analyses it is attached to in
analysis_job table, as well as the job itself from the job table. All
files and references to files for the job will be removed from the
filepath and job_results_filepath tables. All the job's files on the
filesystem will also be removed.
"""
conn_handler = SQLConnectionHandler()
# store filepath info for later use
sql = ("SELECT f.filepath, f.filepath_id FROM qiita.filepath f JOIN "
"qiita.job_results_filepath jf ON jf.filepath_id = "
"f.filepath_id WHERE jf.job_id = %s")
filepaths = conn_handler.execute_fetchall(sql, [jobid])
        # remove filepath links in the DB
conn_handler.execute("DELETE FROM qiita.job_results_filepath WHERE "
"job_id = %s", [jobid])
        if filepaths:
            sql = ("DELETE FROM qiita.filepath WHERE " +
                   " OR ".join(["filepath_id = %s"] * len(filepaths)))
            conn_handler.execute(sql, [fp[1] for fp in filepaths])
# remove job
conn_handler.execute("DELETE FROM qiita.analysis_job WHERE "
"job_id = %s", [jobid])
conn_handler.execute("DELETE FROM qiita.job WHERE job_id = %s",
[jobid])
# remove files/folders attached to job
basedir = get_db_files_base_dir()
for fp in filepaths:
try:
rmtree(join(basedir, "job", fp[0]))
except OSError:
remove(join(basedir, "job", fp[0]))
@classmethod
def create(cls, datatype, command, options, analysis,
return_existing=False):
"""Creates a new job on the database
Parameters
----------
        datatype : str
            The datatype on which this job operates
        command : str
            The name of the command executed in this job
        options : dict
            Options for the command in the format {option: value}
        analysis : Analysis object
            The analysis to which this job belongs
return_existing : bool, optional
If True, returns an instantiated Job object pointing to an already
existing job with the given parameters. Default False
Returns
-------
Job object
The newly created job
Raises
------
QiitaDBDuplicateError
return_existing is False and an exact duplicate of the job already
exists in the DB.
"""
analysis_sql = ("INSERT INTO qiita.analysis_job (analysis_id, job_id) "
"VALUES (%s, %s)")
exists, job = cls.exists(datatype, command, options, analysis,
return_existing=True)
conn_handler = SQLConnectionHandler()
if exists:
if return_existing:
# add job to analysis
conn_handler.execute(analysis_sql, (analysis.id, job.id))
return job
else:
raise QiitaDBDuplicateError(
"Job", "datatype: %s, command: %s, options: %s, "
"analysis: %s" % (datatype, command, options, analysis.id))
# Get the datatype and command ids from the strings
datatype_id = convert_to_id(datatype, "data_type", conn_handler)
sql = "SELECT command_id FROM qiita.command WHERE name = %s"
command_id = conn_handler.execute_fetchone(sql, (command, ))[0]
opts_json = params_dict_to_json(options)
# Create the job and return it
sql = ("INSERT INTO qiita.{0} (data_type_id, job_status_id, "
"command_id, options) VALUES "
"(%s, %s, %s, %s) RETURNING job_id").format(cls._table)
job_id = conn_handler.execute_fetchone(sql, (datatype_id, 1,
command_id, opts_json))[0]
# add job to analysis
conn_handler.execute(analysis_sql, (analysis.id, job_id))
return cls(job_id)
@property
def datatype(self):
sql = ("SELECT data_type from qiita.data_type WHERE data_type_id = "
"(SELECT data_type_id from qiita.{0} WHERE "
"job_id = %s)".format(self._table))
conn_handler = SQLConnectionHandler()
return conn_handler.execute_fetchone(sql, (self._id, ))[0]
@property
def command(self):
"""Returns the command of the job as (name, command)
Returns
-------
str
command run by the job
"""
sql = ("SELECT name, command from qiita.command WHERE command_id = "
"(SELECT command_id from qiita.{0} WHERE "
"job_id = %s)".format(self._table))
conn_handler = SQLConnectionHandler()
return conn_handler.execute_fetchone(sql, (self._id, ))
@property
def options(self):
"""Options used in the job
Returns
-------
dict
options in the format {option: setting}
"""
sql = ("SELECT options FROM qiita.{0} WHERE "
"job_id = %s".format(self._table))
conn_handler = SQLConnectionHandler()
db_opts = conn_handler.execute_fetchone(sql, (self._id, ))[0]
opts = loads(db_opts) if db_opts else {}
sql = ("SELECT command, output from qiita.command WHERE command_id = ("
"SELECT command_id from qiita.{0} WHERE "
"job_id = %s)".format(self._table))
db_comm = conn_handler.execute_fetchone(sql, (self._id, ))
out_opt = loads(db_comm[1])
basedir = get_db_files_base_dir(conn_handler)
join_f = partial(join, join(basedir, "job"))
for k in out_opt:
opts[k] = join_f("%s_%s_%s" % (self._id, db_comm[0], k.strip("-")))
return opts
@options.setter
def options(self, opts):
""" Sets the options for the job
Parameters
----------
opts: dict
The options for the command in format {option: value}
"""
conn_handler = SQLConnectionHandler()
# make sure job is editable
self._lock_job(conn_handler)
# JSON the options dictionary
opts_json = params_dict_to_json(opts)
# Add the options to the job
sql = ("UPDATE qiita.{0} SET options = %s WHERE "
"job_id = %s").format(self._table)
conn_handler.execute(sql, (opts_json, self._id))
@property
def results(self):
"""List of job result filepaths
Returns
-------
list
Filepaths to the result files
"""
# Select results filepaths and filepath types from the database
conn_handler = SQLConnectionHandler()
basedir = get_db_files_base_dir(conn_handler)
results = conn_handler.execute_fetchall(
"SELECT fp.filepath, fpt.filepath_type FROM qiita.filepath fp "
"JOIN qiita.filepath_type fpt ON fp.filepath_type_id = "
"fpt.filepath_type_id JOIN qiita.job_results_filepath jrfp ON "
"fp.filepath_id = jrfp.filepath_id WHERE jrfp.job_id = %s",
(self._id, ))
def add_html(basedir, check_dir, result_fps):
for res in glob(join(basedir, check_dir, "*.htm")) + \
glob(join(basedir, check_dir, "*.html")):
result_fps.append(relpath(res, basedir))
# create new list, with relative paths from db base
result_fps = []
for fp in results:
if fp[1] == "directory":
# directory, so all html files in it are results
# first, see if we have any in the main directory
add_html(basedir, join("job", fp[0]), result_fps)
# now do all subdirectories
add_html(basedir, join("job", fp[0], "*"), result_fps)
else:
# result is exact filepath given
result_fps.append(join("job", fp[0]))
return result_fps
@property
def error(self):
"""String with an error message, if the job failed
Returns
-------
str or None
error message/traceback for a job, or None if none exists
"""
conn_handler = SQLConnectionHandler()
sql = ("SELECT log_id FROM qiita.{0} "
"WHERE job_id = %s".format(self._table))
logging_id = conn_handler.execute_fetchone(sql, (self._id, ))[0]
if logging_id is None:
ret = None
else:
ret = LogEntry(logging_id)
return ret
# --- Functions ---
def set_error(self, msg):
"""Logs an error for the job
Parameters
----------
msg : str
Error message/stacktrace if available
"""
conn_handler = SQLConnectionHandler()
log_entry = LogEntry.create('Runtime', msg,
info={'job': self._id})
self._lock_job(conn_handler)
err_id = conn_handler.execute_fetchone(
"SELECT job_status_id FROM qiita.job_status WHERE "
"status = 'error'")[0]
# attach the error to the job and set to error
sql = ("UPDATE qiita.{0} SET log_id = %s, job_status_id = %s WHERE "
"job_id = %s".format(self._table))
conn_handler.execute(sql, (log_entry.id, err_id, self._id))
def add_results(self, results):
"""Adds a list of results to the results
Parameters
----------
results : list of tuples
filepath information to add to job, in format
[(filepath, type), ...]
Where type is the filepath type of the filepath passed
Notes
-----
        Currently available file types are:
biom, directory, plain_text
"""
# add filepaths to the job
conn_handler = SQLConnectionHandler()
self._lock_job(conn_handler)
# convert all file type text to file type ids
res_ids = [(fp, convert_to_id(fptype, "filepath_type", conn_handler))
for fp, fptype in results]
file_ids = insert_filepaths(res_ids, self._id, self._table,
"filepath", conn_handler, move_files=False)
# associate filepaths with job
sql = ("INSERT INTO qiita.{0}_results_filepath (job_id, filepath_id) "
"VALUES (%s, %s)".format(self._table))
conn_handler.executemany(sql, [(self._id, fid) for fid in file_ids])
class Command(object):
"""Holds all information on the commands available
This will be an in-memory representation because the command table is
    considerably more static than other object tables, changing only with new
QIIME releases.
Attributes
----------
name
command
input_opts
required_opts
optional_opts
output_opts
"""
@classmethod
def create_list(cls):
"""Creates list of all available commands
Returns
-------
list of Command objects
"""
conn_handler = SQLConnectionHandler()
commands = conn_handler.execute_fetchall("SELECT * FROM qiita.command")
# create the list of command objects
return [cls(c["name"], c["command"], c["input"], c["required"],
c["optional"], c["output"]) for c in commands]
@classmethod
def get_commands_by_datatype(cls, datatypes=None):
"""Returns the commands available for all or a subset of the datatypes
Parameters
----------
datatypes : list of str, optional
List of the datatypes to get commands for. Default is all datatypes
Returns
-------
dict of lists of Command objects
Returns commands in the format {datatype: [com name1, com name2]}
Notes
-----
If no datatypes are passed, the function will default to returning all
datatypes available.
"""
conn_handler = SQLConnectionHandler()
# get the ids of the datatypes to get commands for
if datatypes is not None:
datatype_info = [(convert_to_id(dt, "data_type", conn_handler), dt)
for dt in datatypes]
else:
datatype_info = conn_handler.execute_fetchall(
"SELECT data_type_id, data_type from qiita.data_type")
commands = defaultdict(list)
# get commands for each datatype
sql = ("SELECT C.* FROM qiita.command C JOIN qiita.command_data_type "
"CD on C.command_id = CD.command_id WHERE CD.data_type_id = %s")
for dt_id, dt in datatype_info:
comms = conn_handler.execute_fetchall(sql, (dt_id, ))
for comm in comms:
commands[dt].append(cls(comm["name"], comm["command"],
comm["input"],
comm["required"],
comm["optional"],
comm["output"]))
return commands
def __eq__(self, other):
if type(self) != type(other):
return False
if self.name != other.name:
return False
if self.command != other.command:
return False
if self.input_opts != other.input_opts:
return False
if self.output_opts != other.output_opts:
return False
if self.required_opts != other.required_opts:
return False
if self.optional_opts != other.optional_opts:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __init__(self, name, command, input_opts, required_opts,
optional_opts, output_opts):
"""Creates the command object
        Parameters
        ----------
        name : str
            Name of the command
        command : str
python command to run
input_opts : str
JSON of input options for the command
required_opts : str
JSON of required options for the command
optional_opts : str
JSON of optional options for the command
output_opts : str
JSON of output options for the command
"""
self.name = name
self.command = command
self.input_opts = loads(input_opts)
self.required_opts = loads(required_opts)
self.optional_opts = loads(optional_opts)
self.output_opts = loads(output_opts)
```
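A rough sketch of how `Command` and `Job` are typically driven; both need a configured Qiita database reachable through `SQLConnectionHandler`, and the datatype, command name, and options shown are placeholders rather than guaranteed entries in the command table:
```python
from qiita_db.job import Command, Job

# List every command grouped by datatype (hits qiita.command / command_data_type)
for datatype, commands in Command.get_commands_by_datatype().items():
    print(datatype, [c.name for c in commands])

# Creating a job requires an existing Analysis object; left commented because
# `analysis` is a placeholder for an already-instantiated qiita_db Analysis.
# job = Job.create('16S', 'Summarize Taxa', {'--otu_table_fp': 1}, analysis)
# job.add_results([('some_output_dir', 'directory')])
```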
#### File: qiita_db/test/test_commands.py
```python
from os import remove, close
from os.path import exists, join, basename
from tempfile import mkstemp, mkdtemp
from shutil import rmtree
from unittest import TestCase, main
from future.utils.six import StringIO
from future import standard_library
from functools import partial
with standard_library.hooks():
import configparser
from qiita_db.commands import (load_study_from_cmd, load_raw_data_cmd,
load_sample_template_from_cmd,
load_prep_template_from_cmd,
load_processed_data_cmd,
load_preprocessed_data_from_cmd)
from qiita_db.study import Study, StudyPerson
from qiita_db.user import User
from qiita_db.data import RawData
from qiita_db.util import get_count, check_count, get_db_files_base_dir
from qiita_core.util import qiita_test_checker
@qiita_test_checker()
class TestMakeStudyFromCmd(TestCase):
def setUp(self):
StudyPerson.create('SomeDude', '<EMAIL>', 'some',
'111 fake street', '111-121-1313')
User.create('<EMAIL>', 'password')
self.config1 = CONFIG_1
self.config2 = CONFIG_2
def test_make_study_from_cmd(self):
fh = StringIO(self.config1)
load_study_from_cmd('<EMAIL>', 'newstudy', fh)
sql = ("select study_id from qiita.study where email = %s and "
"study_title = %s")
study_id = self.conn_handler.execute_fetchone(sql, ('<EMAIL>',
'newstudy'))
self.assertTrue(study_id is not None)
fh2 = StringIO(self.config2)
with self.assertRaises(configparser.NoOptionError):
load_study_from_cmd('<EMAIL>', 'newstudy2', fh2)
@qiita_test_checker()
class TestImportPreprocessedData(TestCase):
def setUp(self):
self.tmpdir = mkdtemp()
fd, self.file1 = mkstemp(dir=self.tmpdir)
close(fd)
fd, self.file2 = mkstemp(dir=self.tmpdir)
close(fd)
with open(self.file1, "w") as f:
f.write("\n")
with open(self.file2, "w") as f:
f.write("\n")
self.files_to_remove = [self.file1, self.file2]
self.dirs_to_remove = [self.tmpdir]
self.db_test_ppd_dir = join(get_db_files_base_dir(),
'preprocessed_data')
def tearDown(self):
for fp in self.files_to_remove:
if exists(fp):
remove(fp)
for dp in self.dirs_to_remove:
if exists(dp):
rmtree(dp)
def test_import_preprocessed_data(self):
initial_ppd_count = get_count('qiita.preprocessed_data')
initial_fp_count = get_count('qiita.filepath')
ppd = load_preprocessed_data_from_cmd(
1, 'preprocessed_sequence_illumina_params',
self.tmpdir, 'preprocessed_sequences', 1, False, 1)
self.files_to_remove.append(
join(self.db_test_ppd_dir,
'%d_%s' % (ppd.id, basename(self.file1))))
self.files_to_remove.append(
join(self.db_test_ppd_dir,
'%d_%s' % (ppd.id, basename(self.file2))))
self.assertEqual(ppd.id, 3)
self.assertTrue(check_count('qiita.preprocessed_data',
initial_ppd_count + 1))
self.assertTrue(check_count('qiita.filepath', initial_fp_count+2))
@qiita_test_checker()
class TestLoadSampleTemplateFromCmd(TestCase):
def setUp(self):
# Create a sample template file
self.st_contents = SAMPLE_TEMPLATE
# create a new study to attach the sample template
info = {
"timeseries_type_id": 1,
"metadata_complete": True,
"mixs_compliant": True,
"number_samples_collected": 4,
"number_samples_promised": 4,
"portal_type_id": 3,
"study_alias": "TestStudy",
"study_description": "Description of a test study",
"study_abstract": "No abstract right now...",
"emp_person_id": StudyPerson(2),
"principal_investigator_id": StudyPerson(3),
"lab_person_id": StudyPerson(1)
}
self.study = Study.create(User('<EMAIL>'),
"Test study", [1], info)
def test_load_sample_template_from_cmd(self):
"""Correctly adds a sample template to the DB"""
fh = StringIO(self.st_contents)
st = load_sample_template_from_cmd(fh, self.study.id)
self.assertEqual(st.id, self.study.id)
@qiita_test_checker()
class TestLoadPrepTemplateFromCmd(TestCase):
def setUp(self):
# Create a sample template file
fd, seqs_fp = mkstemp(suffix='_seqs.fastq')
close(fd)
fd, barcodes_fp = mkstemp(suffix='_barcodes.fastq')
close(fd)
with open(seqs_fp, "w") as f:
f.write("\n")
with open(barcodes_fp, "w") as f:
f.write("\n")
self.pt_contents = PREP_TEMPLATE
self.raw_data = RawData.create(
2, [(seqs_fp, 1), (barcodes_fp, 2)], [Study(1)])
join_f = partial(join, join(get_db_files_base_dir(), 'raw_data'))
self.files_to_remove = [
join_f("%s_%s" % (self.raw_data.id, basename(seqs_fp))),
join_f("%s_%s" % (self.raw_data.id, basename(barcodes_fp)))]
def tearDown(self):
for fp in self.files_to_remove:
if exists(fp):
remove(fp)
    def test_load_prep_template_from_cmd(self):
        """Correctly adds a prep template to the DB"""
fh = StringIO(self.pt_contents)
st = load_prep_template_from_cmd(fh, self.raw_data.id)
self.assertEqual(st.id, self.raw_data.id)
@qiita_test_checker()
class TestLoadRawDataFromCmd(TestCase):
def setUp(self):
fd, self.forward_fp = mkstemp(suffix='_forward.fastq.gz')
close(fd)
fd, self.reverse_fp = mkstemp(suffix='_reverse.fastq.gz')
close(fd)
fd, self.barcodes_fp = mkstemp(suffix='_barcodes.fastq.gz')
close(fd)
with open(self.forward_fp, "w") as f:
f.write("\n")
with open(self.reverse_fp, "w") as f:
f.write("\n")
with open(self.barcodes_fp, "w") as f:
f.write("\n")
self.files_to_remove = []
self.files_to_remove.append(self.forward_fp)
self.files_to_remove.append(self.reverse_fp)
self.files_to_remove.append(self.barcodes_fp)
self.db_test_raw_dir = join(get_db_files_base_dir(), 'raw_data')
def tearDown(self):
for fp in self.files_to_remove:
if exists(fp):
remove(fp)
def test_load_data_from_cmd(self):
filepaths = [self.forward_fp, self.reverse_fp, self.barcodes_fp]
filepath_types = ['raw_sequences', 'raw_sequences', 'raw_barcodes']
filetype = 'FASTQ'
study_ids = [1]
initial_raw_count = get_count('qiita.raw_data')
initial_fp_count = get_count('qiita.filepath')
initial_raw_fp_count = get_count('qiita.raw_filepath')
new = load_raw_data_cmd(filepaths, filepath_types, filetype,
study_ids)
raw_data_id = new.id
self.files_to_remove.append(
join(self.db_test_raw_dir,
'%d_%s' % (raw_data_id, basename(self.forward_fp))))
self.files_to_remove.append(
join(self.db_test_raw_dir,
'%d_%s' % (raw_data_id, basename(self.reverse_fp))))
self.files_to_remove.append(
join(self.db_test_raw_dir,
'%d_%s' % (raw_data_id, basename(self.barcodes_fp))))
self.assertTrue(check_count('qiita.raw_data', initial_raw_count + 1))
self.assertTrue(check_count('qiita.filepath',
initial_fp_count + 3))
self.assertTrue(check_count('qiita.raw_filepath',
initial_raw_fp_count + 3))
self.assertTrue(check_count('qiita.study_raw_data',
initial_raw_count + 1))
# Ensure that the ValueError is raised when a filepath_type is not
# provided for each and every filepath
with self.assertRaises(ValueError):
load_raw_data_cmd(filepaths, filepath_types[:-1], filetype,
study_ids)
@qiita_test_checker()
class TestLoadProcessedDataFromCmd(TestCase):
def setUp(self):
fd, self.otu_table_fp = mkstemp(suffix='_otu_table.biom')
close(fd)
fd, self.otu_table_2_fp = mkstemp(suffix='_otu_table2.biom')
close(fd)
with open(self.otu_table_fp, "w") as f:
f.write("\n")
with open(self.otu_table_2_fp, "w") as f:
f.write("\n")
self.files_to_remove = []
self.files_to_remove.append(self.otu_table_fp)
self.files_to_remove.append(self.otu_table_2_fp)
self.db_test_processed_data_dir = join(get_db_files_base_dir(),
'processed_data')
def tearDown(self):
for fp in self.files_to_remove:
if exists(fp):
remove(fp)
def test_load_processed_data_from_cmd(self):
filepaths = [self.otu_table_fp, self.otu_table_2_fp]
filepath_types = ['biom', 'biom']
initial_processed_data_count = get_count('qiita.processed_data')
initial_processed_fp_count = get_count('qiita.processed_filepath')
initial_fp_count = get_count('qiita.filepath')
new = load_processed_data_cmd(filepaths, filepath_types,
'processed_params_uclust', 1, 1, None)
processed_data_id = new.id
self.files_to_remove.append(
join(self.db_test_processed_data_dir,
'%d_%s' % (processed_data_id, basename(self.otu_table_fp))))
self.files_to_remove.append(
join(self.db_test_processed_data_dir,
'%d_%s' % (processed_data_id,
basename(self.otu_table_2_fp))))
self.assertTrue(check_count('qiita.processed_data',
initial_processed_data_count + 1))
self.assertTrue(check_count('qiita.processed_filepath',
initial_processed_fp_count + 2))
self.assertTrue(check_count('qiita.filepath',
initial_fp_count + 2))
# Ensure that the ValueError is raised when a filepath_type is not
# provided for each and every filepath
with self.assertRaises(ValueError):
load_processed_data_cmd(filepaths, filepath_types[:-1],
'processed_params_uclust', 1, 1, None)
CONFIG_1 = """[required]
timeseries_type_id = 1
metadata_complete = True
mixs_compliant = True
portal_type_id = 3
principal_investigator = SomeDude, <EMAIL>, some
reprocess = False
study_alias = 'test study'
study_description = 'test study description'
study_abstract = 'study abstract'
efo_ids = 1,2,3,4
[optional]
number_samples_collected = 50
number_samples_promised = 25
lab_person = SomeDude, <EMAIL>, some
funding = 'funding source'
vamps_id = vamps_id
"""
CONFIG_2 = """[required]
timeseries_type_id = 1
metadata_complete = True
portal_type_id = 3
principal_investigator = SomeDude, <EMAIL>, some
reprocess = False
study_alias = 'test study'
study_description = 'test study description'
study_abstract = 'study abstract'
efo_ids = 1,2,3,4
[optional]
number_samples_collected = 50
number_samples_promised = 25
lab_person = SomeDude, <EMAIL>, some
funding = 'funding source'
vamps_id = vamps_id
"""
SAMPLE_TEMPLATE = (
"#SampleID\trequired_sample_info_status_id\tcollection_timestamp\t"
"sample_type\thas_physical_specimen\tphysical_location\thas_extracted_data"
"\thost_subject_id\tTreatment\tDOB\tlatitude\tlongitude\tDescription\n"
"PC.354\t1\t2014-06-18 16:44\ttype_1\tTrue\tLocation_1\tTrue\tHS_ID_PC.354"
"\tControl\t20061218\t1.88401499993\t56.0003871552\t"
"Control_mouse_I.D._354\n"
"PC.593\t1\t2014-06-18 16:44\ttype_1\tTrue\tLocation_1\tTrue\tHS_ID_PC.593"
"\tControl\t20071210\t35.4079458313\t83.2595338611\t"
"Control_mouse_I.D._593\n"
"PC.607\t1\t2014-06-18 16:44\ttype_1\tTrue\tLocation_1\tTrue\tHS_ID_PC.607"
"\tFast\t20071112\t18.3175615444\t91.3713989729\t"
"Fasting_mouse_I.D._607\n"
"PC.636\t1\t2014-06-18 16:44\ttype_1\tTrue\tLocation_1\tTrue\tHS_ID_PC.636"
"\tFast\t20080116\t31.0856060708\t4.16781143893\tFasting_mouse_I.D._636")
PREP_TEMPLATE = (
"#SampleID\tcenter_name\tcusom_col\temp_status_id\tdata_type_id\n"
"SKB8.640193\tANL\tPC.354\t1\t1\n"
"SKD8.640184\tANL\tPC.593\t1\t1\n"
"SKB7.640196\tANL\tPC.607\t1\t1\n"
"SKM9.640192\tANL\tPC.636\t1\t1\n")
if __name__ == "__main__":
main()
```
#### File: qiita_db/test/test_parameters.py
```python
from unittest import TestCase, main
from qiita_core.util import qiita_test_checker
from qiita_db.parameters import PreprocessedIlluminaParams
@qiita_test_checker()
class PreprocessedIlluminaParamsTests(TestCase):
def test_to_str(self):
params = PreprocessedIlluminaParams(1)
obs = params.to_str()
exp = ("--barcode_type golay_12 --max_bad_run_length 3 "
"--max_barcode_errors 1.5 --min_per_read_length_fraction 0.75 "
"--phred_quality_threshold 3 --preprocessed_params_id 1 "
"--sequence_max_n 0 --trim_length 151")
self.assertEqual(obs, exp)
if __name__ == '__main__':
main()
```
#### File: qiita_db/test/test_reference.py
```python
from unittest import TestCase, main
from os import close, remove
from os.path import basename, join
from tempfile import mkstemp
from qiita_core.util import qiita_test_checker
from qiita_db.reference import Reference
from qiita_db.util import get_db_files_base_dir
@qiita_test_checker()
class ReferenceTests(TestCase):
def setUp(self):
self.name = "Fake GreenGenes"
self.version = "13_8"
fd, self.seqs_fp = mkstemp(suffix="_seqs.fna")
close(fd)
fd, self.tax_fp = mkstemp(suffix="_tax.txt")
close(fd)
fd, self.tree_fp = mkstemp(suffix="_tree.tre")
close(fd)
self.db_dir = join(get_db_files_base_dir(), 'reference')
self._clean_up_files = []
def tearDown(self):
for f in self._clean_up_files:
remove(f)
def test_create(self):
"""Correctly creates the rows in the DB for the reference"""
# Check that the returned object has the correct id
obs = Reference.create(self.name, self.version, self.seqs_fp,
self.tax_fp, self.tree_fp)
self.assertEqual(obs.id, 2)
# Check that the information on the database is correct
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.reference WHERE reference_id=2")
exp = [[2, self.name, self.version, 15, 16, 17]]
self.assertEqual(obs, exp)
# Check that the filepaths have been correctly added to the DB
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.filepath WHERE filepath_id=15 or "
"filepath_id=16 or filepath_id=17")
exp_seq = join(self.db_dir, "%s_%s_%s" % (self.name, self.version,
basename(self.seqs_fp)))
exp_tax = join(self.db_dir, "%s_%s_%s" % (self.name, self.version,
basename(self.tax_fp)))
exp_tree = join(self.db_dir, "%s_%s_%s" % (self.name, self.version,
basename(self.tree_fp)))
exp = [[15, exp_seq, 9, '0', 1],
[16, exp_tax, 10, '0', 1],
[17, exp_tree, 11, '0', 1]]
self.assertEqual(obs, exp)
def test_sequence_fp(self):
ref = Reference(1)
exp = join(self.db_dir, "GreenGenes_13_8_97_otus.fasta")
self.assertEqual(ref.sequence_fp, exp)
def test_taxonomy_fp(self):
ref = Reference(1)
exp = join(self.db_dir, "GreenGenes_13_8_97_otu_taxonomy.txt")
self.assertEqual(ref.taxonomy_fp, exp)
def test_tree_fp(self):
ref = Reference(1)
exp = join(self.db_dir, "GreenGenes_13_8_97_otus.tree")
self.assertEqual(ref.tree_fp, exp)
if __name__ == '__main__':
main()
```
#### File: qiita_pet/handlers/logger_handlers.py
```python
from __future__ import division
from tornado.web import authenticated
from .base_handlers import BaseHandler
from qiita_db.logger import LogEntry
class LogEntryViewerHandler(BaseHandler):
@authenticated
def get(self):
logentries = LogEntry.newest_records()
self.render("error_log.html", logentries=logentries,
user=self.current_user)
@authenticated
def post(self):
numentries = int(self.get_argument("numrecords"))
if numentries < 0:
numentries = 100
logentries = LogEntry.newest_records(numentries)
self.render("error_log.html", logentries=logentries,
user=self.current_user)
```
#### File: qiita_pet/test/test_study_handlers.py
```python
from unittest import main
from tornado_test_base import TestHandlerBase
from qiita_db.study import StudyPerson
from qiita_db.util import get_count, check_count
class CreateStudyHandlerTestsDB(TestHandlerBase):
database = True
def test_new_person_created(self):
person_count_before = get_count('qiita.study_person')
post_data = {'new_people_names': ['Adam', 'Ethan'],
'new_people_emails': ['<EMAIL>', '<EMAIL>'],
'new_people_affiliations': ['CU Boulder', 'NYU'],
'new_people_addresses': ['Some St., Boulder, CO 80305',
''],
'new_people_phones': ['', ''],
'study_title': 'dummy title',
'study_alias': 'dummy alias',
'pubmed_id': 'dummy pmid',
'investigation_type': 'eukaryote',
'environmental_packages': 'air',
'is_timeseries': 'y',
'study_abstract': "dummy abstract",
'study_description': 'dummy description',
'principal_investigator': '-2',
'lab_person': '1'}
self.post('/study/create/', post_data)
# Check that the new person was created
expected_id = person_count_before + 1
self.assertTrue(check_count('qiita.study_person', expected_id))
new_person = StudyPerson(expected_id)
self.assertTrue(new_person.name == 'Ethan')
self.assertTrue(new_person.email == '<EMAIL>')
self.assertTrue(new_person.affiliation == 'NYU')
self.assertTrue(new_person.address is None)
self.assertTrue(new_person.phone is None)
class CreateStudyHandlerTestsNoDB(TestHandlerBase):
def test_page_load(self):
"""Make sure the page loads when no arguments are passed"""
response = self.get('/create_study/')
self.assertEqual(response.code, 200)
if __name__ == '__main__':
main()
```
#### File: qiita_pet/test/tornado_test_base.py
```python
from mock import Mock
try:
from urllib import urlencode
except ImportError: # py3
from urllib.parse import urlencode
from tornado.testing import AsyncHTTPTestCase
from qiita_pet.webserver import Application
from qiita_pet.handlers.base_handlers import BaseHandler
from qiita_db.sql_connection import SQLConnectionHandler
from qiita_db.environment_manager import (LAYOUT_FP, INITIALIZE_FP,
POPULATE_FP)
class TestHandlerBase(AsyncHTTPTestCase):
database = False
def get_app(self):
BaseHandler.get_current_user = Mock(return_value="<EMAIL>")
self.app = Application()
return self.app
def setUp(self):
if self.database:
self.conn_handler = SQLConnectionHandler()
# Drop the schema
self.conn_handler.execute("DROP SCHEMA qiita CASCADE")
# Create the schema
with open(LAYOUT_FP, 'U') as f:
self.conn_handler.execute(f.read())
# Initialize the database
with open(INITIALIZE_FP, 'U') as f:
self.conn_handler.execute(f.read())
# Populate the database
with open(POPULATE_FP, 'U') as f:
self.conn_handler.execute(f.read())
super(TestHandlerBase, self).setUp()
def tearDown(self):
if self.database:
del self.conn_handler
# helpers from http://www.peterbe.com/plog/tricks-asynchttpclient-tornado
def get(self, url, data=None, headers=None, doseq=True):
if data is not None:
if isinstance(data, dict):
data = urlencode(data, doseq=doseq)
if '?' in url:
url += '&%s' % data
else:
url += '?%s' % data
return self._fetch(url, 'GET', headers=headers)
def post(self, url, data, headers=None, doseq=True):
if data is not None:
if isinstance(data, dict):
data = urlencode(data, doseq=doseq)
return self._fetch(url, 'POST', data, headers)
def _fetch(self, url, method, data=None, headers=None):
self.http_client.fetch(self.get_url(url), self.stop, method=method,
body=data, headers=headers)
return self.wait()
```
#### File: qiita/qiita_pet/webserver.py
```python
import tornado.auth
import tornado.escape
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.websocket
from tornado.options import define, options
from os.path import dirname, join
from base64 import b64encode
from uuid import uuid4
from qiita_pet.handlers.base_handlers import (MainHandler, NoPageHandler)
from qiita_pet.handlers.auth_handlers import (
AuthCreateHandler, AuthLoginHandler, AuthLogoutHandler, AuthVerifyHandler)
from qiita_pet.handlers.user_handlers import (
ChangeForgotPasswordHandler, ForgotPasswordHandler, UserProfileHandler)
from qiita_pet.handlers.analysis_handlers import (
SelectCommandsHandler, AnalysisWaitHandler, AnalysisResultsHandler,
ShowAnalysesHandler, SearchStudiesHandler)
from qiita_pet.handlers.study_handlers import (
CreateStudyHandler, PrivateStudiesHandler, PublicStudiesHandler,
StudyDescriptionHandler)
from qiita_pet.handlers.logger_handlers import LogEntryViewerHandler
from qiita_pet.handlers.websocket_handlers import MessageHandler
from qiita_pet.handlers.upload import UploadFileHandler
from qiita_db.util import get_db_files_base_dir
define("port", default=8888, help="run on the given port", type=int)
DIRNAME = dirname(__file__)
STATIC_PATH = join(DIRNAME, "static")
TEMPLATE_PATH = join(DIRNAME, "templates") # base folder for webpages
RES_PATH = get_db_files_base_dir()
COOKIE_SECRET = b64encode(uuid4().bytes + uuid4().bytes)
DEBUG = True
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/", MainHandler),
(r"/auth/login/", AuthLoginHandler),
(r"/auth/logout/", AuthLogoutHandler),
(r"/auth/create/", AuthCreateHandler),
(r"/auth/verify/(.*)", AuthVerifyHandler),
(r"/auth/forgot/", ForgotPasswordHandler),
(r"/auth/reset/(.*)", ChangeForgotPasswordHandler),
(r"/profile/", UserProfileHandler),
(r"/results/(.*)", tornado.web.StaticFileHandler,
{"path": RES_PATH}),
(r"/static/(.*)", tornado.web.StaticFileHandler,
{"path": STATIC_PATH}),
(r"/analysis/2", SearchStudiesHandler),
(r"/analysis/3", SelectCommandsHandler),
(r"/analysis/wait/(.*)", AnalysisWaitHandler),
(r"/analysis/results/(.*)", AnalysisResultsHandler),
(r"/analysis/show/", ShowAnalysesHandler),
(r"/consumer/", MessageHandler),
(r"/error/", LogEntryViewerHandler),
(r"/study/create/", CreateStudyHandler),
(r"/study/private/", PrivateStudiesHandler),
(r"/study/public/", PublicStudiesHandler),
(r"/study/description/(.*)", StudyDescriptionHandler),
(r"/upload/", UploadFileHandler),
# 404 PAGE MUST BE LAST IN THIS LIST!
(r".*", NoPageHandler)
]
settings = {
"template_path": TEMPLATE_PATH,
"debug": DEBUG,
"cookie_secret": COOKIE_SECRET,
"login_url": "/auth/login/"
}
tornado.web.Application.__init__(self, handlers, **settings)
def main():
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(options.port)
print("Tornado started on port", options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
```
#### File: qiita/qiita_ware/demux.py
```python
r"""HDF5 demultiplexed DDL
Attributes off of ./ for the full file:
n : int, the number of sequences
max : int, the max sequence length
min : int, the min sequence length
mean : float, the mean sequence length
std : float, the standard deviation of sequence length
median : float, the median sequence length
hist : np.array of int, 10 bin histogram of sequence lengths
hist_edge : np.array of int, left edge of each bin
Each sample has its own group with the following structure:
./<sample_name>/sequence : (N,) of str where N is the number of \
sequences in the sample
./<sample_name>/qual : (N, M) of int where N is the number \
of sequences in the sample, and M is the max sequence length (file-wide)
./<sample_name>/barcode/corrected : (N,) of str where N is the number of \
sequences in the sample
./<sample_name>/barcode/original : (N,) of str where N is the number of \
sequences in the sample
./<sample_name>/barcode/error : (N,) of int where N is the number of
sequences in the sample
Each sample additionally has the following attributes described on the
sample group:
n : int, the number of sequences
max : int, the max sequence length
min : int, the min sequence length
mean : float, the mean sequence length
std : float, the standard deviation of sequence length
median : float, the median sequence length
hist : np.array of int, 10 bin histogram of sequence lengths
hist_edge : np.array of int, left edge of each bin
"""
from __future__ import division
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
import os
from functools import partial
from itertools import repeat
from collections import defaultdict, namedtuple
from re import search
import numpy as np
from future.utils import viewitems, viewvalues
from future.builtins import zip
from skbio.parse.sequences import load
from skbio.format.sequences import format_fastq_record
# track some basic stats about the samples
stat = namedtuple('stat', 'n max min mean median std hist hist_edge')
# centralized in case paths change
dset_paths = {'sequence': 'sequence',
'barcode_original': 'barcode/original',
'barcode_corrected': 'barcode/corrected',
'barcode_error': 'barcode/error',
'qual': 'qual'}
class _buffer(object):
"""Buffer baseclass that sits on top of an HDF5 dataset
Notes
-----
The intent of the buffer is to minimize direct writes against HDF5 datasets
and to instead perform bulk operations against the h5py API. The 'stdio'
driver for h5py also enables system buffers, but in practice, there is
still a large amount of overhead when writing small pieces of data
incrementally to h5py datasets.
"""
def __init__(self, dset, max_fill=10000):
"""Construct thy self
Parameters
----------
dset : h5py.Dataset
The dataset to buffer
max_fill : unsigned int
The maximum fill for the buffer
"""
self.dset = dset
self._n = 0
self._idx = 0
self._max_fill = max_fill
self._alloc()
def __del__(self):
"""Flush when the buffer is deconstructed"""
if self._n > 0:
self.flush()
def write(self, data):
"""Deposit into the buffer, write to dataset if necessary
Parameters
----------
data : scalar or np.array
The data is dependent on the underlying buffer
"""
self._write(data)
self._n += 1
if self.is_full():
self.flush()
def _write(self, data):
raise NotImplementedError
def _alloc(self):
raise NotImplementedError
def is_full(self):
"""Determine if the buffer is full"""
return self._n >= self._max_fill
def flush(self):
"""Flush the buffer to the dataset
Notes
-----
Buffer is zero'd out following the flush
"""
# write
start, end = self._idx, self._idx + self._n
self.dset[start:end] = self._buf[:self._n]
# zero out
self._idx += self._n
self._n = 0
self._buf[:] = 0
class buffer1d(_buffer):
"""A 1 dimensional buffer
Notes
-----
This buffer is useful for str or int. Strings, such as nucleotide
sequences, can leverage this buffer if the strings are stored as strings
and not char.
"""
def _write(self, data):
self._buf[self._n] = data
def _alloc(self):
self._buf = np.zeros(self._max_fill, self.dset.dtype)
class buffer2d(_buffer):
"""A 2 dimensional buffer
Notes
-----
This buffer is useful for storing vectors of int or float. Qual scores,
such as those commonly associated with nucleotide sequence data, can
leverage this buffer as the qual scores are commonly represented as vectors
of int.
"""
def _write(self, data):
self._buf[self._n, :data.size] = data
def _alloc(self):
shape = (self._max_fill, self.dset.shape[1])
self._buf = np.zeros(shape, dtype=self.dset.dtype)
def _has_qual(fp):
"""Check if it looks like we have qual"""
iter_ = load(fp)
rec = next(iter(iter_))
return rec['Qual'] is not None
def _per_sample_lengths(fp):
"""Determine the lengths of all sequences per sample
Parameters
----------
fp : filepath
The sequence file to walk over
Returns
-------
dict
{sample_id: [sequence_length]}
"""
lengths = defaultdict(list)
for record in load(fp):
sample_id = record['SequenceID'].split('_', 1)[0]
lengths[sample_id].append(len(record['Sequence']))
return lengths
def _summarize_lengths(lengths):
"""Summarize lengths per sample
Parameters
----------
lengths : dict
{sample_id: [sequence_length]}
Returns
-------
dict
{sample_id: sample_stat}
stat
The full file stats
"""
sample_stats = {}
all_lengths = np.zeros(sum([len(v) for v in viewvalues(lengths)]), int)
pos = 0
for sid, lens in viewitems(lengths):
lens = np.array(lens)
hist, edge = np.histogram(lens)
sample_stats[sid] = stat(n=lens.size, max=lens.max(), std=lens.std(),
min=lens.min(), mean=lens.mean(),
median=np.median(lens), hist=hist,
hist_edge=edge)
all_lengths[pos:pos+lens.size] = lens
pos += lens.size
hist, edge = np.histogram(all_lengths)
full_stats = stat(n=all_lengths.size, max=all_lengths.max(),
min=all_lengths.min(), std=all_lengths.std(),
mean=all_lengths.mean(), median=np.median(all_lengths),
hist=hist, hist_edge=edge)
return sample_stats, full_stats
def _set_attr_stats(h5grp, stats):
"""Store stats in h5grp attrs
Parameters
----------
h5grp : h5py.Group or h5py.File
The group or file to update .attrs on
stats : stat
The stats to record
"""
h5grp.attrs['n'] = stats.n
h5grp.attrs['mean'] = stats.mean
h5grp.attrs['max'] = stats.max
h5grp.attrs['min'] = stats.min
h5grp.attrs['median'] = stats.median
h5grp.attrs['std'] = stats.std
h5grp.attrs['hist'] = stats.hist
h5grp.attrs['hist_edge'] = stats.hist_edge
def _construct_datasets(sample_stats, h5file, max_barcode_length=12):
"""Construct the datasets within the h5file
Parameters
----------
sample_stats : dict
{sample_id: stat}
h5file : h5py.File
The file to store the demux data
Returns
-------
dict
{str : _buffer} where str is the dataset path and the `_buffer` is
either `buffer1d` or `buffer2d`.
"""
def create_dataset(path, dtype, rows, cols):
if cols == 1:
shape = (rows,)
buftype = buffer1d
else:
shape = (rows, cols)
buftype = buffer2d
kwargs = {'chunks': True, 'compression': True, 'compression_opts': 1}
dset = h5file.create_dataset(path, dtype=dtype, shape=shape, **kwargs)
return buftype(dset)
buffers = {}
for sid, stats in viewitems(sample_stats):
# determine group
pjoin = partial(os.path.join, sid)
# setup dataset sizes and types
rows = stats.n
cols = stats.max
seq_dtype = '|S%d' % cols
bc_dtype = '|S%d' % max_barcode_length
# construct datasets
path = pjoin(dset_paths['sequence'])
buffers[path] = create_dataset(path, seq_dtype, rows, 1)
path = pjoin(dset_paths['barcode_original'])
buffers[path] = create_dataset(path, bc_dtype, rows, 1)
path = pjoin(dset_paths['barcode_corrected'])
buffers[path] = create_dataset(path, bc_dtype, rows, 1)
path = pjoin(dset_paths['barcode_error'])
buffers[path] = create_dataset(path, int, rows, 1)
path = pjoin(dset_paths['qual'])
buffers[path] = create_dataset(path, int, rows, cols)
# set stats
_set_attr_stats(h5file[sid], stats)
return buffers
def to_hdf5(fp, h5file, max_barcode_length=12):
"""Represent demux data in an h5file
Parameters
----------
fp : filepath
The filepath containing either FASTA or FASTQ data.
h5file : h5py.File
The file to write into.
Notes
-----
A group, per sample, will be created and within that group, 5 datasets will
be constructed that correspond to sequence, original_barcode,
corrected_barcode, barcode_errors, and qual.
The filepath is required as two passes over the file are essential.
The expectation is that the filepath being operated on is the result of
split_libraries.py or split_libraries_fastq.py from QIIME. This code makes
assumptions about items in the comment line that are added by split
    libraries. Specifically, the code looks for "new_bc", "orig_bc" and
    "bc_diffs" fields, and additionally assumes the sample ID is encoded in the
    sequence ID.
"""
# walk over the file and collect summary stats
sample_stats, full_stats = _summarize_lengths(_per_sample_lengths(fp))
# construct the datasets, storing per sample stats and full file stats
buffers = _construct_datasets(sample_stats, h5file)
_set_attr_stats(h5file, full_stats)
h5file.attrs['has-qual'] = _has_qual(fp)
for rec in load(fp):
result = search((r'^(?P<sample>.+?)_\d+? .*orig_bc=(?P<orig_bc>.+?) '
                         r'new_bc=(?P<corr_bc>.+?) bc_diffs=(?P<bc_diffs>\d+)'),
rec['SequenceID'])
if result is None:
raise ValueError("%s doesn't appear to be split libraries "
"output!" % fp)
sample = result.group('sample')
bc_diffs = result.group('bc_diffs')
corr_bc = result.group('corr_bc')
orig_bc = result.group('orig_bc')
sequence = rec['Sequence']
qual = rec['Qual']
pjoin = partial(os.path.join, sample)
buffers[pjoin(dset_paths['sequence'])].write(sequence)
buffers[pjoin(dset_paths['barcode_original'])].write(orig_bc)
buffers[pjoin(dset_paths['barcode_corrected'])].write(corr_bc)
buffers[pjoin(dset_paths['barcode_error'])].write(bc_diffs)
if qual is not None:
buffers[pjoin(dset_paths['qual'])].write(qual)
def format_fasta_record(seqid, seq, qual):
"""Format a fasta record
Parameters
----------
seqid : str
The sequence ID
seq : str
The sequence
qual : ignored
This is ignored
Returns
-------
str
A formatted sequence record
"""
return b'\n'.join([b'>' + seqid, seq, b''])
def to_ascii(demux, samples=None):
"""Consume a demux HDF5 file and yield sequence records
Parameters
----------
demux : h5py.File
The demux file to operate on
samples : list, optional
        Samples to pull out. If None, all samples will be examined.
Defaults to None.
Returns
-------
generator
A formatted fasta or fastq record. The format is determined based on
the presence of qual scores. If qual scores exist, then fastq is
returned, otherwise fasta is returned.
"""
if demux.attrs['has-qual']:
formatter = format_fastq_record
else:
formatter = format_fasta_record
id_fmt = ("%(sample)s_%(idx)d orig_bc=%(bc_ori)s new_bc=%(bc_cor)s "
"bc_diffs=%(bc_diff)d")
if samples is None:
samples = demux.keys()
for samp, idx, seq, qual, bc_ori, bc_cor, bc_err in fetch(demux, samples):
seq_id = id_fmt % {'sample': samp, 'idx': idx, 'bc_ori': bc_ori,
'bc_cor': bc_cor, 'bc_diff': bc_err}
yield formatter(seq_id, seq, qual)
def fetch(demux, samples=None, k=None):
"""Fetch sequences from a HDF5 demux file
Parameters
----------
demux : h5py.File
The demux file to operate on.
samples : list, optional
Samples to pull out. If None, then all samples will be examined.
Defaults to None.
k : int, optional
Randomly select (without replacement) k sequences from a sample. Only
samples in which the number of sequences are >= k are considered. If
None, all sequences for a sample are returned. Defaults to None.
Returns
-------
generator
Yields (sample, index, sequence, qual, original_barcode,
corrected_barcode, barcode_error)
"""
if samples is None:
samples = demux.keys()
for sample in samples:
pjoin = partial(os.path.join, sample)
# h5py only has partial fancy indexing support and it is limited to a
# boolean vector.
indices = np.ones(demux[sample].attrs['n'], dtype=bool)
if k is not None:
if demux[sample].attrs['n'] < k:
continue
to_keep = np.arange(demux[sample].attrs['n'])
np.random.shuffle(to_keep)
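            # start from an all-False mask and mark k random positions True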
indices = np.logical_not(indices)
indices[to_keep[:k]] = True
seqs = demux[pjoin(dset_paths['sequence'])][indices]
# only yield qual if we have it
quals = repeat(None)
if demux.attrs['has-qual']:
if len(indices) == 1:
if indices[0]:
quals = demux[pjoin(dset_paths['qual'])][:]
else:
quals = demux[pjoin(dset_paths['qual'])][indices, :]
bc_original = demux[pjoin(dset_paths['barcode_original'])][indices]
bc_corrected = demux[pjoin(dset_paths['barcode_corrected'])][indices]
bc_error = demux[pjoin(dset_paths['barcode_error'])][indices]
iter_ = zip(repeat(sample), np.arange(indices.size)[indices], seqs,
quals, bc_original, bc_corrected, bc_error)
for item in iter_:
yield item
```
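The demux module above is normally driven as a two-step pipeline: `to_hdf5` walks the split-libraries output twice (once to size the per-sample datasets, once to fill them), while `to_ascii` and `fetch` stream records back out of the resulting file. A minimal usage sketch follows; the import path, file names, and sample ID are hypothetical, and it assumes `h5py` is installed and that `seqs.fna` is QIIME split-libraries output.
```python
import h5py

# hypothetical import path for the module shown above
from qiita_ware.demux import to_hdf5, to_ascii, fetch

# write demultiplexed sequences into per-sample HDF5 datasets
with h5py.File('demux.h5', 'w') as h5:
    to_hdf5('seqs.fna', h5)

with h5py.File('demux.h5', 'r') as h5:
    # stream records back; FASTA or FASTQ depending on the 'has-qual' attribute
    for rec in to_ascii(h5, samples=['SampleA']):
        print(rec)

    # or randomly subsample up to 10 sequences per sample
    for sample, idx, seq, qual, bc_ori, bc_cor, bc_err in fetch(h5, k=10):
        pass
```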
#### File: qiita_ware/test/test_run.py
```python
from unittest import TestCase, main
from os.path import exists, join
from os import remove
from redis import Redis
from qiita_core.util import qiita_test_checker
from qiita_db.analysis import Analysis
from qiita_db.job import Job
from qiita_db.util import get_db_files_base_dir
from qiita_ware.run import (
RunAnalysis, _build_analysis_files, _job_comm_wrapper, _finish_analysis)
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
@qiita_test_checker()
class TestRun(TestCase):
def setUp(self):
self._del_files = []
def tearDown(self):
for delfile in self._del_files:
remove(delfile)
def test_finish_analysis(self):
redis = Redis()
pubsub = redis.pubsub()
pubsub.subscribe("<EMAIL>")
msgs = []
_finish_analysis("<EMAIL>", Analysis(1))
for msg in pubsub.listen():
if msg['type'] == 'message':
msgs.append(msg['data'])
if "allcomplete" in msg['data']:
pubsub.unsubscribe("<EMAIL>")
break
self.assertEqual(msgs, ['{"msg": "allcomplete", "analysis": 1}'])
def test_build_files_job_comm_wrapper(self):
# basic setup needed for test
job = Job(3)
# create the files needed for job, testing _build_analysis_files
analysis = Analysis(2)
_build_analysis_files(analysis, 100)
self._del_files.append(join(get_db_files_base_dir(), "analysis",
"2_analysis_mapping.txt"))
self._del_files.append(join(get_db_files_base_dir(), "analysis",
"2_analysis_18S.biom"))
self.assertTrue(exists(join(get_db_files_base_dir(), "analysis",
"2_analysis_mapping.txt")))
self.assertTrue(exists(join(get_db_files_base_dir(), "analysis",
"2_analysis_18S.biom")))
self.assertEqual([3], analysis.jobs)
_job_comm_wrapper("<EMAIL>", 2, job)
self.assertEqual(job.status, "error")
def test_redis_comms(self):
"""Make sure redis communication happens"""
msgs = []
redis = Redis()
pubsub = redis.pubsub()
pubsub.subscribe("<EMAIL>")
app = RunAnalysis()
app("<EMAIL>", Analysis(2), [], rarefaction_depth=100)
for msg in pubsub.listen():
if msg['type'] == 'message':
msgs.append(msg['data'])
if "allcomplete" in msg['data']:
pubsub.unsubscribe("<EMAIL>")
break
self.assertEqual(
msgs,
['{"msg": "Running", "command": "18S: Beta Diversity", '
'"analysis": 2}',
'{"msg": "ERROR", "command": "18S: Beta Diversity", '
'"analysis": 2}',
'{"msg": "allcomplete", "analysis": 2}'])
log = self.conn_handler.execute_fetchall(
"SELECT * from qiita.logging")
self.assertEqual(1, len(log))
log = log[0]
self.assertEqual(1, log[0])
self.assertEqual(2, log[2])
self.assertTrue(len(log[3]) > 0)
self.assertTrue('[{"job": 3, "analysis": 2}]')
def test_add_jobs_in_construct_job_graphs(self):
analysis = Analysis(2)
RunAnalysis()._construct_job_graph(
"<EMAIL>", analysis, [('18S', 'Summarize Taxa')],
comm_opts={'Summarize Taxa': {'opt1': 5}})
self.assertEqual(analysis.jobs, [3, 4])
job = Job(4)
self.assertEqual(job.datatype, '18S')
self.assertEqual(job.command,
['Summarize Taxa', 'summarize_taxa_through_plots.py'])
expopts = {
'--output_dir': join(
get_db_files_base_dir(), 'job',
'4_summarize_taxa_through_plots.py_output_dir'),
'opt1': 5}
self.assertEqual(job.options, expopts)
if __name__ == "__main__":
main()
``` |
{
"source": "jorged104/Algoritmos-Python",
"score": 4
} |
#### File: Algoritmos-Python/structures/circular_list.py
```python
class Node():
    def __init__(self, data: str):
        super().__init__()
        self.data: str = data
        self.next: Node = None
        self.prev: Node = None
class circular_list():
def __init__(self):
        self.head: Node = None
    def append(self, data: str):
new_nodo = Node(data)
temp_node = self.head
if temp_node == None:
self.head = new_nodo
new_nodo.next = new_nodo
new_nodo.prev = new_nodo
return
"""
Go to last node
"""
temp_node = temp_node.prev
temp_node.next = new_nodo
new_nodo.prev = temp_node
new_nodo.next = self.head
self.head.prev = new_nodo
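# A bounded traversal sketch (added for illustration; `ring` stands for a
# hypothetical circular_list instance and is not part of the original file):
#
#   node = ring.head
#   while node is not None:
#       print(node.data)
#       node = node.next
#       if node is ring.head:   # back at the start, one full lap done
#           break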
if __name__ == "__main__":
list_double = circular_list()
list_double.append("Python")
list_double.append("JavaScript")
list_double.append("Java")
list_double.append("Go")
list_double.append("Ruby")
list_double.append("Now")
temp_node = list_double.head
count = 0
while count < 100:
print(str(count) , " ",temp_node.data)
temp_node = temp_node.prev
count += 1
``` |
{
"source": "JorgeDeLosSantos/_blogs_",
"score": 3
} |
#### File: Mini-Curso wxPython/4-agregando-controles/test_app.py
```python
import random
import wx
import wx.lib.buttons as buttons
########################################################################
class TTTPanel(wx.Panel):
"""
Tic-Tac-Toe Panel object
"""
#----------------------------------------------------------------------
def __init__(self, parent):
"""
Initialize the panel
"""
wx.Panel.__init__(self, parent)
self.toggled = False
self.playerWon = False
self.layoutWidgets()
#----------------------------------------------------------------------
def checkWin(self, computer=False):
"""
Check if the player won
"""
for button1, button2, button3 in self.methodsToWin:
if button1.GetLabel() == button2.GetLabel() and \
button2.GetLabel() == button3.GetLabel() and \
button1.GetLabel() != "":
print "Player wins!"
self.playerWon = True
button1.SetBackgroundColour("Yellow")
button2.SetBackgroundColour("Yellow")
button3.SetBackgroundColour("Yellow")
self.Layout()
if not computer:
msg = "You Won! Would you like to play again?"
dlg = wx.MessageDialog(None, msg, "Winner!",
wx.YES_NO | wx.ICON_WARNING)
result = dlg.ShowModal()
if result == wx.ID_YES:
wx.CallAfter(self.restart)
dlg.Destroy()
break
else:
return True
#----------------------------------------------------------------------
def layoutWidgets(self):
"""
Create and layout the widgets
"""
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.fgSizer = wx.FlexGridSizer(rows=3, cols=3, vgap=5, hgap=5)
btnSizer = wx.BoxSizer(wx.HORIZONTAL)
font = wx.Font(22, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL,
wx.FONTWEIGHT_BOLD)
size = (100,100)
self.button1 = buttons.GenToggleButton(self, size=size, name="btn1")
self.button2 = buttons.GenToggleButton(self, size=size, name="btn2")
self.button3 = buttons.GenToggleButton(self, size=size, name="btn3")
self.button4 = buttons.GenToggleButton(self, size=size, name="btn4")
self.button5 = buttons.GenToggleButton(self, size=size, name="btn5")
self.button6 = buttons.GenToggleButton(self, size=size, name="btn6")
self.button7 = buttons.GenToggleButton(self, size=size, name="btn7")
self.button8 = buttons.GenToggleButton(self, size=size, name="btn8")
self.button9 = buttons.GenToggleButton(self, size=size, name="btn9")
self.normalBtnColour = self.button1.GetBackgroundColour()
self.widgets = [self.button1, self.button2, self.button3,
self.button4, self.button5, self.button6,
self.button7, self.button8, self.button9]
# change all the main game buttons' font and bind them to an event
for button in self.widgets:
button.SetFont(font)
button.Bind(wx.EVT_BUTTON, self.onToggle)
# add the widgets to the sizers
self.fgSizer.AddMany(self.widgets)
mainSizer.Add(self.fgSizer, 0, wx.ALL|wx.CENTER, 5)
self.endTurnBtn = wx.Button(self, label="End Turn")
self.endTurnBtn.Bind(wx.EVT_BUTTON, self.onEndTurn)
self.endTurnBtn.Disable()
btnSizer.Add(self.endTurnBtn, 0, wx.ALL|wx.CENTER, 5)
startOverBtn = wx.Button(self, label="Restart")
startOverBtn.Bind(wx.EVT_BUTTON, self.onRestart)
btnSizer.Add(startOverBtn, 0, wx.ALL|wx.CENTER, 5)
mainSizer.Add(btnSizer, 0, wx.CENTER)
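        # ways to win: the first three tuples are the horizontal rows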
self.methodsToWin = [(self.button1, self.button2, self.button3),
(self.button4, self.button5, self.button6),
(self.button7, self.button8, self.button9),
# vertical ways to win
(self.button1, self.button4, self.button7),
(self.button2, self.button5, self.button8),
(self.button3, self.button6, self.button9),
# diagonal ways to win
(self.button1, self.button5, self.button9),
(self.button3, self.button5, self.button7)]
self.SetSizer(mainSizer)
#----------------------------------------------------------------------
def enableUnusedButtons(self):
"""
Re-enable unused buttons
"""
for button in self.widgets:
if button.GetLabel() == "":
button.Enable()
self.Refresh()
self.Layout()
#----------------------------------------------------------------------
def onEndTurn(self, event):
"""
Let the computer play
"""
        # reset the toggled flag state
self.toggled = False
# disable all played buttons
for btn in self.widgets:
if btn.GetLabel():
btn.Disable()
computerPlays = []
noPlays = []
for button1, button2, button3 in self.methodsToWin:
if button1.GetLabel() == button2.GetLabel() and button3.GetLabel() == "":
if button1.GetLabel() == "" and button2.GetLabel() == "" and button1.GetLabel() == "":
pass
else:
#if button1.GetLabel() == "O":
noPlays.append(button3)
elif button1.GetLabel() == button3.GetLabel() and button2.GetLabel() == "":
if button1.GetLabel() == "" and button2.GetLabel() == "" and button1.GetLabel() == "":
pass
else:
noPlays.append(button2)
elif button2.GetLabel() == button3.GetLabel() and button1.GetLabel() == "":
if button1.GetLabel() == "" and button2.GetLabel() == "" and button1.GetLabel() == "":
pass
else:
noPlays.append(button1)
noPlays = list(set(noPlays))
if button1.GetLabel() == "" and button1 not in noPlays:
if not self.checkWin(computer=True):
computerPlays.append(button1)
if button2.GetLabel() == "" and button2 not in noPlays:
if not self.checkWin(computer=True):
computerPlays.append(button2)
if button3.GetLabel() == "" and button3 not in noPlays:
if not self.checkWin(computer=True):
computerPlays.append(button3)
computerPlays = list(set(computerPlays))
        print(noPlays)
choices = len(computerPlays)
while 1 and computerPlays:
btn = random.choice(computerPlays)
if btn not in noPlays:
                print(btn.GetName())
btn.SetLabel("O")
btn.Disable()
break
else:
print "Removed => " + btn.GetName()
computerPlays.remove(btn)
if choices < 1:
self.giveUp()
break
choices -= 1
else:
# Computer cannot play without winning
self.giveUp()
self.endTurnBtn.Disable()
self.enableUnusedButtons()
#----------------------------------------------------------------------
def giveUp(self):
"""
The computer cannot find a way to play that lets the user win,
so it gives up.
"""
msg = "I give up, Dave. You're too good at this game!"
dlg = wx.MessageDialog(None, msg, "Game Over!",
wx.YES_NO | wx.ICON_WARNING)
result = dlg.ShowModal()
if result == wx.ID_YES:
self.restart()
else:
wx.CallAfter(self.GetParent().Close)
dlg.Destroy()
#----------------------------------------------------------------------
def onRestart(self, event):
"""
Calls the restart method
"""
self.restart()
#----------------------------------------------------------------------
def onToggle(self, event):
"""
On button toggle, change the label of the button pressed
and disable the other buttons unless the user changes their mind
"""
button = event.GetEventObject()
button.SetLabel("X")
button_id = button.GetId()
self.checkWin()
if not self.toggled:
self.toggled = True
self.endTurnBtn.Enable()
for btn in self.widgets:
if button_id != btn.GetId():
btn.Disable()
else:
self.toggled = False
self.endTurnBtn.Disable()
button.SetLabel("")
self.enableUnusedButtons()
        # check if it's a "cat's game" - no one has won
if not self.playerWon:
labels = [True if btn.GetLabel() else False for btn in self.widgets]
if False not in labels:
msg = "Cats Game - No one won! Would you like to play again?"
dlg = wx.MessageDialog(None, msg, "Game Over!",
wx.YES_NO | wx.ICON_WARNING)
result = dlg.ShowModal()
if result == wx.ID_YES:
self.restart()
dlg.Destroy()
#----------------------------------------------------------------------
def restart(self):
"""
Restart the game and reset everything
"""
for button in self.widgets:
button.SetLabel("")
button.SetValue(False)
button.SetBackgroundColour(self.normalBtnColour)
self.toggled = False
self.playerWon = False
self.endTurnBtn.Disable()
self.enableUnusedButtons()
########################################################################
class TTTFrame(wx.Frame):
"""
Tic-Tac-Toe Frame object
"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
title = "Tic-Tac-Toe"
size = (500, 500)
wx.Frame.__init__(self, parent=None, title=title, size=size)
panel = TTTPanel(self)
self.Show()
if __name__ == "__main__":
app = wx.App(False)
frame = TTTFrame()
app.MainLoop()
``` |