metadata (dict) | text (string, lengths 60 to 3.49M)
---|---
{
"source": "joidegn/pyxero",
"score": 2
} |
#### File: pyxero/xero/allocationsmanager.py
```python
from __future__ import unicode_literals
import requests
from .basemanager import BaseManager
from .constants import XERO_API_URL
class PrepaymentAllocationsManager(BaseManager):
def __init__(self, credentials, unit_price_4dps=False, user_agent=None):
from xero import __version__ as VERSION
self.credentials = credentials
self.singular = 'Allocation'
self.name = 'Allocations'
self.base_url = credentials.base_url + XERO_API_URL
self.extra_params = {"unitdp": 4} if unit_price_4dps else {}
if user_agent is None:
self.user_agent = 'pyxero/%s ' % VERSION + requests.utils.default_user_agent()
else:
self.user_agent = user_agent
method = self._put
setattr(self, 'put', self._get_data(method))
def _put(self, prepayment_id, data, summarize_errors=True, headers=None):
uri = '/'.join([self.base_url, 'Prepayments', prepayment_id, self.name])
params = self.extra_params.copy()
method = 'put'
body = {'xml': self._prepare_data_for_save(data)}
if not summarize_errors:
params['summarizeErrors'] = 'false'
return uri, params, method, body, headers, False
class CreditNoteAllocationsManager(BaseManager):
def __init__(self, credentials, unit_price_4dps=False, user_agent=None):
from xero import __version__ as VERSION
self.credentials = credentials
self.singular = 'Allocation'
self.name = 'Allocations'
self.base_url = credentials.base_url + XERO_API_URL
self.extra_params = {"unitdp": 4} if unit_price_4dps else {}
if user_agent is None:
self.user_agent = 'pyxero/%s ' % VERSION + requests.utils.default_user_agent()
else:
self.user_agent = user_agent
method = self._put
setattr(self, 'put', self._get_data(method))
def _put(self, credit_note_id, data, summarize_errors=True, headers=None):
uri = '/'.join([self.base_url, 'CreditNotes', credit_note_id, self.name])
params = self.extra_params.copy()
method = 'put'
body = {'xml': self._prepare_data_for_save(data)}
if not summarize_errors:
params['summarizeErrors'] = 'false'
return uri, params, method, body, headers, False
```
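Both managers follow the same pattern and differ only in the parent resource segment of the URI ('Prepayments' vs 'CreditNotes'); the `TrackingCategoryOptionsManager` in the next file repeats it for 'TrackingCategories'. As a rough, hypothetical sketch of what `_put` assembles (not part of the repository), the snippet below uses a stub credentials object and placeholder ids; `_prepare_data_for_save` and `_get_data` are presumed to come from `BaseManager`:
```python
# Hypothetical usage sketch: the stub credentials, ids and payload are placeholders.
class StubCredentials:
    base_url = "https://api.xero.com"

manager = PrepaymentAllocationsManager(StubCredentials(), unit_price_4dps=True)

# _put only builds the request tuple; the bound `put` created in __init__
# (via _get_data) is what actually sends it to Xero.
uri, params, method, body, headers, singleobject = manager._put(
    "<prepayment-id>",
    {"Invoice": {"InvoiceID": "<invoice-id>"}, "Amount": 100.0},
)
print(method.upper(), uri, params)  # PUT .../Prepayments/<prepayment-id>/Allocations {'unitdp': 4}
```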
#### File: pyxero/xero/optionsmanager.py
```python
from __future__ import unicode_literals
import requests
from .basemanager import BaseManager
from .constants import XERO_API_URL
class TrackingCategoryOptionsManager(BaseManager):
def __init__(self, credentials, user_agent=None):
from xero import __version__ as VERSION
self.credentials = credentials
self.singular = 'Option'
self.name = 'TrackingCategoryOptions'
self.base_url = credentials.base_url + XERO_API_URL
if user_agent is None:
self.user_agent = 'pyxero/%s ' % VERSION + requests.utils.default_user_agent()
else:
self.user_agent = user_agent
method = self._put
setattr(self, 'put', self._get_data(method))
def _put(self, tracking_category_id, data, summarize_errors=True, headers=None):
uri = '/'.join([self.base_url, 'TrackingCategories', tracking_category_id, self.name])
params = {}
method = 'put'
body = {'xml': self._prepare_data_for_save(data)}
if not summarize_errors:
params['summarizeErrors'] = 'false'
return uri, params, method, body, headers, False
``` |
{
"source": "JoiDvision/bayesian-machine-learning",
"score": 2
} |
#### File: bayesian-machine-learning/noise-contrastive-priors/utils.py
```python
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# ------------------------------------------
# Data
# ------------------------------------------
def select_bands(x, y, mask):
assert x.shape[0] == y.shape[0]
num_bands = len(mask)
if x.shape[0] % num_bands != 0:
raise ValueError('size of first dimension must be a multiple of mask length')
data_mask = np.repeat(mask, x.shape[0] // num_bands)
return [arr[data_mask] for arr in (x, y)]
def select_subset(x, y, num, rng=np.random):
assert x.shape[0] == y.shape[0]
choices = rng.choice(range(x.shape[0]), num, replace=False)
return [arr[choices] for arr in (x, y)]
# ------------------------------------------
# Training
# ------------------------------------------
def data_loader(x, y, batch_size, shuffle=True):
ds = tf.data.Dataset.from_tensor_slices((x, y))
if shuffle:
ds = ds.shuffle(x.shape[0])
return ds.batch(batch_size)
def scheduler(decay_steps, decay_rate=0.5, lr=1e-3):
return tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=lr,
decay_steps=decay_steps,
decay_rate=decay_rate)
def optimizer(lr):
return tf.optimizers.Adam(learning_rate=lr)
def backprop(model, loss, tape):
trainable_vars = model.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
return zip(gradients, trainable_vars)
def train(model, x, y,
batch_size,
epochs,
step_fn,
optimizer_fn=optimizer,
scheduler_fn=scheduler,
verbose=1,
verbose_every=1000):
steps_per_epoch = int(np.ceil(x.shape[0] / batch_size))
steps = epochs * steps_per_epoch
scheduler = scheduler_fn(steps)
optimizer = optimizer_fn(scheduler)
loss_tracker = tf.keras.metrics.Mean(name='loss')
mse_tracker = tf.keras.metrics.MeanSquaredError(name='mse')
loader = data_loader(x, y, batch_size=batch_size)
for epoch in range(1, epochs + 1):
for x_batch, y_batch in loader:
loss, y_pred = step_fn(model, optimizer, x_batch, y_batch)
loss_tracker.update_state(loss)
mse_tracker.update_state(y_batch, y_pred)
if verbose and epoch % verbose_every == 0:
print(f'epoch {epoch}: loss = {loss_tracker.result():.3f}, mse = {mse_tracker.result():.3f}')
loss_tracker.reset_states()
mse_tracker.reset_states()
# ------------------------------------------
# Visualization
# ------------------------------------------
style = {
'bg_line': {'ls': '--', 'c': 'black', 'lw': 1.0, 'alpha': 0.5},
'fg_data': {'marker': '.', 'c': 'red', 'lw': 1.0, 'alpha': 1.0},
'bg_data': {'marker': '.', 'c': 'gray', 'lw': 0.2, 'alpha': 0.2},
'pred_sample': {'marker': 'x', 'c': 'blue', 'lw': 0.6, 'alpha': 0.5},
'pred_mean': {'ls': '-', 'c': 'blue', 'lw': 1.0},
'a_unc': {'color': 'lightgreen'},
'e_unc': {'color': 'orange'},
}
def plot_data(x_train, y_train, x=None, y=None):
if x is not None and y is not None:
plt.plot(x, y, **style['bg_line'], label='f')
plt.scatter(x_train, y_train, **style['fg_data'], label='Train data')
plt.xlabel('x')
plt.ylabel('y')
def plot_prediction(x, y_mean, y_samples=None, aleatoric_uncertainty=None, epistemic_uncertainty=None):
x, y_mean, y_samples, epistemic_uncertainty, aleatoric_uncertainty = \
flatten(x, y_mean, y_samples, epistemic_uncertainty, aleatoric_uncertainty)
plt.plot(x, y_mean, **style['pred_mean'], label='Expected output')
if y_samples is not None:
plt.scatter(x, y_samples, **style['pred_sample'], label='Predictive samples')
if aleatoric_uncertainty is not None:
plt.fill_between(x,
y_mean + 2 * aleatoric_uncertainty,
y_mean - 2 * aleatoric_uncertainty,
**style['a_unc'], alpha=0.3, label='Aleatoric uncertainty')
if epistemic_uncertainty is not None:
plt.fill_between(x,
y_mean + 2 * epistemic_uncertainty,
y_mean - 2 * epistemic_uncertainty,
**style['e_unc'], alpha=0.3, label='Epistemic uncertainty')
def plot_uncertainty(x, aleatoric_uncertainty, epistemic_uncertainty=None):
plt.plot(x, aleatoric_uncertainty, **style['a_unc'], label='Aleatoric uncertainty')
if epistemic_uncertainty is not None:
plt.plot(x, epistemic_uncertainty, **style['e_unc'], label='Epistemic uncertainty')
plt.xlabel('x')
plt.ylabel('Uncertainty')
def flatten(*ts):
def _flatten(t):
if t is not None:
return tf.reshape(t, -1)
return [_flatten(t) for t in ts]
``` |
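As a rough usage sketch (hypothetical, not part of the original notebook code): `train` expects `step_fn(model, optimizer, x_batch, y_batch)` to perform one optimisation step and return `(loss, y_pred)`. The toy regression data, model and `mse_step` below are illustrative only and assume the functions defined above are in scope:
```python
import numpy as np
import tensorflow as tf

def mse_step(model, optimizer, x_batch, y_batch):
    # One gradient step on a plain MSE loss, reusing the backprop() helper above.
    with tf.GradientTape() as tape:
        y_pred = model(x_batch, training=True)
        loss = tf.reduce_mean(tf.square(y_batch - y_pred))
    optimizer.apply_gradients(backprop(model, loss, tape))
    return loss, y_pred

# Toy 1D regression problem (placeholder data).
x_toy = np.linspace(-1.0, 1.0, 256).reshape(-1, 1).astype(np.float32)
y_toy = x_toy ** 3 + 0.1 * np.random.randn(*x_toy.shape).astype(np.float32)

toy_model = tf.keras.Sequential([
    tf.keras.layers.Dense(32, activation='relu'),
    tf.keras.layers.Dense(1),
])

train(toy_model, x_toy, y_toy, batch_size=32, epochs=500,
      step_fn=mse_step, verbose_every=100)
```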
{
"source": "joiellantero/name-roulette",
"score": 3
} |
#### File: joiellantero/name-roulette/name-roulette.py
```python
from cowsay import *
import random
import itertools
import threading
import time
import sys
import os
name = []
again = 0
sleep_time = 1
# loading animation
def loading_animation():
done = False
def animate():
for c in itertools.cycle(['|', '/', '-', '\\']):
if done:
break
sys.stdout.write('\rchoosing someone... ' + c)
sys.stdout.flush()
time.sleep(0.1)
t = threading.Thread(target=animate)
t.start()
#long process here
time.sleep(sleep_time)
done = True
t.join()
def clear_terminal():
os.system('cls' if os.name == 'nt' else 'clear')
def get_names():
i = 0
# take all the names
print("Please enter all the names then type 'done' when you're finished.")
while (i != 'done'):
i = input('Enter name: ')
if i != 'done':
name.append(i)
def repeat():
again = input("Again? [y/n]: ")
if ((again == 'y' or again == 'Y') and len(name)!=0):
return
else:
while again != 'n' and again != 'y':
print("Choices: y for yes or n for no.")
again = input("Again? [y/n]: ")
if (again == 'n'):
print("Program ended")
sys.exit(0)
elif (again == 'y' or again == 'Y') and len(name)==0:
print("No more names to choose from")
sys.exit(0)
else:
return
def repeat_forever():
get_names()
# choose a random name from list name
while (True):
chosen_name = random.choice(name)
# clear terminal
clear_terminal()
# do loading animation
loading_animation()
# clear terminal
clear_terminal()
# display the chosen name
print(dragon(chosen_name))
repeat()
def repeat_until_last():
get_names()
# choose a random name from list name
while (True):
chosen_name = random.choice(name)
# clear terminal
clear_terminal()
# do loading animation
loading_animation()
# clear terminal
clear_terminal()
# display the chosen name
print(dragon(chosen_name))
name.remove(chosen_name)
repeat()
print("Commands:\n1 - repeat until last person\n2 - repeat forever")
cmd = input("choice [1/2]: ")
# repeat until last person
if (int(cmd) == 1):
repeat_until_last()
# repeat forever
elif (int(cmd) == 2):
repeat_forever()
# error handling for invalid input
else:
print("Error: Invalid Choice!")
``` |
{
"source": "joiellantero/notification",
"score": 3
} |
#### File: joiellantero/notification/notification2.py
```python
import os
import smtplib
from email.message import EmailMessage
from dotenv import load_dotenv
load_dotenv()
EMAIL_CLIENT = os.getenv('EMAIL_CLIENT')
EMAIL_CLIENT_APP_PASSWORD = os.getenv('EMAIL_CLIENT_APP_PASSWORD')
def email_alert(subject, to, body):
message = EmailMessage()
message['subject'] = subject
message['to'] = to
message['from'] = EMAIL_CLIENT
message.set_content(body)
server = smtplib.SMTP("smtp.gmail.com", 587)
server.starttls()
server.login(EMAIL_CLIENT, EMAIL_CLIENT_APP_PASSWORD)
server.send_message(message)
server.quit()
if __name__ == '__main__':
email_alert("this is my subject", "<EMAIL>", "this is my body")
``` |
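The script reads the sender address and password from the environment via `python-dotenv`; for Gmail this needs to be an app password rather than the account password, and the `<EMAIL>` placeholder in the `__main__` block stands for the recipient address. A `.env` file along these lines (placeholder values, not real credentials) is one way to supply the two variables:
```
EMAIL_CLIENT=sender@example.com
EMAIL_CLIENT_APP_PASSWORD=your-16-char-app-password
```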
{
"source": "joiellantero/py-playlist",
"score": 3
} |
#### File: joiellantero/py-playlist/main.py
```python
import time
def divider(text):
if text:
length = len(text)
half = round(length/2) + 1
print('-' * (20 - half), f'{text}', '-' * (20 - half))
return
print('-' * 40)
class Track:
def __init__(self, title, artist, duration):
self.title = title
self.artist = artist
self.duration = duration
class Playlist:
def __init__(self):
self.tracks = []
def enqueue(self, track):
index = len(self.tracks) + 1
self.tracks.append([index, track])
def remove(self, track_number):
self.tracks.remove(self.tracks[track_number - 1])
def view(self):
divider('VIEW')
for track in self.tracks:
print(f'{track[0]} - {track[1].title} - {track[1].artist}')
divider('')
def duration(self):
total_duration = 0
for track in self.tracks:
total_duration += int(track[1].duration)
ty_res = time.gmtime(total_duration)
res = time.strftime("%H:%M:%S", ty_res)
divider('DURATION')
print(f'Total duration: {res}')
divider('')
if __name__ == '__main__':
myTrack = Playlist()
n = int(input())
for i in range(n):
input_list = input().split(',')
myTrack.enqueue(Track(input_list[0], input_list[1], input_list[2]))
myTrack.view()
myTrack.duration()
myTrack.remove(2)
myTrack.view()
myTrack.duration()
``` |
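The `__main__` block reads the number of tracks from stdin, then one comma-separated `title,artist,duration` line per track (duration in seconds); it prints the playlist and its total duration formatted as HH:MM:SS, removes track 2, and prints both again. A hypothetical input (placeholder titles and artists) could look like:
```
3
Song A,Artist One,215
Song B,Artist Two,184
Song C,Artist Three,301
```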
{
"source": "joiellantero/Python-Tutorial-Exercises",
"score": 4
} |
#### File: Python-Tutorial-Exercises/games/lights_out.py
```python
def checker(lights, num_lights):
counter = 0
# we loop through all of the lights to count if all are turned off
for light in lights:
if light == "🌚":
counter = counter + 1
if counter == num_lights:
return True
return False
def start_game():
print("Lights Out!")
# get the number of lights from the user
num_lights = int(input("Enter number of lights: "))
# make a list filled with the sun emoji
lights = list(num_lights*"🌞")
# convert the list into a string then display it on the terminal
print("".join(lights))
# save the number of times the user toggles a light
moves = 0
# toggle the lights until it's all moon emoji
while True:
toggle_index = int(input(f"Which light should you toggle? [1-{num_lights}] "))-1
moves = moves + 1
if toggle_index < num_lights and toggle_index >= 0 :
# toggle the chosen index of the user
if lights[toggle_index] == "🌚":
lights[toggle_index] = "🌞"
else:
lights[toggle_index] = "🌚"
# toggle the light on the right side of the chosen index
if toggle_index+1 < num_lights and lights[toggle_index+1] == "🌚":
lights[toggle_index+1] = "🌞"
elif toggle_index+1 < num_lights:
lights[toggle_index+1] = "🌚"
# toggle the light on the left side of the chosen index
if toggle_index-1 >= 0 and lights[toggle_index-1] == "🌚":
lights[toggle_index-1] = "🌞"
elif toggle_index-1 >= 0:
lights[toggle_index-1] = "🌚"
else:
print(f"The light that you picked doesn't exist. Pick only from 1 to {num_lights}")
# convert the list into a string then display it on the terminal
print("".join(lights))
# this if statement will run when the checker function returns True and it will exit the current loop and go back to the while loop in get_choice function.
if checker(lights, num_lights):
print(f"You won! Moves: {moves}")
break
def get_choice():
choice = 0
while True:
# print the menu
print("Welcome to Arcade\n[1] Lights Out!\n[2] Exit")
# ask the user for their choice, i.e., to play the game or exit the game
choice = int(input("Enter Choice: "))
# when user wants to play the game, call start_game function. when the user wins, they will be asked again if they want to play again or exit. that's why there's no "break" here.
if choice == 1:
start_game()
elif choice == 2:
print("Exit Game")
break
else:
print("Invalid choice. Choices: 1 or 2 only.")
if __name__ == "__main__":
get_choice()
``` |
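For example, with four lights all lit (🌞🌞🌞🌞), toggling light 2 flips it together with both of its neighbours, giving 🌚🌚🌚🌞; the game ends once `checker` finds every light showing 🌚.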
{
"source": "joievoyage/CodeAcademyExs",
"score": 4
} |
#### File: joievoyage/CodeAcademyExs/taking_a_vacation.py
```python
def hotel_cost(nights):
#The hotel costs $140 per night
return 140 * nights
def plane_ride_cost(city):
if city == "Charlotte":
return 183
elif city == "Tampa":
return 220
elif city == "Pittsburgh":
return 222
elif city == "Los Angeles":
return 475
def rental_car_cost(days):
#The car costs $40 per day
cost = 40 * days
if days >= 7:
cost -= 50
elif days >= 3 and days < 7:
cost -= 20
return cost
def trip_cost(city, days, spending_money):
return hotel_cost(days) + plane_ride_cost(city) + rental_car_cost(days) + spending_money
print trip_cost("Los Angeles", 5, 600)
``` |
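For reference, `trip_cost("Los Angeles", 5, 600)` works out to 700 for five hotel nights, 475 for the flight, 180 for the rental car (200 minus the 20 discount for three or more days), plus 600 of spending money: 1955 in total.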
{
"source": "joigalcar3/IHDP",
"score": 4
} |
#### File: IHDP/Cleaned code PSO/Actor.py
```python
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten
"----------------------------------------------------------------------------------------------------------------------"
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2020 <NAME>"
__credits__ = []
__license__ = "MIT"
__version__ = "2.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
"----------------------------------------------------------------------------------------------------------------------"
class Actor:
# Class attributes
# Attributes related to RMSprop
beta_rmsprop = 0.999
epsilon = 1e-8
# Attributes related to the momentum
beta_momentum = 0.9
def __init__(self, selected_inputs, selected_states, tracking_states, indices_tracking_states,
number_time_steps, start_training, layers=(6, 1), activations=('sigmoid', 'sigmoid'),
learning_rate=0.9, learning_rate_cascaded=0.9, learning_rate_exponent_limit=10,
type_PE='3211', amplitude_3211=1, pulse_length_3211=15, WB_limits=30,
maximum_input=25, maximum_q_rate=20, cascaded_actor=False, NN_initial=None):
self.number_inputs = len(selected_inputs)
self.selected_states = selected_states
self.number_states = len(selected_states)
self.number_tracking_states = len(tracking_states)
self.indices_tracking_states = indices_tracking_states
self.xt = None
self.xt_ref = None
self.ut = 0
self.maximum_input = maximum_input
self.maximum_q_rate = maximum_q_rate
# Attributes related to time
self.number_time_steps = number_time_steps
self.time_step = 0
self.start_training = start_training
# Attributes related to the NN
self.model = None
self.model_q = None
if layers[-1] != 1:
raise Exception("The last layer should have a single neuron.")
elif len(layers) != len(activations):
raise Exception("The number of layers needs to be equal to the number of activations.")
self.layers = layers
self.activations = activations
self.learning_rate = learning_rate
self.learning_rate_cascaded = learning_rate_cascaded
self.learning_rate_0 = learning_rate
self.learning_rate_exponent_limit = learning_rate_exponent_limit
self.WB_limits = WB_limits
self.NN_initial = NN_initial
# Attributes related to the persistent excitation
self.type_PE = type_PE
self.amplitude_3211 = amplitude_3211
self.pulse_length_3211 = pulse_length_3211
# Attributes related to the training of the NN
self.dut_dWb = None
self.dut_dWb_1 = None
# Attributes related to the Adam optimizer
self.Adam_opt = None
# Attributes related to the momentum
self.momentum_dict = {}
# Attributes related to RMSprop
self.rmsprop_dict = {}
# Declaration of the storage arrays for the weights
self.store_weights = {}
self.store_weights_q = {}
# Attributes for the cascaded actor
self.cascaded_actor = cascaded_actor
self.dut_dq_ref = None
self.dq_ref_dWb = None
self.store_q = np.zeros((1, self.number_time_steps))
def build_actor_model(self):
"""
Function that creates the actor ANN architecture. It is a densely connected neural network. The user
can decide the number of layers, the number of neurons, as well as the activation function.
:return:
"""
# First Neural Network
self.model, self.store_weights = self.create_NN(self.store_weights)
# Second Neural Network for the cascaded actor
if self.cascaded_actor:
print("It is assumed that the input to the NNs is the tracking error.")
tracking_states = ['alpha', 'q']
self.indices_tracking_states = [self.selected_states.index(tracking_states[i]) for i in
range(len(tracking_states))]
self.number_tracking_states = len(tracking_states)
self.model_q, self.store_weights_q = self.create_NN(self.store_weights_q)
for count in range(len(self.model.trainable_variables) * 2):
self.momentum_dict[count] = 0
self.rmsprop_dict[count] = 0
def create_NN(self, store_weights):
"""
Creates a NN given the user input
:param store_weights: dictionary containing weights and biases
:return: model --> the created NN model
store_weights --> the dictionary that contains the updated weights and biases.
"""
# initializer = tf.keras.initializers.GlorotNormal()
initializer = tf.keras.initializers.VarianceScaling(
scale=0.01, mode='fan_in', distribution='truncated_normal', seed=self.NN_initial)
model = tf.keras.Sequential()
model.add(Flatten(input_shape=(self.number_tracking_states, 1), name='Flatten_1'))
model.add(Dense(self.layers[0], activation=self.activations[0], kernel_initializer=initializer,
name='dense_1'))
store_weights['W1'] = np.zeros((self.number_tracking_states * self.layers[0], self.number_time_steps + 1))
store_weights['W1'][:, self.time_step] = model.trainable_variables[0].numpy().flatten()
for counter, layer in enumerate(self.layers[1:]):
model.add(Dense(self.layers[counter + 1], activation=self.activations[counter + 1],
kernel_initializer=initializer, name='dense_' + str(counter + 2)))
store_weights['W' + str(counter + 2)] = np.zeros(
(self.layers[counter] * self.layers[counter + 1], self.number_time_steps + 1))
store_weights['W' + str(counter + 2)][:, self.time_step] = model.trainable_variables[
(counter + 1) * 2].numpy().flatten()
return model, store_weights
def run_actor_online(self, xt, xt_ref):
"""
Generate input to the system with the reference and real states.
:param xt: current time_step states
:param xt_ref: current time step reference states
:return: ut --> input to the system and the incremental model
"""
if self.cascaded_actor:
self.xt = xt
self.xt_ref = xt_ref
tracked_states = np.reshape(xt[self.indices_tracking_states[0], :], [-1, 1])
alphat_error = np.reshape(tracked_states - xt_ref, [-1, 1])
nn_input_alpha = tf.constant(np.array([(alphat_error)]).astype('float32'))
with tf.GradientTape() as tape:
tape.watch(self.model.trainable_variables)
q_ref = self.model(nn_input_alpha)
self.dq_ref_dWb = tape.gradient(q_ref, self.model.trainable_variables)
if self.activations[-1] == 'sigmoid':
q_ref = max(min((2 * self.maximum_q_rate * q_ref.numpy()) - self.maximum_q_rate,
np.reshape(self.maximum_q_rate, q_ref.numpy().shape)),
np.reshape(-self.maximum_q_rate, q_ref.numpy().shape))
elif self.activations[-1] == 'tanh':
q_ref = max(min((self.maximum_q_rate * q_ref.numpy()),
np.reshape(self.maximum_q_rate, q_ref.numpy().shape)),
np.reshape(-self.maximum_q_rate, q_ref.numpy().shape))
self.store_q[:, self.time_step] = q_ref
tracked_states_q = np.reshape(xt[self.indices_tracking_states[1], :], [-1, 1])
qt_error = np.reshape(tracked_states_q - np.reshape(q_ref, tracked_states_q.shape), [-1, 1])
nn_input_q = tf.constant(np.array([(qt_error)]).astype('float32'))
with tf.GradientTape() as tape:
tape.watch(nn_input_q)
ut = self.model_q(nn_input_q)
self.dut_dq_ref = tape.gradient(ut, nn_input_q)
with tf.GradientTape() as tape:
tape.watch(self.model_q.trainable_variables)
ut = self.model_q(nn_input_q)
self.dut_dWb = tape.gradient(ut, self.model_q.trainable_variables)
else:
self.xt = xt
self.xt_ref = xt_ref
tracked_states = np.reshape(xt[self.indices_tracking_states, :], [-1, 1])
xt_error = np.reshape(tracked_states - xt_ref, [-1, 1])
nn_input = tf.constant(np.array([(xt_error)]).astype('float32'))
with tf.GradientTape() as tape:
tape.watch(self.model.trainable_variables)
ut = self.model(nn_input)
self.dut_dWb = tape.gradient(ut, self.model.trainable_variables)
e0 = self.compute_persistent_excitation()
if self.activations[-1] == 'sigmoid':
self.ut = max(min((2 * self.maximum_input * ut.numpy()) - self.maximum_input + e0,
np.reshape(self.maximum_input, ut.numpy().shape)),
np.reshape(-self.maximum_input, ut.numpy().shape))
elif self.activations[-1] == 'tanh':
ut = max(min((self.maximum_input * ut.numpy()),
np.reshape(self.maximum_input, ut.numpy().shape)),
np.reshape(-self.maximum_input, ut.numpy().shape))
self.ut = max(min(ut + e0,
np.reshape(self.maximum_input, ut.shape)),
np.reshape(-self.maximum_input, ut.shape))
return self.ut
def train_actor_online(self, Jt1, dJt1_dxt1, G):
"""
Obtains the elements of the chain rule, computes the gradient and applies it to the corresponding weights and
biases.
:param Jt1: dEa/dJ
:param dJt1_dxt1: dJ/dx
:param G: dx/du, obtained from the incremental model
:return:
"""
Jt1 = Jt1.flatten()[0]
chain_rule = Jt1 * np.matmul(np.reshape(G[self.indices_tracking_states, :], [-1, 1]).T, dJt1_dxt1)
chain_rule = chain_rule.flatten()[0]
for count in range(len(self.dut_dWb)):
update = chain_rule * self.dut_dWb[count]
self.model.trainable_variables[count].assign_sub(np.reshape(self.learning_rate * update,
self.model.trainable_variables[count].shape))
# Implement WB_limits: the weights and biases can not have values whose absolute value exceeds WB_limits
self.model = self.check_WB_limits(count, self.model)
def train_actor_online_adaptive_alpha(self, Jt1, dJt1_dxt1, G, incremental_model, critic, xt_ref1):
"""
Train the actor with an adaptive alpha depending on the sign and magnitude of the network errors
:param Jt1: the evaluation of the critic with the next time step prediction of the incremental model
:param dJt1_dxt1: the gradient of the critic network with respect to the next time prediction of the incremental model
:param G: the input distribution matrix
:param incremental_model: the incremental model
:param critic: the critic
:param xt_ref1: reference states at the next time step
:return:
"""
Ec_actor_before = 0.5 * np.square(Jt1)
weight_cache = [tf.Variable(self.model.trainable_variables[i].numpy()) for i in
range(len(self.model.trainable_variables))]
network_improvement = False
n_reductions = 0
while not network_improvement and self.time_step > self.start_training:
# Train the actor
self.train_actor_online(Jt1, dJt1_dxt1, G)
# Code for checking if the actor NN error with the new weights has changed sign
ut_after = self.evaluate_actor()
xt1_est_after = incremental_model.evaluate_incremental_model(ut_after)
Jt1_after, _ = critic.evaluate_critic(xt1_est_after, xt_ref1)
Ec_actor_after = 0.5 * np.square(Jt1_after)
# Code for checking whether the learning rate of the actor should be halved
if Ec_actor_after <= Ec_actor_before or n_reductions > 10:
network_improvement = True
if np.sign(Jt1) == np.sign(Jt1_after):
self.learning_rate = min(2 * self.learning_rate,
self.learning_rate_0 * 2**self.learning_rate_exponent_limit)
else:
n_reductions += 1
self.learning_rate = max(self.learning_rate / 2,
self.learning_rate_0/2**self.learning_rate_exponent_limit)
for WB_count in range(len(self.model.trainable_variables)):
self.model.trainable_variables[WB_count].assign(weight_cache[WB_count].numpy())
def train_actor_online_adam(self, Jt1, dJt1_dxt1, G, incremental_model, critic, xt_ref1):
"""
Train the actor with the Adam optimizer .
:param Jt1: the evaluation of the critic with the next time step prediction of the incremental model
:param dJt1_dxt1: the gradient of the critic network with respect to the next time prediction of the incremental model
:param G: the input distribution matrix
:param incremental_model: the incremental model
:param critic: the critic
:param xt_ref1: reference states at the next time step
:return:
"""
if self.cascaded_actor:
# Train the actor
Jt1 = Jt1.flatten()[0]
chain_rule = Jt1 * np.matmul(np.reshape(G[self.indices_tracking_states[0], :], [-1, 1]).T, dJt1_dxt1)
chain_rule = chain_rule.flatten()[0]
if self.time_step > self.start_training and np.abs(self.ut) < 25:
for count in range(len(self.dut_dWb)):
if self.activations[-1] == 'sigmoid':
gradient = 2 * self.maximum_input * chain_rule * self.dut_dWb[count]
elif self.activations[-1] == 'tanh':
gradient = self.maximum_input * chain_rule * self.dut_dWb[count]
else:
raise Exception("There is no code for the defined output activation function.")
self.model_q, self.learning_rate_cascaded = self.compute_Adam_update(count, gradient,
self.model_q,
self.learning_rate_cascaded)
for count in range(len(self.dq_ref_dWb)):
if self.activations[-1] == 'sigmoid':
gradient = -2 * self.maximum_q_rate * chain_rule * self.dut_dq_ref * self.dq_ref_dWb[count]
elif self.activations[-1] == 'tanh':
gradient = -self.maximum_q_rate * chain_rule * self.dut_dq_ref * self.dq_ref_dWb[count]
else:
raise Exception("There is no code for the defined output activation function.")
self.model, self.learning_rate = self.compute_Adam_update(count, gradient,
self.model, self.learning_rate)
# Code for checking if the actor NN error with the new weights has changed sign
ut_after = self.evaluate_actor()
xt1_est_after = incremental_model.evaluate_incremental_model(ut_after)
Jt1_after, _ = critic.evaluate_critic(xt1_est_after, xt_ref1)
else:
# Train the actor
Jt1 = Jt1.flatten()[0]
chain_rule = Jt1 * np.matmul(np.reshape(G[self.indices_tracking_states[0], :], [-1, 1]).T, dJt1_dxt1)
chain_rule = chain_rule.flatten()[0]
if self.time_step > self.start_training:
for count in range(len(self.dut_dWb)):
gradient = chain_rule * self.dut_dWb[count]
self.model, self.learning_rate = self.compute_Adam_update(count, gradient,
self.model, self.learning_rate)
# Code for checking if the actor NN error with the new weights has changed sign
ut_after = self.evaluate_actor()
xt1_est_after = incremental_model.evaluate_incremental_model(ut_after)
Jt1_after, _ = critic.evaluate_critic(xt1_est_after, xt_ref1)
def train_actor_online_alpha_decay(self, Jt1, dJt1_dxt1, G, incremental_model, critic, xt_ref1):
"""
Train the actor with a learning rate that decay with the number of time steps
:param Jt1: the evaluation of the critic with the next time step prediction of the incremental model
:param dJt1_dxt1: the gradient of the critic network with respect to the next time prediction of the incremental model
:param G: the input distribution matrix
:param incremental_model: the incremental model
:param critic: the critic
:param xt_ref1: reference states at the next time step
:return:
"""
if self.cascaded_actor:
# Train the actor
Jt1 = Jt1.flatten()[0]
chain_rule = Jt1 * np.matmul(np.reshape(G[self.indices_tracking_states[0], :], [-1, 1]).T, dJt1_dxt1)
chain_rule = chain_rule.flatten()[0]
if self.time_step > self.start_training and np.abs(self.ut) < 25:
for count in range(len(self.dut_dWb)):
if self.activations[-1] == 'sigmoid':
gradient = 2 * self.maximum_input * chain_rule * self.dut_dWb[count]
elif self.activations[-1] == 'tanh':
gradient = self.maximum_input * chain_rule * self.dut_dWb[count]
self.model_q.trainable_variables[count].assign_sub(
np.reshape(self.learning_rate_cascaded * gradient,
self.model_q.trainable_variables[
count].shape))
# Implement WB_limits: the weights and biases can not have values whose absolute value
# exceeds WB_limits
self.model_q = self.check_WB_limits(count, self.model_q)
if count % 2 == 1:
self.model_q.trainable_variables[count].assign(
np.zeros(self.model_q.trainable_variables[count].shape))
for count in range(len(self.dq_ref_dWb)):
if self.activations[-1] == 'sigmoid':
gradient = -2 * self.maximum_q_rate * chain_rule * self.dut_dq_ref * self.dq_ref_dWb[count]
elif self.activations[-1] == 'tanh':
gradient = -self.maximum_q_rate * chain_rule * self.dut_dq_ref * self.dq_ref_dWb[count]
self.model.trainable_variables[count].assign_sub(np.reshape(self.learning_rate * gradient,
self.model.trainable_variables[
count].shape))
self.model = self.check_WB_limits(count, self.model)
if count % 2 == 1:
self.model.trainable_variables[count].assign(
np.zeros(self.model.trainable_variables[count].shape))
# Update the learning rate
self.learning_rate = max(self.learning_rate * 0.9995, 0.0001)
self.learning_rate_cascaded = max(self.learning_rate_cascaded * 0.9995, 0.0001)
# Code for checking if the actor NN error with the new weights has changed sign
ut_after = self.evaluate_actor()
# incremental_model.identify_incremental_model_LS(self.xt, ut_after)
xt1_est_after = incremental_model.evaluate_incremental_model(ut_after)
Jt1_after, _ = critic.evaluate_critic(xt1_est_after, xt_ref1)
else:
# Train the actor
Jt1 = Jt1.flatten()[0]
chain_rule = Jt1 * np.matmul(np.reshape(G[self.indices_tracking_states[0], :], [-1, 1]).T, dJt1_dxt1)
chain_rule = chain_rule.flatten()[0]
if self.time_step > self.start_training:
for count in range(len(self.dut_dWb)):
gradient = chain_rule * self.dut_dWb[count]
self.model.trainable_variables[count].assign_sub(np.reshape(self.learning_rate * gradient,
self.model.trainable_variables[
count].shape))
# Implement WB_limits: the weights and biases can not have values whose absolute value exceeds WB_limits
self.model = self.check_WB_limits(count, self.model)
if count % 2 == 1:
self.model.trainable_variables[count].assign(
np.zeros(self.model.trainable_variables[count].shape))
# Update the learning rate
self.learning_rate = max(self.learning_rate * 0.995, 0.001)
# Code for checking if the actor NN error with the new weights has changed sign
ut_after = self.evaluate_actor()
xt1_est_after = incremental_model.evaluate_incremental_model(ut_after)
Jt1_after, _ = critic.evaluate_critic(xt1_est_after, xt_ref1)
def compute_Adam_update(self, count, gradient, model, learning_rate):
"""
Computes the Adam update and applies it to the weight updates.
:param count: index of weight matrix being analysed
:param gradient: computed gradient for each of the weights
:param model: NN of the weights that are being updated
:param learning_rate: the learning rate of the model being analysed
:return: model --> return the updated model
learning_rate --> return the updated learning rate
"""
momentum = self.beta_momentum * self.momentum_dict[count] + (1 - self.beta_momentum) * gradient
self.momentum_dict[count] = momentum
momentum_corrected = momentum / (1 - np.power(self.beta_momentum, self.time_step + 1))
rmsprop = self.beta_rmsprop * self.rmsprop_dict[count] + \
(1 - self.beta_rmsprop) * np.multiply(gradient, gradient)
self.rmsprop_dict[count] = rmsprop
rmsprop_corrected = rmsprop / (1 - np.power(self.beta_rmsprop, self.time_step + 1))
update = momentum_corrected / (np.sqrt(rmsprop_corrected) + self.epsilon)
model.trainable_variables[count].assign_sub(np.reshape(learning_rate * update,
model.trainable_variables[count].shape))
# Implement WB_limits: the weights and biases can not have values whose absolute value
# exceeds WB_limits
model = self.check_WB_limits(count, model)
if count % 2 == 1:
model.trainable_variables[count].assign(np.zeros(model.trainable_variables[count].shape))
if count == len(model.trainable_variables) - 1:
learning_rate = max(learning_rate * 0.9995, 0.0001)
return model, learning_rate
def check_WB_limits(self, count, model):
"""
Check whether any of the weights and biases exceed the limit imposed (WB_limits) and saturate the values
:param count: index within the model.trainable_variables being analysed
:return:
"""
WB_variable = model.trainable_variables[count].numpy()
WB_variable[WB_variable > self.WB_limits] = self.WB_limits
WB_variable[WB_variable < -self.WB_limits] = -self.WB_limits
model.trainable_variables[count].assign(WB_variable)
return model
def compute_persistent_excitation(self, *args):
"""
Computation of the persistent excitation at each time step. Formula obtained from Pedro's thesis
:return: e0 --> PE deviation
"""
if len(args) == 1:
t = args[0] + 1
elif len(args) == 0:
t = self.time_step + 1
e0_1 = 0
e0_2 = 0
if self.type_PE == 'sinusoidal' or self.type_PE == 'combined':
e0_1 = np.sin(t) * np.cos(2 * t) * (np.sin(3 * t + np.pi / 4) + np.cos(4 * t - np.pi / 3)) * 1e-2
if self.type_PE == '3211' or self.type_PE == 'combined':
if t < 3 * self.pulse_length_3211 / 7:
e0_2 = 0.5 * self.amplitude_3211
elif t < 5 * self.pulse_length_3211 / 7:
e0_2 = -0.5 * self.amplitude_3211
elif t < 6 * self.pulse_length_3211 / 7:
e0_2 = 0.8 * self.amplitude_3211
elif t < self.pulse_length_3211:
e0_2 = -self.amplitude_3211
e0 = e0_1 + e0_2
return e0
def update_actor_attributes(self):
"""
The attributes that change with every time step are updated
:return:
"""
self.time_step += 1
self.dut_dWb_1 = self.dut_dWb
for counter in range(len(self.layers)):
self.store_weights['W' + str(counter+1)][:, self.time_step] = self.model.trainable_variables[counter*2].numpy().flatten()
if self.cascaded_actor:
for counter in range(len(self.layers)):
self.store_weights_q['W' + str(counter+1)][:, self.time_step] = self.model_q.trainable_variables[counter*2].numpy().flatten()
def evaluate_actor(self, *args):
"""
Evaluation of the actor NN given an input or attributes stored in the object
:param args: the real and reference states could be provided as input for the evaluation, or not if already stored
:return: ut --> input to the system and the incremental model
"""
if len(args) == 0:
xt = self.xt
xt_ref = self.xt_ref
elif len(args) == 1:
xt = self.xt
xt_ref = self.xt_ref
time_step = args[0]
elif len(args) == 2:
xt = args[0]
xt_ref = args[1]
else:
raise Exception("THERE SHOULD BE AN OUTPUT in the evaluate_actor function.")
if self.cascaded_actor:
tracked_states = np.reshape(xt[self.indices_tracking_states[0], :], [-1, 1])
xt_error = np.reshape(tracked_states - xt_ref, [-1, 1])
nn_input = tf.constant(np.array([(xt_error)]).astype('float32'))
q_ref_0 = self.model(nn_input)
if self.activations[-1] == 'sigmoid':
q_ref = max(min((2 * self.maximum_q_rate * q_ref_0.numpy()) - self.maximum_q_rate,
np.reshape(self.maximum_q_rate, q_ref_0.numpy().shape)),
np.reshape(-self.maximum_q_rate, q_ref_0.numpy().shape))
elif self.activations[-1] == 'tanh':
q_ref = max(min((self.maximum_q_rate * q_ref_0.numpy()),
np.reshape(self.maximum_q_rate, q_ref_0.numpy().shape)),
np.reshape(-self.maximum_q_rate, q_ref_0.numpy().shape))
tracked_states = np.reshape(xt[self.indices_tracking_states[1], :], [-1, 1])
xt_error_q = np.reshape(tracked_states - np.reshape(q_ref, tracked_states.shape), [-1, 1])
nn_input_q = tf.constant(np.array([(xt_error_q)]).astype('float32'))
ut = self.model_q(nn_input_q).numpy()
else:
tracked_states = np.reshape(xt[self.indices_tracking_states, :], [-1, 1])
xt_error = np.reshape(tracked_states - xt_ref, [-1, 1])
nn_input = tf.constant(np.array([(xt_error)]).astype('float32'))
ut = self.model(nn_input).numpy()
if len(args) == 1:
e0 = self.compute_persistent_excitation(time_step)
else:
e0 = self.compute_persistent_excitation()
if self.activations[-1] == 'sigmoid':
ut = max(min((2 * self.maximum_input * ut) - self.maximum_input + e0,
np.reshape(self.maximum_input, ut.shape)),
np.reshape(-self.maximum_input, ut.shape))
elif self.activations[-1] == 'tanh':
ut = max(min(((self.maximum_input + 10) * ut) + e0,
np.reshape(self.maximum_input, ut.shape)),
np.reshape(-self.maximum_input, ut.shape))
return ut
def restart_actor(self):
"""
Restart the actor attributes
:return:
"""
self.time_step = 0
self.xt = None
self.xt_ref = None
self.ut = 0
# Attributes related to the training of the NN
self.dut_dWb = None
self.dut_dWb_1 = None
self.learning_rate = self.learning_rate_0
# Attributes related to the Adam optimizer
self.Adam_opt = None
# Restart momentum and rmsprop
for count in range(len(self.model.trainable_variables)):
self.momentum_dict[count] = 0
self.rmsprop_dict[count] = 0
```
#### File: IHDP/Cleaned code PSO/Simulation.py
```python
import numpy as np
import matplotlib as mpl
mpl.use('TkAgg')  # or another interactive backend available on your system
"----------------------------------------------------------------------------------------------------------------------"
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2020 <NAME>"
__credits__ = []
__license__ = "MIT"
__version__ = "2.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
"----------------------------------------------------------------------------------------------------------------------"
class Simulation:
def __init__(self, iterations, selected_inputs, selected_states, selected_outputs, number_time_steps,
initial_states, reference_signals, actor, critic, system, incremental_model,
discretisation_time=0.5, tracking_states=['alpha']):
# Attributes regarding the simulation
self.iterations = iterations
self.number_time_steps = number_time_steps
self.time_step = 0
self.discretisation_time = discretisation_time
self.time = list(np.arange(0, self.number_time_steps * self.discretisation_time, self.discretisation_time))
self.iteration = 0
# Attributes regarding the system
self.selected_inputs = selected_inputs
self.selected_states = selected_states
self.selected_outputs = selected_outputs
self.initial_states = initial_states
self.tracking_states = tracking_states
self.indices_tracking_states = [self.selected_states.index(self.tracking_states[i])
for i in range(len(self.tracking_states))]
self.reference_signals = reference_signals
# Initialise all the elements of the simulation
self.actor = actor
self.critic = critic
self.system = system
self.incremental_model = incremental_model
# Cyclic parameters
self.xt = self.initial_states
self.xt_track = np.reshape(self.xt[self.indices_tracking_states, self.time_step], [-1, 1])
self.xt_ref = np.reshape(self.reference_signals[:, self.time_step], [-1, 1])
self.store_xt1 = np.zeros((len(self.selected_states), self.number_time_steps))
# Prepare system
self.system.initialise_system(self.xt, self.number_time_steps)
# Initialise the NN
self.actor.build_actor_model()
self.critic.build_critic_model()
# Initialise the error
self.RMSE = 0
def run_simulation(self):
"""
Runs the complete simulation by executing each iteration, restarting the controller components, as well as
the simulation attributes
:return:
"""
self.run_iteration()
self.compute_performance()
print(self.RMSE)
return self.RMSE
def run_iteration(self):
"""
Core of the program that runs a complete iteration, evaluating and training the controller components in the
correct order.
:return:
"""
while self.time_step < self.number_time_steps:
# Retrieve the reference signal
self.xt_ref = np.reshape(self.reference_signals[:, self.time_step], [-1, 1])
# Obtain the input from the actor
ut = self.actor.run_actor_online(self.xt, self.xt_ref)
# Run the system
xt1 = self.system.run_step(ut)
# Identify the incremental model
G = self.incremental_model.identify_incremental_model_LS(self.xt, ut)
# Run the incremental model
xt1_est = self.incremental_model.evaluate_incremental_model()
# Run and train the critic model
xt_ref1 = np.reshape(self.reference_signals[:, self.time_step + 1], [-1, 1])
# _ = self.critic.run_train_critic_online_adaptive_alpha(self.xt, self.xt_ref)
# _ = self.critic.run_train_critic_online_adam(self.xt, self.xt_ref, self.iteration)
_ = self.critic.run_train_critic_online_alpha_decay(self.xt, self.xt_ref)
# Evaluate the critic
# self.critic.train_critic_replay_adam(10, self.iteration)
Jt1, dJt1_dxt1 = self.critic.evaluate_critic(np.reshape(xt1_est, [-1, 1]), xt_ref1)
# Train the actor
# self.actor.train_actor_online_adaptive_alpha(Jt1, dJt1_dxt1, G,
# self.incremental_model, self.critic, xt_ref1)
# self.actor.train_actor_online_adam(Jt1, dJt1_dxt1, G,
# self.incremental_model, self.critic, xt_ref1)
self.actor.train_actor_online_alpha_decay(Jt1, dJt1_dxt1, G,
self.incremental_model, self.critic, xt_ref1)
# Update models attributes
self.system.update_system_attributes()
self.incremental_model.update_incremental_model_attributes()
self.critic.update_critic_attributes()
self.actor.update_actor_attributes()
self.time_step += 1
self.xt = xt1
self.xt_track = np.reshape(xt1[self.indices_tracking_states, :], [-1, 1])
def compute_performance(self):
"""
Compute the Root Mean Square error of the tracking task
:return:
"""
x = self.system.store_states[self.indices_tracking_states[0], :min(len(self.time), self.time_step)]
x_ref = self.reference_signals[0, :min(len(self.time), self.time_step)]
self.RMSE = np.sqrt(np.sum(np.power((x-x_ref), 2)/x.shape[0]))
print("The current RMSE is ", self.RMSE)
``` |
{
"source": "joigalcar3/LambdaNetworks",
"score": 3
} |
#### File: joigalcar3/LambdaNetworks/log_data.py
```python
import torch
def log_data(resnet_nn, start_train, train_loss, train_acc, end_train, total_train_time, start_test, test_loss, test_acc,
end_test, total_test_time, f, epoch, optimizer, folder_checkpoint, writer):
'''
Data logger and printer
Args:
resnet_nn: selected model
start_train: time at which the current epoch started training
train_loss: training loss
train_acc: training accuracy
end_train: time at which the current epoch stopped training
total_train_time: total time spent training
start_test: time at which the current epoch started testing
test_loss: testing loss
test_acc: testing accuracy
end_test: time at which the current epoch stopped testing
total_test_time: total time spent testing
f: logging file
epoch: current epoch
optimizer: chosen optimizer
folder_checkpoint: folder location of the checkpoints
writer: writer to Tensorboard
Returns:
total_train_time, total_test_time: cumulative training and testing time (in seconds) including this epoch
'''
# Obtain training time
elapsed_train = end_train-start_train
total_train_time += elapsed_train
# Obtain testing time
elapsed_test = end_test - start_test
total_test_time += elapsed_test
# Print train and test accuracy and train and test loss
print("train_acc = ", train_acc, "test_acc = ", test_acc)
print("train_loss = ", round(train_loss.item(), 2), "test_loss = ", round(test_loss.item(), 2))
# Store information in logs
f.write("epoch = " + str(epoch) + "\n")
f.write("\tlearning rate = " + str(optimizer.param_groups[0]['lr']) + "\n")
f.write("\ttrain acc = " + str(train_acc) + " --- test acc = " + str(test_acc) + "\n")
f.write("\ttrain epoch time = " + str(elapsed_train) + " s\n")
f.write("\ttrain loss = " + str(round(train_loss.item(), 2)) + " --- test loss = " + str(round(test_loss.item(),
2)) + "\n")
f.write("\ttest epoch time = " + str(elapsed_test) + " s\n")
f.write("\ttotal time train = " + str(total_train_time) + " s\n")
f.write("\ttotal time test = " + str(total_test_time) + " s\n")
# Save checkpoint
if epoch % 5 == 0:
path_save_epoch = folder_checkpoint + "\\model" + str(epoch) + ".pt"
torch.save({
'epoch': epoch,
'model_state_dict': resnet_nn.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'test_loss': test_loss,
'train_loss': train_loss,
'test_acc': test_acc,
'train_acc': train_acc
}, path_save_epoch)
# Write metrics to Tensorboard
writer.add_scalars("Loss", {'Train': train_loss, 'Test': test_loss}, epoch)
writer.add_scalars('Accuracy', {'Train': train_acc, 'Test': test_acc}, epoch)
return total_train_time, total_test_time
```
#### File: joigalcar3/LambdaNetworks/nntrain.py
```python
import torch
def train(train_loader, net, optimizer, criterion, device, label_smoothing, smoothing=False):
"""
Trains network for one epoch in batches.
Args:
train_loader: Data loader for training set.
net: Neural network model.
optimizer: Optimizer (e.g. SGD).
criterion: Loss function (e.g. cross-entropy loss).
device: Device on which the batches are processed (e.g. 'cuda' or 'cpu').
label_smoothing: Label-smoothing loss module, used when smoothing is True.
smoothing: Whether to use label_smoothing instead of criterion.
Returns:
Average loss over the epoch and accuracy in percent.
"""
avg_loss = 0
correct = 0
total = 0
# iterate through batches
for i, data in enumerate(train_loader):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
# Move data to target device
inputs, labels = inputs.to(device), labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
if smoothing:
loss = label_smoothing(outputs, labels)
else:
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# keep track of loss and accuracy
avg_loss += loss
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
return avg_loss / len(train_loader), 100 * correct / total
``` |
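A minimal, hypothetical driver for the `train` function above; the toy dataset and model are illustrative placeholders, not part of the repository, and `train` is assumed to be in scope:
```python
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Placeholder data: 1000 random 32x32 RGB "images" with 10 fake classes.
inputs = torch.randn(1000, 3, 32, 32)
labels = torch.randint(0, 10, (1000,))
loader = DataLoader(TensorDataset(inputs, labels), batch_size=64, shuffle=True)

net = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10)).to(device)
optimizer = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
criterion = nn.CrossEntropyLoss()

for epoch in range(5):
    # label_smoothing is only consulted when smoothing=True, so None is acceptable here.
    avg_loss, acc = train(loader, net, optimizer, criterion, device,
                          label_smoothing=None, smoothing=False)
    print(f'epoch {epoch}: loss={avg_loss.item():.3f}, acc={acc:.1f}%')
```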
{
"source": "joigno/cloudlight",
"score": 3
} |
#### File: cloudlight/algorithms/privacy.py
```python
import random
import time
import math
class LinkPrivacyModelException(Exception):
'''
classdocs
'''
pass
class SetMem:
def __init__(self, items=()):
self.__rep = set(items)
def add(self, elem):
try:
elem = int(elem)
except:
pass
self.__rep.add(elem)
def __contains__(self, elem):
try:
elem = int(elem)
except:
pass
return elem in self.__rep
class LinkPrivacyModel(object):
'''
A class to simulate link privacy attacks on network with limited node visibility.
'''
def __init__(self, graph, lookahead, debug=False):
'''
graph: graph where the attack is made.
lookahead: visibility of the nodes in the graph.
'''
self.debug = debug
self.graph = graph
self.number_of_nodes = graph.number_of_nodes()
self.number_of_edges = graph.number_of_edges()
self.lookahead = lookahead
if self.debug:
print 'INFO: Initializing LinkPrivacyModel...'
self.visible_edges_count = 0
self.controlled_nodes = set([]) # remember extra rogues nodes added
self.agents = set([])
self.false_links = [] # remember false links added
self.agents_effort = 0 # cost in new nodes created
self.bribe_effort = 0 # cost in existing nodes bribed
self.false_link_effort = 0 # cost in new links created
self.__unseen_param_key = 'unseen_degree'
self.__unseen_triangles_param_key = 'unseen_triangles'
self.__seen_triangles_param_key = 'seen_triangles'
self.__seen_triangles2_param_key = 'seen_triangles2'
self.__seen_param_key = 'seen_degree'
self.__seen2_param_key = 'seen_degree2'
self.strategy = None
self.coverage_types = None
self.__total_triangles = self.graph.total_triangles()
#self.__init_unseen_degree()
#self.graph.remove_index_parameter_cache(self.__unseen_param_key)
#self.graph.reset_edge_weights()
def __init_unseen_degree(self):
self.graph.remove_index_parameter_cache(self.__unseen_param_key)
self.graph.remove_parameter_cache(self.__unseen_param_key)
self.graph.add_parameter_cache(self.__unseen_param_key)
total_nodes = self.graph.number_of_nodes()
count = 0
if self.debug:
print 'INFO: __init_unseen_degree %d nodes of %d nodes total ...' % (count, total_nodes)
for node, degree in self.graph.get_parameter_cache_iter('degree'):
self.graph.insert_parameter_cache(self.__unseen_param_key, node, degree)
count += 1
if self.debug and count % 100000 == 0:
print 'INFO: __init_unseen_degree %d nodes of %d nodes total ... %s' % (count, total_nodes, time.ctime())
print 'INFO: __init_unseen_degree %d nodes of %d nodes total ...' % (count, total_nodes)
self.graph.index_parameter_cache( self.__unseen_param_key )
def add_agent_node(self, r_node):
if self.debug:
print 'INFO: add_rogue_node --> %s ...' % str(r_node)
if self.graph.has_node( r_node ):
raise LinkPrivacyModelException('new rogue node "%s" already in friend graph!' % str(r_node))
if r_node in self.controlled_nodes:
raise LinkPrivacyModelException('new node "%s" already in rogue node set!' % str(r_node))
self.controlled_nodes.add(r_node)
self.agents.add( r_node )
self.graph.add_node(r_node)
# if self.using_unseen_degree():
# self.graph.insert_parameter_cache(self.__unseen_param_key, r_node, 0)
#
# if self.using_unseen_triangles() and self.lookahead > 0:
# self.graph.insert_parameter_cache(self.__unseen_triangles_param_key, r_node, 0)
#
# if self.using_seen_triangles() and self.lookahead > 0:
# new_triangles_node = self.graph.get_parameter_cache('triangles', r_node)
# self.graph.insert_parameter_cache(self.__seen_triangles_param_key, r_node, new_triangles_node)
self.__visited = SetMem()
self.__add_bribed_node_recursive(r_node, self.lookahead)
self.agents_effort += 1
def using_unseen_degree(self):
return (self.strategy and ('start_greedy' == self.strategy or 'start_crawler' == self.strategy or 'start_crawler_triangles' == self.strategy or 'start_crawler_seen_triangles' == self.strategy)) or ('korolova' in self.coverage_types or 'complete_node' in self.coverage_types)
def using_unseen_triangles(self):
return (self.strategy and ('greedy_triangles' in self.strategy or 'crawler_triangles' in self.strategy))
def using_seen_triangles(self):
return ('triangle' in self.coverage_types) or (self.strategy and ('greedy_seen_triangles' in self.strategy or 'crawler_seen_triangles' in self.strategy or 'crawler_degree_hist_bin_dist_orderby_triangles' in self.strategy))
def using_seen_degree(self):
return self.using_degree_hist() or ('node' in self.coverage_types) or (self.strategy and ('greedy_seen_degree' in self.strategy or 'crawler_seen_degree' in self.strategy or 'crawler_random' in self.strategy))
def add_bribed_node(self, r_node):
if self.debug:
print 'INFO: add_bribed_node --> %s ...' % str(r_node)
#if not self.graph.has_node( r_node ):
# raise LinkPrivacyModelException('new bribed node "%s" NOT in friend graph!' % str(r_node))
self.__visited = SetMem()
self.__add_bribed_node_recursive(r_node, self.lookahead)
self.controlled_nodes.add(r_node)
self.bribe_effort += 1
def __add_bribed_node_recursive(self, r_node, lookahead):
'''
Pseudo-code bribe_node_recursive(node, lookahead):
Notes:
- visited is initialised to the empty set before the first call
- visible_edges is persistent and already contains edges marked by other, non-recursively bribed nodes.
unseen_degree[node] := 0
degree_histogram[ seen_degree2[node] ] -= 1
degree_histogram[ degree[node] ] += 1
seen_degree1[node] := -1
seen_degree2[node] := degree[node]
visited.add( node )
for each edge (node -- neighbour) do:
    if (node -- neighbour) is not in visible_edges do:
        visible_edges.add( (node -- neighbour) )
        if seen_degree1[neighbour] is not -1 do:
            degree_histogram[ seen_degree1[neighbour] ] -= 1
            degree_histogram[ seen_degree1[neighbour] + 1 ] += 1
            seen_degree1[neighbour] := seen_degree1[neighbour] + 1
            seen_degree2[neighbour] := seen_degree2[neighbour] + 1
        unseen_degree[neighbour] := unseen_degree[neighbour] - 1
if seen_triangles2[node] is not -1 then:
    if lookahead > 0 then:
        unseen_triangles[node] := 0
        seen_triangles1[node] := triangles[node]
        seen_triangles2[node] := -1
    else if lookahead = 0 then:
        seen_triangles1[node] := compute_seen_triangles(node)
        seen_triangles2[node] := compute_seen_triangles(node)
        unseen_triangles[node] := triangles[node] - compute_seen_triangles(node)
for each edge (node -- neighbour) do:
    if seen_triangles2[neighbour] is not -1 do:
        seen_triangles1[neighbour] := compute_seen_triangles(neighbour)
        seen_triangles2[neighbour] := compute_seen_triangles(neighbour)
        unseen_triangles[neighbour] := triangles[neighbour] - compute_seen_triangles(neighbour)
    if neighbour is not in visited and lookahead > 0 do:
        bribe_node_recursive( neighbour, lookahead - 1)
'''
# if unseen degree is 0, then node is fully visible...
if self.using_unseen_degree():
self.graph.update_parameter_cache(self.__unseen_param_key, r_node, 0)
# seen degree -1 means all degree is visible
if self.using_seen_degree() and self.graph.get_parameter_cache(self.__seen_param_key, r_node) != -1:
self.graph.update_parameter_cache(self.__seen_param_key, r_node, -1)
r_node_degree = self.graph.get_parameter_cache('degree', r_node)
if self.using_degree_hist():
self.update_degree_hist_fixed(r_node_degree)
previous_r_node_degree = self.graph.get_parameter_cache(self.__seen2_param_key, r_node)
#print 'ABOUT TO UPDATE R_NODE DEG HIST: PREV DEG %d DEG %d' % (previous_r_node_degree, r_node_degree)
if previous_r_node_degree != r_node_degree:
self.graph.update_parameter_cache(self.__seen2_param_key, r_node, r_node_degree)
if self.using_degree_hist():
#print 'UPDATING R_NODE DEG HIST: NODE %s PREV %d NEW %d' % (r_node, previous_r_node_degree, r_node_degree)
self.update_degree_hist(r_node, previous_r_node_degree, r_node_degree)
#print 'VISITING NODE --> %s LOOKAHEAD %d' % (r_node, lookahead)
self.__visited.add( r_node )
for neigh in self.graph.neighbors_iter( r_node, upper_bound_weigh=1 ): # visible edges neighbors with weight <= 1
self.graph.update_edge_weight( r_node, neigh, 2) # mark as visible, 2 == visible
#print 'EDGE VISITED %s -- %s' % (r_node, neigh)
self.visible_edges_count += 1
if lookahead == 0 and self.graph.get_parameter_cache(self.__seen_param_key, neigh) != -1:
if self.using_unseen_degree():
self.graph.dec_parameter_cache(self.__unseen_param_key, neigh) # decrease unseen degree
if self.using_seen_degree():
self.graph.inc_parameter_cache(self.__seen_param_key, neigh) # increase seen degree
previous_neigh_degree = self.graph.get_parameter_cache(self.__seen2_param_key, neigh)
self.graph.inc_parameter_cache(self.__seen2_param_key, neigh) # increase seen degree
if self.using_degree_hist():
#print 'UPDATING NEIGH DEG HIST: NODE %s PREV %d NEW %d' % (neigh, previous_neigh_degree, previous_neigh_degree+1)
self.update_degree_hist(neigh, previous_neigh_degree, previous_neigh_degree+1)
if self.using_seen_triangles() or self.using_unseen_triangles():
#print 'updating seen_triangles and unseen_triangles ... node = ', r_node
if self.graph.get_parameter_cache(self.__seen_triangles2_param_key, r_node) != -1:
if lookahead > 0:
#print 'lookahead > 0'
self.graph.update_parameter_cache(self.__unseen_triangles_param_key, r_node, 0)
new_triangles_node = self.graph.get_parameter_cache('triangles', r_node)
self.graph.update_parameter_cache(self.__seen_triangles_param_key, r_node, new_triangles_node)
self.graph.update_parameter_cache(self.__seen_triangles2_param_key, r_node, -1)
else:
#print 'lookahead == 0'
seen_triangles_node = self.graph.triangles_weight(r_node, 2) # 2 == visible
self.graph.update_parameter_cache(self.__seen_triangles_param_key, r_node, seen_triangles_node)
self.graph.update_parameter_cache(self.__seen_triangles2_param_key, r_node, seen_triangles_node)
unseen_triangles_node = self.graph.triangles([r_node])[0] - seen_triangles_node # 1 == not visible yet
self.graph.update_parameter_cache(self.__unseen_triangles_param_key, r_node, unseen_triangles_node)
#if unseen_triangles_node == 0:
# self.graph.update_parameter_cache(self.__seen_triangles2_param_key, r_node, -1)
for neigh in self.graph.neighbors_iter( r_node, upper_bound_weigh=2 ): # all neighbors
if lookahead == 0 and (self.using_unseen_triangles() or self.using_seen_triangles()): # update unseen_triangles and seen triangles for neighbor
if self.graph.get_parameter_cache(self.__seen_triangles2_param_key, neigh) != -1:
# update seen_triangles
seen_triangles_neigh = self.graph.triangles_weight(neigh, 2) # 2 == visible
self.graph.update_parameter_cache(self.__seen_triangles_param_key, neigh, seen_triangles_neigh)
self.graph.update_parameter_cache(self.__seen_triangles2_param_key, neigh, seen_triangles_neigh)
# update unseen_triangles, complement of seen_triangles
unseen_triangles_neigh = self.graph.get_parameter_cache('triangles', neigh) - seen_triangles_neigh # 1 == not visible yet
self.graph.update_parameter_cache(self.__unseen_triangles_param_key, neigh, unseen_triangles_neigh)
if lookahead > 0 and not neigh in self.__visited:
self.__add_bribed_node_recursive(neigh, lookahead - 1)
def add_false_link(self, src, dst):
'''
Assumes nothing about src and dst; it should work whenever src or dst is controlled
by the attacker, or any controlled node lies within lookahead range of the new link.
'''
#if not src in self.controlled_nodes and not dst in self.controlled_nodes :
# raise LinkPrivacyModelException('Error: nor node %s nor %s are controlled nodes!' % (src,dst))
self.false_links.append( (src, dst) )
# assuming edge not exists
self.graph.add_edge( src, dst )
self.graph.update_edge_weight( src, dst, 2) # mark as visible, 2 == visible
self.visible_edges_count += 1
# found controlled node at minimum distance
self.__visited = SetMem()
controlled_node_min_distance, _ = self.__found_controlled_recursive(src, src, self.lookahead, self.lookahead)
if controlled_node_min_distance:
self.__visited = SetMem()
self.__add_bribed_node_recursive(controlled_node_min_distance, self.lookahead)
self.false_link_effort += 1
def __found_controlled_recursive(self, center, r_node, lookahead, initial_lookahead):
'''
Find the closest controlled node in the neighborhood of center.
'''
if r_node in self.controlled_nodes:
return r_node, initial_lookahead - lookahead
#self.graph.update_parameter_cache('visited', r_node, 1.0)
self.__visited.add( r_node )
candidate, candidate_distance = None, 9999999
#print list(self.graph.neighbors_iter( r_node ))
for neigh in self.graph.neighbors_iter( r_node ):
if lookahead > 0 and not neigh in self.__visited:
ret, ret_dist = self.__found_controlled_recursive(center, neigh, lookahead - 1, initial_lookahead)
if ret and ret_dist < candidate_distance:
candidate = ret
candidate_distance = ret_dist
return candidate, candidate_distance
def link_coverage(self):
numerator = float(self.visible_edges_count - self.false_link_effort)
denominator = float(self.graph.number_of_edges() - self.false_link_effort)
return numerator / denominator
def node_coverage(self):
#raise LinkPrivacyModelException('Unimplemented: method node_coverage() untested!')
# seen nodes have seen degree greater than 0
numerator1 = self.graph.get_parameter_cache_inverse_count_gt(self.__seen2_param_key, 0)
numerator2 = 0
#numerator2 = self.graph.get_parameter_cache_inverse_count_lt(self.__seen_param_key, 0)
denominator = self.number_of_nodes
return (float(numerator1 + numerator2) / denominator)
def korolova_node_coverage(self):
numerator = self.graph.get_parameter_cache_inverse_count(self.__unseen_param_key, 0)
denominator = self.number_of_nodes
return float(numerator) / denominator
def triangle_coverage(self):
#numerator = float(self.__total_triangles - self.graph.total_unseen_triangles())
numerator = float(self.graph.total_seen_triangles())
denominator = float(self.__total_triangles)
#print 'numerator', numerator
#print 'denominator', denominator
if denominator == 0.0:
return 1.0
return ( numerator / denominator )
def total_effort(self):
return self.agents_effort + self.bribe_effort + self.false_link_effort
def max_unseen_degree_node(self):
node = None
for max_node, _ in self.graph.get_parameter_cache_iter(self.__unseen_param_key):
node = max_node
break
return node
def max_unseen_degree_crawler_node(self):
node = None
#print 'START max_unseen_degree_crawler_node: unseen_degree_Table: '
for next_node, unseen_degree_val in self.graph.get_parameter_cache_iter(self.__unseen_param_key):
degree_next_node = self.graph.get_parameter_cache('degree', next_node)
# check if node seen by attack, that is the unseen degree is not complete
if 0 < unseen_degree_val != degree_next_node:
#print self.graph.get_parameter_cache(self.__unseen_param_key, next_node), self.graph.get_parameter_cache('degree', next_node)
node = next_node
break
# if node == None:
# for next_node, seen_triangles in self.graph.get_parameter_cache_iter(self.__seen_triangles_param_key):
# print next_node, seen_triangles
return node
def max_unseen_triangles_node(self):
node = None
for max_node, _ in self.graph.get_parameter_cache_iter(self.__unseen_triangles_param_key):
node = max_node
break
return node
def max_seen_degree_node(self):
#for max_node, seen_degree in self.graph.get_parameter_cache_iter(self.__seen_param_key):
# print max_node, seen_degree
node = None
for max_node, _ in self.graph.get_parameter_cache_iter(self.__seen_param_key):
node = max_node
break
return node
def max_seen_triangles_node(self):
node = None
for max_node, _ in self.graph.get_parameter_cache_iter(self.__seen_triangles2_param_key):
node = max_node
break
return node
def max_unseen_triangles_crawler_node(self):
if self.debug:
print 'INFO: max_unseen_triangles_crawler_node ...'
node = None
# ordered by unseen triangles, decrementaly
for next_node, _ in self.graph.get_parameter_cache_iter(self.__unseen_triangles_param_key):
# check if node seen by attack, that is the unseen degree is not complete
#print next_node,
if self.graph.get_parameter_cache(self.__unseen_param_key, next_node) != self.graph.get_parameter_cache('degree', next_node):
node = next_node
break
return node
def max_seen_degree_crawler_node(self):
'''
Only works with connected graphs!!!
'''
if self.debug:
print 'INFO: max_seen_degree_crawler_node ...'
node = None
# ordered by seen degrees, decrementaly
for next_node, seen_degree in self.graph.get_parameter_cache_iter(self.__seen_param_key):
# check if node seen by attack, that is the unseen degree is not complete
#print next_node, seen_degree, self.graph.get_parameter_cache('degree', next_node)
if seen_degree > 0: # inside the visible graph
node = next_node
break
return node
def max_seen_triangles_crawler_node(self):
'''
Only works with connected graphs!!!
'''
if self.debug:
print 'INFO: max_seen_triangles_crawler_node ...'
node = None
# ordered by unseen triangles, decrementaly
for next_node, _ in self.graph.get_parameter_cache_iter(self.__seen_triangles2_param_key):
# check if node seen by attack, that is the unseen degree is not complete
#print next_node, seen_triangles, self.graph.get_parameter_cache('degree', next_node)
if self.graph.get_parameter_cache(self.__unseen_param_key, next_node) != self.graph.get_parameter_cache('degree', next_node):
node = next_node
break
return node
def random_crawler_node(self):
'''
Only works with connected graphs!!!
'''
if self.debug:
print 'INFO: random_crawler_node ...'
node = None
for next_node in self.graph.get_parameter_cache_inverse_gt(self.__seen_param_key, 0, random=True):
# for next_node, _ in self.graph.get_parameter_cache_inverse_count_gt(self.__seen_param_key, 0):
node = next_node
break
return node
def sorted_degrees_dec_iter(self):
if self.debug:
print 'INFO: sorted_degrees_dec ...'
for node, _ in self.graph.get_parameter_cache_iter('degree'):
yield node
def sorted_triangles_dec_iter(self):
if self.debug:
print 'INFO: sorted_triangles_dec ...'
for node, _ in self.graph.get_parameter_cache_iter('triangles'):
yield node
def sorted_degrees_dec(self):
raise LinkPrivacyModelException('NotImplemented: use sorted_degrees_dec_iter() instead!!!')
def random_node_order_iter(self):
if self.debug:
print 'INFO: random_node_order ...'
use_random = True
for node, _ in self.graph.get_parameter_cache_iter('degree', random=use_random):
yield node
def random_node_order(self):
raise LinkPrivacyModelException('NotImplemented: use random_node_order_iter() instead!!!')
def random_node(self):
node = None
for next_node in self.random_node_order_iter():
node = next_node
break
return node
def __del__(self):
for agent_node in self.agents:
self.graph.remove_node( agent_node )
for src, dst in self.false_links:
try:
self.graph.remove_edge( src, dst )
except:
pass
def initialize_histogram_degree( self):
deg_dict = {}
for _, deg in self.graph.get_parameter_cache_iter('degree'):
deg = int(deg)
if not deg in deg_dict:
deg_dict[deg] = 0
deg_dict[deg] += 1
self.N = []
for deg in range(max(deg_dict.keys())+1):
if not deg in deg_dict.keys():
self.N.append( 0 )
else:
self.N.append( deg_dict[deg] )
self.__deg_hist_max = len(self.N) - 1
# init accum degree table.
self.M = [0]*len(self.N)
self.n = [0]*len(self.N)
self.M[0] = self.graph.number_of_nodes()
self.Dmax = len(self.N) - 1
def initialize_histogram_degree_dist( self, degree_dist=None, number_of_nodes=None, max_degree = 10000 ):
'''
Initialize degree histogram, assuming connected graph and power-law degree dist.
'''
histogram_total = number_of_nodes
deg_hist = []
deg = 1
est_total = 0
while True:
#for deg in range(1, max_degree+1):
est_nodes_per_deg = round( degree_dist( deg ) * histogram_total )
if est_nodes_per_deg == 0:
break
#continue
#if est_total >= histogram_total:
# deg_hist.append( 0 )
#else:
deg_hist.append( est_nodes_per_deg )
est_total += est_nodes_per_deg
deg += 1
deg_hist += [0]*len(deg_hist)
deg_hist = [0] + deg_hist
# correct deviation from number_of_nodes
factor = float(histogram_total) / float(est_total)
deg_hist = [ round(freq * factor) for freq in deg_hist ]
error = sum(deg_hist) - histogram_total
# correct error
self.__deg_table_min = 1
self.__deg_hist_max = len(deg_hist) - 1
self.N = deg_hist
self.N[self.__deg_table_min] += error
print 'APPROX DEGREE HISTOGRAM !!! len = %d nodes = %d' % (len(self.N), self.graph.number_of_nodes())
print self.N
print
print 'REAL DEGREE HISTOGRAM !!! len = %d' % len(self.N)
deg_dict = {}
for _, deg in self.graph.get_parameter_cache_iter('degree'):
deg = int(deg)
if not deg in deg_dict:
deg_dict[deg] = 0
deg_dict[deg] += 1
self.Nreal = []
for deg in range(max(deg_dict.keys())+1):
if not deg in deg_dict.keys():
self.Nreal.append( 0 )
else:
self.Nreal.append( deg_dict[deg] )
print self.Nreal
print
# init accum degree table.
self.M = [0]*len(deg_hist)
self.n = [0]*len(deg_hist)
self.Dmax = len(deg_hist) - 1
def using_degree_hist(self):
return self.strategy and ('start_crawler_degree_hist' in self.strategy or 'start_crawler_degree_aprox_hist_bin_dist' in self.strategy)
def update_degree_hist(self, node, prev_deg, deg):
#if prev_deg > 0: # degree 0 is not worth counting.
self.M[int(prev_deg)] -= 1
#try:
self.M[int(deg)] += 1
#except:
# print 'ERROR deg = %f M = %s' % (deg, str(self.M))
def update_degree_hist_fixed(self, degree):
'''
Update the histogram for nodes whose complete degree we know for sure has been seen.
'''
try:
self.n[int(degree)] += 1
except:
print 'ERROR in histogram: degree = %d' % degree
raise Exception('ERROR in histogram: degree = %d' % degree)
def average_degree_jump_bin_distribution(self):
# 1. Let e_i = 0 for i = 1, 2, ..., Dmax.
e = [0.0] * len(self.N)
# 2. Let B_i = N_i - n_i for i = 1, 2, ..., Dmax.
B = [ N_i - n_i for N_i, n_i in zip(self.N, self.n) ]
# a "m" también lo calculo ahora pq no lo tengo.
m = [ M_i - n_i for M_i, n_i in zip(self.M, self.n) ]
# 3. Let I = Dmax
# 4. While I > 0 do the following:
I = self.Dmax
while I > 0:
# a) Let J = max {i <= I : m_i != 0}
aux_list = [(i, m_i) for i, m_i in zip(range(0,I+1),m[:I+1]) if m_i != 0]
# If m_i == 0 for every i <= I, exit the algorithm.
if len(aux_list) == 0:
break # m_i == 0 for every i <= I, exit the algorithm
else:
# maximum i with m_i != 0
J = aux_list[-1][0]
# b) Define C_J = \sum_{i=J}^{Dmax} B_i
C_J = sum(B[J:])
# c) For each i >= J, define p_i = B_i / C_J
p = [ C_J > 0.0 and B[i] / C_J or 0.0 for i in range(self.Dmax+1)]
# d) Let e_J = \sum_{i=J}^{Dmax} i*p_i - J.
e[J] = sum( [ i*p[i] for i in range(J, self.Dmax+1)] ) - J
# e) For each i >= J, update
#    B_i = B_i - p_i * m_J.
for i in range(J, self.Dmax+1):
B[i] = B[i] - p[i] * m[J]
# f) Let I = J - 1.
I = J - 1
return e
def average_degree_jump_bin_distribution_random(self):
# a "m" también lo calculo ahora pq no lo tengo.
self.m = [ M_i - n_i for M_i, n_i in zip(self.M, self.n) ]
# 1. Let A = {i <= Dmax : m_i != 0}
A = [i for i in range(self.Dmax+1) if self.m[i] != 0]
# 2. Let B_i = N_i - n_i for i = 1, 2, ..., Dmax.
B = [ N_i - n_i for N_i,n_i in zip(self.N, self.n) ]
# 3. For each j in A let p^j_{i,0} = B_i / \sum_{k=j}^{Dmax} B_k for i = j, j+1, ..., Dmax.
p_prev = dict( [ ((j,i), B[i]/sum(B[j:self.Dmax+1])) for j in A for i in range(j,self.Dmax+1) ] )
# 4. Let n = 0.
n = 0
MAXITER = len(A) * 2
eps_conv = 0.05
random.seed(MAXITER)
# 5. Repeat the following:
while True: # outer loop
# a) Let n = n + 1.
n = n + 1
while True: # inner loop
# b) Pick J in A.
J = A[random.randint(0,len(A)-1)]
# c) For each i >= J, update B_i = B_i - p^J_{i,n-1} * m_J
B = B[:J] + [ B_i - p_prev[(J,i)] * self.m[J] for B_i in B[J:] ]
# d) If B_i < 0 for some i, reset B_i = N_i - n_i for i = 1, 2, ..., Dmax and go back to b).
if len([B_i for B_i in B if B_i < 0]) == 0:
break # inner loop
else:
B = [ N_i - n_i for N_i,n_i in zip(self.N, self.n) ]
# e) For each I in A - {J}:
#    p^I_{i,n} = B_i / \sum_{k=I}^{Dmax} B_k for i = I, I+1, ..., Dmax.
p = dict( [ ((I,i), sum(B[I:self.Dmax+1])>0 and B[i]/sum(B[I:self.Dmax+1]) or 0.0) for I in A if I!=J for i in range(I,self.Dmax+1) ] )
# f) Define p^J_{i,n} = p^J_{i,n-1} for i = J, J+1, ..., Dmax.
for i in range(J,self.Dmax+1):
p[(J,i)] = p_prev[(J,i)]
# g) Go back to a) while n < MAXITER and the change || p^j_{i,n} - p^j_{i,n-1} || is still greater than eps_conv
norm = sum( [abs( p[(j,i)] - p_prev[(j,i)] ) for j in A for i in range(j,self.Dmax+1) ] )
if norm > eps_conv and n < MAXITER:
p_prev = p
continue # outer loop
break
# 6. Let e_i = 0 for i = 1, 2, ..., Dmax.
e = [0.0] * (self.Dmax+1)
# 7. For each j in A let e_j = \sum_{i=j}^{Dmax} i * p^j_{i,n} - j
for j in A:
e[j] = sum( [ i * p[(j,i)] for i in range(j,self.Dmax+1) ] ) - j
return e
def average_degree_jump(self):
B = [ N_i - M_i for N_i, M_i in zip(self.N, self.M) ]
e, j = [], 0
for j in range(self.Dmax+1):
if j > 0:
degs_right = range(j, self.Dmax+1)
sum_B_j_slice = sum(B[j:])
if sum_B_j_slice > 0:
e_j = sum( [ i * float(B_i) for i, B_i in zip(degs_right, B[j:])] )
e_j = (e_j / sum_B_j_slice) - j
else:
e_j = 0.0
e.append(e_j)
else:
e.append( 0.0 )
return e
def histogram_degree_crawler_node_bin_dist(self):
return self.histogram_degree_crawler_node(bin_distribution=True, use_random_distribution=False )
def aprox_histogram_degree_crawler_node_bin_dist(self):
return self.histogram_degree_crawler_node(bin_distribution=True, use_random_distribution=False )
def histogram_degree_crawler_node_bin_dist_orderby_triangles(self):
return self.histogram_degree_crawler_node(bin_distribution=True, use_random_distribution=False, with_max_seen_triangles=True )
def histogram_degree_crawler_node_bin_dist_rand(self):
return self.histogram_degree_crawler_node(bin_distribution=True, use_random_distribution=True )
def histogram_degree_crawler_node(self, bin_distribution=False, use_random_distribution=False, with_max_seen_triangles=False ):
'''
Get some node with high probability of having more unseen degree.
- Node degrees range from 0 to Dmax.
- For each degree i there are n_i visited and fully covered nodes
(i.e. nodes whose degree is known for sure to be i) and m_i nodes with seen
degree i whose real degree is not yet known for sure.
- Let M_i = n_i + m_i and B_i = N_i - M_i. The B_i are defined differently
for the optimizations that distribute the m_i among the bins.
'''
if not bin_distribution:
e = self.average_degree_jump()
else:
if not use_random_distribution:
e = self.average_degree_jump_bin_distribution()
else:
e = self.average_degree_jump_bin_distribution_random()
jumps_tuples = zip( e, range(len(e)) )
jumps_tuples.sort( lambda x,y : x[0] < y[0] and 1 or -1 )
n = None
for e_j, degree in jumps_tuples:
n = self.choose_node_with_degree(degree, with_max_seen_triangles)
if n:
break
#if not n:
# raise Exception('not-fully visible node not found in histogram_degree_crawler_node()!')
return n
def choose_node_with_degree(self, degree, with_max_seen_triangles):
n = None
if not with_max_seen_triangles:
for node in self.graph.get_parameter_cache_inverse(self.__seen_param_key, degree):
n = node
break
else: # use information about seen triangles
for node in self.graph.get_parameter_cache_inverse_orderby(self.__seen_param_key, degree, self.__seen_triangles_param_key):
n = node
break
return n
if __name__ == '__main__':
model = LinkPrivacyModel()
```
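For readers tracing the histogram strategies above, the following standalone sketch (not part of cloudlight) re-implements the degree-jump estimate from `average_degree_jump()` as a free function over toy tables; the names `N`, `M` and `Dmax` stand in for the model's `self.N`, `self.M` and `self.Dmax`, and the numbers are invented purely for illustration.
```python
# N[d]: nodes whose true degree is d; M[d]: nodes whose currently *seen* degree is d.
# B = N - M counts, per bin, how many nodes still have to "jump" to a higher bin.
def average_degree_jump(N, M, Dmax):
    B = [N_i - M_i for N_i, M_i in zip(N, M)]
    e = [0.0]  # e[0] is unused: degree-0 nodes never jump
    for j in range(1, Dmax + 1):
        tail = B[j:Dmax + 1]
        total = float(sum(tail))
        if total > 0:
            # expected true degree of a node currently seen with degree j, minus j
            e_j = sum(i * b for i, b in zip(range(j, Dmax + 1), tail)) / total - j
        else:
            e_j = 0.0
        e.append(e_j)
    return e

if __name__ == '__main__':
    N = [0, 4, 3, 2, 1]  # toy graph: 10 nodes, maximum degree 4
    M = [2, 5, 2, 0, 1]  # what a partial crawl might currently report
    print average_degree_jump(N, M, 4)
```
In the model itself, `histogram_degree_crawler_node()` sorts these jumps in decreasing order and picks a visible node whose seen degree promises the largest expected jump.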
#### File: cloudlight/bots/io.py
```python
from cloudlight.bots.bot import Bot
class Printer(Bot):
outfile = None
def __init__(self, outfile=None, decoratedBot=None):
self.outfile = outfile
self.decoratedBot = decoratedBot
# visit node
def visit_node(self, node, graph):
if self.outfile:
self.outfile.write( 'NODE: %s\n' % str(node) )
else:
return 'NODE: %s' % str(node)
# visit edge with attributes.
def visit_edge(self, link, graph):
if len(list(link)) == 3:
pr_attrs = ''.join([str(a) for a in link[2]])
else:
pr_attrs = ''
if self.outfile:
self.outfile.write( 'EDGE: %s -- %s ATTRS: %s\n' % (str(link[0]),str(link[1]),pr_attrs) )
else:
return 'EDGE: %s -- %s ATTRS: %s' % (str(link[0]),str(link[1]),pr_attrs)
```
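A minimal usage sketch for the Printer bot (mirroring the patterns exercised in cloudlight/tests/test_bot.py further below; the two-edge graph here is made up): without an outfile, `visit()` yields the formatted NODE:/EDGE: strings, and wrapping a traversal bot changes the visiting order.
```python
from cStringIO import StringIO
from cloudlight.classes.graph import Graph
from cloudlight.bots.io import Printer
from cloudlight.bots.traversal import BFSBot

graph = Graph()
graph.load_edgelist(StringIO('1 2\n2 3\n'), num=True)

# no outfile: collect the formatted lines and print them ourselves
print '\n'.join(Printer().visit(graph))

# decorate a traversal bot to emit nodes and edges in BFS order instead
print '\n'.join(Printer(None, BFSBot()).visit(graph))
```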
#### File: cloudlight/bots/visitor.py
```python
class Visitor(object):
'''
http://peak.telecommunity.com/DevCenter/VisitorRevisited
http://docs.python.org/tutorial/classes.html#multiple-inheritance
'''
def visit(self, elem, *args):
self.elem = elem
className = elem.__class__.__name__
meth = getattr(self, 'visit' + className, self.visit_default)
return meth(elem, *args)
def visit_default(self, elem, *args):
return None
class IdentityVisitor(Visitor):
def visit(self, elem, *args):
self.elem = elem
className = elem.__class__.__name__
meth = getattr(self, 'visit' + className, self.visit_default)
return meth(elem, *args)
def visit_default(self, elem, *args):
return elem
class GraphVisitor(Visitor):
def visitNode(self, node, *args):
print 'Visiting node: id = %s' % node.id
pass
def visittuple(self, edge, *args):
print 'Visiting edge: src.id = %s dst.id = %s' % (edge.src, edge.dst)
pass
class DegreeVisitor(Visitor):
def visitNode(self, node):
return self.graph.degree(node)
class GraphPrinterVisitor(Visitor):
def visitNode(self, node):
print 'Node: %s' % (str(node))
def visittuple(self, tup):
print 'Edge: %s -- %s' % (str(tup[0]), str(tup[1]))
def visit_default(self, elem, *args):
print 'Node (subclass): %s' % (str(elem))
if __name__ == '__main__':
# FacebookVisitor().visitFacebookNode(FacebookNode('<NAME>'))
#
# TwitterVisitor().visitTwitterNode(TwitterNode('starvejobs'))
#
# class MyVisitor(FacebookVisitor, TwitterVisitor): pass
#
# MyVisitor().visitFacebookNode(FacebookNode('<NAME>'))
#
# MyVisitor().visitTwitterNode(TwitterNode('starvejobs'))
pass
```
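As a quick illustration of the `getattr`-based dispatch used above, the sketch below (not part of cloudlight; the `Edge` class and `CountingVisitor` are hypothetical) shows how a `Visitor` subclass routes elements to `visit<ClassName>` methods and falls back to `visit_default` for everything else.
```python
from cloudlight.bots.visitor import Visitor

class Edge(object):
    def __init__(self, src, dst):
        self.src, self.dst = src, dst

class CountingVisitor(Visitor):
    def __init__(self):
        self.nodes, self.edges = 0, 0
    def visitstr(self, elem, *args):       # dispatched for plain strings (class name 'str')
        self.nodes += 1
    def visitEdge(self, elem, *args):      # dispatched for Edge instances
        self.edges += 1
    def visit_default(self, elem, *args):  # anything without a matching visit<ClassName>
        pass

v = CountingVisitor()
for elem in ['a', 'b', Edge('a', 'b'), 42]:
    v.visit(elem)
print v.nodes, v.edges  # -> 2 1
```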
#### File: cloudlight/classes/graph.py
```python
import sys, zlib, base64, time, re, cPickle, copy
from math import log
from random import shuffle
from cStringIO import StringIO
import networkx as nx
from cloudlight.utils.random_items import random_items
from cloudlight.utils.itertools_recipes import izip
from cloudlight.utils.misc import Base, identity
from cloudlight.utils.estimator import TimeEstimator
# matplot lib is an OPTIONAL dependency.
try:
import matplotlib.pyplot as plt
except:
pass
class GraphException(Exception):
pass
class IndexedTable:
def __init__(self):
self.__rep = {}
self.__rep_inv = {}
def __contains__(self, key):
return key in self.__rep
def __len__(self):
return len(self.__rep)
def __setitem__(self, key, item):
if key in self.__rep:
self.__rep_inv[self.__rep[key]].remove( key )
if len(self.__rep_inv[self.__rep[key]]) == 0:
del self.__rep_inv[self.__rep[key]]
del self.__rep[key]
self.__rep[key] = item
if not item in self.__rep_inv:
self.__rep_inv[item] = set([])
self.__rep_inv[item].add( key )
def __getitem__(self, key):
return self.__rep[key]
def keys(self):
return self.__rep.keys()
def items(self):
return self.__rep_inv.keys()
def preimage(self, item):
if item in self.__rep_inv:
for key in self.__rep_inv[item]:
yield key
def preimage_size(self, item):
return item in self.__rep_inv and len( self.__rep_inv[item] ) or 0
def iterate(self, random=False, ascending=False):
if random:
keys = list(self.__rep.keys())
shuffle(keys)
for key in keys:
yield key, self.__rep[key]
else:
items = list(self.__rep_inv.keys())
if ascending:
items.sort(cmp=None, key=None, reverse=False)
else:
items.sort(cmp=None, key=None, reverse=True)
for item in items:
for key in self.__rep_inv[item]:
yield key, item
class Graph(nx.Graph):
'''
A graph that extends the NetworkX Graph with additional analytic parameters, cached parameter indices and edge-list I/O helpers.
'''
debug = False
input_debug_links = 100000
output_debug_nodes = 10
max_links_input = 10 ** 8
max_nodes_analysis = 10 ** 8
cached_diameter = None
cache_internal_growth = {}
cache_path_lens = {} # keys are nodes, values are tuples maximum and average
__weight_one = {} # means "unvisited"
__weight_two = {True:True} # means "visited"
def __init__(self):
'''
Constructor
'''
super(Graph, self).__init__()
self.__params = {}
self.cores = None
def load_edgelist(self, fileobj, num=False, use_big_alphabet=False):
c = 0
modulo = self.input_debug_links
total = self.max_links_input
estimator = TimeEstimator(total/modulo)
if use_big_alphabet:
base = Base()
for line in fileobj:
if line.strip() == '' or line.strip()[0]=='#':
continue
s = line.split()
if num:
if use_big_alphabet:
src = base.base2num(s[0])
dst = base.base2num(s[1].strip())
else:
src = int(s[0])
dst = int(s[1].strip())
else:
src = s[0]
dst = s[1].strip()
self.add_edge(src, dst)
c += 1
if self.debug and c % self.input_debug_links == 0:
sys.stdout.write('INFO: INPUT load_edgelist(), link count = %d %s\n' % (c,time.ctime()))
if self.debug and c%modulo == 0:
print 'INFO: %d edges loaded in load_edgelist(), estimated total %d' % (c, total)
estimator.tick()
print estimator.log_line()
if c >= self.max_links_input:
break
if self.debug:
sys.stdout.write('INFO: FINISH INPUT load_edgelist(), link count = %d\n' % c)
def load_only_symmetric_edgelist(self, fileobj):
'''
Load from a set of directed links; only links present in both directions are added (as undirected edges).
'''
sep = None
graph = {}
count = 0
for line in fileobj:
if self.debug and count % self.input_debug_links == 0:
sys.stdout.write('INFO: INPUT load_only_symmetric_edgelist(), link count = %d\n' % count)
if count >= self.max_links_input:
break
count += 1
if len(line.strip()) == 0:
continue
s = line.split(sep)
n1, n2 = s[0], s[1].strip()
if not n1 in graph:
graph[n1] = []
graph[n1].append(n2)
if self.debug:
sys.stdout.write('INFO: FINISH INPUT load_only_symmetric_edgelist(), link count = %d\n' % count)
sys.stdout.write('INFO: begin second stage\n')
count = 0
for n1 in graph:
if self.debug and count % self.output_debug_nodes == 0:
sys.stdout.write('INFO: INPUT load_only_symmetric_edgelist(), node count = %d\n' % count)
count += 1
for n2 in graph[n1]:
if n2 in graph and n1 in graph[n2]:
self.add_edge(n1, n2)
graph[n2].remove(n1)
if self.debug:
sys.stdout.write('INFO: FINISH INPUT load_only_symmetric_edgelist(), node count = %d\n' % count)
def degrees_iter(self, nodes=None):
return self.generic_networkx_parameter_iter(nx.degree, 'degrees', None, nodes)
def save_edgelist(self, path, comments='#', delimiter=' ', data=False):
'''
Save the graph as an edge list with format:
<nodeA> <nodeB>
<nodeF> <nodeU>
...
path : file or string
File or filename to write. Filenames ending in .gz or .bz2 will be compressed.
comments : string, optional
The character used to indicate the start of a comment.
delimiter : string, optional
The string used to separate values. The default is whitespace.
data : bool, optional
If True, write a string representation of the edge data.
'''
try:
out = open(path, 'w')
except:
out = path
modulo = 100000
total = self.number_of_edges()
estimator = TimeEstimator(total/modulo)
count = 1
for src, dst in self.edges_iter():
out.write('%s %s\n' % (str(src),str(dst)) )
if self.debug and count%modulo == 0:
print 'INFO: %d edges dumped in save_edgelist(), total %d' % (count, total)
estimator.tick()
print estimator.log_line()
count += 1
#out.close()
#nx.write_edgelist(self, path, comments, delimiter)
def degrees(self, nodes=None):
return self.generic_networkx_parameter(nx.degree, 'degrees', None, nodes)
def clustering_indices_iter(self, nodes=None):
return self.generic_networkx_parameter_iter(nx.clustering, 'clustering_indices', None, nodes)
def clustering_indices(self, nodes=None):
return self.generic_networkx_parameter(nx.clustering, 'clustering_indices', None, nodes)
def average_neighbor_degrees_iter(self, nodes=None):
return self.generic_networkx_parameter_iter(self.__average_neighbor_degrees_func, 'average_neighbor_degrees', None, nodes)
def average_neighbor_degrees(self, nodes=None):
return self.generic_networkx_parameter(self.__average_neighbor_degrees_func, 'average_neighbor_degrees', None, nodes)
def eccentricities_iter(self, nodes=None):
return self.generic_networkx_parameter_iter(self.__eccentricity_func, 'eccentricities', None, nodes)
def eccentricities(self, nodes=None):
return self.generic_networkx_parameter(self.__eccentricity_func, 'eccentricities', None, nodes)
def average_path_lengths_iter(self, nodes=None):
return self.generic_networkx_parameter_iter(self.__average_path_length_func, 'average_path_lengths', None, nodes)
def average_path_lengths(self, nodes=None):
return self.generic_networkx_parameter(self.__average_path_length_func, 'average_path_lengths', None, nodes)
def max_internal_scaling_iter(self, nodes=None):
return self.generic_networkx_parameter_iter(self.__max_internal_scaling_func, 'max_internal_scaling_iter', None, nodes)
def max_internal_scaling(self, nodes=None):
return self.generic_networkx_parameter(self.__max_internal_scaling_func, 'max_internal_scaling', None, nodes)
def max_connectivity_iter(self, nodes=None):
return self.generic_networkx_parameter_iter(self.__max_connectivity_func, 'max_connectivity_iter', None, nodes)
def max_connectivity(self, nodes=None):
return self.generic_networkx_parameter(self.__max_connectivity_func, 'max_connectivity', None, nodes)
def internal_scaling_iter(self, nodes=None):
return self.generic_networkx_parameter_iter(self.__internal_scaling_func, 'internal_scaling_iter', None, nodes)
def internal_scaling(self, nodes=None):
return self.generic_networkx_parameter(self.__internal_scaling_func, 'internal_scaling', None, nodes)
def connectivity_iter(self, nodes=None):
return self.generic_networkx_parameter_iter(self.__connectivity_func, 'connectivity_iter', None, nodes)
def connectivity(self, nodes=None):
return self.generic_networkx_parameter(self.__connectivity_func, 'connectivity', None, nodes)
def kcoreness_iter(self, nodes=None):
if not self.cores:
self.cores = nx.find_cores(self)
return self.generic_networkx_parameter_iter(self.__kcoreness_func, 'kcoreness', self.cores, nodes)
def kcoreness(self, nodes=None):
cores = nx.find_cores(self)
return self.generic_networkx_parameter(self.__kcoreness_func, 'kcoreness', cores, nodes)
def triangles_iter(self, nodes=None):
return self.generic_networkx_parameter_iter(self.__triangles, 'triangles', None, nodes)
def triangles(self, nodes=None):
return self.generic_networkx_parameter(self.__triangles, 'triangles', None, nodes)
def generic_networkx_parameter_iter(self, nx_func, name, pre_dic=None, nodes=None):
c = 0
for node in nodes or self.nodes_iter():
if pre_dic:
val = nx_func(self, node, pre_dic)
else:
val = nx_func(self, node)
yield val
c += 1
#if self.debug and c % self.output_debug_nodes == 0:
# sys.stdout.write('INFO: OUTPUT %s(), node count = %d\n' % (name, c))
if c >= self.max_nodes_analysis:
break
if self.debug and c % self.output_debug_nodes == 0:
sys.stdout.write('INFO: FINISH OUTPUT %s(), node count = %d\n' % (name, c))
def generic_networkx_parameter(self, nx_func, name, pre_dic=None, nodes=None):
ret_value = []
c = 0
for node in nodes or self.nodes_iter():
if pre_dic:
val = nx_func(self, node, pre_dic)
else:
val = nx_func(self, node)
ret_value.append(val)
c += 1
if self.debug and c % self.output_debug_nodes == 0:
sys.stdout.write('INFO: OUTPUT %s(), node count = %d\n' % (name, c))
if c >= self.max_nodes_analysis:
break
if self.debug and c % self.output_debug_nodes == 0:
sys.stdout.write('INFO: FINISH OUTPUT %s(), node count = %d\n' % (name, c))
return ret_value
def __eccentricity_func(self, G, node):
if not node in self.cache_path_lens:
self.__update_path_lens_cache(G, node)
elif self.debug:
print 'INFO: using cached path lens for node %s ' % str(node)
return self.cache_path_lens[node][0]
def __update_path_lens_cache(self, G, node):
spaths = nx.single_source_shortest_path_length(G, node)
path_max, path_len = 0, 0
for _,l in spaths.iteritems():
if l > path_max:
path_max = l
path_len += l
self.cache_path_lens[node] = path_max, float(path_len)/len(spaths)
def __average_path_length_func(self, G, node):
if not node in self.cache_path_lens:
self.__update_path_lens_cache(G, node)
elif self.debug:
print 'INFO: using cached path lens for node %s ' % str(node)
return self.cache_path_lens[node][1]
def average_neighbor_degree(self, node):
return self.__average_neighbor_degrees_func(self, node)
def __average_neighbor_degrees_func(self, G, node):
neighbors = G.neighbors(node)
if len(neighbors) == 0:
return 0.0
else:
return float (sum (self.degrees_iter(neighbors))) / len(neighbors)
def __max_internal_scaling_func(self, G, node):
return max( G.internal_scaling_dimension(node) )
def __max_connectivity_func(self, G, node):
return max( G.connectivity_dimension(node) )
def __internal_scaling_func(self, G, node):
return G.internal_scaling_dimension(node)
def __connectivity_func(self, G, node):
return G.connectivity_dimension(node)
def __kcoreness_func(self, G, node, pre_dic):
return pre_dic[node]
def __triangles(self, big_graph, node):
#if 'triangles' in self.__params and len(self.__params['triangles']) == self.number_of_nodes():
# return self.__params['triangles'][node]
deg = self.degree(node)
clust = nx.clustering(big_graph, node)
return int( clust * deg * (deg-1) / 2 )
def show(self, mode=None):
if not mode:
nx.draw(self)
elif mode == 'random':
nx.draw_random(self)
elif mode == 'circular':
nx.draw_circular(self)
elif mode == 'spectral':
nx.draw_spectral(self)
plt.show()
def random_edges(self, k=1, data=False):
"""Choose k random edges uniformly from graph.
For undirected graphs this might produce duplicates
since each edge is considered twice, once for each
representation u-v and v-u. Duplicates can be removed by
using set(random_edges()).
Extracted from Eric Hagberg post:
http://groups.google.com/group/networkx-discuss/browse_thread/thread/a87dd6ca7063a778?pli=1
"""
return random_items(self.edges(data=data), k=k)
def random_nodes(self, k=1):
"""Choose k random nodes uniformly from graph.
"""
ret = []
use_random = True
for node, _ in self.get_parameter_cache_iter('degree', random=use_random):
ret.append( node )
return ret[:k]
#return random_items(self.nodes_iter(), k=k)
def lookahead_edges(self, nbunch, lookahead):
nbunch = [n for n in nbunch if n in self.nodes()]
edge_bunch_list = [self.edges(nbunch)]
for _ in range(lookahead - 1):
new_nodes = [d for _, d in edge_bunch_list[-1]]
edge_bunch_list.append(self.edges(new_nodes))
ret = set([])
for edge_set in edge_bunch_list:
ret = ret.union(edge_set)
return ret
def diameter(self):
if self.debug:
print 'INFO: computing graph diameter...'
dia = nx.diameter(self)
if self.debug:
print 'INFO: done computing graph diameter.'
return dia
def internal_scaling_dimension_iter(self, node, diameter=100):
return self.__dimension_iter(node, self.internal_scaling_growth_iter, diameter)
def internal_scaling_dimension(self, node, diameter=100):
return self.__dimension(node, self.internal_scaling_growth, diameter)
def __dimension_iter(self, node, growth_func, diameter=None):
if not diameter:
if not self.cached_diameter:
self.cached_diameter = self.diameter()
diameter = self.cached_diameter
growth = growth_func( node, diameter )
for g, l in izip(growth,range(diameter)):
if g == 0 or l <= 1:
yield -1.0
else:
yield log(g)/log(l)
def __dimension(self, node, growth_func, diameter=None):
if not diameter:
if not self.cached_diameter:
self.cached_diameter = self.diameter()
diameter = self.cached_diameter
growth = growth_func( node, diameter )
ret = []
for g, l in izip(growth,range(diameter)):
if g == 0 or l <= 1:
ret.append( -1.0 )
else:
ret.append( log(g)/log(l) )
return ret
def internal_scaling_growth_iter(self, node, diameter=None):
nodes = set([node])
visited_nodes = set([])
yield 1
if not diameter:
if not self.cached_diameter:
self.cached_diameter = self.diameter()
diameter = self.cached_diameter
prev = None
if diameter:
diameter -= 1
for _ in range( diameter ):
new_edges = self.edges(nodes)
visited_nodes.update( nodes ) # union() returns a new set; update() accumulates the visited nodes
new_nodes = set([])
for v, w in new_edges:
if not w in visited_nodes:
new_nodes.add( w )
if not v in visited_nodes:
new_nodes.add( v )
if not prev:
prev = len(visited_nodes) + len(new_nodes)
elif prev == len(visited_nodes) + len(new_nodes):
break
else:
prev = len(visited_nodes) + len(new_nodes)
if self.debug:
#print 'internal scaling growth (iter) : %d' % (len(visited_nodes) + len(new_nodes) )
pass
yield len(visited_nodes) + len(new_nodes)
nodes = new_nodes
def internal_scaling_growth(self, node, diameter=None):
nodes = set([node])
visited_nodes = set([])
ret = []
ret.append( 1 )
if not diameter:
if not self.cached_diameter:
self.cached_diameter = self.diameter()
diameter = self.cached_diameter
if (node,diameter) in self.cache_internal_growth:
if self.debug:
print 'INFO: using cached internal_growth'
return self.cache_internal_growth[(node,diameter)]
prev = None
for _ in range( diameter - 1 ):
new_edges = self.edges(nodes)
visited_nodes.update( nodes ) # union() returns a new set; update() accumulates the visited nodes
new_nodes = set([])
for v, w in new_edges:
if not w in visited_nodes:
new_nodes.add( w )
if not v in visited_nodes:
new_nodes.add( v )
if not prev:
prev = len(visited_nodes) + len(new_nodes)
elif prev == len(visited_nodes) + len(new_nodes):
break
else:
prev = len(visited_nodes) + len(new_nodes)
if self.debug:
#print 'internal scaling growth : %d' % (len(visited_nodes) + len(new_nodes) )
pass
ret.append( len(visited_nodes) + len(new_nodes) )
nodes = new_nodes
if self.debug:
print 'INFO: caching internal growth for node %s and diameter %d' % (str(node),diameter)
self.cache_internal_growth[(node,diameter)] = ret
return ret
def connectivity_dimension_iter(self, node, diameter=100):
return self.__dimension_iter(node, self.connectivity_growth_iter, diameter)
def connectivity_dimension(self, node, diameter=100):
return self.__dimension(node, self.connectivity_growth, diameter)
def connectivity_growth_iter(self, node, diameter=None):
internal_growth = self.internal_scaling_growth_iter(node, diameter)
prev = None
for i in internal_growth:
if not prev:
prev = i
else:
yield i - prev
yield 0
def connectivity_growth(self, node, diameter=None):
internal_growth = self.internal_scaling_growth(node, diameter)
prev = None
ret = []
for i in internal_growth:
if not prev:
prev = i
else:
ret.append( i - prev )
ret.append( 0 )
return ret
def compressed_by_degree_graph(self, use_big_alphabet=True):
orig_max_nodes_analysis = self.max_nodes_analysis
self.max_nodes_analysis = self.number_of_nodes()
encoding = zip(self.nodes_iter(), self.degrees_iter())
encoding.sort( lambda x, y: cmp(x[1],y[1]) )
encoding.reverse()
if use_big_alphabet:
base = Base()
base_enc = base.num2base
else:
base_enc = identity
encoding = dict( zip( [t[0] for t in encoding], range(len(encoding)) ) )
new_graph = Graph()
if self.debug:
print 'encoding nodes...'
for node in self.nodes_iter():
new_graph.add_node( base_enc( encoding[node] ) )
if self.debug:
print 'encoding edges...'
for v, w in self.edges_iter():
new_graph.add_edge( base_enc( encoding[v] ), base_enc( encoding[w] ), )
self.max_nodes_analysis = orig_max_nodes_analysis
return new_graph
def save_compressed_graph(self, outfile, use_big_alphabet=True):
g2 = self.compressed_by_degree_graph(use_big_alphabet)
output = StringIO()
g2.save_edgelist(output)
cont = output.getvalue()
output.close()
comp_cont = zlib.compress( cont, 9 )
enc_comp_cont = base64.b64encode( comp_cont )
if outfile == str(outfile):
outfile = open(outfile,'w')
outfile.write( "compressed_graph = '''\n%s\n'''" % enc_comp_cont )
def load_compressed_graph(self, module, use_big_alphabet=True, has_num=True):
enc_comp_cont = module.compressed_graph.strip()
comp_cont = base64.b64decode( enc_comp_cont )
cont = zlib.decompress(comp_cont)
self.load_edgelist(StringIO(cont), has_num, use_big_alphabet)
def bigger_component(self):
#if nx.is_connected(self):
# return self
graph = Graph()
graph.add_edges_from(nx.connected_component_subgraphs(self)[0].edges_iter())
return graph
def add_bigger_component_to(self, graph):
#if nx.is_connected(self):
# return self
print 'nx.connected_components(self) ...'
nx.connected_components(self)
graph.add_edges_from(self.edges_iter(nx.connected_components(self)[0]))
return graph
def connected_components(self):
return nx.connected_components(self)
def save_bigger_component(self, filename):
graph = self.bigger_component()
graph.save_edgelist(filename)
# Indexed parameters methods.
def check_parameter_name(self, parameter_name):
# check that the parameter name is ok
findings = re.findall('[a-z_]+[a-z_0-9]*', parameter_name)
if len(findings)==0 or len(findings[0]) != len(parameter_name):
raise GraphException('Error: bad parameter name, only [a-z_]+ allowed!')
def add_parameter_cache(self, parameter_name):
self.check_parameter_name(parameter_name)
if not parameter_name in self.__params:
self.__params[parameter_name] = IndexedTable()
def has_parameter_cache(self, parameter_name):
return parameter_name in self.__params
def remove_parameter_cache(self, parameter_name):
self.check_parameter_name(parameter_name)
del self.__params[parameter_name]
def index_parameter_cache(self, parameter_name):
self.check_parameter_name(parameter_name)
pass
def remove_index_parameter_cache(self, parameter_name):
self.check_parameter_name(parameter_name)
pass
def check_float(self, value):
try:
return float(value)
except:
raise GraphException('Error: value %s is not a floating-point or equivalent number!' % str(value))
def insert_parameter_cache(self, param_name, node, value):
value = self.check_float(value)
self.__params[param_name][node] = value
def update_parameter_cache(self, param_name, node, value):
'''
'''
#self.check_parameter_name(param_name)
#self.check_node(node)
value = self.check_float(value)
if not self.has_node(node):
raise GraphException('Error: node %s not in BigGraph instance!' % str(node) )
self.__params[param_name][node] = value
def dec_parameter_cache(self, param_name, node):
old_val = self.__params[param_name][node]
self.__params[param_name][node] = (old_val - 1 > 0) and (old_val - 1) or 0
def inc_parameter_cache(self, param_name, node):
old_val = self.__params[param_name][node]
self.__params[param_name][node] = old_val + 1
def get_parameter_cache(self, param_name, node):
if node in self.__params[param_name]:
return self.__params[param_name][node]
else:
return None
def get_max_value_parameter_cache(self, param_name):
raise GraphException('Not implemented!')
def get_sum_value_parameter_cache(self, param_name):
print 'summing table ...'
ret = 0
indexed_table = self.__params[param_name]
for item in indexed_table.items():
#print 'item', item
#print 'indexed_table.preimage_size(item)', indexed_table.preimage_size(item)
ret += indexed_table.preimage_size(item) * item
#print 'unseen_triangles', ret / 3
#print 'total_triangles', self.total_triangles()
#print '-'*50
print 'end summing table ...'
return ret
def get_parameter_cache_inverse(self, param_name, value):
for node in self.__params[param_name].preimage(value):
yield node
def get_parameter_cache_inverse_between(self, param_name, lower, upper):
raise GraphException('Not implemented!')
def get_parameter_cache_inverse_count(self, param_name, value):
return self.__params[param_name].preimage_size(value)
def get_parameter_cache_iter(self, param_name, random=False, ascending=False):
self.check_parameter_name(param_name)
for node, value in self.__params[param_name].iterate(random, ascending):
yield node, value
def create_indices(self):
pass
def create_index_degree(self):
self.index_parameter_generic('degree', self.degrees_iter)
def create_index_unseen_degree(self):
self.index_parameter_generic('unseen_degree', self.degrees_iter)
def remove_degree_cache(self):
self.remove_parameter_cache('degree')
def create_index_clustering(self):
self.index_parameter_generic('clustering', self.clustering_indices_iter)
def create_index_triangles(self):
self.index_parameter_generic('triangles', self.triangles_iter)
def create_index_knn(self):
self.index_parameter_generic('knn', self.average_neighbor_degrees_iter)
def create_index_kcores(self):
self.index_parameter_generic('shell', self.kcoreness_iter)
def index_parameter_generic(self, param_name, param_iter_func):
self.add_parameter_cache(param_name)
modulo = 1000
estimator = TimeEstimator(self.number_of_nodes()/modulo)
count = 1
for node, value in izip( self.nodes_iter(), param_iter_func(),):
if self.debug and count%modulo == 0:
print 'INFO: %d nodes processed in index_parameter_generic, param_name %s' % (count, param_name)
estimator.tick()
print estimator.log_line()
self.insert_parameter_cache(param_name, node, value)
count +=1
self.index_parameter_cache(param_name)
def initialize_parameter(self, param_name, value=0.0):
for node in self.nodes_iter():
self.insert_parameter_cache(param_name, node, value)
def index_parameter_from_degree(self, param_name):
try:
del self.__params[param_name]
except:
pass
self.add_parameter_cache(param_name)
self.__params[param_name] = copy.deepcopy( self.__params['degree'] )
def index_parameter_from_parameter(self, param_src, param_dst, ):
try:
del self.__params[param_dst]
except:
pass
self.add_parameter_cache(param_dst)
self.__params[param_dst] = copy.deepcopy( self.__params[param_src] )
def index_all_parameters(self):
self.create_index_degree()
if self.debug:
print 'done'
print 'creating knn index ...'
self.create_index_knn()
if self.debug:
print 'done'
print 'creating clustering index ...'
self.create_index_clustering()
if self.debug:
print 'done'
print 'creating kcores index ...'
self.create_index_kcores()
if self.debug:
print 'done'
print 'creating unseen degree index ...'
self.create_index_unseen_degree()
def load_index_and_pickle(self, fileobj, num=False, outfilename=None):
if self.debug:
print 'loading graph ...'
self.load_edgelist(fileobj, num)
self.create_indices()
if self.debug:
print 'done'
print 'creating degree index ...'
self.create_index_degree()
if self.debug:
print 'done'
if self.debug:
print 'dumping Graph pickle ...'
self.pickle_dump(outfilename)
if self.debug:
print 'done.'
def pickle_dump(self, outfilename):
if str(outfilename) == outfilename:
output = open(outfilename, 'wb')
else:
output = outfilename
cPickle.dump(self, output, -1)
# methods related to edge weight
def update_edge_weight(self, src, dst, weight):
if weight == 1:
self[src][dst] = self.__weight_one
self[dst][src] = self.__weight_one
elif weight == 2:
self[src][dst] = self.__weight_two
self[dst][src] = self.__weight_two
else:
raise GraphException('Only weights 1 or 2 are supported for edges!')
def edge_weight(self, node1, node2):
if self[node1][node2] == self.__weight_one:
return 1
elif self[node1][node2] == self.__weight_two:
return 2
else:
raise GraphException('Only weights 1 or 2 are supported for edges!')
def reset_edge_weights(self):
for src, dst in self.edges_iter():
self[src][dst] = self.__weight_one
def total_triangles(self):
if self.has_parameter_cache('triangles'):
return self.get_sum_value_parameter_cache('triangles') / 3.0
else:
raise GraphException('triangles not indexed!')
# ret = 0
# for n1, n2 in self.edges_iter():
# for n3 in self.neighbors_iter(n2):
# for n4 in self.neighbors_iter(n3):
# if n4 == n1:
# ret += 1
#
# return ret / 3
def total_unseen_triangles(self):
if self.has_parameter_cache('unseen_triangles'):
return self.get_sum_value_parameter_cache('unseen_triangles') / 3.0
else:
raise GraphException('unseen_triangles not indexed!')
def total_seen_triangles(self):
if self.has_parameter_cache('seen_triangles'):
return self.get_sum_value_parameter_cache('seen_triangles') / 3.0
else:
raise GraphException('seen_triangles not indexed!')
def total_triangles_weight(self, weight=1):
if weight==1:
weight = self.__weight_one
elif weight==2:
weight = self.__weight_two
else:
raise GraphException('Only weights 1 or 2 are supported for edges!')
ret = 0
for n1, n2 in self.edges_iter():
if self[n1][n2] == weight:
for n3 in self.neighbors_iter(n2):
if self[n2][n3] == weight:
for n4 in self.neighbors_iter(n3):
if self[n3][n4] == weight:
if n4 == n1:
ret += 1
return ret / 3
def triangles_weight(self, node, weight=1):
if weight==1:
weight = self.__weight_one
elif weight==2:
weight = self.__weight_two
else:
raise GraphException('Only weights 1 or 2 are supported for edges!')
if not node in self:
return 0
ret = 0
n1 = node
for n2 in self.neighbors_iter(n1):
if self[n1][n2] == weight:
for n3 in self.neighbors_iter(n2):
if self[n2][n3] == weight:
for n4 in self.neighbors_iter(n3):
if self[n3][n4] == weight:
if n4 == n1:
ret += 1
return ret / 2
def add_random_component(self, graph):
queue = set([self.random_nodes()[0]])
visited = set([])
while len(queue) > 0:
node = queue.pop()
visited.add( node )
for neigh in self.neighbors_iter(node):
if not neigh in visited:
queue.add( neigh )
if not graph.has_edge(node, neigh):
graph.add_edge(node, neigh)
return graph
def save_snowball_edgelist(self, filename):
out = open(filename, 'w')
modulo = 10000
total = self.number_of_edges()
estimator = TimeEstimator(total/modulo)
count = 0
queue = [self.random_nodes()[0]]
visited = set([])
while len(queue) > 0:
# impl: node = queue.pop()
node = queue[0]
queue = queue[1:]
visited.add( node )
for neigh in self.neighbors_iter(node):
if not neigh in visited:
queue.append( neigh )
out.write('%s %s\n' % (str(node),str(neigh)))
count += 1
if self.debug and count%modulo == 0:
print 'INFO: %d edges dumped in save_snowball_edgelist(), total %d' % (count, total)
estimator.tick()
print estimator.log_line()
out.flush()
if __name__ == '__main__':
graph = Graph()
graph.run_tests()
```
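The parameter cache is the backbone of the privacy-attack strategies earlier in this document; the sketch below (not part of cloudlight, with a made-up four-node edge list) shows the intended flow on a small Graph: build an index, iterate it in decreasing order, query the preimage of a value, and clone it into a derived cache.
```python
from cStringIO import StringIO
from cloudlight.classes.graph import Graph

g = Graph()
g.load_edgelist(StringIO('a b\nb c\nc a\nc d\n'))
g.create_index_degree()  # fills the 'degree' parameter cache

# iterate nodes by decreasing cached degree ('c', with degree 3.0, comes first)
for node, degree in g.get_parameter_cache_iter('degree'):
    print node, degree

# nodes whose cached degree equals 2
print list(g.get_parameter_cache_inverse('degree', 2.0))

# derived cache of the kind used by the privacy model
g.index_parameter_from_degree('unseen_degree')
print g.get_parameter_cache('unseen_degree', 'c')  # -> 3.0
```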
#### File: cloudlight/nodes/web.py
```python
import re, urllib2
class UrlNode(object):
'''
classdocs
'''
url_regex = '((https?|ftp|gopher|telnet|file|notes|ms-help):((//)|(\\\\))+[\w\d:#@%/;$()~_?\+-=\\\.&]*)'
def getUrl(self):
return self.__url
def setUrl(self, url):
matches = re.findall( self.url_regex, url)
if len(matches) == 0:
raise Exception('ERROR: malformed url attribute of node instance of class UrlNode!')
self.__url = url
def delUrl(self):
del self.__url
def __init__(self, url):
'''
Constructor
'''
self.url = url
url = property(getUrl, setUrl, delUrl, "Url's Docstring")
def protocol(self):
return self.url.split(':')[0]
def domain(self):
if '//' in self.url:
return self.url.split('//')[1].split('/')[0]
else:
return self.url.split('\\\\')[1].split('\\')[0]
def resource(self):
if '//' in self.url:
res = self.url.split('//')[1].split('/')
return '/' + '/'.join(res[1:])
else:
res = self.url.split('\\\\')[1].split('\\')
return '\\' + '\\'.join(res[1:])
def data(self):
return urllib2.urlopen(self.url).read()
if __name__ == '__main__':
myurl = UrlNode('http://tuplets.org')
print myurl.protocol()
print myurl.domain()
print myurl.resource()
myurl = UrlNode('http://www.search.com/search?q=zarasa%20bla')
print myurl.protocol()
print myurl.domain()
print myurl.resource()
```
#### File: cloudlight/tests/test_bot.py
```python
import unittest
from cStringIO import StringIO
from cloudlight.classes.graph import Graph
from cloudlight.tests.data import example_txt
from cloudlight.bots.io import Printer
from cloudlight.bots.traversal import DFSBot, BFSBot
class Test(unittest.TestCase):
def setUp(self):
self.graph = Graph()
self.graph.debug = False
self.graph.input_debug_links = 1
self.graph.output_debug_nodes = 1
self.graph.load_edgelist(StringIO(example_txt), num=True)
def tearDown(self):
pass
def testPrinter(self):
printer = Printer()
result = '\n'.join( printer.visit(self.graph) )
self.assertEqual( result, 'NODE: 1\nNODE: 2\nNODE: 3\nNODE: 4\nNODE: 5\nNODE: 6\nNODE: 7\nNODE: 8\nNODE: 9\nNODE: 666\nEDGE: 1 -- 2 ATTRS: \nEDGE: 3 -- 5 ATTRS: \nEDGE: 4 -- 5 ATTRS: \nEDGE: 6 -- 7 ATTRS: \nEDGE: 7 -- 8 ATTRS: \nEDGE: 7 -- 9 ATTRS: \nEDGE: 8 -- 9 ATTRS: \nEDGE: 9 -- 666 ATTRS: ' )
def testDFSBot(self):
bot = Printer(None, DFSBot())
result = '\n'.join( bot.visit(self.graph) )
self.assertEqual( result, 'NODE: 666\nEDGE: 666 -- 9 ATTRS: \nNODE: 9\nEDGE: 9 -- 8 ATTRS: \nEDGE: 9 -- 666 ATTRS: \nEDGE: 9 -- 7 ATTRS: \nNODE: 7\nEDGE: 7 -- 8 ATTRS: \nEDGE: 7 -- 9 ATTRS: \nEDGE: 7 -- 6 ATTRS: \nNODE: 6\nEDGE: 6 -- 7 ATTRS: \nNODE: 8\nEDGE: 8 -- 9 ATTRS: \nEDGE: 8 -- 7 ATTRS: \nNODE: 8\nEDGE: 8 -- 9 ATTRS: \nEDGE: 8 -- 7 ATTRS: ' )
def testBFSBot(self):
bot = Printer(None, BFSBot())
result = '\n'.join( bot.visit(self.graph) )
self.assertEqual( result, 'NODE: 666\nEDGE: 666 -- 9 ATTRS: \nNODE: 9\nEDGE: 9 -- 8 ATTRS: \nEDGE: 9 -- 666 ATTRS: \nEDGE: 9 -- 7 ATTRS: \nNODE: 8\nEDGE: 8 -- 9 ATTRS: \nEDGE: 8 -- 7 ATTRS: \nNODE: 7\nEDGE: 7 -- 8 ATTRS: \nEDGE: 7 -- 9 ATTRS: \nEDGE: 7 -- 6 ATTRS: \nNODE: 7\nEDGE: 7 -- 8 ATTRS: \nEDGE: 7 -- 9 ATTRS: \nEDGE: 7 -- 6 ATTRS: \nNODE: 6\nEDGE: 6 -- 7 ATTRS: \nNODE: 6\nEDGE: 6 -- 7 ATTRS: ' )
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
```
#### File: cloudlight/tests/test_graph_stress.py
```python
import unittest
from cStringIO import StringIO
from cloudlight.classes.graph import Graph
import cloudlight.tests.data_enc1
import cloudlight.tests.data_enc2
class Test(unittest.TestCase):
def setUp(self):
self.graph = Graph()
self.graph.debug = False
self.graph.max_links_input = 100000
self.graph.input_debug_links = 200000
self.graph.output_debug_nodes = 10000
def testLoadCompressed1(self):
use_big_alphabet = False
self.graph.load_compressed_graph(cloudlight.tests.data_enc1, use_big_alphabet)
self.assertEqual( self.graph.number_of_nodes(), 43948 )
self.assertEqual( self.graph.number_of_edges(), 50000 )
def testLoadCompressed2(self):
use_big_alphabet = True
self.graph.load_compressed_graph(cloudlight.tests.data_enc2, use_big_alphabet)
self.assertEqual( self.graph.number_of_nodes(), 43948 )
self.assertEqual( self.graph.number_of_edges(), 50000 )
def tearDown(self):
pass
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
```
#### File: cloudlight/tests/test_plot.py
```python
import unittest
from cStringIO import StringIO
from cloudlight.algorithms.plot import Plot
from cloudlight.classes.graph import Graph
from cloudlight.tests.data import example_txt, example_txt2
class Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testBasicHistogram(self):
x = map(lambda x : 2**x , range(0,5) )
p = Plot()
p.clear()
p.hist( x, 4, False, False, False )
#p.show()
def testGraphHistogram(self):
self.graph = Graph()
self.graph.debug = False
self.graph.input_debug_links = 200000
self.graph.output_debug_nodes = 10000
self.graph.load_edgelist(StringIO(example_txt2))
degrees = list(self.graph.degrees())
p = Plot()
p.clear()
p.hist( degrees, 15, True, True, False )
#p.show()
def testPlotSave(self):
self.graph = Graph()
self.graph.debug = False
self.graph.input_debug_links = 200000
self.graph.output_debug_nodes = 10000
self.graph.load_edgelist(StringIO(example_txt))
clusts = list( self.graph.eccentricities() )
clusts
p = Plot()
p.clear()
p.hist( clusts, 3, True, True, True )
p.save('testPlotSave.png')
#p.show()
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
```
#### File: cloudlight/tests/test_plot_stress2.py
```python
import unittest
from cloudlight.classes.graph import Graph
from cloudlight.algorithms.plot import Plot
import cloudlight.tests.data_enc1
class PlotStressTest2(unittest.TestCase):
def setUp(self):
self.graph = Graph()
self.graph.debug = False
self.graph.input_debug_links = 50000
self.graph.max_links_input = 25000
self.graph.output_debug_nodes = 10000
use_big_alphabet = False
self.graph.load_compressed_graph(cloudlight.tests.data_enc1, use_big_alphabet)
def tearDown(self):
pass
def testCompleteAnalysis(self):
p = Plot()
p.debug = False
sample_size = 10
bins = 10
p.init_complete_analysis(self.graph, '/tmp/graph_analysis', sample_size, bins)
p.complete_analysis(self.graph)
#p.plot_graph_params()
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
```
#### File: cloudlight/tests/test_privacy.py
```python
import unittest, random
from StringIO import StringIO
from cloudlight.classes.big_graph import BigGraph
from cloudlight.tests.data import example_txt2
from cloudlight.algorithms.privacy_attack import LinkPrivacyModel
class Test(unittest.TestCase):
def setUp(self):
self.graph = BigGraph()
self.graph.debug = False
self.graph.input_debug_links = 1
self.graph.output_debug_nodes = 1
self.graph.load_edgelist(StringIO(example_txt2))
self.graph.create_indices()
self.graph.create_index_degree()
self.graph.create_index_triangles()
self.graph.create_index_unseen_degree()
self.graph.index_parameter_from_parameter('triangles', 'unseen_triangles')
self.graph.add_parameter_cache('seen_degree')
self.graph.initialize_parameter('seen_degree', 0.0)
self.graph.index_parameter_cache('seen_degree')
self.graph.add_parameter_cache('seen_degree2')
self.graph.initialize_parameter('seen_degree2', 0.0)
self.graph.index_parameter_cache('seen_degree2')
self.graph.add_parameter_cache('seen_triangles')
self.graph.initialize_parameter('seen_triangles', 0.0)
self.graph.index_parameter_cache('seen_triangles')
self.graph.add_parameter_cache('seen_triangles2')
self.graph.initialize_parameter('seen_triangles2', 0.0)
self.graph.index_parameter_cache('seen_triangles2')
self.privacy_model = LinkPrivacyModel(self.graph, 1)
self.privacy_model.coverage_types = ['node','korolova','link','triangle']
def setUp2(self):
self.privacy_model.add_bribed_node('RCARecords')
self.assertTrue('RCARecords' in self.privacy_model.graph.nodes_iter() )
self.assertTrue(not 'RCARecords' in self.privacy_model.agents )
self.assertTrue('RCARecords' in self.privacy_model.controlled_nodes )
self.assertAlmostEqual(self.privacy_model.bribe_effort, 1)
self.assertAlmostEqual(self.privacy_model.total_effort(), 1)
self.privacy_model.strategy = 'start_crawler'
self.privacy_model.add_bribed_node('foofighters')
def tearDown(self):
pass
# def testAddRogueNode(self):
#
# pass
#
# def testCoverage(self):
#
# self.setUp2()
#
# self.assertEqual( self.privacy_model.link_coverage(), 0.51162790697674421)
#
#
# def testMaxUnseenDegreeNode(self):
#
# self.setUp2()
#
# self.assertEqual( self.privacy_model.max_unseen_degree_node(), 'Egger3rd')
#
#
# def testMaxUnseenDegreeCrawlerNode(self):
#
# self.setUp2()
#
# self.privacy_model.add_bribed_node('Egger3rd')
#
# self.assertEqual( self.privacy_model.max_unseen_degree_crawler_node(), 'cjweeks')
#
#
# def testSortedDegreesDec(self):
#
# self.assertEqual( list(self.privacy_model.sorted_degrees_dec_iter())[:4], ['ABadFeeling', 'Egger3rd', 'cjweeks', 'foofighters'])
# self.assertEqual( sorted(list(self.privacy_model.sorted_degrees_dec_iter())), sorted(list(set( self.privacy_model.sorted_degrees_dec_iter() ))) )
def setUp3(self):
self.graph = BigGraph()
self.graph.debug = False
self.graph.input_debug_links = 1
self.graph.output_debug_nodes = 1
edge = self.graph.add_edge
a,b,c,d,e,f,g,h,i = 'a','b','c','d','e','f','g','h','i'
edge(a,b)
edge(b,c)
edge(c,e)
edge(c,d)
edge(d,e)
edge(b,d)
edge(a,f)
edge(a,g)
edge(a,h)
edge(a,i)
edge(g,h)
edge(h,i)
self.graph.create_indices()
self.graph.create_index_degree()
self.graph.create_index_triangles()
self.graph.create_index_unseen_degree()
self.graph.index_parameter_from_parameter('triangles', 'unseen_triangles')
self.graph.add_parameter_cache('seen_degree')
self.graph.initialize_parameter('seen_degree', 0.0)
self.graph.index_parameter_cache('seen_degree')
self.graph.add_parameter_cache('seen_degree2')
self.graph.initialize_parameter('seen_degree2', 0.0)
self.graph.index_parameter_cache('seen_degree2')
self.graph.add_parameter_cache('seen_triangles')
self.graph.initialize_parameter('seen_triangles', 0.0)
self.graph.index_parameter_cache('seen_triangles')
self.graph.add_parameter_cache('seen_triangles2')
self.graph.initialize_parameter('seen_triangles2', 0.0)
self.graph.index_parameter_cache('seen_triangles2')
self.privacy_model = LinkPrivacyModel(self.graph, 1)
self.privacy_model.coverage_types = ['node','korolova','link','triangle']
self.privacy_model.initialize_histogram_degree(number_of_nodes=self.graph.number_of_nodes())
self.privacy_model.strategy = 'start_crawler_degree_hist'
def testBribedNodeRecursive(self):
self.setUp3()
g = self.graph
self.assertEqual( g.get_parameter_cache('seen_degree', 'a') , 0 )
self.assertEqual( g.get_parameter_cache('seen_degree2', 'a') , 0 )
self.assertEqual( g.get_parameter_cache('unseen_degree', 'a') , 5 )
self.assertEqual( g.get_parameter_cache('seen_triangles', 'a') , 0 )
self.assertEqual( g.get_parameter_cache('seen_triangles2', 'a') , 0 )
self.assertEqual( g.get_parameter_cache('unseen_triangles', 'a') , 2 )
self.assertEqual( g.get_parameter_cache('seen_degree', 'b') , 0 )
self.assertEqual( g.get_parameter_cache('seen_degree2', 'b') , 0 )
self.assertEqual( g.get_parameter_cache('unseen_degree', 'b') , 3 )
self.assertEqual( g.get_parameter_cache('seen_triangles', 'b') , 0 )
self.assertEqual( g.get_parameter_cache('seen_triangles2', 'b') , 0 )
self.assertEqual( g.get_parameter_cache('unseen_triangles', 'b') , 1 )
self.assertEqual( self.privacy_model.M, [9, 0, 0, 0, 0, 0] )
        #print 'BRIBING NODE "a"'
self.privacy_model.add_bribed_node('a')
self.assertEqual( g.get_parameter_cache('seen_degree', 'a') , -1 )
self.assertEqual( g.get_parameter_cache('seen_degree2', 'a') , 5 )
self.assertEqual( g.get_parameter_cache('unseen_degree', 'a') , 0 )
self.assertEqual( g.get_parameter_cache('seen_triangles', 'a') , 2 )
self.assertEqual( g.get_parameter_cache('seen_triangles2', 'a') , -1 )
self.assertEqual( g.get_parameter_cache('unseen_triangles', 'a') , 0 )
self.assertEqual( g.get_parameter_cache('seen_degree', 'b') , -1 )
self.assertEqual( g.get_parameter_cache('seen_degree2', 'b') , 3 )
self.assertEqual( g.get_parameter_cache('unseen_degree', 'b') , 0 )
self.assertEqual( g.get_parameter_cache('seen_triangles', 'b') , 0 )
self.assertEqual( g.get_parameter_cache('seen_triangles2', 'b') , 0 )
self.assertEqual( g.get_parameter_cache('unseen_triangles', 'b') , 1 )
self.assertEqual( self.privacy_model.M, [1, 3, 2, 2, 0, 1] )
        #print 'BRIBING NODE "b"'
self.privacy_model.add_bribed_node('b')
self.assertEqual( g.get_parameter_cache('seen_degree', 'b') , -1 )
self.assertEqual( g.get_parameter_cache('seen_degree2', 'b') , 3 )
self.assertEqual( g.get_parameter_cache('unseen_degree', 'b') , 0 )
self.assertEqual( g.get_parameter_cache('seen_triangles', 'b') , 1 )
self.assertEqual( g.get_parameter_cache('seen_triangles2', 'b') , -1 )
self.assertEqual( g.get_parameter_cache('unseen_triangles', 'b') , 0 )
self.assertEqual( self.privacy_model.M, [0, 1, 3, 4, 0, 1] )
self.assertEqual( self.privacy_model.link_coverage(), 1.0 )
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
```
#### File: cloudlight/tests/test_privacy_stress2.py
```python
import unittest, random
from cloudlight.classes.graph import Graph
from cloudlight.algorithms.privacy_attack import PrivacyAttackStrategies
import cloudlight.tests.data_enc1
class PrivacyStressTest2(unittest.TestCase):
def setUp(self):
self.graph = Graph()
self.graph.debug = False
self.graph.max_links_input = 100000
self.graph.input_debug_links = 200000
self.graph.output_debug_nodes = 10000
use_big_alphabet = False
self.graph.load_compressed_graph(cloudlight.tests.data_enc1, use_big_alphabet)
self.graph.create_index_degree()
self.graph.create_index_unseen_degree()
lookahead = 0
coverage = 'link'
self.strategies = PrivacyAttackStrategies(self.graph, lookahead, coverage, False)
random.seed(666)
self.coverages = [0.1] # [ float(f)/100 for f in range(2, 104, 2) ]
def testStrategyRandom(self):
self.assertEqual( list(self.strategies.start_random([0.01]))[0], 117)
def testStrategyDegree(self):
self.assertEqual( list(self.strategies.start_degree(self.coverages))[0], 5)
def testStrategyGreedy(self):
self.assertEqual( list(self.strategies.start_greedy(self.coverages))[0], 5)
def testStrategyCrawler(self):
self.assertEqual( list(self.strategies.start_crawler(self.coverages))[0], 11)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
```
#### File: cloudlight/tests/test_privacy_stress4.py
```python
import unittest, random
from cloudlight.classes.graph import Graph
from cloudlight.algorithms.privacy_attack import PrivacyAttackStrategies
import cloudlight.tests.data_enc1
class BigPrivacyStressTest4(unittest.TestCase):
def setUp(self):
self.graph = Graph()
self.graph.debug = False
self.graph.max_links_input = 5000
self.graph.input_debug_links = 200000
self.graph.output_debug_nodes = 10000
use_big_alphabet = False
self.graph.load_compressed_graph(cloudlight.tests.data_enc1, use_big_alphabet)
self.graph.create_index_degree()
self.graph.create_index_unseen_degree()
lookahead = 1
coverage = 'korolova node'
self.strategies = PrivacyAttackStrategies(self.graph, lookahead, coverage, False)
self.coverages = [0.8] # [ float(f)/100 for f in range(2, 104, 2) ]
def testStrategyRandom(self):
random.seed(6661)
self.assertEqual( list(self.strategies.start_random([0.01]))[0], 1)
def testStrategyDegree(self):
random.seed(6662)
self.assertEqual( list(self.strategies.start_degree(self.coverages))[0], 3)
def testStrategyGreedy(self):
random.seed(6663)
self.assertEqual( list(self.strategies.start_greedy(self.coverages))[0], 3)
def testStrategyCrawler(self):
random.seed(6664)
self.assertEqual( list(self.strategies.start_crawler(self.coverages))[0], 3)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
```
#### File: cloudlight/utils/random_items.py
```python
from random import *
def random_items(iterable, k=1):
# Raymond Hettinger's recipe
# http://code.activestate.com/recipes/426332/
result = [None] * k
for i, item in enumerate(iterable):
if i < k:
result[i] = item
else:
j = int(random() * (i + 1))
if j < k:
result[j] = item
shuffle(result)
return result
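# A minimal usage sketch (hypothetical values): because this is reservoir
# sampling, only k items are ever kept in memory, so arbitrarily long
# iterables (including generators) work.
if __name__ == "__main__":
    print(random_items(range(1000), k=5))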
``` |
{
"source": "joigno/cryptoalerts",
"score": 2
} |
#### File: joigno/cryptoalerts/main.py
```python
from pycoingecko import CoinGeckoAPI
import json, datetime, logging, datetime, os
logging.basicConfig(filename='cryptoalerts.log', level=logging.INFO)
from send_emails import send_email
from utils import load_alerts, load_portfolios
cg = CoinGeckoAPI()
def get_price_market(base_ticker, target_ticker='BUSD', market='binance'):
pairs = [ticker for ticker in cg.get_exchanges_by_id(market)['tickers']
if ticker['base'] == base_ticker and ticker['target']==target_ticker]
if pairs == []:
target_ticker = 'USDT'
pairs = [ticker for ticker in cg.get_exchanges_by_id(market)['tickers']
if ticker['base'] == base_ticker and ticker['target']==target_ticker]
return pairs[0]['last']
def process_alert_single(alert, prices, cg, portfolios):
triggered = False
asset = alert['asset_id']
if asset not in prices:
price = cg.get_price(ids=asset, vs_currencies='usd')[asset]['usd']
prices[asset] = price
else:
price = prices[asset]
# analyze logical conditions
if alert['condition'] == '>':
triggered = price > float(alert['value'])
elif alert['condition'] == '<':
triggered = price < float(alert['value'])
return triggered, prices
def update_prices_portfolio(portfolio,prices, cg):
asset_ticker = {
'avalanche-2' : 'AVAX',
'terra-luna' : 'LUNA',
'ethereum': 'ETH',
'bitcoin': 'BTC',
}
cash_value = 0.0
for asset in portfolio['portfolio_assets'].keys():
if asset == 'usd':
continue
if asset not in prices:
#price = cg.get_price(ids=asset, vs_currencies='usd')[asset]['usd']
price = get_price_market(asset_ticker[asset])
prices[asset] = price
cash_value += prices[asset] * float(portfolio['portfolio_assets'][asset]['amount'])
return cash_value, prices
def calculate_rebalancing(cash_value, usd_total, prices, portfolio, min_trade_usd):
"""cash_value <- total usd in crypto
usd_total <- cantidad de busd
prices <-
portfolio <- el json que aparece en default.json file
tolerance_usd <- minimum volume para operar"""
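    # Worked example (hypothetical numbers): with cash_value=800, usd_total=200
    # and cash_percentage=20, total_value is 1000 and expected_value is 800.
    # A portfolio holding 'usd' plus 2 crypto assets gives num_assets=3, so
    # balanced_value = 800 / 2 = 400 per crypto asset; any asset whose current
    # value differs from 400 by more than min_trade_usd produces a BUY or SELL.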
ret = ''
total_value = cash_value + usd_total
logging.info('total_value = %.4f' % total_value)
# cash_percentage is really non-cash percentage
cash_percentage_portfolio = float(portfolio['cash_percentage'])
expected_value = total_value * (100.0-cash_percentage_portfolio) / 100.0
logging.info('expected_value = %.4f' % expected_value)
num_assets = len(portfolio['portfolio_assets'].keys())
balanced_value = expected_value / (num_assets-1)
logging.info('balanced_value = %.4f'% balanced_value)
trades = []
for asset in portfolio['portfolio_assets'].keys():
if asset == 'usd':
continue
# diff
curr_amount = float(portfolio['portfolio_assets'][asset]['amount'])
logging.info("amount %s = %.4f"% (asset, curr_amount))
curr_value = curr_amount * prices[asset]
logging.info("price %s = %.4f"% (asset, prices[asset]))
diff_value = curr_value - balanced_value
logging.info("curr_value %s = %.4f"% (asset, curr_value))
logging.info("diff_value %s = %.4f"% (asset, diff_value))
if diff_value > min_trade_usd:
# SELL
sell_amount = diff_value / prices[asset]
ret += '<br/>\nSELL %f %s' % (sell_amount, asset.upper())
trades.append((asset,'SELL',sell_amount))
elif diff_value < -min_trade_usd:
# BUY
buy_amount = -diff_value / prices[asset]
ret += '<br/>\nBUY %f %s' % (buy_amount, asset.upper())
trades.append((asset, 'BUY', buy_amount))
return ret, trades
def process_alert_cash(alert, prices, cg, portfolios):
trades = []
min_trade_usd = float(alert['min_trade_usd'])
triggered = False
portfolio_name = alert['portfolio']
portfolio = portfolios[portfolio_name]
# cash value of non-USD assets
cash_value, prices = update_prices_portfolio(portfolio,prices,cg)
logging.info("cash_value = %.4f" % cash_value)
# cash value of USD assets
usd_total = float(portfolio['portfolio_assets']['usd']['amount'])
logging.info("usd_total = %.4f" % usd_total)
cash_percentage_alert = float(alert['value'])
current_cash_percentage = 100.0 * cash_value / (cash_value + usd_total)
logging.info("cash_percentage_alert = %.4f" % cash_percentage_alert)
logging.info("current_cash_percentage = %.4f" % current_cash_percentage)
#print("current_cash_percentage = %.4f" % current_cash_percentage)
logging.info("cash_percentage_portfolio = %.4f" % float(portfolio['cash_percentage']))
# analyze logical conditions
msg_extra = ''
if alert['condition'] == '>':
triggered = current_cash_percentage > cash_percentage_alert
if triggered:
#print("current_cash_percentage = %.4f" % current_cash_percentage)
#print("cash_percentage_alert = %.4f" % cash_percentage_alert)
msg_extra, trades = calculate_rebalancing(cash_value, usd_total, prices, portfolio, min_trade_usd)
#print('TRADES CASH: ', trades)
elif alert['condition'] == '<':
triggered = current_cash_percentage < cash_percentage_alert
if triggered:
#print("current_cash_percentage = %.4f" % current_cash_percentage)
#print("cash_percentage_alert = %.4f" % cash_percentage_alert)
msg_extra, trades = calculate_rebalancing(cash_value, usd_total, prices, portfolio, min_trade_usd)
#print('TRADES CASH: ', trades)
return triggered, prices, msg_extra, trades
def process_alert_crypto(alert, prices, cg, portfolios):
min_trade_usd = float(alert['min_trade_usd'])
triggered = False
trades = []
portfolio_name = alert['portfolio']
portfolio = portfolios[portfolio_name]
# cash value of non-USD assets
cash_value_cryptos, prices = update_prices_portfolio(portfolio,prices,cg)
logging.info("cash_value_cryptos = %.4f" % cash_value_cryptos)
msg_extra = ''
cryptos_num = len(portfolio['portfolio_assets'].keys()) - 1
target_crypto_percentage = (1 / cryptos_num) * 100.0
target_crypto_value = (cash_value_cryptos / cryptos_num)
for asset in portfolio['portfolio_assets'].keys():
if asset == 'usd':
continue
logging.info("asset = %s" % asset)
# cash value of 1 crypto asset
curr_amount = float(portfolio['portfolio_assets'][asset]['amount'])
logging.info("amount %s = %.4f"% (asset, curr_amount))
curr_value = curr_amount * prices[asset]
curr_crypto_percentage = 100.0 * curr_value / cash_value_cryptos
# analyze logical conditions
condition_value = float(alert['value'])
logging.info("condition %s"% (alert['condition']))
logging.info("curr_crypto_percentage = %.4f" % curr_crypto_percentage)
logging.info("delta_condition_percentage = %.4f" % condition_value)
if alert['condition'] == '>':
triggered = curr_crypto_percentage > target_crypto_percentage + condition_value
if triggered:
#print("curr_crypto_percentage = %.4f" % curr_crypto_percentage)
#print("delta_condition_percentage = %.4f" % condition_value)
#print("target_crypto_percentage = %.4f" % target_crypto_percentage)
logging.info("limit_condition_percentage = %.4f" % (target_crypto_percentage + condition_value))
cash_backup = portfolio['cash_percentage']
usd_amount_backup = portfolio['portfolio_assets']['usd']['amount']
portfolio['cash_percentage'] = 0
portfolio['portfolio_assets']['usd']['amount'] = 0
msg_extra, trades = calculate_rebalancing(cash_value_cryptos, 0.0, prices, portfolio, min_trade_usd)
portfolio['cash_percentage'] = cash_backup
portfolio['portfolio_assets']['usd']['amount'] = usd_amount_backup
elif alert['condition'] == '<':
triggered = curr_crypto_percentage < target_crypto_percentage - condition_value
if triggered:
#print("curr_crypto_percentage = %.4f" % curr_crypto_percentage)
#print("delta_condition_percentage = %.4f" % condition_value)
#print("target_crypto_percentage = %.4f" % target_crypto_percentage)
logging.info("limit_condition_percentage = %.4f" % (target_crypto_percentage - condition_value))
cash_backup = portfolio['cash_percentage']
usd_amount_backup = portfolio['portfolio_assets']['usd']['amount']
portfolio['cash_percentage'] = 0
portfolio['portfolio_assets']['usd']['amount'] = 0
msg_extra, trades = calculate_rebalancing(cash_value_cryptos, 0.0, prices, portfolio, min_trade_usd)
portfolio['cash_percentage'] = cash_backup
portfolio['portfolio_assets']['usd']['amount'] = usd_amount_backup
if triggered:
break
return triggered, prices, msg_extra, trades
def run(portfolios=None, alerts=None, prices=None, mail_enabled=True):
ret_trades = []
logging.info('='*80)
logging.info('='*80)
logging.info('='*80)
logging.info('BUENOS_AIRES ' + str(datetime.datetime.now()-datetime.timedelta(hours=3)))
logging.info('GMT (SERVER) ' + str(datetime.datetime.now()+datetime.timedelta(hours=0)))
logging.info('PARIS ' + str(datetime.datetime.now()+datetime.timedelta(hours=1)))
# portfolio https://raw.githubusercontent.com/joigno/alerts/main/default.json
if not portfolios:
portfolios = load_portfolios()
if not alerts:
alerts = load_alerts()
if not prices:
prices = {}
msg = ''
status_sent = {}
for alert in alerts:
logging.info('--------------- processsing alert %s ---------------' % alert['type'].upper())
if alert['type'] == 'single_asset':
triggered, prices = process_alert_single(alert, prices, cg, portfolios)
if triggered:
# Send Email
logging.info(alert['message'])
if mail_enabled:
send_email(alert['recipient'].split(','), 'CRYPTO-ALERT: '+ alert['message'], alert['message'])
elif alert['type'] == 'cash_percentage':
triggered, prices, msg_extra, trades = process_alert_cash(alert, prices, cg, portfolios)
if triggered:
# Send Email
msg = alert['message'] + '<br/>\n' + msg_extra
subject = 'ALERT crypto-alerts: ' + alert['message']
logging.info(msg)
os.system('tail -n 117 cryptoalerts.log > extra.log;')
extra = '\n' + '<br/><pre>' + open('extra.log').read() + '<pre/>'
if mail_enabled:
send_email(alert['recipient'].split(','), subject, msg + '\n\n' + extra)
if trades != []:
ret_trades = trades
elif alert['type'] == 'crypto_percentage':
triggered, prices, msg_extra, trades = process_alert_crypto(alert, prices, cg, portfolios)
if triggered:
# Send Email
msg = alert['message'] + '<br/>\n' + msg_extra
subject = 'ALERT crypto-alerts: ' + alert['message']
logging.info(msg)
os.system('tail -n 117 cryptoalerts.log > extra.log;')
extra = '\n' + '<br/><pre>' + str(open('extra.log').read()) + '<pre/>'
if mail_enabled:
send_email(alert['recipient'].split(','), subject, msg + '\n\n' + extra)
if trades != []:
ret_trades = trades
# Send Status Message (Daily)
if datetime.datetime.now().hour in [12]: #,13,14,15]:
logging.info('INFO: sending status email')
subject = 'INFO crypto-alerts: system is up and running'
msg = subject + '\n' + '<pre>' + json.dumps(portfolios, indent=4, sort_keys=True) \
+ '\n' + json.dumps(alerts, indent=4, sort_keys=True) + '<pre/>'
recipients = alert['recipient'].split(',')
for recp in recipients:
if not recp in status_sent:
send_email(alert['recipient'].split(','), subject, msg)
status_sent[recp] = True
return msg, ret_trades
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
run()
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
``` |
{
"source": "joigno/mirror-hash",
"score": 3
} |
#### File: joigno/mirror-hash/mirror.py
```python
import random, sys, struct
def new(m=None):
return mirror256(m)
# IEEE 754 representation in hex, without heading byte.
def prime_to_cubic_root_hex(p):
h = hex(long(str(int(p)**(1./3) - int(int(p)**(1./3) ))[2:]))
while len(h) < 13:
h = '0x' + '0' + h[2:]
ret = [0]*8
h = h[4:]
for i in range(8):
ret[i] = int(h[i],16)
return ret
# IEEE 754 representation in hex, without heading byte.
def cubic_root_array(cr):
h = hex(cr)
if h[-1] != 'L':
h += 'L'
while len(h) < 13:
h = '0x' + '0' + h[2:]
ret = [0]*8
h = h[4:]
for i in range(8):
ret[i] = int(h[i],16)
return ret
class mirror256(object):
'''
    Mirror256 Hash Function, Provably Reversible (Bijective) for Hashes.
'''
DEFAULT_DEPTH = 64 #1 #16
DEFAULT_SIZE = 256 #8 # 256
GATES = [0, 1]#['Toffoli','Fredkin']
lastHashes = []
firstPrimesCubicRootDecRep = [
0xa54ddd35b5L, 0xd48ef058b4L, 0x342640f4c9L, 0x51cd2de3e9L, 0x8503094982L, 0x9b9fd7c452L, 0xc47a643e0cL, 0xa8602fe35aL,
0x20eaf18d67L, 0x4d59f727feL, 0x685bd4533fL, 0x7534dcd163L, 0x8dc0dcbb8bL, 0xb01624cb6dL, 0xcfeabbf181L, 0xda0b94f97eL,
0x8f4d86d1a9L, 0x20c96455afL, 0x29c172f7ddL, 0x43b770ba12L, 0x544d18005fL, 0x6c34f761a1L, 0x8a76ef782fL, 0x98f8d17ddcL,
0xa0151027c6L, 0xae080d4b7bL, 0xb4e03c992bL, 0xc251542f88L, 0x3dc28be52fL, 0xb75c7e128fL, 0x241edeb8f4L, 0x04317d07b2L,
0x46305e3a3dL, 0x4bafebecefL, 0x09308a3b6bL, 0x6bb275e451L, 0x76044f4b33L, 0x85311d5237L, 0x94051aaeb0L, 0x98e38ef4dfL,
0xb0b5da348cL, 0xb55fd044a0L, 0xbe9b372069L, 0xc32ceea80eL, 0xddf799a193L, 0x0eee44484bL, 0x17529bf549L, 0x1b7b53489dL,
0x23ba4d74a0L, 0x2febef5a50L, 0x33f0db9016L, 0x47b5d89777L, 0x5352304156L, 0x5ec09f1622L, 0x6a02e0a83bL, 0x0af9027c88L,
0x78c3f873a6L, 0x8009496a17L, 0x83a5537ad2L, 0x95715f4210L, 0xadb0de7719L, 0xb47bab87d1L, 0xb7db7bc375L, 0xbe90221e69L,
0xd599166861L, 0xdf457a1ae2L, 0x3f1c4f10f8L, 0x5e7beeb45fL, 0x0faff58d42L, 0x18f53d76f5L, 0x2528d4cd81L, 0x2e31dbfd82L,
0x372236c49fL, 0x3d0a65aa42L, 0x45d30e2165L, 0x516594b949L, 0x571fef6fc3L, 0x6277a55af4L, 0x70708531b1L, 0x73350c1f03L,
0x80ea6cfcaaL, 0x83a1c53701L, 0x8bbb0acdffL, 0x9116bfdc91L, 0x9910e9b48fL, 0xa397b0d72fL, 0xa8cf4c2583L, 0xab68347813L,
0xb0944c2d01L, 0xbfebc31b97L, 0xca01c32639L, 0xcf022b04f8L, 0xd8ee4454deL, 0xddda24f5e1L, 0xe52f7f4ce3L, 0x6c818c6507L,
0x8472d1b3d3L, 0x2285f71052L, 0x2982d70320L, 0x350b7321b5L, 0x3be615f828L, 0x42b44ca815L, 0x44f6508617L, 0x4bb44d615eL,
0x56d68d1d1aL, 0x5d7531014eL, 0x64087059f4L, 0x663705c553L, 0x6cbb5fa88aL, 0x7334c44133L, 0x777fb210c9L, 0x79a3612cf1L,
0x8660f49e64L, 0x90df7d6e60L, 0x92f570231eL, 0x971e058f2cL, 0x9d52b376f9L, 0x10595e2a63L, 0x108dc9904fL, 0xb1bd0befacL,
0xb5c5d93981L, 0xbbcb73733bL, 0xc3c4f0aabbL, 0xcda6c368f9L, 0xd57d5a7e75L, 0x16520c34cfL, 0xe6e99f4c26L, 0x264192d75eL,
0x0989bc2dbfL, 0x854a9cfefbL, 0xd0b522f906L, 0x1a7dec746cL, 0x1e390b5485L, 0x25a54a9675L, 0x295679bf9bL, 0x36292b3477L,
0x3f3a2ff04aL, 0x4a01ec61b3L, 0x4bcb38937bL, 0x54ae80966cL, 0x567356a419L, 0x59fad0877cL, 0x5bbd75dfd9L, 0x647fe2621cL,
0x0b43c0fed1L, 0x7417c16475L, 0x75cfd5a597L, 0x793df2a072L, 0x852a251c4dL, 0x888c3a9c35L, 0x8a3c49c1d7L, 0x8d9a740155L,
0x9e4ad6fa74L, 0xa199c2f506L, 0xa830325a9dL, 0xb05e889d91L, 0xb6df352e6eL, 0xba1bfe44edL, 0xbef2c774d8L, 0xc3c457f223L,
0xceeea9021fL, 0xd21a4009acL, 0xd6d77766f3L, 0xdb8fb9c68dL, 0xe1d31d3112L, 0x024e147bb9L, 0x45a4041059L, 0x64758e789aL,
0x9289050518L, 0xa1da8c8b5dL, 0x17d09378c9L, 0x1955aee1bfL, 0x1de1ffd3b5L, 0x03be0f8051L, 0x26ed40ba60L, 0x2e69348875L,
0x2fe6f31384L, 0x345d51384aL, 0x41a6fa9d42L, 0x06dbc790eaL, 0x460c840ae7L, 0x48f8964fb7L, 0x4d574ac9d0L, 0x51b1f28f29L,
0x5779eb0acfL, 0x5bcb43e3efL, 0x099c117651L, 0x6fbe24810dL, 0x0b50c60b1cL, 0x78317578e7L, 0x7dcb6a4081L, 0x84c2b49cc0L,
0x88ebda33b6L, 0x8d116a7403L, 0x92934fbcacL, 0x9aca7688f4L, 0x9d846e148dL, 0xa19884c4e2L, 0xa5a932ae5eL, 0xa70356989fL,
0xab0f83d1a6L, 0xb31ddc0413L, 0xb9ca6990bfL, 0xc5b725f629L, 0xc70890fc30L, 0xc9aa5942ebL, 0xcd9a66bed0L, 0xcee9b9403aL,
0xd2d59e0287L, 0xd5712927f2L, 0xd6be6b65d1L, 0xd957ea5633L, 0x1682d445b9L, 0xe266839b86L, 0xe6432f6b31L, 0xbefe814019L,
0xe4daf84b08L, 0x1aa91c8b09L, 0x1fad4b5ea5L, 0x2ae4ae18c5L, 0x311543795eL, 0x39b322532eL, 0x3c26b944ebL, 0x0623353793L,
0x3fd1e9b51cL, 0x06bf72cb7cL, 0x485734706dL, 0x0779fc0a08L, 0x4bf9bce29fL, 0x4f99b6704cL, 0x56d20ebfb1L, 0x5cceae69c5L,
0x5e0060a5bcL, 0x09a37ea0f6L, 0x09c1fb669eL, 0x63f4c00913L, 0x67841bdf2dL, 0x6e9b8e7208L, 0x75a96ad747L, 0x7a580f92e2L,
0x815638c600L, 0x84d1d2bb80L, 0x8722f21b88L, 0x0ddc45b53bL, 0x0e5249fa19L, 0x9183779dc3L, 0x9619a1c10cL, 0x98633a3308L,
0xa05d13a173L, 0xa2a244e832L, 0xa6083edd92L, 0xa729c1a4baL, 0xa96c0f26edL, 0xaccdb931edL, 0xadedcd8549L, 0xb14c9f2e14L,
0xb6e5f3a401L, 0xc20731b710L, 0xc5597e3e50L, 0xc78f393564L, 0xc8a9bfd2e9L, 0xd5d696d008L, 0xd8059fd29cL, 0xd91cd0017aL,
0xde8d7a1929L, 0xe50d858cc9L, 0x17036935a4L, 0x1aec0ae713L, 0x45e3ca4534L, 0x660781d922L, 0x86186845e1L, 0xa61698fe32L,
0x1a29cbf7ecL, 0x1d556fe9a3L, 0x1f71850a35L, 0x207f423e17L, 0x26cd7c6f71L, 0x2c0934f621L, 0x324a60b5faL, 0x3671ecd976L,
0x3eb7c32011L, 0x06fde2cc3fL, 0x48fdedbf9bL, 0x4b09b48cbbL, 0x4c0f504b01L, 0x4e19f8e3b3L, 0x4f1f05e96cL, 0x543581a053L,
0x5a4a6ecb10L, 0x5d52709977L, 0x6058d22656L, 0x69624ae937L, 0x6a6274cc37L, 0x725d6e8b91L, 0x735c04408fL, 0x7e4108faa8L,
0x8136134ae4L, 0x852515fe57L, 0x88169247f5L, 0x8a0c0e2855L, 0x8b068bd109L, 0x8cfb06eedaL, 0x90e1fe58d2L, 0x017a5ff39dL,
0x98a801a35aL, 0x99a0055314L, 0x0195a8132aL, 0xa5338fb1e6L, 0xa9ff8dcbd6L, 0xacde7217b2L, 0x11dbff6f0bL, 0xb38baf623aL,
0x1225102d5cL, 0xb665df3dcaL, 0xbb23bfe0abL, 0xc0cf52f722L, 0xc1c0b7cc65L, 0xc946683c84L, 0xca36706d10L, 0xcd05a29fc3L,
0xcee45a0baeL, 0xcfd37c85a4L, 0xd47ceeeda1L, 0xd83508683fL, 0xe08ab2aaa6L, 0x1b87647d28L, 0x2dda8c56dfL, 0x494caa6138L,
0x6dd118371bL, 0xb696d24ebbL, 0x132a8748adL, 0xd1ca109288L, 0x18991c243fL, 0x1fcf8d4fbeL, 0x20b5bee829L, 0x03735f6de3L,
0x26181342dbL, 0x28c76d7f23L, 0x2b75943d65L, 0x2d3e593c24L, 0x32957bc0acL, 0x3378e0de19L, 0x3d359bacf5L, 0x3fdab8cfceL,
0x40bc2b4f35L, 0x435fbdd0afL, 0x45217bcbb2L, 0x47c32428acL, 0x4de2934638L, 0x508071b007L, 0x523e65b3edL, 0x531d2fc459L,
0x55b8ceb8aaL, 0x5775446fe3L, 0x5a0f0783aeL, 0x5f3f3b06f8L, 0x61d5add070L, 0x646b082d20L, 0x6a6df44b67L, 0x6c24903d20L,
0x6eb59463a6L, 0x73d465a770L, 0x773be86b4bL, 0x79c84e2a0dL, 0x7b7aa6a329L, 0x0d7241927cL, 0x0e33c9bf1fL, 0x9235147a66L,
0x958c0279deL, 0x0f1f1482afL, 0x0f5f03d7dbL, 0x9a8b0b2052L, 0x9d090e7cb7L, 0xa623ef6a66L, 0xab15bd8361L, 0xabe853dd2cL,
0xb27923f3b4L, 0xb5bef88516L, 0xb7613eea45L, 0xbc4585df81L, 0xc1f5b69e0eL, 0xc602b56a25L, 0xc6d1cb9c0cL, 0xc86fa84d57L,
0xcbaa23d3ddL, 0xce14eb6297L, 0xd07ec66ccbL, 0xd21a2ad247L, 0xd2e7b5d3fbL, 0x15403fc313L, 0xd6e9e63d10L, 0xda1d06f27bL,
0xdbb5fc65c8L, 0xdc82508207L, 0xdee6b2c233L, 0xe2e156cb42L, 0xe3acc4b9a2L, 0xe7a46db595L, 0xc5dd77cea1L, 0x23993428edL,
0x5ad1e7218aL, 0x822660524dL, 0xb138b07a6fL, 0xb90db66620L, 0x0215d4958fL, 0xe0287c9248L, 0xe7f7c7f5a3L, 0x1d6e6678adL,
0x22ddf91d8bL, 0x03a445b593L, 0x26bd2634f2L, 0x29d46b0213L, 0x2c24e6115fL, 0x2daf6cd5e9L, 0x349a42059dL, 0x37aaed0245L,
0x3b7db9947bL, 0x3dc7ec72bdL, 0x401150af07L, 0x431c9620f6L, 0x46e8acac4dL, 0x4b7443145fL, 0x50bdc8dd4dL, 0x523fb8293bL,
0x5481f82f11L, 0x56c3708024L, 0x5783c1ad36L, 0x61ff2de4b7L, 0x62be364c7eL, 0x66781af577L, 0x6971b3493bL, 0x6aedfe4ec7L,
0x0b363d0311L, 0x71992c5698L, 0x748dc1f8daL, 0x0c19040500L, 0x7b2f3f404cL, 0x7f9738554aL, 0x810e8f4692L, 0x01500275b5L,
0x8a8e636ca2L, 0x8e3218c29eL, 0x8eec330714L, 0x94ba2a6b76L, 0x9e1e1f2dfbL, 0x9f8eca6514L, 0xa047023427L, 0xa496b1302aL,
0xa6bd7e4503L, 0xa82c4f5d6dL, 0xac76eca6d9L, 0xae9b33edd7L, 0xb1752dbd79L, 0xb2e1b67b93L, 0xb5b9e07146L, 0xbd86255ed6L,
0x1305eaf449L, 0xbfa504ee55L, 0xc059d42fbfL, 0x13a8837ac6L, 0xce6a96d830L, 0xcf1ddc8595L, 0xd1373e6d63L, 0xd34ffa08eaL,
0xd5680fd896L, 0xd6cd17db96L, 0xd8e41a7335L, 0xd9964c12f8L, 0xddc1f7139eL, 0xdf25495fb8L, 0xe34d8e2da9L, 0x16ccab2fb3L,
0xe7734adce1L, 0xe82400e6f0L, 0x303963efabL, 0x371a3d8fe0L, 0x6e082d995dL, 0x82960dc05eL, 0xc6ed3a4969L, 0x1957402b09L,
0x1c0eff5a25L, 0x1d6a77e9ddL, 0x1e181a7e96L, 0x1f732c56daL, 0x20209ba2d1L, 0x2790016836L, 0x2a422ac9eeL, 0x2e4b709deeL,
0x304f318debL, 0x33a97b2627L, 0x3454f19d58L, 0x35abacd166L, 0x37ad4989aeL, 0x38585cc02fL, 0x3a5933869cL, 0x3daea6332cL,
0x3e592594a7L, 0x4256cb12f0L, 0x45a86b1961L, 0x070837213dL, 0x49a1e217eeL, 0x4e4210f6f1L, 0x503ca2a6e6L, 0x518e095e0cL,
0x5387ac1ebcL, 0x5628fc56f2L, 0x5821526d06L, 0x5a191a9805L, 0x5f55d2d75fL, 0x09ece14bdfL, 0x63e7c03a2eL, 0x6535783b8fL,
]
def __init__(self, m=None, depth=None, size=None, useStandardState=True):
'''
Constructor
'''
self._buffer = ''
self._counter = 0
if not depth:
self.depth = self.DEFAULT_DEPTH
if not size:
self.size = self.DEFAULT_SIZE
if len(self.lastHashes) < self.depth:
if useStandardState:
self.initStandardState()
else:
self.initLastHashes()
if m is not None:
if type(m) is not str:
raise TypeError, '%s() argument 1 must be string, not %s' % (self.__class__.__name__, type(m).__name__)
self.update(m)
def unpack(self, m):
ret = [0]*64
l = struct.unpack('!32b',m)
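        # Note: '!32b' unpacks signed bytes, so input characters >= 0x80 would
        # yield a negative high nibble here; the demo in __main__ only feeds
        # ASCII, which avoids that case.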
i = 0
for b in l:
# high nibble
ret[i] = b >> 4
i += 1
# low nibble
ret[i] = b & 0x0F
i += 1
return ret
def pack(self, hm):
hb = [0]*(self.depth/2)
i = 0
for i in range(self.depth/2):
b = hm[i*2] << 4
b = b | hm[i*2+1]
hb[i] = b
return struct.pack('!32B',*tuple(hb))
def digest(self):
return self.pack(self._hashed)
def hexdigest(self):
return '0x' + self.digest().encode('hex')
def update(self, m):
if not m:
return
if type(m) is not str:
raise TypeError, '%s() argument 1 must be string, not %s' % (sys._getframe().f_code.co_name, type(m).__name__)
self._buffer += m
self._counter += len(m)
while len(self._buffer) >= 32:
hm = self._mirror256_process(self._buffer[:32])
self.lastHashes = [hm] + self.lastHashes[:self.depth]
self._buffer = self._buffer[32:]
if 0 < len(self._buffer) < 32 or m == '':
hm = self._mirror256_process(self._buffer + 'A'*(32-len(self._buffer)))
self._buffer = self._buffer[32:]
self._hashed = hm
# hex(long(str(5**(1./3) - int(5**(1./3) ))[2:]))
def initStandardState(self):
while len(self.lastHashes) < self.depth:
i = len(self.lastHashes)
layer = []
for j in range(8*i,8*(i+1)):
jprimerep = self.firstPrimesCubicRootDecRep[i]
layer += cubic_root_array(jprimerep)
self.lastHashes.append( layer )
def initLastHashes(self):
# TODO replace with fixed initial internal state, for example based on cubic roots of primes.
random.seed(777)
while len(self.lastHashes) < self.depth:
oneRandomHash = self.randomHash()
self.lastHashes.append(oneRandomHash)
def randomHash(self):
ret = [0]*(self.size/4)
for i in range(self.size/4):
newhex = random.randint(0,15) #hex(random.randint(0,15))[2]
ret[i] = newhex
return ret #'0x' + ret
def _mirror256_process(self, m):
m = self.unpack(m)
for layer in range(self.depth):
hm = self.hashLayerPass(layer, m)
return hm
def hashLayerPass(self, layer, block, startLeft=None):
# Layer1 (a zigzag)
# 1 ### @@@
# 2 ###
# 3 ### @@@
# 4 @@@
# 5 ### @@@
# 6 ###
# 7 ### @@@
# 8 @@@
# Size must divisible by 8
# Each 3-wire gate can be Toffoli or Fredkin, mirrored or not., 4 choices.
# First a XOR with layer encoding to avoid 0 to 0 hashes.
layerHash = self.lastHashes[layer]
for gateIndex in range(self.size/4):
block[gateIndex] = block[gateIndex] ^ layerHash[gateIndex]
for gateIndex in range(self.size/4):
gateType = layerHash[gateIndex] & 0x3 #int(layerHash[2:][gateIndex],16) & 0x3
if gateType % 2 ==0: # Toffoli
gateName = 0 #'Toffoli'
else:
gateName = 1 #'Fredkin'
if gateType >> 1 == 0: # Toffoli
gateSymmetry = 0 #'Regular'
else:
gateSymmetry = 1 #'Mirrored'
## do gate
block = self.applyGate(gateIndex,gateName,gateSymmetry, block, firstSublayer=True, layer=layer)
for gateIndex in range(self.size/4):
            gateType = layerHash[gateIndex] & 0xc #int(layerHash[2:][gateIndex],16) & 0xc
if ((gateType>>2) % 2) ==0: # Toffoli
gateName = 0 #'Toffoli'
else:
gateName = 1 #'Fredkin'
if ((gateType>>2) >> 1) == 0: # Toffoli
gateSymmetry = 0 #'Regular'
else:
gateSymmetry = 1 #'Mirrored'
## do gate
block = self.applyGate(gateIndex,gateName,gateSymmetry, block, firstSublayer=False, layer=layer)
return block
def getWire(self, gateIndex, firstSublayer, offset=0):
return (gateIndex * 4 + offset + (not firstSublayer and 2 or 0 )) % self.size
def getBit(self, block, wire):
return block[wire/4] >> wire%4 & 1
def setBit(self, block, wire, bit):
oldNib = block[wire/4] #int(block[2:][wire/4],16)
ret = (oldNib & (15^(1 << wire%4)))
ret = ret | (int(bit) << wire%4)
#ret = hex(ret)[2:]
#block = block[:2 + wire/4] + ret + block[2 + wire/4 + 1:]
block[wire/4] = ret
return block
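    # Wire/nibble mapping sketch (hypothetical wire index): the 256-bit state is
    # stored as 64 nibbles, so wire 5 lives in nibble 5/4 = 1 at bit position
    # 5%4 = 1; getBit/setBit above read and write exactly that bit.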
def applyGate(self, gateIndex,gateName,gateSymmetry, block, firstSublayer=None, layer=None):
# Layer1 (a zigzag)
# 1 ### @@@
# 2 ###
# 3 ### @@@
# 4 @@@
# 5 ### @@@
# 6 ###
# 7 ### @@@
# 8 @@@
# Size must divisible by 8
# Each 3-wire gate can be Toffoli or Fredkin, mirrored or not., 4 choices.
initialOffset = layer%2
wire1 = self.getWire(gateIndex, firstSublayer, offset=initialOffset+0)
wire2 = self.getWire(gateIndex, firstSublayer, offset=initialOffset+1)
wire3 = self.getWire(gateIndex, firstSublayer, offset=initialOffset+2)
val1 = self.getBit(block, wire1)
oval1 = val1
val2 = self.getBit(block, wire2)
oval2 = val2
val3 = self.getBit(block, wire3)
oval3 = val3
# Toffoli and Regular
if gateName == 0 and gateSymmetry == 0 and (val1 and val2):
val3 = val3 ^ (val1 and val2)
# Toffoli and Mirrored
elif gateName == 0 and gateSymmetry == 1 and (val2 and val3):
val1 = val1 ^ (val2 and val3)
# Fredkin and Regular
elif gateName == 1 and gateSymmetry == 0 and val1 and val2!=val3:
#if val1:
val2,val3 = val3,val2
# Fredkin and Mirrored
elif gateName == 1 and gateSymmetry == 1 and val3 and val1!=val2:
#if val3:
val1,val2 = val2,val1
if val1 != oval1:
block = self.setBit(block, wire1, val1)
if val2 != oval2:
block = self.setBit(block, wire2, val2)
if val3 != oval3:
block = self.setBit(block, wire3, val3)
return block
def randomAlfanumericString(N):
import string
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(N))
if __name__ == "__main__":
random.seed(777)
m='This is the canary.'
print 'Message=',m
h = mirror256(m=m)
import time
t = time.time()
c = 0
for i in range(1024):
digest = h.digest()
#print i, '0x' + digest.encode('hex')
randStr = randomAlfanumericString(N=32)
#print len(randStr), randStr
msg = 'This is the canary #%d. asdfasdfasdfasdfasdfqwerqwerqwerdfnnjkdfnjldljknsvv' % i
h = mirror256( msg )
c += 1
if time.time() > t + 1:
print '%d hashes per seconds!' % c
print 'Example message = ', msg
print 'Example digest = ', h.hexdigest()
print 'Example message =',randStr
print 'Example digest = ', mirror256(randStr).hexdigest()
c = 0
t = time.time()
``` |
{
"source": "joiiewang/ProjectBackEndCC",
"score": 3
} |
#### File: joiiewang/ProjectBackEndCC/myapp.py
```python
import os, json
from flask import Flask, request, jsonify, make_response, abort, redirect, url_for
from database import db, User, Course
import psycopg2
DEBUG=True
#use this if linking to a react app on the same server
#app = Flask(__name__, static_folder='./build', static_url_path='/')
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(os.path.dirname(__file__),'app.db')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.init_app(app)
if False:
with app.app_context():
# Resetting database for now since format changes are expected
db.drop_all()
# Mockup of database
db.create_all()
print('Reset database')
#deprecated
#from api import api_v1
#app.register_blueprint(api_v1,url_prefix='/api/v1/')
#---
from api2 import initialize_routes
from flask_restful import Api
api = Api(app)
initialize_routes(api)
### CORS section
@app.after_request
def after_request_func(response):
if DEBUG:
print("in after_request")
#origin = request.headers.get('Origin')
origin = '*'
if request.method == 'OPTIONS':
response = make_response()
response.headers.add('Access-Control-Allow-Credentials', 'true')
response.headers.add('Access-Control-Allow-Headers', 'Content-Type')
response.headers.add('Access-Control-Allow-Headers', '*')
response.headers.add('Access-Control-Allow-Headers', 'x-csrf-token')
response.headers.add('Access-Control-Allow-Methods',
'GET, POST, OPTIONS, PUT, PATCH, DELETE')
if origin:
response.headers.add('Access-Control-Allow-Origin', origin)
else:
response.headers.add('Access-Control-Allow-Credentials', 'true')
if origin:
response.headers.add('Access-Control-Allow-Origin', origin)
return response
### end CORS section
'''
Note that flask automatically redirects routes without a final slash (/) to one with a final slash (e.g. /getmsg redirects to /getmsg/). Curl does not handle redirects but instead prints the updated url. The browser handles redirects (i.e. takes them). You should always code your routes with both a leading and a trailing slash.
'''
# Set the base route to be the react index.html
@app.route('/')
def index():
return redirect(url_for('users'))
def main():
    '''The threaded option allows concurrent accesses. Host 0.0.0.0 listens on all network interfaces (leaving it off restricts access to the local host only). The port being listened on must be open in your firewall or mapped out if running inside a Docker container. On Heroku, the runtime sets this value via the PORT environment variable (you are not allowed to hard code it), so read it from that variable and give a default value (8118) for when we execute locally. Python will tell us if the port is in use. Start by using a value > 8000 as these are likely to be available.
'''
localport = int(os.getenv("PORT", 8118))
app.run(threaded=True, host='0.0.0.0', port=localport)
if __name__ == '__main__':
main()
``` |
{
"source": "JOiiNT-LAB/multimaster_fkie",
"score": 2
} |
#### File: fkie_node_manager_daemon/tests/test_screen.py
```python
import os
import unittest
import time
import fkie_node_manager_daemon.screen as screen
PKG = 'fkie_node_manager_daemon'
class TestScreen(unittest.TestCase):
'''
'''
def setUp(self):
pass
def tearDown(self):
pass
def test_create_session_name(self):
name = screen.create_session_name(None)
self.assertEqual(name, '', "wrong screen session name from `None`, got: %s, expected: %s" % (name, ''))
name = screen.create_session_name('/test/node')
self.assertEqual(name, '_test_node', "wrong screen session name from `/test/node`, got: %s, expected: %s" % (name, '_test_node'))
def test_session_name2node_name(self):
sname = screen.create_session_name('/test/node')
nname = screen.session_name2node_name(sname)
self.assertEqual(nname, '/test/node', "wrong node name from session name, got: %s, expected: %s" % (nname, '/test/node'))
def test_split_session_name(self):
_pid, name = screen.split_session_name(None)
self.assertEqual(name, '', "wrong screen session name after split from `None`, got: %s, expected: %s" % (name, ''))
_pid, name = screen.split_session_name('123._test_node')
self.assertEqual(name, '_test_node', "wrong screen session name after split from `123._test_node`, got: %s, expected: %s" % (name, '_test_node'))
pid, _name = screen.split_session_name('was._test_node')
self.assertEqual(pid, -1, "wrong pid after screen split session `was._test_node`, got: %d, expected: %d" % (pid, -1))
_pid, name = screen.split_session_name('666. ')
self.assertEqual(name, '', "wrong name after screen split session `666.`, got: %s, expected: %s" % (name, ''))
def test_rosclean(self):
screen.rosclean()
if __name__ == '__main__':
import rosunit
rosunit.unitrun(PKG, os.path.basename(__file__), TestScreen)
``` |
{
"source": "Joi/jibot3",
"score": 2
} |
#### File: jibot3/include/herald.py
```python
import json
import os
from plugins.user_likes import user_likes
from plugins.user_dislikes import user_dislikes
from slack_bolt import Ack, BoltRequest, BoltResponse, Respond, Say
from slack_bolt.context import BoltContext
from slack_sdk.web import WebClient, slack_response
class herald:
user_id:str
def __init__(self, ack: Ack, client:WebClient, context:BoltContext, payload:dict, request:BoltRequest, respond:Respond, say:Say):
ack()
self.user_id = context.get('user_id')
if payload.get('text') is not None:
who_to_herald, sep, payload_text = payload.get('text').partition(" ")
if who_to_herald != "me":
user_token = os.environ.get("JIBOT_SLACK_USER_TOKEN", None)
user_response:slack_response = client.users_identity(token=user_token, name=who_to_herald)
if user_response.get('ok'):
self.user_id = user_response.get('user').get('id')
if respond.response_url is None:
say(blocks=self.blocks())
else:
respond(blocks=self.blocks())
def blocks(self):
blocks:list = [{
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"Huzzah to <@{self.user_id}>!"
}
}]
blocks.extend(user_likes().blocks(self.user_id))
blocks.extend(user_dislikes().blocks(self.user_id))
return blocks
```
#### File: jibot3/plugins/definition.py
```python
from lib.database import SQLite
import inspect
import logging
import re
from pathlib import Path
from slack_bolt import Ack, BoltRequest, Respond, Say
from slack_sdk.web import WebClient
from stop_words import get_stop_words
table_name = Path(__file__).stem
table_params:str = "ID INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE, OBJECT text NOT NULL, ATTRIBUTE text NOT NULL, UNIQUE(OBJECT, ATTRIBUTE)"
SQLite().create_table(table_name, table_params)
class definition:
db:SQLite
def __init__(self):
self.db = SQLite()
def blocks(self):
blocks:list = []
definitions = self.select()
if definitions is not None:
for d in definitions:
object:str = d[0]
definition = d[1]
blocks.append({
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"*{object}:* {definition}"
}
})
return(blocks)
def select(self, key:str = None):
logging.debug(inspect.currentframe().f_code.co_name)
db_response = None
if key is None:
query:str = self.db.select_query(table_name, columns="OBJECT, ATTRIBUTE", order_by="OBJECT")
db_response = self.db.cursor.execute(query).fetchall()
else:
query:str = self.db.select_query(table_name, columns="OBJECT, ATTRIBUTE", order_by="OBJECT", where="OBJECT=?")
            db_response = self.db.cursor.execute(query, [key]).fetchall()
# if type(values) == type(tuple()):
# db_response = json.loads(values[0])
return db_response
def define(self, object, attribute):
logging.debug(inspect.currentframe().f_code.co_name)
self.db:SQLite = SQLite()
self.db.cursor.execute(f"INSERT OR IGNORE INTO {table_name} (OBJECT, ATTRIBUTE) VALUES(?, ?)", (object, attribute))
self.db.connection.commit()
def undefine(self, object, attribute):
logging.debug(inspect.currentframe().f_code.co_name)
self.db:SQLite = SQLite()
self.db.cursor.execute(f"DELETE FROM {table_name} WHERE OBJECT=? AND ATTRIBUTE=?", (object, attribute))
self.db.connection.commit()
class action(definition):
def __init__(self, ack:Ack, client:WebClient, logger:logging.Logger, request:BoltRequest):
super().__init__()
ack()
container = request.body.get('container', None)
view:dict = request.body.get(container.get('type'))
title:dict = view.get('title')
title.update(text=":brain: Brain")
close_button = view.get('close')
close_button.update(text="Go Back")
client.views_push(
trigger_id=request.body.get('trigger_id'),
view={
"type": view.get('type'),
"title": title,
"close": close_button,
"blocks": self.blocks()
}
)
class message(definition):
__doc__ = "The bot tries to learn about stuff, when you say a declarative statement, such as `[SOMETHING] is [SOMEATTRIBUTE]`, the bot will save that information."
spaces:str = "|".join([' ', '\xa0'])
space_re = f"({spaces})+"
user_re:str = "<@(?P<user_id>[A-Z0-9]+)>"
object_re:str = "(?P<object>\w+)"
plus_operators = ["is the", "is", "are"]
minus_operators = ["is not the", "is not", "isn't", "are not", "aren't"]
operator_re:str = f"(?P<operator>{'|'.join(minus_operators)}|{'|'.join(plus_operators)})"
    definition_re:str = r"(?P<definition>\w+)"
keyword:re = re.compile(f"({user_re}|{object_re}){space_re}{operator_re}{space_re}{definition_re}")
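    # Parsing sketch (hypothetical message): for the text "python is awesome",
    # the regex above yields object="python", operator="is",
    # definition="awesome", so define("python", "awesome") is stored; a minus
    # operator such as "is not" would call undefine() instead.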
def __init__(self, logger:logging.Logger, payload:dict, request:BoltRequest, say:Say):
super().__init__()
matches = re.finditer(self.keyword, payload.get('text'))
for match in matches:
user_id = match.group('user_id')
object = match.group('object').lower()
operator = match.group('operator')
definition = match.group('definition')
object = user_id if user_id is not None else object
logger.info(f"{object} {operator} {definition}")
if definition not in get_stop_words('english'):
if operator in self.plus_operators:
self.define(object, definition)
else:
self.undefine(object, definition)
``` |
{
"source": "joiller/flask_blog",
"score": 3
} |
#### File: flask_blog/app/__init__.py
```python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import pymysql
pymysql.install_as_MySQLdb()
db = SQLAlchemy()
def create_app():
app = Flask(__name__)
    app.config['DEBUG']=True # debug mode
    app.config['SQLALCHEMY_DATABASE_URI']='mysql://root:jhl233666@localhost:3306/dblog' # connect to the database
    app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN']=True # auto-commit to the database when content is updated
    app.config['SECRET_KEY']='NINCAICAIKANYA' # set the session secret key
    db.init_app(app) # initialize the database
    from .main import main as main_blueprint # link the app and the main blueprint together
app.register_blueprint(main_blueprint)
from .user import user as user_blueprint
app.register_blueprint(user_blueprint)
return app
```
#### File: flask_blog/app/models.py
```python
from . import db
class BlogType(db.Model):
__tablename__ = 'blogtype'
id = db.Column(db.INTEGER, primary_key=True)
type_name = db.Column(db.String(20))
topic = db.relationship('Topic', backref='blogtype', lazy='dynamic')
def __init__(self, n):
self.type_name = n
def __repr__(self):
return 'BlogType : %r' % self.type_name
class Category(db.Model):
__tablename__ = 'category'
id = db.Column(db.INTEGER, primary_key=True)
cate_name = db.Column(db.String(50))
topic = db.relationship('Topic', backref='category', lazy='dynamic')
def __init__(self, n):
self.cate_name = n
def __repr__(self):
return 'Category : %r' % self.cate_name
class User(db.Model):
__tablename__ = 'user'
ID = db.Column(db.INTEGER, primary_key=True)
loginname = db.Column(db.String(50), nullable=False)
uname = db.Column(db.String(30), nullable=False)
email = db.Column(db.String(200), nullable=False)
url = db.Column(db.String(200))
upwd = db.Column(db.String(30), nullable=False)
is_author = db.Column(db.SmallInteger, default=0)
topic = db.relationship('Topic', backref='user', lazy='dynamic')
reply = db.relationship('Reply', backref='user', lazy='dynamic')
voke_topic = db.relationship(
'Topic',
secondary='voke',
backref=db.backref('voke_user', lazy='dynamic'),
lazy='dynamic'
)
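    # Usage sketch (hypothetical objects): with the many-to-many 'voke' table
    # below, some_user.voke_topic yields the topics a user voted for, and
    # some_topic.voke_user yields the users who voted for a topic; both are
    # dynamic queries, so .all() / .filter() can be chained on them.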
# def __init__(self, n):
# self.uname = n
#
def __repr__(self):
return 'User : %r' % self.uname
class Topic(db.Model):
__tablename__ = 'topic'
id = db.Column(db.INTEGER, primary_key=True)
title = db.Column(db.String(200), nullable=False)
pub_date = db.Column(db.DateTime, nullable=False)
read_num = db.Column(db.INTEGER, default=0)
content = db.Column(db.Text, nullable=False)
images = db.Column(db.Text)
blogtype_id = db.Column(db.INTEGER, db.ForeignKey('blogtype.id'))
category_id = db.Column(db.INTEGER, db.ForeignKey('category.id'))
user_id = db.Column(db.INTEGER, db.ForeignKey('user.ID'))
reply = db.relationship('Reply', backref='topic', lazy='dynamic')
# def __init__(self, n):
# self.cate_name = n
#
# def __repr__(self):
# return 'Category : %r' % self.cate_name
class Reply(db.Model):
__tablename__ = 'reply'
id = db.Column(db.INTEGER, primary_key=True)
content = db.Column(db.Text, nullable=False)
reply_time = db.Column(db.DateTime)
user_id = db.Column(db.INTEGER, db.ForeignKey('user.ID'))
topic_id = db.Column(db.INTEGER, db.ForeignKey('topic.id'))
# def __init__(self, n):
#
# def __repr__(self):
# return 'Category : %r' % self.cate_name
Voke = db.Table(
'voke',
db.Column('id', db.INTEGER, primary_key=True),
db.Column('user_id', db.INTEGER, db.ForeignKey('user.ID')),
db.Column('topic_id', db.INTEGER, db.ForeignKey('topic.id'))
)
``` |
{
"source": "joilsonsr/kyros",
"score": 3
} |
#### File: joilsonsr/kyros/main.py
```python
import asyncio
import logging
import pyqrcode
import kyros
logging.basicConfig()
# set a logging level: just to know if something (bad?) happens
logging.getLogger("kyros").setLevel(logging.WARNING)
async def main():
# create the Client instance using create class method
whatsapp = await kyros.Client.create()
# do a QR login
    # qr_data, scanned = await whatsapp.qr_login()
    # generate qr code image
    # qr_code = pyqrcode.create(qr_data)
    # print(qr_code.terminal(quiet_zone=1))
    # try:
    #     # wait for the QR code to be scanned
    #     await scanned
    # except asyncio.TimeoutError:
    #     # timed out (left unscanned), do a shutdown
    #     await whatsapp.shutdown()
    #     return
# how to send a websocket message
teste = whatsapp.session.from_file("saveSession.dat")
print(teste)
await whatsapp.restore_session(teste)
message = kyros.WebsocketMessage(None, ["query", "exist", "556185923871"])
await whatsapp.websocket.send_message(message)
# receive a websocket message
print(await whatsapp.websocket.messages.get(message.tag))
await whatsapp.websocket.shutdown()
if __name__ == "__main__":
asyncio.run(main())
``` |
{
"source": "joinalahmed/ai4change-yolo-device",
"score": 3
} |
#### File: joinalahmed/ai4change-yolo-device/receiver.py
```python
import json
import os
import time
import cv2
from communication import Communication
# Set parameters
image_base_path = os.path.join(os.getcwd(), "images")
image_extension = "jpg"
mqtt_topic = "advice"
# The recommendation received via MQTT is stored in JSON format.
# Function get_recommendation_from_json parses this JSON to extract the information required to show the correct image
def get_recommendation_from_json (raw_json):
parsed_json = raw_json['output']['recommendation']
return parsed_json
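# Sketch of the payload shape this expects (the recommendation value itself is
# hypothetical): {"output": {"recommendation": "some_image_name"}} -- the
# returned string is then used as the image file name (without extension).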
# Function show_image opens the correct image (based on the recommendation received via MQTT)
# and shows it on the screen plugged into the Raspberry Pi
def show_image(image_base_path, image_name, image_extension):
img = cv2.imread(os.path.join(image_base_path, (image_name + "." + image_extension)))
img_resized = cv2.resize(img, (480, 320))
cv2.imshow(image_name, img_resized)
cv2.waitKey(6000)
cv2.destroyAllWindows()
# Put everything together into the workflow
def customCallback(client, userdata, message):
str = message.payload.decode('utf-8')
raw_json = json.loads(str)
recommendation = get_recommendation_from_json(raw_json)
show_image(image_base_path, recommendation, image_extension)
# initialize Communication with AWS and subscribe "advice" topic
communication = Communication()
communication.connect()
communication.subscribe(mqtt_topic, customCallback)
while True:
time.sleep(10)
``` |
{
"source": "joinalahmed/Text-Rewrite-NLP",
"score": 3
} |
#### File: joinalahmed/Text-Rewrite-NLP/best_syn.py
```python
__author__ = 'woolz'
__git__ = 'https://github.com/woolz/Text-Rewrite-NLP'
from nltk.corpus import wordnet
import spacy
import urllib
import json
nlp = spacy.load('en')
class BestSyn:
def get_datamuse_syn_list(self):
url = "https://api.datamuse.com/words?ml=" + self.word
response = urllib.urlopen(url)
data = response.read().decode("utf-8")
json_data = json.loads(data)
word_list = []
for x in json_data:
word_list.append(x['word'])
return word_list
def __init__(self, word):
self.word = word
self.best_score = 0.0
self.best_choice = ""
def pull(self):
words_list = self.get_datamuse_syn_list()
for syn_word in words_list:
use_nltk = True
try:
nltk_raw_word = wordnet.synsets(self.word)[0]
nltk_syn_word = wordnet.synsets(syn_word)[0]
except:
use_nltk = False
spacy_raw_word = nlp(unicode(self.word.lower()))
spacy_syn_word = nlp(unicode(syn_word.lower()))
spacy_score = spacy_raw_word.similarity(spacy_syn_word)
if (use_nltk == True):
nltk_score = nltk_syn_word.wup_similarity(nltk_raw_word)
if (nltk_score == None):
nltk_score = 0
score = (nltk_score+spacy_score)/2
else:
score = spacy_score
if (score > self.best_score):
self.best_score = score
self.best_choice = syn_word
result = [self.best_score, self.best_choice]
return result
def __del__(self):
self.word = False
self.best_score = False
self.best_choice = False
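# Minimal usage sketch (assumes the spaCy 'en' model and the NLTK wordnet
# corpus are installed, and that api.datamuse.com is reachable):
#
#   score, best = BestSyn("happy").pull()
#
# pull() returns [best_score, best_choice], i.e. the synonym from Datamuse
# that scores highest under the averaged wordnet/spaCy similarity.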
``` |
{
"source": "JoinChang/LxBot-Peek-Client",
"score": 3
} |
#### File: JoinChang/LxBot-Peek-Client/config.py
```python
from configparser import ConfigParser
class Config:
def __init__(self):
self.cp = ConfigParser()
self.cp.read("config.ini")
def get(self, section_name, config_name, default_data=None):
try:
result = self.cp.get(section_name, config_name)
except:
return default_data
return result
def set(self, section_name, config_name, config_data):
self.cp.set(section_name, config_name, str(config_data))
with open("config.ini", "w") as f:
self.cp.write(f)
return True
config = Config()
```
#### File: LxBot-Peek-Client/form/Home.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(510, 321)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("../src/icon.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
MainWindow.setToolTip("")
self.centralwidget = QtWidgets.QWidget(MainWindow)
font = QtGui.QFont()
font.setFamily("微软雅黑")
self.centralwidget.setFont(font)
self.centralwidget.setObjectName("centralwidget")
self.runFrpButton = QtWidgets.QPushButton(self.centralwidget)
self.runFrpButton.setGeometry(QtCore.QRect(310, 280, 91, 21))
font = QtGui.QFont()
font.setFamily("微软雅黑")
self.runFrpButton.setFont(font)
self.runFrpButton.setObjectName("runFrpButton")
self.configureFrpButton = QtWidgets.QPushButton(self.centralwidget)
self.configureFrpButton.setGeometry(QtCore.QRect(310, 250, 91, 21))
font = QtGui.QFont()
font.setFamily("微软雅黑")
self.configureFrpButton.setFont(font)
self.configureFrpButton.setObjectName("configureFrpButton")
self.runPeekButton = QtWidgets.QPushButton(self.centralwidget)
self.runPeekButton.setGeometry(QtCore.QRect(410, 250, 91, 51))
font = QtGui.QFont()
font.setFamily("微软雅黑")
self.runPeekButton.setFont(font)
self.runPeekButton.setObjectName("runPeekButton")
self.port = QtWidgets.QLineEdit(self.centralwidget)
self.port.setGeometry(QtCore.QRect(450, 220, 51, 20))
font = QtGui.QFont()
font.setFamily("微软雅黑")
self.port.setFont(font)
self.port.setInputMask("")
self.port.setReadOnly(True)
self.port.setObjectName("port")
self.portLabel = QtWidgets.QLabel(self.centralwidget)
self.portLabel.setGeometry(QtCore.QRect(393, 220, 51, 20))
font = QtGui.QFont()
font.setFamily("微软雅黑")
self.portLabel.setFont(font)
self.portLabel.setObjectName("portLabel")
self.runInBackgroundButton = QtWidgets.QPushButton(self.centralwidget)
self.runInBackgroundButton.setEnabled(True)
self.runInBackgroundButton.setGeometry(QtCore.QRect(10, 280, 91, 21))
font = QtGui.QFont()
font.setFamily("微软雅黑")
self.runInBackgroundButton.setFont(font)
self.runInBackgroundButton.setObjectName("runInBackgroundButton")
self.groupBox = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox.setGeometry(QtCore.QRect(10, 80, 491, 131))
font = QtGui.QFont()
font.setFamily("微软雅黑")
self.groupBox.setFont(font)
self.groupBox.setObjectName("groupBox")
self.vagueLabel = QtWidgets.QLabel(self.groupBox)
self.vagueLabel.setGeometry(QtCore.QRect(10, 30, 54, 12))
font = QtGui.QFont()
font.setFamily("微软雅黑")
self.vagueLabel.setFont(font)
self.vagueLabel.setObjectName("vagueLabel")
self.vagueSlider = QtWidgets.QSlider(self.groupBox)
self.vagueSlider.setGeometry(QtCore.QRect(10, 50, 160, 16))
self.vagueSlider.setMaximum(10)
self.vagueSlider.setPageStep(1)
self.vagueSlider.setProperty("value", 6)
self.vagueSlider.setOrientation(QtCore.Qt.Horizontal)
self.vagueSlider.setObjectName("vagueSlider")
self.brightnessLabel = QtWidgets.QLabel(self.groupBox)
self.brightnessLabel.setGeometry(QtCore.QRect(10, 80, 54, 12))
font = QtGui.QFont()
font.setFamily("微软雅黑")
self.brightnessLabel.setFont(font)
self.brightnessLabel.setObjectName("brightnessLabel")
self.brightnessSlider = QtWidgets.QSlider(self.groupBox)
self.brightnessSlider.setGeometry(QtCore.QRect(10, 100, 160, 16))
self.brightnessSlider.setMaximum(10)
self.brightnessSlider.setPageStep(1)
self.brightnessSlider.setProperty("value", 10)
self.brightnessSlider.setOrientation(QtCore.Qt.Horizontal)
self.brightnessSlider.setObjectName("brightnessSlider")
self.title = QtWidgets.QLabel(self.centralwidget)
self.title.setGeometry(QtCore.QRect(80, 20, 221, 21))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(16)
self.title.setFont(font)
self.title.setObjectName("title")
self.icon = QtWidgets.QLabel(self.centralwidget)
self.icon.setGeometry(QtCore.QRect(10, 10, 61, 61))
self.icon.setAutoFillBackground(False)
self.icon.setText("")
self.icon.setPixmap(QtGui.QPixmap("src/icon.png"))
self.icon.setScaledContents(True)
self.icon.setAlignment(QtCore.Qt.AlignCenter)
self.icon.setWordWrap(False)
self.icon.setOpenExternalLinks(True)
self.icon.setObjectName("icon")
self.description = QtWidgets.QLabel(self.centralwidget)
self.description.setGeometry(QtCore.QRect(80, 40, 111, 21))
font = QtGui.QFont()
font.setFamily("微软雅黑")
self.description.setFont(font)
self.description.setObjectName("description")
self.peekStatus = QtWidgets.QCheckBox(self.centralwidget)
self.peekStatus.setGeometry(QtCore.QRect(10, 220, 71, 20))
self.peekStatus.setChecked(True)
self.peekStatus.setObjectName("peekStatus")
self.forceStopFrpButton = QtWidgets.QPushButton(self.centralwidget)
self.forceStopFrpButton.setEnabled(False)
self.forceStopFrpButton.setGeometry(QtCore.QRect(180, 280, 121, 21))
self.forceStopFrpButton.setObjectName("forceStopFrpButton")
self.aboutButton = QtWidgets.QPushButton(self.centralwidget)
self.aboutButton.setGeometry(QtCore.QRect(450, 50, 51, 23))
self.aboutButton.setObjectName("aboutButton")
self.autorun = QtWidgets.QCheckBox(self.centralwidget)
self.autorun.setGeometry(QtCore.QRect(10, 246, 71, 20))
self.autorun.setObjectName("autorun")
self.configureHotkeyButton = QtWidgets.QPushButton(self.centralwidget)
self.configureHotkeyButton.setGeometry(QtCore.QRect(310, 220, 75, 21))
self.configureHotkeyButton.setObjectName("configureHotkeyButton")
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "LxBot Peek Client"))
self.runFrpButton.setToolTip(_translate("MainWindow", "您需要启动 FRP 穿透才能让 LxBot 访问您的计算机"))
self.runFrpButton.setText(_translate("MainWindow", "启动 FRP 穿透"))
self.configureFrpButton.setText(_translate("MainWindow", "配置 FRP 穿透"))
self.runPeekButton.setText(_translate("MainWindow", "启动监控服务"))
self.port.setText(_translate("MainWindow", "12345"))
self.portLabel.setText(_translate("MainWindow", "启动端口"))
self.runInBackgroundButton.setText(_translate("MainWindow", "最小化到托盘"))
self.groupBox.setTitle(_translate("MainWindow", "配置截图效果"))
self.vagueLabel.setText(_translate("MainWindow", "模糊度"))
self.brightnessLabel.setText(_translate("MainWindow", "亮度"))
self.title.setText(_translate("MainWindow", "LxBot Peek Client"))
self.description.setText(_translate("MainWindow", "软糖正在看着你 👀"))
self.peekStatus.setToolTip(_translate("MainWindow", "关闭后 LxBot 将无法获取您计算机的截图"))
self.peekStatus.setText(_translate("MainWindow", "监控状态"))
self.forceStopFrpButton.setText(_translate("MainWindow", "强制关闭 FRP 穿透"))
self.aboutButton.setText(_translate("MainWindow", "关于"))
self.autorun.setText(_translate("MainWindow", "开机自启"))
self.configureHotkeyButton.setText(_translate("MainWindow", "配置快捷键"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
```
#### File: LxBot-Peek-Client/form/HotkeyConfigure.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_HotkeyConfigure(object):
def setupUi(self, HotkeyConfigure):
HotkeyConfigure.setObjectName("HotkeyConfigure")
HotkeyConfigure.resize(260, 101)
font = QtGui.QFont()
font.setFamily("微软雅黑")
HotkeyConfigure.setFont(font)
self.switchPeekStatusLabel = QtWidgets.QLabel(HotkeyConfigure)
self.switchPeekStatusLabel.setGeometry(QtCore.QRect(10, 40, 81, 16))
self.switchPeekStatusLabel.setObjectName("switchPeekStatusLabel")
self.saveButton = QtWidgets.QPushButton(HotkeyConfigure)
self.saveButton.setGeometry(QtCore.QRect(100, 70, 71, 23))
self.saveButton.setObjectName("saveButton")
self.cancelButton = QtWidgets.QPushButton(HotkeyConfigure)
self.cancelButton.setGeometry(QtCore.QRect(180, 70, 71, 23))
self.cancelButton.setObjectName("cancelButton")
self.switchPeekStatus = QtWidgets.QLineEdit(HotkeyConfigure)
self.switchPeekStatus.setGeometry(QtCore.QRect(140, 40, 113, 20))
self.switchPeekStatus.setObjectName("switchPeekStatus")
self.hotkeyCheckBox = QtWidgets.QCheckBox(HotkeyConfigure)
self.hotkeyCheckBox.setGeometry(QtCore.QRect(10, 10, 81, 21))
self.hotkeyCheckBox.setObjectName("hotkeyCheckBox")
self.retranslateUi(HotkeyConfigure)
QtCore.QMetaObject.connectSlotsByName(HotkeyConfigure)
def retranslateUi(self, HotkeyConfigure):
_translate = QtCore.QCoreApplication.translate
HotkeyConfigure.setWindowTitle(_translate("HotkeyConfigure", "配置快捷键"))
self.switchPeekStatusLabel.setText(_translate("HotkeyConfigure", "切换监控状态"))
self.saveButton.setText(_translate("HotkeyConfigure", "保存"))
self.cancelButton.setText(_translate("HotkeyConfigure", "取消"))
self.switchPeekStatus.setInputMask(_translate("HotkeyConfigure", "Ctrl+Alt+Q"))
self.switchPeekStatus.setText(_translate("HotkeyConfigure", "Ctrl+Alt+Q"))
self.hotkeyCheckBox.setText(_translate("HotkeyConfigure", "全局快捷键"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
HotkeyConfigure = QtWidgets.QDialog()
ui = Ui_HotkeyConfigure()
ui.setupUi(HotkeyConfigure)
HotkeyConfigure.show()
sys.exit(app.exec_())
``` |
{
"source": "JoinCODED/NationalFundCommunity",
"score": 2
} |
#### File: NFproject/mailing/admin.py
```python
from django.contrib import admin
from django.core.mail import send_mail
from .models import Subscriber
def send_email(modeladmin, request, queryset):
email_list = []
for q in queryset:
email_list.append(q.email)
send_mail(
'Subscribers',
'Check our latest articles and events on our website',
'<EMAIL>',
email_list,
)
send_email.short_description = "Send Email"
class subscriberAdmin (admin.ModelAdmin):
list_display = ('email',)
actions = [send_email]
admin.site.register(Subscriber, subscriberAdmin)
```
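The action above builds one recipient list and sends a single message, so every selected subscriber lands in the same `To:` header and can see the other addresses. A minimal alternative sketch, assuming the same `Subscriber` model, that sends one message per recipient via `send_mass_mail` (the action name below is hypothetical, not from the repository):
```python
from django.core.mail import send_mass_mail


def send_individual_emails(modeladmin, request, queryset):
    # One (subject, message, from_email, recipient_list) tuple per subscriber,
    # all delivered over a single SMTP connection by send_mass_mail.
    messages = [
        (
            'Subscribers',
            'Check our latest articles and events on our website',
            None,  # fall back to settings.DEFAULT_FROM_EMAIL
            [subscriber.email],
        )
        for subscriber in queryset
    ]
    send_mass_mail(messages)


send_individual_emails.short_description = "Send Individual Emails"
```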
#### File: NFproject/NFproject/views.py
```python
from django.shortcuts import render,redirect
from mailing.models import Subscriber
from mailing.forms import SubscribeForm
from articles.models import Article
from Events.models import Events
from datetime import date
def home(request):
context = {}
context['articles'] = Article.objects.all()
context['featured_articles'] = Article.objects.filter(featured=True)
context['upcoming_events'] = Events.objects.all() \
.filter(date__gte=date.today()) \
.order_by('date')
if request.method == 'POST':
form = SubscribeForm(request.POST)
if form.is_valid():
form.save()
print("here")
return redirect('home')
else:
form = SubscribeForm()
context['form'] = form
return render(request, "home.html", context=context)
``` |
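The view above imports `SubscribeForm` from `mailing.forms`, which is not included in this dump. A minimal sketch of what that form might look like, assuming it is a `ModelForm` over the `Subscriber` model with a single email field:
```python
# mailing/forms.py -- hypothetical reconstruction, not taken from the repository.
from django import forms

from .models import Subscriber


class SubscribeForm(forms.ModelForm):
    class Meta:
        model = Subscriber
        fields = ['email']
```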
{
"source": "JoinCODED/PreciousThingsAPI",
"score": 2
} |
#### File: PreciousThingsAPI/things/serializers.py
```python
from rest_framework import serializers
from django.contrib.auth.models import User
from rest_framework_jwt.settings import api_settings
from .models import Thing, PrivateThing
class UserCreateSerializer(serializers.ModelSerializer):
password = serializers.CharField(write_only=True)
token = serializers.CharField(read_only=True, allow_blank=True)
email = serializers.EmailField(write_only=True)
class Meta:
model = User
fields = ['username', 'password', 'email', 'token']
def create(self, validated_data):
username = validated_data['username']
password = validated_data['password']
email = validated_data['email']
new_user = User(username=username, email=email)
new_user.set_password(password)
new_user.save()
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
payload = jwt_payload_handler(new_user)
token = jwt_encode_handler(payload)
validated_data['token'] = token
return validated_data
class ThingSerialzer(serializers.ModelSerializer):
class Meta:
model = Thing
fields = "__all__"
class PrivateThingSerialzer(serializers.ModelSerializer):
class Meta:
model = PrivateThing
fields = "__all__"
``` |
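`UserCreateSerializer` returns a JWT alongside the new account, so it is typically exposed through a create-only endpoint. A hedged sketch of one way to wire it up; the view class, URL, and permission choice are assumptions, not code from the repository:
```python
# Hypothetical registration endpoint built on UserCreateSerializer.
from rest_framework.generics import CreateAPIView
from rest_framework.permissions import AllowAny

from .serializers import UserCreateSerializer


class RegisterAPIView(CreateAPIView):
    serializer_class = UserCreateSerializer
    permission_classes = [AllowAny]  # registration is open to anonymous users

# urls.py (sketch):
#   path('register/', RegisterAPIView.as_view(), name='register')
```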
{
"source": "JoinCODED/TheIndex-API",
"score": 2
} |
#### File: TheIndex-API/books/models.py
```python
from django.db import models
from django.urls import reverse
class Author(models.Model):
created = models.DateTimeField(auto_now_add=True)
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
imageUrl = models.CharField(max_length=255, blank=True, default="")
@property
def full_name(self):
return f"{self.first_name} {self.last_name}"
def __str__(self):
return self.full_name
def get_absolute_url(self):
return reverse('author-detail', args=[self.id])
class Meta:
ordering = ['last_name', 'first_name']
class Book(models.Model):
COLOR_CHOICES = (
('red', 'red'),
('blue', 'blue'),
('green', 'green'),
('yellow', 'yellow'),
('black', 'black'),
('white', 'white'),
('grey', 'grey'),
('purple', 'purple'),
('orange', 'orange'),
)
created = models.DateTimeField(auto_now_add=True)
title = models.CharField(max_length=255)
available = models.BooleanField(default=True)
color = models.CharField(max_length=255, choices=COLOR_CHOICES)
authors = models.ManyToManyField(Author, related_name='books')
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('book-detail', args=[self.id])
class Meta:
ordering = ['title', ]
``` |
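The `related_name='books'` on `Book.authors` makes the relation queryable from both sides. A short illustrative Django shell session against these models (the data values are made up):
```python
# Django shell sketch against the models above; values are illustrative only.
from books.models import Author, Book

author = Author.objects.create(first_name="Ursula", last_name="Le Guin")
book = Book.objects.create(title="The Dispossessed", color="blue")
book.authors.add(author)

author.full_name            # "Ursula Le Guin" (property defined on Author)
author.books.count()        # 1, reachable through related_name="books"
Book.objects.filter(authors__last_name="Le Guin")  # same relation, other direction
book.get_absolute_url()     # resolves the 'book-detail' URL name
```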
{
"source": "joined/ET4310-SupercomputingForBigData",
"score": 3
} |
#### File: part2/compare/compare.py
```python
import sys
def getLineData(line):
s = line.split()
if s[0][:3] == 'chr':
return "{},{},{},{}".format(s[0], s[1], s[3], s[4])
else:
return None
if len(sys.argv) < 3:
print("Insufficient number of arguments!")
print("Usage: python compare.py ref.vcf your.vcf")
sys.exit(1)
f1, f2 = sys.argv[1], sys.argv[2]
f1data, f2data = set(), set()
with open(f1) as f1:
for line in f1:
line_data = getLineData(line)
if line_data:
f1data.add(line_data)
with open(f2) as f2:
for line in f2:
line_data = getLineData(line)
if line_data and line_data in f1data:
f2data.add(line_data)
print("total = {}".format(len(f1data)))
print("matches = {}".format(len(f2data)))
print("diff = {}".format(len(f1data) - len(f2data)))
``` |
{
"source": "joined/IN4334-MiningSoftwareRepositories",
"score": 4
} |
#### File: report/Data collection/Gini_Computation.py
```python
import requests
import sys
import csv
import re
import numpy as np
def gini_index(array):
"""
Calculate the Gini coefficient of a numpy array
"""
array = array.flatten()
if np.amin(array) < 0:
array -= np.amin(array) # values cannot be negative
array += 0.0000001 # values cannot be 0
array = np.sort(array) # values must be sorted
index = np.arange(1, array.shape[0]+1) # index per array element
n = array.shape[0] # number of array elements
return ((np.sum((2 * index - n - 1) * array)) / (n * np.sum(array)))
input_file = sys.argv[1]
# Store all the projects read from the CSV file in a list
projects = []
with open(input_file, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
# Skip the first line with the header
next(reader)
for row in reader:
# Save the url of the repo and the name in the list
projects.append((row[1], row[3]))
result = []
# Iterate over all the projects and calculate the Gini coefficient
# for each of them, storing the results in the result list
for project_tuple in projects:
project_url, project_name = project_tuple
base_url = project_url + '/contributors'
# Make request to the Github API
r = requests.get(
base_url,
auth=('joined','7fb42c90a8b83b773082e1a337fec4555f65c893'))
contributors = []
# If the project doesn't exist skip to the next one
if r.status_code != 200:
result.append({'project_name': project_name})
continue
cur_contributors = r.json()
# If the response was empty for some reason skip to the next project
if not cur_contributors:
result.append({'project_name': project_name})
continue
# Store the number of contributions of each contributor in a list
contributors = []
for contributor in r.json():
contributors.append(contributor['contributions'])
# If there are more contributors to be downloaded, do it
if 'Link' in r.headers:
# Find first and last page of the results
matches = re.findall(r'<.+?page=(\d+)>', r.headers['Link'])
next_page, last_page = (int(p) for p in matches)
# For each results page add the contributions to the list
for page in range(next_page, last_page + 1):
url = base_url + '?page={}'.format(page)
r = requests.get(
url,
auth=('joined', '<PASSWORD>'))
for contributor in r.json():
contributors.append(contributor['contributions'])
# Compute the Gini index from the array with contributions
gini_coeff = gini_index(np.array(contributors, dtype='float64'))
# Store the result in the result list
result.append({
'project_name': project_name,
'gini_index': gini_coeff,
'n_contributions': sum(contributors),
'n_contributors': len(contributors)
})
output_file = sys.argv[2]
# Save the results to the CSV output file
with open(output_file, 'w', newline='') as csvfile:
fieldnames = [
'project_name',
'gini_index',
'n_contributions',
'n_contributors'
]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for project in result:
writer.writerow(project)
```
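The `gini_index` helper above is self-contained, so it can be sanity-checked without touching the GitHub API. A quick example, assuming the function is in scope (for instance pasted into the same interpreter session):
```python
import numpy as np

# Perfect equality: every contributor has the same number of contributions.
equal = np.array([5, 5, 5, 5], dtype='float64')
print(gini_index(equal))    # ~0.0

# Strong concentration: one contributor made essentially all contributions.
skewed = np.array([0, 0, 0, 100], dtype='float64')
print(gini_index(skewed))   # ~0.75, i.e. (n - 1) / n for n = 4
```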
#### File: report/Data collection/Gini_Index_Plot.py
```python
import requests
import re
import numpy as np
import random
import matplotlib as mpl
mpl.use('pgf')
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
def gini_index(array):
"""
Calculate the Gini coefficient of a numpy array.
"""
array = array.flatten()
if np.amin(array) < 0:
array -= np.amin(array) # values cannot be negative
array += 0.0000001 # values cannot be 0
array = np.sort(array) # values must be sorted
index = np.arange(1, array.shape[0]+1) # index per array element
n = array.shape[0] # number of array elements
return ((np.sum((2 * index - n - 1) * array)) / (n * np.sum(array)))
# List of projects of which we want to draw the plot
# The first element of each tuple is the matplotlib marker used in the plot
# to distinguish projects with a low Gini index from those with a high Gini index
projects = [
('^', 'https://api.github.com/repos/apache/jackrabbit-oak', '#52E8BA', 'Jackrabbit Oak'),
('^', 'https://api.github.com/repos/apache/hadoop', '#5A61FF', 'Hadoop'),
('^', 'https://api.github.com/repos/apache/ofbiz', '#E0FF67', 'Ofbiz'),
('o', 'https://api.github.com/repos/apache/wicket', '#FF5A98', 'Wicket'),
('o', 'https://api.github.com/repos/apache/isis', '#616161', 'Isis'),
('o', 'https://api.github.com/repos/apache/camel', '#E8A952', 'Camel'),
]
for project_tuple in projects:
marker, url, color, name = project_tuple
base_url = url + '/contributors'
r = requests.get(
base_url, auth=('joined', '<PASSWORD> <PASSWORD>'))
# Store in a list the number of contributions of each contributor
contributors = [contributor['contributions'] for contributor in r.json()]
# If there are more contributors to retrieve, do it
if 'Link' in r.headers:
matches = re.findall(r'<.+?page=(\d+)>', r.headers['Link'])
next_page, last_page = (int(p) for p in matches)
for page in range(next_page, last_page + 1):
url = base_url + '?page={}'.format(page)
r = requests.get(
url, auth=('joined', '<PASSWORD>'))
contributors.extend([contributor['contributions'] for contributor in r.json()])
# Normalize each number of contributions by the total number of contributions
    total_contributions = sum(contributors)
    contributors = [contributions / total_contributions for contributions in contributors]
plt.plot(contributors, color=color, marker=marker, markersize=4, linewidth=2.0)
plt.ylabel('Number of contributions, normalized')
plt.xlabel('Contributors, ordered by n. of contributions')
plt.legend(handles=[mlines.Line2D([], [], marker=marker, color=color, linewidth=2.0, label=name)
for marker, _, color, name in projects])
plt.xlim(0, 55)
plt.ylim(0, 0.22)
plt.savefig('figure.pgf')
``` |
{
"source": "joined/PyImageFilter",
"score": 3
} |
#### File: PyImageFilter/pyimagefilter/core.py
```python
import functools
import multiprocessing
import numpy as np
import itertools as itt
from PIL import Image
def normalize_component(value):
"""Normalize component to be in the 0 - 255 range"""
if value < 0:
return 0
if value > 255:
return 255
return value
# These two helper functions are defined at module level (outside the class)
# so that multiprocessing can pickle them when dispatching work to the pool
def lin_calc_px(x, y, pixels, half_mask_size, mask):
"""
Calculates the new color of a single pixel,
given the mask to use, and the pixel position.
"""
# If we are on the border, return (0,0,0) = black pixel
if (x < half_mask_size or x >= pixels.shape[0] - half_mask_size or
y < half_mask_size or y >= pixels.shape[1] - half_mask_size):
return 0, 0, 0
# Extract submatrix of the same size of the mask
subm = pixels[x - half_mask_size: x + half_mask_size + 1,
y - half_mask_size: y + half_mask_size + 1]
# Compute R,G,B values flattening arrays
# to use dot product in order to improve speed
red = int(np.dot(subm[..., 0].ravel(), mask.ravel()))
green = int(np.dot(subm[..., 1].ravel(), mask.ravel()))
blue = int(np.dot(subm[..., 2].ravel(), mask.ravel()))
# Normalize out-of-scale values
red = normalize_component(red)
green = normalize_component(green)
blue = normalize_component(blue)
return red, green, blue
def volterra_new_px(x, y, pixels, N, A, B):
"""
Calculates the new color of a single pixel
using quadratic Volterra filter, given the pixel
position and the A, B coefficient arrays
"""
half_N = N // 2
# If we are on the border, return (0,0,0) = black pixel
if (x < half_N or x >= pixels.shape[0] - half_N or
y < half_N or y >= pixels.shape[1] - half_N):
return 0, 0, 0
# Extract submatrix on which we'll work on
subm = pixels[x - half_N: x + half_N + 1,
y - half_N: y + half_N + 1]
# Compute R,G,B values of the first part of the formula
# (the one relative to the A array) in the same way of
# the linear filtering
A_red = int(np.dot(subm[..., 0].ravel(), A.ravel()))
A_green = int(np.dot(subm[..., 1].ravel(), A.ravel()))
A_blue = int(np.dot(subm[..., 2].ravel(), A.ravel()))
# Compute R,G,B values of the second part of the formula
# (the one relative to the B array)
B_red, B_green, B_blue = 0, 0, 0
# This is equal to 4 nested for loops with range 0 -> N-1
for i, j, k, l in itt.product(range(N), repeat=4):
B_red += B[i, j, k, l] * subm[..., 0][i, j] \
* subm[..., 0][k, l]
B_green += B[i, j, k, l] * subm[..., 1][i, j] \
* subm[..., 1][k, l]
B_blue += B[i, j, k, l] * subm[..., 2][i, j] \
* subm[..., 2][k, l]
red = int(A_red + B_red)
green = int(A_green + B_green)
blue = int(A_blue + B_blue)
# Normalize out-of-scale values
red = normalize_component(red)
green = normalize_component(green)
blue = normalize_component(blue)
return red, green, blue
class ImageFilter:
def __init__(self, image, parallel):
self.image = image
self.parallel = parallel
def volterra_trans(self, A, B):
"""
Applies the Volterra quadratic filter to the current image object,
given the coefficients arrays
"""
# Arrays' dimension
N = A.shape[0]
half_N = N // 2
# Unpack image dimensions
image_width, image_height = self.image.size
# Extract image into array
pixels = np.array(self.image)
# Partialize shared arguments of new pixel function
partialized_new_px = functools.partial(volterra_new_px,
pixels=pixels,
N=N, A=A, B=B)
# Create iterator to use in parallel map
coords = itt.product(range(image_height), range(image_width))
if self.parallel:
# Create process pool, number of processes defaults to
# the number of CPU's cores
pool = multiprocessing.Pool()
# Run parallel map unpacking coord tuple
map_result = pool.starmap(partialized_new_px, coords)
else:
# Run map unpacking coord tuple
map_result = itt.starmap(partialized_new_px, coords)
# Transform map result to array, reshaping it
# to the same size of the original pixels array
new_pixels = np.array(list(map_result),
dtype='uint8').reshape(pixels.shape)
# Crop the image to leave out black borders
self.image = Image.fromarray(new_pixels[
half_N: image_height - half_N,
half_N: image_width - half_N])
def lin_trans(self, mask):
"""
Applies a linear filter to the current image object,
given the mask to apply
"""
# Unpack image dimensions
mask_width, mask_height = mask.shape
half_mask_size = mask_width // 2
image_width, image_height = self.image.size
# Extract image into array
pixels = np.array(self.image)
# Partialize shared arguments of new pixel function
partialized_new_px = functools.partial(lin_calc_px,
pixels=pixels,
half_mask_size=half_mask_size,
mask=mask)
# Create iterator to use in parallel map
coords = itt.product(range(image_height), range(image_width))
if self.parallel:
# Create process pool, number of processes defaults to
# the number of CPU's cores
pool = multiprocessing.Pool()
# Run parallel map unpacking coord tuple
map_result = pool.starmap(partialized_new_px, coords)
else:
# Run map unpacking coord tuple
map_result = itt.starmap(partialized_new_px, coords)
# Transform map result to array, reshaping it
# to the same size of the original pixels array
new_pixels = np.array(list(map_result),
dtype='uint8').reshape(pixels.shape)
# Crop the image to leave out black borders
self.image = Image.fromarray(new_pixels[
half_mask_size: image_height - half_mask_size,
half_mask_size: image_width - half_mask_size])
```
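A hedged usage sketch of the linear filtering path. The import paths assume the `pyimagefilter` package layout shown in this dump, and the file names are placeholders:
```python
from PIL import Image

from pyimagefilter.core import ImageFilter
from pyimagefilter import masks

if __name__ == '__main__':  # needed on platforms that spawn worker processes
    image = Image.open('input.jpg').convert('RGB')  # placeholder input file
    filt = ImageFilter(image, parallel=True)        # process pool sized to the CPU count
    filt.lin_trans(masks.sharpen[0])                # convolve with the first sharpen mask
    filt.image.save('sharpened.jpg')                # result is cropped by the mask border
```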
#### File: PyImageFilter/pyimagefilter/masks.py
```python
import numpy as np
import math
def gauss(stdev, rank):
"""Creates Gauss average mask given stdev and rank"""
def gaussian_f(r):
"""Gaussian function"""
num = math.e ** (- (r ** 2) / (2 * (stdev ** 2)))
den = stdev * math.sqrt(2 * math.pi)
return num / den
mask = np.fromfunction(
lambda x, y: gaussian_f(abs(x - rank // 2) + abs(y - rank // 2)),
(rank, rank),
dtype=float)
# Normalize mask to have unitary sum of elements
return mask / np.sum(mask)
def avg(rank):
"""Creates Average mask given rank"""
return np.ones((rank, rank)) / (rank ** 2)
# Commonly used masks for convenience
sharpen = [
np.array([[0, -1, 0],
[-1, 5, -1],
[0, -1, 0]]),
np.array([[-1, -1, -1],
[-1, 9, -1],
[-1, -1, -1]]),
np.array([[1, -2, 1],
[-2, 5, -2],
[1, -2, 1]])
]
prewitt = [
np.array([[-1, -1, -1],
[0, 0, 0],
[1, 1, 1]]),
np.array([[-1, 0, 1],
[-1, 0, 1],
[-1, 0, 1]])
]
sobel = [
np.array([[-1, -2, -1],
[0, 0, 0],
[1, 2, 1]]),
np.array([[-1, 0, 1],
[-2, 0, 2],
[-1, 0, 1]])
]
``` |
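The mask builders return plain NumPy arrays, so their properties are easy to verify directly. A small check, assuming the names above are in scope (for example via `from pyimagefilter.masks import *`):
```python
import numpy as np

blur = gauss(stdev=1.0, rank=5)
print(blur.shape)                   # (5, 5)
print(np.isclose(blur.sum(), 1.0))  # True: gauss() normalizes to a unit sum

box = avg(3)
print(box)                          # 3x3 array filled with 1/9

print(sobel[0] + sobel[0][::-1])    # all zeros: the kernel is antisymmetric row-wise
```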
{
"source": "joinee0208/auditok",
"score": 2
} |
#### File: auditok/auditok/plotting.py
```python
import matplotlib.pyplot as plt
import numpy as np
AUDITOK_PLOT_THEME = {
"figure": {"facecolor": "#482a36", "alpha": 0.2},
"plot": {"facecolor": "#282a36"},
"energy_threshold": {
"color": "#e31f8f",
"linestyle": "--",
"linewidth": 1,
},
"signal": {"color": "#40d970", "linestyle": "-", "linewidth": 1},
"detections": {
"facecolor": "#777777",
"edgecolor": "#ff8c1a",
"linewidth": 1,
"alpha": 0.75,
},
}
def _make_time_axis(nb_samples, sampling_rate):
sample_duration = 1 / sampling_rate
x = np.linspace(0, sample_duration * (nb_samples - 1), nb_samples)
return x
def _plot_line(x, y, theme, xlabel=None, ylabel=None, **kwargs):
color = theme.get("color", theme.get("c"))
ls = theme.get("linestyle", theme.get("ls"))
lw = theme.get("linewidth", theme.get("lw"))
plt.plot(x, y, c=color, ls=ls, lw=lw, **kwargs)
plt.xlabel(xlabel, fontsize=8)
plt.ylabel(ylabel, fontsize=8)
def _plot_detections(subplot, detections, theme):
fc = theme.get("facecolor", theme.get("fc"))
ec = theme.get("edgecolor", theme.get("ec"))
ls = theme.get("linestyle", theme.get("ls"))
lw = theme.get("linewidth", theme.get("lw"))
alpha = theme.get("alpha")
for (start, end) in detections:
subplot.axvspan(start, end, fc=fc, ec=ec, ls=ls, lw=lw, alpha=alpha)
def plot(
audio_region,
scale_signal=True,
detections=None,
energy_threshold=None,
show=True,
figsize=None,
save_as=None,
dpi=120,
theme="auditok",
):
y = np.asarray(audio_region)
if len(y.shape) == 1:
y = y.reshape(1, -1)
nb_subplots, nb_samples = y.shape
sampling_rate = audio_region.sampling_rate
time_axis = _make_time_axis(nb_samples, sampling_rate)
if energy_threshold is not None:
eth_log10 = energy_threshold * np.log(10) / 10
amplitude_threshold = np.sqrt(np.exp(eth_log10))
else:
amplitude_threshold = None
if detections is None:
detections = []
else:
# End of detection corresponds to the end of the last sample but
# to stay compatible with the time axis of signal plotting we want end
        # of detection to correspond to the *start* of that last sample.
detections = [
(start, end - (1 / sampling_rate)) for (start, end) in detections
]
if theme == "auditok":
theme = AUDITOK_PLOT_THEME
fig = plt.figure(figsize=figsize, dpi=dpi)
fig_theme = theme.get("figure", theme.get("fig", {}))
fig_fc = fig_theme.get("facecolor", fig_theme.get("ffc"))
fig_alpha = fig_theme.get("alpha", 1)
fig.patch.set_facecolor(fig_fc)
fig.patch.set_alpha(fig_alpha)
plot_theme = theme.get("plot", {})
plot_fc = plot_theme.get("facecolor", plot_theme.get("pfc"))
if nb_subplots > 2 and nb_subplots % 2 == 0:
nb_rows = nb_subplots // 2
nb_columns = 2
else:
nb_rows = nb_subplots
nb_columns = 1
for sid, samples in enumerate(y, 1):
ax = fig.add_subplot(nb_rows, nb_columns, sid)
ax.set_facecolor(plot_fc)
if scale_signal:
std = samples.std()
if std > 0:
mean = samples.mean()
std = samples.std()
samples = (samples - mean) / std
max_ = samples.max()
plt.ylim(-1.5 * max_, 1.5 * max_)
if amplitude_threshold is not None:
if scale_signal and std > 0:
amp_th = (amplitude_threshold - mean) / std
else:
amp_th = amplitude_threshold
eth_theme = theme.get("energy_threshold", theme.get("eth", {}))
_plot_line(
[time_axis[0], time_axis[-1]],
[amp_th] * 2,
eth_theme,
label="Detection threshold",
)
if sid == 1:
legend = plt.legend(
["Detection threshold"],
facecolor=fig_fc,
framealpha=0.1,
bbox_to_anchor=(0.0, 1.15, 1.0, 0.102),
loc=2,
)
legend = plt.gca().add_artist(legend)
signal_theme = theme.get("signal", {})
_plot_line(
time_axis,
samples,
signal_theme,
xlabel="Time (seconds)",
ylabel="Signal{}".format(" (scaled)" if scale_signal else ""),
)
detections_theme = theme.get("detections", {})
_plot_detections(ax, detections, detections_theme)
plt.title("Channel {}".format(sid), fontsize=10)
plt.xticks(fontsize=8)
plt.yticks(fontsize=8)
plt.tight_layout()
if save_as is not None:
plt.savefig(save_as, dpi=dpi)
if show:
plt.show()
```
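A sketch of driving the module-level `plot()` function directly. The file names are placeholders and the split parameters are illustrative, assuming `AudioRegion.split` forwards its keyword arguments to `auditok.split` as elsewhere in this code base:
```python
from auditok import AudioRegion
from auditok.plotting import plot

region = AudioRegion.load('audio.wav')      # placeholder input file
detections = [(r.meta.start, r.meta.end)    # seconds relative to the region start
              for r in region.split(eth=50)]
plot(
    region,
    detections=detections,
    energy_threshold=50,                    # drawn as a horizontal dashed line
    show=False,
    save_as='detections.png',               # placeholder output file
)
```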
#### File: auditok/auditok/workers.py
```python
import os
import sys
from tempfile import NamedTemporaryFile
from abc import ABCMeta, abstractmethod
from threading import Thread
from datetime import datetime, timedelta
from collections import namedtuple
import wave
import subprocess
from queue import Queue, Empty
from .io import _guess_audio_format
from .util import AudioDataSource, make_duration_formatter
from .core import split
from .exceptions import (
EndOfProcessing,
AudioEncodingError,
AudioEncodingWarning,
)
_STOP_PROCESSING = "STOP_PROCESSING"
_Detection = namedtuple("_Detection", "id start end duration")
def _run_subprocess(command):
try:
with subprocess.Popen(
command,
stdin=open(os.devnull, "rb"),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
) as proc:
stdout, stderr = proc.communicate()
return proc.returncode, stdout, stderr
except Exception:
err_msg = "Couldn't export audio using command: '{}'".format(command)
raise AudioEncodingError(err_msg)
class Worker(Thread, metaclass=ABCMeta):
def __init__(self, timeout=0.5, logger=None):
self._timeout = timeout
self._logger = logger
self._inbox = Queue()
Thread.__init__(self)
def run(self):
while True:
message = self._get_message()
if message == _STOP_PROCESSING:
break
if message is not None:
self._process_message(message)
self._post_process()
@abstractmethod
def _process_message(self, message):
"""Process incoming messages"""
def _post_process(self):
pass
def _log(self, message):
self._logger.info(message)
def _stop_requested(self):
try:
message = self._inbox.get_nowait()
if message == _STOP_PROCESSING:
return True
except Empty:
return False
def stop(self):
self.send(_STOP_PROCESSING)
self.join()
def send(self, message):
self._inbox.put(message)
def _get_message(self):
try:
message = self._inbox.get(timeout=self._timeout)
return message
except Empty:
return None
class TokenizerWorker(Worker, AudioDataSource):
def __init__(self, reader, observers=None, logger=None, **kwargs):
self._observers = observers if observers is not None else []
self._reader = reader
self._audio_region_gen = split(self, **kwargs)
self._detections = []
self._log_format = "[DET]: Detection {0.id} (start: {0.start:.3f}, "
self._log_format += "end: {0.end:.3f}, duration: {0.duration:.3f})"
Worker.__init__(self, timeout=0.2, logger=logger)
    def _process_message(self, message):
        # Tokenization is driven by the overridden run(); messages are ignored.
        pass
@property
def detections(self):
return self._detections
def _notify_observers(self, message):
for observer in self._observers:
observer.send(message)
def run(self):
self._reader.open()
start_processing_timestamp = datetime.now()
for _id, audio_region in enumerate(self._audio_region_gen, start=1):
timestamp = start_processing_timestamp + timedelta(
seconds=audio_region.meta.start
)
audio_region.meta.timestamp = timestamp
detection = _Detection(
_id,
audio_region.meta.start,
audio_region.meta.end,
audio_region.duration,
)
self._detections.append(detection)
if self._logger is not None:
message = self._log_format.format(detection)
self._log(message)
self._notify_observers((_id, audio_region))
self._notify_observers(_STOP_PROCESSING)
self._reader.close()
def start_all(self):
for observer in self._observers:
observer.start()
self.start()
def stop_all(self):
self.stop()
for observer in self._observers:
observer.stop()
self._reader.close()
def read(self):
if self._stop_requested():
return None
else:
return self._reader.read()
def __getattr__(self, name):
return getattr(self._reader, name)
class StreamSaverWorker(Worker):
def __init__(
self,
audio_reader,
filename,
export_format=None,
cache_size_sec=0.5,
timeout=0.2,
):
self._reader = audio_reader
sample_size_bytes = self._reader.sw * self._reader.ch
self._cache_size = cache_size_sec * self._reader.sr * sample_size_bytes
self._output_filename = filename
self._export_format = _guess_audio_format(export_format, filename)
if self._export_format is None:
self._export_format = "wav"
self._init_output_stream()
self._exported = False
self._cache = []
self._total_cached = 0
Worker.__init__(self, timeout=timeout)
def _get_non_existent_filename(self):
filename = self._output_filename + ".wav"
i = 0
while os.path.exists(filename):
i += 1
filename = self._output_filename + "({}).wav".format(i)
return filename
def _init_output_stream(self):
if self._export_format != "wav":
self._tmp_output_filename = self._get_non_existent_filename()
else:
self._tmp_output_filename = self._output_filename
self._wfp = wave.open(self._tmp_output_filename, "wb")
self._wfp.setframerate(self._reader.sr)
self._wfp.setsampwidth(self._reader.sw)
self._wfp.setnchannels(self._reader.ch)
@property
def sr(self):
return self._reader.sampling_rate
@property
def sw(self):
return self._reader.sample_width
@property
def ch(self):
return self._reader.channels
def __del__(self):
self._post_process()
if (
(self._tmp_output_filename != self._output_filename)
and self._exported
and os.path.exists(self._tmp_output_filename)
):
os.remove(self._tmp_output_filename)
def _process_message(self, data):
self._cache.append(data)
self._total_cached += len(data)
if self._total_cached >= self._cache_size:
self._write_cached_data()
def _post_process(self):
while True:
try:
data = self._inbox.get_nowait()
if data != _STOP_PROCESSING:
self._cache.append(data)
self._total_cached += len(data)
except Empty:
break
self._write_cached_data()
self._wfp.close()
def _write_cached_data(self):
if self._cache:
data = b"".join(self._cache)
self._wfp.writeframes(data)
self._cache = []
self._total_cached = 0
def open(self):
self._reader.open()
def close(self):
self._reader.close()
self.stop()
def rewind(self):
# ensure compatibility with AudioDataSource with record=True
pass
@property
def data(self):
with wave.open(self._tmp_output_filename, "rb") as wfp:
return wfp.readframes(-1)
def save_stream(self):
if self._exported:
return self._output_filename
if self._export_format in ("raw", "wav"):
if self._export_format == "raw":
self._export_raw()
self._exported = True
return self._output_filename
try:
self._export_with_ffmpeg_or_avconv()
except AudioEncodingError:
try:
self._export_with_sox()
except AudioEncodingError:
warn_msg = "Couldn't save audio data in the desired format "
warn_msg += "'{}'. Either none of 'ffmpeg', 'avconv' or 'sox' "
warn_msg += "is installed or this format is not recognized.\n"
warn_msg += "Audio file was saved as '{}'"
raise AudioEncodingWarning(
warn_msg.format(
self._export_format, self._tmp_output_filename
)
)
finally:
self._exported = True
return self._output_filename
def _export_raw(self):
with open(self._output_filename, "wb") as wfp:
wfp.write(self.data)
def _export_with_ffmpeg_or_avconv(self):
command = [
"-y",
"-f",
"wav",
"-i",
self._tmp_output_filename,
"-f",
self._export_format,
self._output_filename,
]
returncode, stdout, stderr = _run_subprocess(["ffmpeg"] + command)
if returncode != 0:
returncode, stdout, stderr = _run_subprocess(["avconv"] + command)
if returncode != 0:
raise AudioEncodingError(stderr)
return stdout, stderr
def _export_with_sox(self):
command = [
"sox",
"-t",
"wav",
self._tmp_output_filename,
self._output_filename,
]
returncode, stdout, stderr = _run_subprocess(command)
if returncode != 0:
raise AudioEncodingError(stderr)
return stdout, stderr
def close_output(self):
self._wfp.close()
def read(self):
data = self._reader.read()
if data is not None:
self.send(data)
else:
self.send(_STOP_PROCESSING)
return data
def __getattr__(self, name):
if name == "data":
return self.data
return getattr(self._reader, name)
class PlayerWorker(Worker):
def __init__(self, player, progress_bar=False, timeout=0.2, logger=None):
self._player = player
self._progress_bar = progress_bar
self._log_format = "[PLAY]: Detection {id} played"
Worker.__init__(self, timeout=timeout, logger=logger)
def _process_message(self, message):
_id, audio_region = message
if self._logger is not None:
message = self._log_format.format(id=_id)
self._log(message)
audio_region.play(
player=self._player, progress_bar=self._progress_bar, leave=False
)
class RegionSaverWorker(Worker):
def __init__(
self,
filename_format,
audio_format=None,
timeout=0.2,
logger=None,
**audio_parameters
):
self._filename_format = filename_format
self._audio_format = audio_format
self._audio_parameters = audio_parameters
self._debug_format = "[SAVE]: Detection {id} saved as '{filename}'"
Worker.__init__(self, timeout=timeout, logger=logger)
def _process_message(self, message):
_id, audio_region = message
filename = self._filename_format.format(
id=_id,
start=audio_region.meta.start,
end=audio_region.meta.end,
duration=audio_region.duration,
)
filename = audio_region.save(
filename, self._audio_format, **self._audio_parameters
)
if self._logger:
message = self._debug_format.format(id=_id, filename=filename)
self._log(message)
class CommandLineWorker(Worker):
def __init__(self, command, timeout=0.2, logger=None):
self._command = command
Worker.__init__(self, timeout=timeout, logger=logger)
self._debug_format = "[COMMAND]: Detection {id} command: '{command}'"
def _process_message(self, message):
_id, audio_region = message
with NamedTemporaryFile(delete=False) as file:
filename = audio_region.save(file.name, audio_format="wav")
command = self._command.format(file=filename)
os.system(command)
if self._logger is not None:
message = self._debug_format.format(id=_id, command=command)
self._log(message)
class PrintWorker(Worker):
def __init__(
self,
print_format="{start} {end}",
time_format="%S",
timestamp_format="%Y/%m/%d %H:%M:%S.%f",
timeout=0.2,
):
self._print_format = print_format
self._format_time = make_duration_formatter(time_format)
self._timestamp_format = timestamp_format
self.detections = []
Worker.__init__(self, timeout=timeout)
def _process_message(self, message):
_id, audio_region = message
timestamp = audio_region.meta.timestamp
timestamp = timestamp.strftime(self._timestamp_format)
text = self._print_format.format(
id=_id,
start=self._format_time(audio_region.meta.start),
end=self._format_time(audio_region.meta.end),
duration=self._format_time(audio_region.duration),
timestamp=timestamp,
)
print(text)
```
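The test module further below exercises these workers; the same pieces can be wired together by hand. A minimal sketch mirroring that setup, with a placeholder file name and illustrative thresholds:
```python
from auditok import AudioDataSource
from auditok.workers import TokenizerWorker, PrintWorker

reader = AudioDataSource(input='audio.wav', block_dur=0.1)  # placeholder input
printer = PrintWorker(print_format='{id} {start} {end}')

tokenizer = TokenizerWorker(
    reader,
    observers=[printer],
    min_dur=0.3,      # shortest event to keep, in seconds
    max_dur=2,        # longest event before it is cut
    max_silence=0.2,  # tolerated continuous silence inside an event
    eth=50,           # energy threshold, forwarded to split()
)
tokenizer.start_all()  # starts the observer threads, then the tokenizer
tokenizer.join()       # wait until the whole stream has been processed
print(tokenizer.detections)
```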
#### File: auditok/tests/test_AudioSource.py
```python
from array import array
import unittest
from genty import genty, genty_dataset
from auditok.io import (
AudioParameterError,
BufferAudioSource,
RawAudioSource,
WaveAudioSource,
)
from auditok.signal import FORMAT
from test_util import PURE_TONE_DICT, _sample_generator
def audio_source_read_all_gen(audio_source, size=None):
if size is None:
size = int(audio_source.sr * 0.1) # 100ms
while True:
data = audio_source.read(size)
if data is None:
break
yield data
@genty
class TestAudioSource(unittest.TestCase):
# TODO when use_channel is None, return samples from all channels
@genty_dataset(
mono=("mono_400Hz", (400,)),
multichannel=("3channel_400-800-1600Hz", (400, 800, 1600)),
)
def test_BufferAudioSource_read_all(self, file_suffix, frequencies):
file = "tests/data/test_16KHZ_{}.raw".format(file_suffix)
with open(file, "rb") as fp:
expected = fp.read()
channels = len(frequencies)
audio_source = BufferAudioSource(expected, 16000, 2, channels)
audio_source.open()
data = audio_source.read(None)
self.assertEqual(data, expected)
audio_source.rewind()
data = audio_source.read(-10)
self.assertEqual(data, expected)
audio_source.close()
@genty_dataset(
mono=("mono_400Hz", (400,)),
multichannel=("3channel_400-800-1600Hz", (400, 800, 1600)),
)
def test_RawAudioSource(self, file_suffix, frequencies):
file = "tests/data/test_16KHZ_{}.raw".format(file_suffix)
channels = len(frequencies)
audio_source = RawAudioSource(file, 16000, 2, channels)
audio_source.open()
data_read_all = b"".join(audio_source_read_all_gen(audio_source))
audio_source.close()
mono_channels = [PURE_TONE_DICT[freq] for freq in frequencies]
fmt = FORMAT[audio_source.sample_width]
expected = array(fmt, _sample_generator(*mono_channels)).tobytes()
self.assertEqual(data_read_all, expected)
# assert read all data with None
audio_source = RawAudioSource(file, 16000, 2, channels)
audio_source.open()
data_read_all = audio_source.read(None)
audio_source.close()
self.assertEqual(data_read_all, expected)
# assert read all data with a negative size
audio_source = RawAudioSource(file, 16000, 2, channels)
audio_source.open()
data_read_all = audio_source.read(-10)
audio_source.close()
self.assertEqual(data_read_all, expected)
@genty_dataset(
mono=("mono_400Hz", (400,)),
multichannel=("3channel_400-800-1600Hz", (400, 800, 1600)),
)
def test_WaveAudioSource(self, file_suffix, frequencies):
file = "tests/data/test_16KHZ_{}.wav".format(file_suffix)
audio_source = WaveAudioSource(file)
audio_source.open()
data = b"".join(audio_source_read_all_gen(audio_source))
audio_source.close()
mono_channels = [PURE_TONE_DICT[freq] for freq in frequencies]
fmt = FORMAT[audio_source.sample_width]
expected = array(fmt, _sample_generator(*mono_channels)).tobytes()
self.assertEqual(data, expected)
# assert read all data with None
audio_source = WaveAudioSource(file)
audio_source.open()
data_read_all = audio_source.read(None)
audio_source.close()
self.assertEqual(data_read_all, expected)
# assert read all data with a negative size
audio_source = WaveAudioSource(file)
audio_source.open()
data_read_all = audio_source.read(-10)
audio_source.close()
self.assertEqual(data_read_all, expected)
@genty
class TestBufferAudioSource_SR10_SW1_CH1(unittest.TestCase):
def setUp(self):
self.data = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ012345"
self.audio_source = BufferAudioSource(
data=self.data, sampling_rate=10, sample_width=1, channels=1
)
self.audio_source.open()
def tearDown(self):
self.audio_source.close()
def test_sr10_sw1_ch1_read_1(self):
block = self.audio_source.read(1)
exp = b"A"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
def test_sr10_sw1_ch1_read_6(self):
block = self.audio_source.read(6)
exp = b"ABCDEF"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
def test_sr10_sw1_ch1_read_multiple(self):
block = self.audio_source.read(1)
exp = b"A"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
block = self.audio_source.read(6)
exp = b"BCDEFG"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
block = self.audio_source.read(13)
exp = b"HIJKLMNOPQRST"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
block = self.audio_source.read(9999)
exp = b"UVWXYZ012345"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
def test_sr10_sw1_ch1_read_all(self):
block = self.audio_source.read(9999)
self.assertEqual(
block,
self.data,
msg="wrong block, expected: {}, found: {} ".format(
self.data, block
),
)
block = self.audio_source.read(1)
self.assertEqual(
block,
None,
msg="wrong block, expected: {}, found: {} ".format(None, block),
)
def test_sr10_sw1_ch1_sampling_rate(self):
srate = self.audio_source.sampling_rate
self.assertEqual(
srate,
10,
msg="wrong sampling rate, expected: 10, found: {0} ".format(srate),
)
def test_sr10_sw1_ch1_sample_width(self):
swidth = self.audio_source.sample_width
self.assertEqual(
swidth,
1,
msg="wrong sample width, expected: 1, found: {0} ".format(swidth),
)
def test_sr10_sw1_ch1_channels(self):
channels = self.audio_source.channels
self.assertEqual(
channels,
1,
msg="wrong number of channels, expected: 1, found: {0} ".format(
channels
),
)
@genty_dataset(
empty=([], 0, 0, 0),
zero=([0], 0, 0, 0),
five=([5], 5, 0.5, 500),
multiple=([5, 20], 25, 2.5, 2500),
)
def test_position(
self, block_sizes, expected_sample, expected_second, expected_ms
):
for block_size in block_sizes:
self.audio_source.read(block_size)
position = self.audio_source.position
self.assertEqual(
position,
expected_sample,
msg="wrong stream position, expected: {}, found: {}".format(
expected_sample, position
),
)
position_s = self.audio_source.position_s
self.assertEqual(
position_s,
expected_second,
msg="wrong stream position_s, expected: {}, found: {}".format(
expected_second, position_s
),
)
position_ms = self.audio_source.position_ms
self.assertEqual(
position_ms,
expected_ms,
msg="wrong stream position_s, expected: {}, found: {}".format(
expected_ms, position_ms
),
)
@genty_dataset(
zero=(0, 0, 0, 0),
one=(1, 1, 0.1, 100),
ten=(10, 10, 1, 1000),
negative_1=(-1, 31, 3.1, 3100),
negative_2=(-7, 25, 2.5, 2500),
)
def test_position_setter(
self, position, expected_sample, expected_second, expected_ms
):
self.audio_source.position = position
position = self.audio_source.position
self.assertEqual(
position,
expected_sample,
msg="wrong stream position, expected: {}, found: {}".format(
expected_sample, position
),
)
position_s = self.audio_source.position_s
self.assertEqual(
position_s,
expected_second,
msg="wrong stream position_s, expected: {}, found: {}".format(
expected_second, position_s
),
)
position_ms = self.audio_source.position_ms
self.assertEqual(
position_ms,
expected_ms,
msg="wrong stream position_s, expected: {}, found: {}".format(
expected_ms, position_ms
),
)
@genty_dataset(
zero=(0, 0, 0, 0),
one=(0.1, 1, 0.1, 100),
ten=(1, 10, 1, 1000),
negative_1=(-0.1, 31, 3.1, 3100),
negative_2=(-0.7, 25, 2.5, 2500),
)
def test_position_s_setter(
self, position_s, expected_sample, expected_second, expected_ms
):
self.audio_source.position_s = position_s
position = self.audio_source.position
self.assertEqual(
position,
expected_sample,
msg="wrong stream position, expected: {}, found: {}".format(
expected_sample, position
),
)
position_s = self.audio_source.position_s
self.assertEqual(
position_s,
expected_second,
msg="wrong stream position_s, expected: {}, found: {}".format(
expected_second, position_s
),
)
position_ms = self.audio_source.position_ms
self.assertEqual(
position_ms,
expected_ms,
msg="wrong stream position_s, expected: {}, found: {}".format(
expected_ms, position_ms
),
)
@genty_dataset(
zero=(0, 0, 0, 0),
one=(100, 1, 0.1, 100),
ten=(1000, 10, 1, 1000),
negative_1=(-100, 31, 3.1, 3100),
negative_2=(-700, 25, 2.5, 2500),
)
def test_position_ms_setter(
self, position_ms, expected_sample, expected_second, expected_ms
):
self.audio_source.position_ms = position_ms
position = self.audio_source.position
self.assertEqual(
position,
expected_sample,
msg="wrong stream position, expected: {}, found: {}".format(
expected_sample, position
),
)
position_s = self.audio_source.position_s
self.assertEqual(
position_s,
expected_second,
msg="wrong stream position_s, expected: {}, found: {}".format(
expected_second, position_s
),
)
position_ms = self.audio_source.position_ms
self.assertEqual(
position_ms,
expected_ms,
msg="wrong stream position_s, expected: {}, found: {}".format(
expected_ms, position_ms
),
)
@genty_dataset(positive=((100,)), negative=(-100,))
def test_position_setter_out_of_range(self, position):
with self.assertRaises(IndexError):
self.audio_source.position = position
@genty_dataset(positive=((100,)), negative=(-100,))
def test_position_s_setter_out_of_range(self, position_s):
with self.assertRaises(IndexError):
self.audio_source.position_s = position_s
@genty_dataset(positive=((10000,)), negative=(-10000,))
def test_position_ms_setter_out_of_range(self, position_ms):
with self.assertRaises(IndexError):
self.audio_source.position_ms = position_ms
def test_sr10_sw1_ch1_initial_position_s_0(self):
tp = self.audio_source.position_s
self.assertEqual(
tp,
0.0,
msg="wrong time position, expected: 0.0, found: {0} ".format(tp),
)
def test_sr10_sw1_ch1_position_s_1_after_read(self):
srate = self.audio_source.sampling_rate
# read one second
self.audio_source.read(srate)
tp = self.audio_source.position_s
self.assertEqual(
tp,
1.0,
msg="wrong time position, expected: 1.0, found: {0} ".format(tp),
)
def test_sr10_sw1_ch1_position_s_2_5(self):
# read 2.5 seconds
self.audio_source.read(25)
tp = self.audio_source.position_s
self.assertEqual(
tp,
2.5,
msg="wrong time position, expected: 2.5, found: {0} ".format(tp),
)
def test_sr10_sw1_ch1_position_s_0(self):
self.audio_source.read(10)
self.audio_source.position_s = 0
tp = self.audio_source.position_s
self.assertEqual(
tp,
0.0,
msg="wrong time position, expected: 0.0, found: {0} ".format(tp),
)
def test_sr10_sw1_ch1_position_s_1(self):
self.audio_source.position_s = 1
tp = self.audio_source.position_s
self.assertEqual(
tp,
1.0,
msg="wrong time position, expected: 1.0, found: {0} ".format(tp),
)
def test_sr10_sw1_ch1_rewind(self):
self.audio_source.read(10)
self.audio_source.rewind()
tp = self.audio_source.position
self.assertEqual(
tp, 0, msg="wrong position, expected: 0.0, found: {0} ".format(tp)
)
def test_sr10_sw1_ch1_read_closed(self):
self.audio_source.close()
with self.assertRaises(Exception):
self.audio_source.read(1)
@genty
class TestBufferAudioSource_SR16_SW2_CH1(unittest.TestCase):
def setUp(self):
self.data = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ012345"
self.audio_source = BufferAudioSource(
data=self.data, sampling_rate=16, sample_width=2, channels=1
)
self.audio_source.open()
def tearDown(self):
self.audio_source.close()
def test_sr16_sw2_ch1_read_1(self):
block = self.audio_source.read(1)
exp = b"AB"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
def test_sr16_sw2_ch1_read_6(self):
block = self.audio_source.read(6)
exp = b"ABCDEFGHIJKL"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
def test_sr16_sw2_ch1_read_multiple(self):
block = self.audio_source.read(1)
exp = b"AB"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
block = self.audio_source.read(6)
exp = b"CDEFGHIJKLMN"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
block = self.audio_source.read(5)
exp = b"OPQRSTUVWX"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
block = self.audio_source.read(9999)
exp = b"YZ012345"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
def test_sr16_sw2_ch1_read_all(self):
block = self.audio_source.read(9999)
self.assertEqual(
block,
self.data,
msg="wrong block, expected: {0}, found: {1} ".format(
self.data, block
),
)
block = self.audio_source.read(1)
self.assertEqual(
block,
None,
msg="wrong block, expected: {0}, found: {1} ".format(None, block),
)
def test_sr16_sw2_ch1_sampling_rate(self):
srate = self.audio_source.sampling_rate
self.assertEqual(
srate,
16,
            msg="wrong sampling rate, expected: 16, found: {0} ".format(srate),
)
def test_sr16_sw2_ch1_sample_width(self):
swidth = self.audio_source.sample_width
self.assertEqual(
swidth,
2,
            msg="wrong sample width, expected: 2, found: {0} ".format(swidth),
)
def test_sr16_sw2_ch1_channels(self):
channels = self.audio_source.channels
self.assertEqual(
channels,
1,
msg="wrong number of channels, expected: 1, found: {0} ".format(
channels
),
)
@genty_dataset(
empty=([], 0, 0, 0),
zero=([0], 0, 0, 0),
two=([2], 2, 2 / 16, int(2000 / 16)),
eleven=([11], 11, 11 / 16, int(11 * 1000 / 16)),
multiple=([4, 8], 12, 0.75, 750),
)
def test_position(
self, block_sizes, expected_sample, expected_second, expected_ms
):
for block_size in block_sizes:
self.audio_source.read(block_size)
position = self.audio_source.position
self.assertEqual(
position,
expected_sample,
msg="wrong stream position, expected: {}, found: {}".format(
expected_sample, position
),
)
position_s = self.audio_source.position_s
self.assertEqual(
position_s,
expected_second,
msg="wrong stream position_s, expected: {}, found: {}".format(
expected_second, position_s
),
)
position_ms = self.audio_source.position_ms
self.assertEqual(
position_ms,
expected_ms,
msg="wrong stream position_s, expected: {}, found: {}".format(
expected_ms, position_ms
),
)
def test_sr16_sw2_ch1_read_position_0(self):
self.audio_source.read(10)
self.audio_source.position = 0
pos = self.audio_source.position
self.assertEqual(
pos, 0, msg="wrong position, expected: 0, found: {0} ".format(pos)
)
@genty_dataset(
zero=(0, 0, 0, 0),
one=(1, 1, 1 / 16, int(1000 / 16)),
ten=(10, 10, 10 / 16, int(10000 / 16)),
negative_1=(-1, 15, 15 / 16, int(15000 / 16)),
negative_2=(-7, 9, 9 / 16, int(9000 / 16)),
)
def test_position_setter(
self, position, expected_sample, expected_second, expected_ms
):
self.audio_source.position = position
position = self.audio_source.position
self.assertEqual(
position,
expected_sample,
msg="wrong stream position, expected: {}, found: {}".format(
expected_sample, position
),
)
position_s = self.audio_source.position_s
self.assertEqual(
position_s,
expected_second,
msg="wrong stream position_s, expected: {}, found: {}".format(
expected_second, position_s
),
)
position_ms = self.audio_source.position_ms
self.assertEqual(
position_ms,
expected_ms,
msg="wrong stream position_s, expected: {}, found: {}".format(
expected_ms, position_ms
),
)
@genty_dataset(
zero=(0, 0, 0, 0),
one=(0.1, 1, 1 / 16, int(1000 / 16)),
two=(1 / 8, 2, 1 / 8, int(1 / 8 * 1000)),
twelve=(0.75, 12, 0.75, 750),
negative_1=(-0.1, 15, 15 / 16, int(15000 / 16)),
negative_2=(-0.7, 5, 5 / 16, int(5000 / 16)),
)
def test_position_s_setter(
self, position_s, expected_sample, expected_second, expected_ms
):
self.audio_source.position_s = position_s
position = self.audio_source.position
self.assertEqual(
position,
expected_sample,
msg="wrong stream position, expected: {}, found: {}".format(
expected_sample, position
),
)
position_s = self.audio_source.position_s
self.assertEqual(
position_s,
expected_second,
msg="wrong stream position_s, expected: {}, found: {}".format(
expected_second, position_s
),
)
position_ms = self.audio_source.position_ms
self.assertEqual(
position_ms,
expected_ms,
msg="wrong stream position_s, expected: {}, found: {}".format(
expected_ms, position_ms
),
)
@genty_dataset(
zero=(0, 0, 0, 0),
one=(100, 1, 1 / 16, int(1000 / 16)),
ten=(1000, 16, 1, 1000),
negative_1=(-100, 15, 15 / 16, int(15 * 1000 / 16)),
negative_2=(-500, 8, 0.5, 500),
negative_3=(-700, 5, 5 / 16, int(5 * 1000 / 16)),
)
def test_position_ms_setter(
self, position_ms, expected_sample, expected_second, expected_ms
):
self.audio_source.position_ms = position_ms
position = self.audio_source.position
self.assertEqual(
position,
expected_sample,
msg="wrong stream position, expected: {}, found: {}".format(
expected_sample, position
),
)
position_s = self.audio_source.position_s
self.assertEqual(
position_s,
expected_second,
msg="wrong stream position_s, expected: {}, found: {}".format(
expected_second, position_s
),
)
position_ms = self.audio_source.position_ms
self.assertEqual(
position_ms,
expected_ms,
msg="wrong stream position_s, expected: {}, found: {}".format(
expected_ms, position_ms
),
)
def test_sr16_sw2_ch1_rewind(self):
self.audio_source.read(10)
self.audio_source.rewind()
tp = self.audio_source.position
self.assertEqual(
tp, 0, msg="wrong position, expected: 0.0, found: {0} ".format(tp)
)
class TestBufferAudioSource_SR11_SW4_CH1(unittest.TestCase):
def setUp(self):
self.data = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefgh"
self.audio_source = BufferAudioSource(
data=self.data, sampling_rate=11, sample_width=4, channels=1
)
self.audio_source.open()
def tearDown(self):
self.audio_source.close()
def test_sr11_sw4_ch1_read_1(self):
block = self.audio_source.read(1)
exp = b"ABCD"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
def test_sr11_sw4_ch1_read_6(self):
block = self.audio_source.read(6)
exp = b"ABCDEFGHIJKLMNOPQRSTUVWX"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
def test_sr11_sw4_ch1_read_multiple(self):
block = self.audio_source.read(1)
exp = b"ABCD"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
block = self.audio_source.read(6)
exp = b"EFGHIJKLMNOPQRSTUVWXYZ01"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
block = self.audio_source.read(3)
exp = b"23456789abcd"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
block = self.audio_source.read(9999)
exp = b"efgh"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
def test_sr11_sw4_ch1_read_all(self):
block = self.audio_source.read(9999)
self.assertEqual(
block,
self.data,
msg="wrong block, expected: {0}, found: {1} ".format(
self.data, block
),
)
block = self.audio_source.read(1)
self.assertEqual(
block,
None,
msg="wrong block, expected: {0}, found: {1} ".format(None, block),
)
def test_sr11_sw4_ch1_sampling_rate(self):
srate = self.audio_source.sampling_rate
self.assertEqual(
srate,
11,
            msg="wrong sampling rate, expected: 11, found: {0} ".format(srate),
)
def test_sr11_sw4_ch1_sample_width(self):
swidth = self.audio_source.sample_width
self.assertEqual(
swidth,
4,
            msg="wrong sample width, expected: 4, found: {0} ".format(swidth),
)
def test_sr11_sw4_ch1_channels(self):
channels = self.audio_source.channels
self.assertEqual(
channels,
1,
msg="wrong number of channels, expected: 1, found: {0} ".format(
channels
),
)
def test_sr11_sw4_ch1_intial_position_0(self):
pos = self.audio_source.position
self.assertEqual(
pos, 0, msg="wrong position, expected: 0, found: {0} ".format(pos)
)
def test_sr11_sw4_ch1_position_5(self):
self.audio_source.read(5)
pos = self.audio_source.position
self.assertEqual(
pos, 5, msg="wrong position, expected: 5, found: {0} ".format(pos)
)
def test_sr11_sw4_ch1_position_9(self):
self.audio_source.read(5)
self.audio_source.read(4)
pos = self.audio_source.position
self.assertEqual(
            pos, 9, msg="wrong position, expected: 9, found: {0} ".format(pos)
)
def test_sr11_sw4_ch1_position_0(self):
self.audio_source.read(10)
self.audio_source.position = 0
pos = self.audio_source.position
self.assertEqual(
pos, 0, msg="wrong position, expected: 0, found: {0} ".format(pos)
)
def test_sr11_sw4_ch1_position_10(self):
self.audio_source.position = 10
pos = self.audio_source.position
self.assertEqual(
pos,
10,
msg="wrong position, expected: 10, found: {0} ".format(pos),
)
def test_sr11_sw4_ch1_initial_position_s_0(self):
tp = self.audio_source.position_s
self.assertEqual(
tp,
0.0,
msg="wrong time position, expected: 0.0, found: {0} ".format(tp),
)
def test_sr11_sw4_ch1_position_s_1_after_read(self):
srate = self.audio_source.sampling_rate
# read one second
self.audio_source.read(srate)
tp = self.audio_source.position_s
self.assertEqual(
tp,
1.0,
msg="wrong time position, expected: 1.0, found: {0} ".format(tp),
)
def test_sr11_sw4_ch1_position_s_0_63(self):
        # read 7 samples (about 0.636 seconds at 11 Hz)
self.audio_source.read(7)
tp = self.audio_source.position_s
self.assertAlmostEqual(
tp,
0.636363636364,
msg="wrong time position, expected: 0.636363636364, "
"found: {0} ".format(tp),
)
def test_sr11_sw4_ch1_position_s_0(self):
self.audio_source.read(10)
self.audio_source.position_s = 0
tp = self.audio_source.position_s
self.assertEqual(
tp,
0.0,
msg="wrong time position, expected: 0.0, found: {0} ".format(tp),
)
def test_sr11_sw4_ch1_position_s_1(self):
self.audio_source.position_s = 1
tp = self.audio_source.position_s
self.assertEqual(
tp,
1.0,
msg="wrong time position, expected: 1.0, found: {0} ".format(tp),
)
def test_sr11_sw4_ch1_rewind(self):
self.audio_source.read(10)
self.audio_source.rewind()
tp = self.audio_source.position
self.assertEqual(
tp, 0, msg="wrong position, expected: 0.0, found: {0} ".format(tp)
)
class TestBufferAudioSourceCreationException(unittest.TestCase):
def test_wrong_sample_width_value(self):
with self.assertRaises(AudioParameterError) as audio_param_err:
_ = BufferAudioSource(
data=b"ABCDEFGHI", sampling_rate=9, sample_width=3, channels=1
)
self.assertEqual(
"Sample width must be one of: 1, 2 or 4 (bytes)",
str(audio_param_err.exception),
)
def test_wrong_data_buffer_size(self):
with self.assertRaises(AudioParameterError) as audio_param_err:
_ = BufferAudioSource(
data=b"ABCDEFGHI", sampling_rate=8, sample_width=2, channels=1
)
self.assertEqual(
"The length of audio data must be an integer "
"multiple of `sample_width * channels`",
str(audio_param_err.exception),
)
class TestAudioSourceProperties(unittest.TestCase):
def test_read_properties(self):
data = b""
sampling_rate = 8000
sample_width = 2
channels = 1
a_source = BufferAudioSource(
data, sampling_rate, sample_width, channels
)
self.assertEqual(a_source.sampling_rate, sampling_rate)
self.assertEqual(a_source.sample_width, sample_width)
self.assertEqual(a_source.channels, channels)
def test_set_readonly_properties_exception(self):
data = b""
sampling_rate = 8000
sample_width = 2
channels = 1
a_source = BufferAudioSource(
data, sampling_rate, sample_width, channels
)
with self.assertRaises(AttributeError):
a_source.sampling_rate = 16000
a_source.sample_width = 1
a_source.channels = 2
class TestAudioSourceShortProperties(unittest.TestCase):
def test_read_short_properties(self):
data = b""
sampling_rate = 8000
sample_width = 2
channels = 1
a_source = BufferAudioSource(
data, sampling_rate, sample_width, channels
)
self.assertEqual(a_source.sr, sampling_rate)
self.assertEqual(a_source.sw, sample_width)
self.assertEqual(a_source.ch, channels)
def test_set_readonly_short_properties_exception(self):
data = b""
sampling_rate = 8000
sample_width = 2
channels = 1
a_source = BufferAudioSource(
data, sampling_rate, sample_width, channels
)
with self.assertRaises(AttributeError):
a_source.sr = 16000
a_source.sw = 1
a_source.ch = 2
if __name__ == "__main__":
unittest.main()
```
#### File: auditok/tests/test_plotting.py
```python
import os
import sys
import unittest
from unittest import TestCase
from tempfile import TemporaryDirectory
from genty import genty, genty_dataset
import matplotlib
matplotlib.use("AGG") # noqa E402
import matplotlib.pyplot as plt
from auditok.core import AudioRegion
if sys.version_info.minor <= 5:
PREFIX = "py34_py35/"
else:
PREFIX = ""
matplotlib.rcParams["figure.figsize"] = (10, 4)
@genty
class TestPlotting(TestCase):
@genty_dataset(mono=(1,), stereo=(2,))
def test_region_plot(self, channels):
type_ = "mono" if channels == 1 else "stereo"
audio_filename = "tests/data/test_split_10HZ_{}.raw".format(type_)
image_filename = "tests/images/{}plot_{}_region.png".format(
PREFIX, type_
)
expected_image = plt.imread(image_filename)
with TemporaryDirectory() as tmpdir:
output_image_filename = os.path.join(tmpdir, "image.png")
region = AudioRegion.load(audio_filename, sr=10, sw=2, ch=channels)
region.plot(show=False, save_as=output_image_filename)
output_image = plt.imread(output_image_filename)
self.assertTrue((output_image == expected_image).all())
@genty_dataset(
mono=(1,),
stereo_any=(2, "any"),
stereo_uc_0=(2, 0),
stereo_uc_1=(2, 1),
stereo_uc_mix=(2, "mix"),
)
def test_region_split_and_plot(self, channels, use_channel=None):
type_ = "mono" if channels == 1 else "stereo"
audio_filename = "tests/data/test_split_10HZ_{}.raw".format(type_)
if type_ == "mono":
fmt = "tests/images/{}split_and_plot_mono_region.png"
else:
fmt = "tests/images/{}split_and_plot_uc_{}_stereo_region.png"
image_filename = fmt.format(PREFIX, use_channel)
expected_image = plt.imread(image_filename)
with TemporaryDirectory() as tmpdir:
output_image_filename = os.path.join(tmpdir, "image.png")
region = AudioRegion.load(audio_filename, sr=10, sw=2, ch=channels)
region.split_and_plot(
aw=0.1,
uc=use_channel,
max_silence=0,
show=False,
save_as=output_image_filename,
)
output_image = plt.imread(output_image_filename)
self.assertTrue((output_image == expected_image).all())
if __name__ == "__main__":
unittest.main()
```
#### File: auditok/tests/test_workers.py
```python
import os
import unittest
from unittest import TestCase
from unittest.mock import patch, call, Mock
from tempfile import TemporaryDirectory
from genty import genty, genty_dataset
from auditok import AudioRegion, AudioDataSource
from auditok.exceptions import AudioEncodingWarning
from auditok.cmdline_util import make_logger
from auditok.workers import (
TokenizerWorker,
StreamSaverWorker,
RegionSaverWorker,
PlayerWorker,
CommandLineWorker,
PrintWorker,
)
@genty
class TestWorkers(TestCase):
def setUp(self):
self.reader = AudioDataSource(
input="tests/data/test_split_10HZ_mono.raw",
block_dur=0.1,
sr=10,
sw=2,
ch=1,
)
self.expected = [
(0.2, 1.6),
(1.7, 3.1),
(3.4, 5.4),
(5.4, 7.4),
(7.4, 7.6),
]
def tearDown(self):
self.reader.close()
def test_TokenizerWorker(self):
with TemporaryDirectory() as tmpdir:
file = os.path.join(tmpdir, "file.log")
logger = make_logger(file=file, name="test_TokenizerWorker")
tokenizer = TokenizerWorker(
self.reader,
logger=logger,
min_dur=0.3,
max_dur=2,
max_silence=0.2,
drop_trailing_silence=False,
strict_min_dur=False,
eth=50,
)
tokenizer.start_all()
tokenizer.join()
# Get logged text
with open(file) as fp:
log_lines = fp.readlines()
log_fmt = "[DET]: Detection {} (start: {:.3f}, "
log_fmt += "end: {:.3f}, duration: {:.3f})"
self.assertEqual(len(tokenizer.detections), len(self.expected))
for i, (det, exp, log_line) in enumerate(
zip(tokenizer.detections, self.expected, log_lines), 1
):
start, end = exp
exp_log_line = log_fmt.format(i, start, end, end - start)
self.assertAlmostEqual(det.start, start)
self.assertAlmostEqual(det.end, end)
# remove timestamp part and strip new line
self.assertEqual(log_line[28:].strip(), exp_log_line)
def test_PlayerWorker(self):
with TemporaryDirectory() as tmpdir:
file = os.path.join(tmpdir, "file.log")
logger = make_logger(file=file, name="test_RegionSaverWorker")
player_mock = Mock()
observers = [PlayerWorker(player_mock, logger=logger)]
tokenizer = TokenizerWorker(
self.reader,
logger=logger,
observers=observers,
min_dur=0.3,
max_dur=2,
max_silence=0.2,
drop_trailing_silence=False,
strict_min_dur=False,
eth=50,
)
tokenizer.start_all()
tokenizer.join()
tokenizer._observers[0].join()
# Get logged text
with open(file) as fp:
log_lines = [
line
for line in fp.readlines()
if line.startswith("[PLAY]")
]
self.assertTrue(player_mock.play.called)
self.assertEqual(len(tokenizer.detections), len(self.expected))
log_fmt = "[PLAY]: Detection {id} played"
for i, (det, exp, log_line) in enumerate(
zip(tokenizer.detections, self.expected, log_lines), 1
):
start, end = exp
exp_log_line = log_fmt.format(id=i)
self.assertAlmostEqual(det.start, start)
self.assertAlmostEqual(det.end, end)
# Remove timestamp part and strip new line
self.assertEqual(log_line[28:].strip(), exp_log_line)
def test_RegionSaverWorker(self):
filename_format = (
"Region_{id}_{start:.6f}-{end:.3f}_{duration:.3f}.wav"
)
with TemporaryDirectory() as tmpdir:
file = os.path.join(tmpdir, "file.log")
logger = make_logger(file=file, name="test_RegionSaverWorker")
observers = [RegionSaverWorker(filename_format, logger=logger)]
tokenizer = TokenizerWorker(
self.reader,
logger=logger,
observers=observers,
min_dur=0.3,
max_dur=2,
max_silence=0.2,
drop_trailing_silence=False,
strict_min_dur=False,
eth=50,
)
with patch("auditok.core.AudioRegion.save") as patched_save:
tokenizer.start_all()
tokenizer.join()
tokenizer._observers[0].join()
# Get logged text
with open(file) as fp:
log_lines = [
line
for line in fp.readlines()
if line.startswith("[SAVE]")
]
# Assert RegionSaverWorker ran as expected
expected_save_calls = [
call(
filename_format.format(
id=i, start=exp[0], end=exp[1], duration=exp[1] - exp[0]
),
None,
)
for i, exp in enumerate(self.expected, 1)
]
# Get calls to 'AudioRegion.save'
mock_calls = [
c for i, c in enumerate(patched_save.mock_calls) if i % 2 == 0
]
self.assertEqual(mock_calls, expected_save_calls)
self.assertEqual(len(tokenizer.detections), len(self.expected))
log_fmt = "[SAVE]: Detection {id} saved as '{filename}'"
for i, (det, exp, log_line) in enumerate(
zip(tokenizer.detections, self.expected, log_lines), 1
):
start, end = exp
expected_filename = filename_format.format(
id=i, start=start, end=end, duration=end - start
)
exp_log_line = log_fmt.format(i, expected_filename)
self.assertAlmostEqual(det.start, start)
self.assertAlmostEqual(det.end, end)
# Remove timestamp part and strip new line
self.assertEqual(log_line[28:].strip(), exp_log_line)
def test_CommandLineWorker(self):
command_format = "do nothing with"
with TemporaryDirectory() as tmpdir:
file = os.path.join(tmpdir, "file.log")
logger = make_logger(file=file, name="test_CommandLineWorker")
observers = [CommandLineWorker(command_format, logger=logger)]
tokenizer = TokenizerWorker(
self.reader,
logger=logger,
observers=observers,
min_dur=0.3,
max_dur=2,
max_silence=0.2,
drop_trailing_silence=False,
strict_min_dur=False,
eth=50,
)
with patch("auditok.workers.os.system") as patched_os_system:
tokenizer.start_all()
tokenizer.join()
tokenizer._observers[0].join()
# Get logged text
with open(file) as fp:
log_lines = [
line
for line in fp.readlines()
if line.startswith("[COMMAND]")
]
# Assert CommandLineWorker ran as expected
expected_save_calls = [call(command_format) for _ in self.expected]
self.assertEqual(patched_os_system.mock_calls, expected_save_calls)
self.assertEqual(len(tokenizer.detections), len(self.expected))
log_fmt = "[COMMAND]: Detection {id} command '{command}'"
for i, (det, exp, log_line) in enumerate(
zip(tokenizer.detections, self.expected, log_lines), 1
):
start, end = exp
exp_log_line = log_fmt.format(i, command_format)
self.assertAlmostEqual(det.start, start)
self.assertAlmostEqual(det.end, end)
# Remove timestamp part and strip new line
self.assertEqual(log_line[28:].strip(), exp_log_line)
def test_PrintWorker(self):
observers = [
PrintWorker(print_format="[{id}] {start} {end}, dur: {duration}")
]
tokenizer = TokenizerWorker(
self.reader,
observers=observers,
min_dur=0.3,
max_dur=2,
max_silence=0.2,
drop_trailing_silence=False,
strict_min_dur=False,
eth=50,
)
with patch("builtins.print") as patched_print:
tokenizer.start_all()
tokenizer.join()
tokenizer._observers[0].join()
# Assert PrintWorker ran as expected
expected_print_calls = [
call(
"[{}] {:.3f} {:.3f}, dur: {:.3f}".format(
i, exp[0], exp[1], exp[1] - exp[0]
)
)
for i, exp in enumerate(self.expected, 1)
]
self.assertEqual(patched_print.mock_calls, expected_print_calls)
self.assertEqual(len(tokenizer.detections), len(self.expected))
for det, exp in zip(tokenizer.detections, self.expected):
start, end = exp
self.assertAlmostEqual(det.start, start)
self.assertAlmostEqual(det.end, end)
def test_StreamSaverWorker_wav(self):
with TemporaryDirectory() as tmpdir:
expected_filename = os.path.join(tmpdir, "output.wav")
saver = StreamSaverWorker(self.reader, expected_filename)
saver.start()
tokenizer = TokenizerWorker(saver)
tokenizer.start_all()
tokenizer.join()
saver.join()
output_filename = saver.save_stream()
region = AudioRegion.load(
"tests/data/test_split_10HZ_mono.raw", sr=10, sw=2, ch=1
)
expected_region = AudioRegion.load(output_filename)
self.assertEqual(output_filename, expected_filename)
self.assertEqual(region, expected_region)
self.assertEqual(saver.data, bytes(expected_region))
def test_StreamSaverWorker_raw(self):
with TemporaryDirectory() as tmpdir:
expected_filename = os.path.join(tmpdir, "output")
saver = StreamSaverWorker(
self.reader, expected_filename, export_format="raw"
)
saver.start()
tokenizer = TokenizerWorker(saver)
tokenizer.start_all()
tokenizer.join()
saver.join()
output_filename = saver.save_stream()
region = AudioRegion.load(
"tests/data/test_split_10HZ_mono.raw", sr=10, sw=2, ch=1
)
expected_region = AudioRegion.load(
output_filename, sr=10, sw=2, ch=1, audio_format="raw"
)
self.assertEqual(output_filename, expected_filename)
self.assertEqual(region, expected_region)
self.assertEqual(saver.data, bytes(expected_region))
def test_StreamSaverWorker_encode_audio(self):
with TemporaryDirectory() as tmpdir:
with patch("auditok.workers._run_subprocess") as patch_rsp:
patch_rsp.return_value = (1, None, None)
expected_filename = os.path.join(tmpdir, "output.ogg")
tmp_expected_filename = expected_filename + ".wav"
saver = StreamSaverWorker(self.reader, expected_filename)
saver.start()
tokenizer = TokenizerWorker(saver)
tokenizer.start_all()
tokenizer.join()
saver.join()
with self.assertRaises(AudioEncodingWarning) as rt_warn:
saver.save_stream()
warn_msg = "Couldn't save audio data in the desired format "
warn_msg += "'ogg'. Either none of 'ffmpeg', 'avconv' or 'sox' "
warn_msg += "is installed or this format is not recognized.\n"
warn_msg += "Audio file was saved as '{}'"
self.assertEqual(
warn_msg.format(tmp_expected_filename), str(rt_warn.exception)
)
ffmpef_avconv = [
"-y",
"-f",
"wav",
"-i",
tmp_expected_filename,
"-f",
"ogg",
expected_filename,
]
expected_calls = [
call(["ffmpeg"] + ffmpef_avconv),
call(["avconv"] + ffmpef_avconv),
call(
[
"sox",
"-t",
"wav",
tmp_expected_filename,
expected_filename,
]
),
]
self.assertEqual(patch_rsp.mock_calls, expected_calls)
region = AudioRegion.load(
"tests/data/test_split_10HZ_mono.raw", sr=10, sw=2, ch=1
)
self.assertTrue(saver._exported)
self.assertEqual(saver.data, bytes(region))
if __name__ == "__main__":
unittest.main()
``` |
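The worker tests above all build the same reader/tokenizer/observer pipeline; a stand-alone sketch of that wiring, with the paths, thresholds and calls copied from the tests and nothing else added, is:
```python
from auditok import AudioDataSource
from auditok.workers import TokenizerWorker, PrintWorker

reader = AudioDataSource(
    input="tests/data/test_split_10HZ_mono.raw", block_dur=0.1, sr=10, sw=2, ch=1
)
observers = [PrintWorker(print_format="[{id}] {start} {end}, dur: {duration}")]
tokenizer = TokenizerWorker(
    reader,
    observers=observers,
    min_dur=0.3,
    max_dur=2,
    max_silence=0.2,
    drop_trailing_silence=False,
    strict_min_dur=False,
    eth=50,
)
tokenizer.start_all()           # starts the tokenizer and its observers
tokenizer.join()
tokenizer._observers[0].join()  # same (private) attribute the tests join on
reader.close()
```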
{
"source": "joinemm/miso-bot",
"score": 2
} |
#### File: miso-bot/modules/cache.py
```python
from modules import log
logger = log.get_logger(__name__)
class Cache:
def __init__(self, bot):
self.bot = bot
self.log_emoji = False
self.prefixes = {}
self.rolepickers = set()
self.votechannels = set()
self.autoresponse = {}
self.levelupmessage = {}
self.blacklist = {}
self.marriages = set()
self.starboard_settings = {}
self.starboard_blacklisted_channels = set()
self.event_triggers = {
"message": 0,
"message_delete": 0,
"message_edit": 0,
"reaction_add": 0,
"reaction_remove": 0,
"member_join": 0,
"member_remove": 0,
"guild_join": 0,
"guild_remove": 0,
"member_ban": 0,
"member_unban": 0,
}
self.stats_notifications_sent = 0
self.stats_lastfm_requests = 0
self.stats_html_rendered = 0
bot.loop.create_task(self.initialize_settings_cache())
async def cache_starboard_settings(self):
data = await self.bot.db.execute(
"""
SELECT guild_id, is_enabled, channel_id, reaction_count,
emoji_name, emoji_id, emoji_type, log_channel_id
FROM starboard_settings
"""
)
if not data:
return
for (
guild_id,
is_enabled,
channel_id,
reaction_count,
emoji_name,
emoji_id,
emoji_type,
log_channel_id,
) in data:
self.starboard_settings[str(guild_id)] = [
is_enabled,
channel_id,
reaction_count,
emoji_name,
emoji_id,
emoji_type,
log_channel_id,
]
self.starboard_blacklisted_channels = set(
await self.bot.db.execute(
"SELECT channel_id FROM starboard_blacklist",
as_list=True,
)
)
async def initialize_settings_cache(self):
self.bot.logger.info("Caching settings...")
prefixes = await self.bot.db.execute("SELECT guild_id, prefix FROM guild_prefix")
for guild_id, prefix in prefixes:
self.prefixes[str(guild_id)] = prefix
self.rolepickers = set(
await self.bot.db.execute("SELECT channel_id FROM rolepicker_settings", as_list=True)
)
self.votechannels = set(
await self.bot.db.execute("SELECT channel_id FROM voting_channel", as_list=True)
)
guild_settings = await self.bot.db.execute(
"SELECT guild_id, levelup_messages, autoresponses FROM guild_settings"
)
for guild_id, levelup_messages, autoresponses in guild_settings:
self.autoresponse[str(guild_id)] = autoresponses
self.levelupmessage[str(guild_id)] = levelup_messages
self.blacklist = {
"global": {
"user": set(
await self.bot.db.execute("SELECT user_id FROM blacklisted_user", as_list=True)
),
"guild": set(
await self.bot.db.execute(
"SELECT guild_id FROM blacklisted_guild", as_list=True
)
),
"channel": set(
await self.bot.db.execute(
"SELECT channel_id FROM blacklisted_channel", as_list=True
)
),
}
}
self.marriages = [
set(pair)
for pair in await self.bot.db.execute(
"SELECT first_user_id, second_user_id FROM marriage"
)
]
for guild_id, user_id in await self.bot.db.execute(
"SELECT guild_id, user_id FROM blacklisted_member"
):
try:
self.blacklist[str(guild_id)]["member"].add(user_id)
except KeyError:
self.blacklist[str(guild_id)] = {"member": {user_id}, "command": set()}
for guild_id, command_name in await self.bot.db.execute(
"SELECT guild_id, command_name FROM blacklisted_command"
):
try:
self.blacklist[str(guild_id)]["command"].add(command_name.lower())
except KeyError:
self.blacklist[str(guild_id)] = {
"member": set(),
"command": {command_name.lower()},
}
await self.cache_starboard_settings()
``` |
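A hedged sketch of how the blacklist structure built by `initialize_settings_cache` might be consulted; only the dictionary shape comes from the code above, the helper itself and its name are hypothetical:
```python
def is_blacklisted(cache, guild_id, user_id, command_name):
    """Hypothetical helper; relies only on the shape of cache.blacklist."""
    glob = cache.blacklist["global"]
    if user_id in glob["user"] or guild_id in glob["guild"]:
        return True
    per_guild = cache.blacklist.get(str(guild_id), {})
    return (
        user_id in per_guild.get("member", set())
        or command_name.lower() in per_guild.get("command", set())
    )
```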
{
"source": "Joiner12/TimeVisual",
"score": 2
} |
#### File: TimeVisual/python/DrawMap.py
```python
from pyecharts import options as opts
from pyecharts.charts import Geo
from pyecharts.globals import ChartType, SymbolType
import pandas as pd
from datetime import datetime
extraPosition = {
"多哈": (25.261101, 51.565102),
"德黑兰": (35.7, 51.41666),
"缅甸仰光": (16.77, 96.15),
"芝加哥": (41.85, 87.683-180),
"加德满都": (27.7, 85.3166667),
"马德拉斯": (13.22, 81.33),
"马德里": (40.43, 3.7),
"邦达": (31.13, 97.18),
"稻城亚丁": (29.323056, 100.053333),
"新加坡樟宜": (1.36604863171, 104.003205457),
"金边": (11.3623, 104.9154),
"曼谷素万那普": (13.083, 100.483),
"九寨黄龙": (32.85, 103.68),
"孟买": (18.93, 72.85)
}
def DrawMap(FlightArrivalFile="..//data//FlightArrival.xlsx",
FlightDepartureFile="..//data//FlightDeparture.xlsx",**kw):
if 'dataDay' in kw:
dataDay = kw['dataDay']
else:
dataDay = 'test Day'
# load data from excle
ArrialInfo = pd.read_excel(FlightArrivalFile)
DepartureInfo = pd.read_excel(FlightDepartureFile)
FromCd = DepartureInfo['目的地'].to_list()
ToCd = ArrialInfo['始发地'].to_list()
FromCdD = dict()
ToCdD = dict()
for k in FromCd:
if k in FromCdD.keys():
FromCdD[k] += 1
else:
FromCdD[k] = 1
for j in ToCd:
if j in ToCdD.keys():
ToCdD[j] += 1
else:
ToCdD[j] = 1
geoData1 = list()
geoData2 = list()
geoData3 = list()
testGeo = Geo()
for k1, k2 in zip(FromCdD.keys(), FromCdD.values()):
        # China bounding box: lat [3.86, 53.55], lon [73.66, 135.05]
try:
a = testGeo.get_coordinate(k1)
if a[1] >= 3.86 and a[1] <= 53.55:
if a[0] >= 73.66 and a[0] <= 135.05:
geoData1.append((k1, k2))
geoData2.append(("成都双流", k1))
except:
pass
for k1, k2 in zip(ToCdD.keys(), ToCdD.values()):
try:
if True:
b = testGeo.get_coordinate(str(k1))
if b[1] >= 3.86 and b[1] <= 53.55:
if b[0] >= 73.66 and b[0] <= 135.05:
geoData3.append((k1, "成都双流"))
else:
geoData3.append((k1, "成都双流"))
except:
pass
geoAd = geoData2+geoData3
c = Geo(init_opts=opts.InitOpts(page_title="Map-CDC",theme="light", bg_color="transparent"))
# 添加其他位置
for j in extraPosition.keys():
c.add_coordinate(j, extraPosition[j][1], extraPosition[j][0])
if False:
c = (
Geo(init_opts=opts.InitOpts(page_title="Map-CDC", bg_color="#2B3427"))
.add_schema(maptype="china-cities")
.add("geo", geoData1, color="blue", symbol_size=5)
.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
.set_global_opts(title_opts=opts.TitleOpts(title="CDC"))
.render("..//html//geoTest.html")
)
else:
c.add_schema(maptype="china-cities")
c.add("Destination", geoData1, type_=ChartType.EFFECT_SCATTER)
c.add("Arrrial/Departure", geoAd, type_=ChartType.LINES,
effect_opts=opts.EffectOpts(symbol=SymbolType.ARROW,
symbol_size=3,
color="#475EEA"),
linestyle_opts=opts.LineStyleOpts(curve=0.3, opacity=0.1, color="#5A98D4"))
c.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
        c.set_global_opts(title_opts=opts.TitleOpts(title="Shuangliu International Airport-"+dataDay,
subtitle="Update:"
+datetime.now().strftime('%Y-%m-%d')),
visualmap_opts=opts.VisualMapOpts(is_piecewise=True, max_=50))
c.render("..//html//geoTest.html")
print("draw map run finished...\n")
return c
if __name__ == "__main__":
DrawMap(FlightArrivalFile="..//data//FlightArrival-2021-08-13.xlsx",
FlightDepartureFile="..//data//FlightDeparture-2021-08-13.xlsx")
# DrawMap()
```
#### File: TimeVisual/python/DrawWordCloud.py
```python
from pyecharts import options as opts
from pyecharts.charts import WordCloud
from os import path
from datetime import datetime
"""
函数:
调用pyecharts绘制词云
定义:
def DrawWordCloud(words, renderfile="", backgroundpic="")
输入:
words,词频+词语
renderfile,渲染输出文件
backgroundpic,背景图片
输出:
c,html
"""
def DrawWordCloud(words, *w, **kw):
c = WordCloud(init_opts=opts.InitOpts(
page_title="word cloud "+datetime.now().strftime('%Y-%m-%d'),
theme="shine"))
if 'backgroundpic' in kw:
backgroundpic = kw['backgroundpic']
else:
backgroundpic = str()
if not path.isfile(backgroundpic):
c.add("",
words,
word_size_range=[20, 80],
# 将图片放在指定位置,然后读取
# mask_image=backgroundpic,
shape="circle")
else:
c.add("",
words,
word_size_range=[20, 80],
# 将图片放在指定位置,然后读取
mask_image=backgroundpic,
shape="circle")
c.render("..//html//wordCloudTest.html")
print("word cloud run finished...\n")
return c
if __name__ == "__main__":
#
if True:
data = [("幺鸡", "12"), ("垮", "50"), ("🀍", "7"), ("LOL", "20"),
("🔞", "3"), ("pubg", "15"), ("🤣", "21"), ("杠", "18"),
("🈹", "12"), ("⚅", "7"), ("🤏", "23"), ("蹦子", "18"),
("下棋", "15")]
pic = "..//pic//zan.png"
DrawWordCloud(data, backgroundpic="")
```
#### File: TimeVisual/python/ReferenceNews.py
```python
import requests
from bs4 import BeautifulSoup
from datetime import date, datetime
import re
import os
import dominate
from dominate.tags import *
class RefNews():
BaseUrl = r"http://www.jdqu.com"
html = 0
picDir = ''
filePath = ''
htmlFile = ''
pageDate = ''
pictures = list()
def __init__(self, *pk, **pkw):
super().__init__()
# change workspace into script dir
self.filePath = os.path.split(os.path.abspath(__file__))[0]
os.chdir(self.filePath)
# get base web
self.headers = {
'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36 Edg/94.0.992.31'
}
self.html = requests.get(self.BaseUrl, headers=self.headers)
if not self.html.status_code == 200:
return
else:
print('Request From:', self.BaseUrl, 'Success')
todayRnsLink = self._GetPaperLink()
validSubUrls = self._ParseValidPageUrl(
todayRnsLink[0], todayRnsLink[1])
if not validSubUrls is None:
self._DownLoadPagePicture(validSubUrls)
self._WriteHtml()
def _GetPaperLink(self, *pk, **pkw):
newspaper = dict()
self.html.encoding = self.html.apparent_encoding
htmlBs = BeautifulSoup(self.html.text, "lxml")
ref = htmlBs.select('.img-wrap a')
for j in ref:
papertitle = j.get("title")
paperLink = self.BaseUrl + j.get("href")
# print(papertitle, paperLink)
newspaper[papertitle] = paperLink
# todo:filter newspaper by date and name
todayReferenceNewsLink = list(newspaper.values())
todayReferenceNewsLink = todayReferenceNewsLink[0]
todayReferenceNewsName = list(newspaper.keys())
todayReferenceNewsName = todayReferenceNewsName[0].replace(
'点击阅读 ', '')
# print(todayReferenceNewsName[0])
return (todayReferenceNewsName, todayReferenceNewsLink)
def _ParseValidPageUrl(self, papername="", paperbaselink="", *pk, **pkw):
name = papername
mat = re.findall(r"(\d{4}-\d{1,2}-\d{1,2})", name)
self.pageDate = mat[0]
# folder to save picture
if datetime.strptime(self.pageDate, '%Y-%m-%d').date() == date.today():
print("Today News", papername)
else:
print("Yesterday Flower", papername)
# check for valid picture page url
subUrl = paperbaselink.replace('.html', '')
validPageLinks = list()
try:
pageCounter = 0
while True:
pageCounter += 1
testUrl = subUrl+'-'+str(pageCounter)+'.html'
html = requests.get(testUrl, headers=self.headers)
if not html.status_code == 200 or pageCounter > 40:
break
validPageLinks.append(testUrl)
except:
print('page is not accessible')
return
return validPageLinks
def _DownLoadPagePicture(self, urls=[], *pk, **pkw):
# picture folder
self.picDir = os.path.join(self.filePath, 'NewsPic')
if not os.path.isdir(self.picDir):
os.mkdir(self.picDir)
self.picDir = os.path.join(self.picDir, self.pageDate)
# specific day's folder
if not os.path.isdir(self.picDir):
os.mkdir(self.picDir)
for url in urls:
html = requests.get(url, headers=self.headers)
html.encoding = html.apparent_encoding
htmlBs = BeautifulSoup(html.text, "lxml")
imgHtml = htmlBs.select('img')
# imgName = imgHtml[0].get('alt')
imgUrl = imgHtml[0].get('src')
curPic = os.path.join(self.picDir, imgUrl.split('/')[-1])
# print(curPic)
r = requests.get(imgUrl)
with open(curPic, "wb")as f:
f.write(r.content)
f.close()
i = urls.index(url)
self.pictures.append(curPic)
print('\rLoading:{0}{1}%'.format(
'▉'*(i+1), ((i+1)*100/len(urls))), end='')
def _WriteHtml(self, *pw, **pkw):
self.htmlFile = os.path.join(
self.picDir, 'ReferenceNews'+self.pageDate+'.html')
doc = dominate.document(title='参考消息')
doc.body['style'] = """background-color:white;text-align:center;"""
with doc.head:
link(rel='stylesheet', href='style.css')
script(type='text/javascript', src='script.js')
with doc:
div(id='title').add(
img(src="https://gitee.com/RiskyJR/pic-bed/raw/master/20211012101706.png"))
with doc:
for i in self.pictures:
with div(id="content", style="text-align:center;"):
img(src=i)
with open(self.htmlFile, 'w') as f:
f.write(doc.render())
if __name__ == "__main__":
Rn = RefNews()
```
#### File: TimeVisual/python/TimeCharts.py
```python
from os import path, listdir, remove
from pyecharts.faker import Faker
from readDataFromExcel import DataFromExcel
import math
from datetime import datetime, date, timedelta
import pandas as pd
from DrawBar import DrawBar
from DrawMap import DrawMap
from DrawLine import DrawLine
from DrawWordCloud import DrawWordCloud
from DrawPie import DrawPie
from GetFlightInfo import FlightInfo
from bs4 import BeautifulSoup
from DrawImage import UpdateTimeLineImage
class TimeCharts():
def __init__(self, excelFile, *w, **kw):
self.exlsData = list()
        # check that the data file exists
        # todo: also validate the *.xlsx file extension
if path.isfile(excelFile):
self.gatte = excelFile
df = DataFromExcel(self.gatte)
self.exlsData = df.getData()
else:
self.gatte = "not a exist file"
print("%s,doesn't exist\n" % (excelFile))
"""
function:
获取指定日期(2021-8-1)段内的记录数据
definition:
getDateSpecTime(self, startDay: str = "today", endDay: str = "today")
params:
startDay,起始日期
endDay,结束日期
return:
pyecharts-Pie
"""
def getDateSpecTime(self,
startDay: str = "today",
endDay: str = "today",
**kw):
setTimeStrFormat = '%Y-%m-%d'
retSegData = pd.DataFrame(columns=['起始', '终止', '事件', '时长', 'other'])
if startDay == "today":
startDay_i = datetime.combine(date.today(), datetime.min.time())
else:
startDay_i = datetime.strptime(startDay, setTimeStrFormat)
if endDay == "today":
endDay_i = datetime.combine(
date.today(), datetime.min.time()) + timedelta(days=1)
else:
endDay_i = datetime.strptime(endDay, setTimeStrFormat)
curSheet = self.exlsData
startTickList = curSheet['起始'].tolist()
for j in startTickList:
# year month day
jJudge = j.strftime(setTimeStrFormat)
jJudge = datetime.strptime(jJudge, setTimeStrFormat)
if jJudge >= startDay_i and jJudge <= endDay_i:
curIndex = startTickList.index(j)
retSegData = retSegData.append(
{
'起始': curSheet.iloc[curIndex, 0],
'终止': curSheet.iloc[curIndex, 1],
'事件': curSheet.iloc[curIndex, 2],
'时长': curSheet.iloc[curIndex, 3],
'other': curSheet.iloc[curIndex, 4],
},
ignore_index=True)
return retSegData
"""
function:
daily pie(根据dateDraw设置参数绘制饼图)
definition:
def dailyPie(self,startDay: str = "today", endDay: str = "today")
params:
startDay,起始日期
endDay,结束日期
return:
pyecharts-Pie
"""
def dailyPie(self, startDay: str = "today", endDay: str = "today", **kw):
try:
# today
startDayIn = startDay
endDayIn = endDay
dataDraw = self.getDateSpecTime(startDayIn, endDayIn)
pieData = mergeListToDict(dataDraw['事件'].tolist(),
dataDraw['时长'].tolist())
titleIn = startDay
if startDay == "today":
titleIn = date.today().strftime('%Y-%m-%d')
            # deliberately raise ZeroDivisionError when pieData is empty so the except branch runs
1 / len(pieData)
return DrawPie(pieData, title=titleIn)
except:
return DrawPie(pieData, title=titleIn)
"""
function:
绘制一段时间内事件图云(默认为最近一周事件)
definition:
periodWordCloud(self)
params:
startDay,起始日期
endDay,结束日期
return:
pyecharts-Pie
"""
def periodWordCloud(self, endDay="today", *k, **kw):
try:
endDayIn = endDay
if endDay == "today":
endDayIn = date.today().strftime('%Y-%m-%d')
startDayIn = datetime.strptime(endDayIn,
'%Y-%m-%d') - timedelta(days=7)
startDayIn = startDayIn.strftime('%Y-%m-%d')
dataDraw = self.getDateSpecTime(startDayIn, endDayIn)
word_dict = dict()
word_mesh = list()
eventList = dataDraw['事件'].tolist()
eventStr = str()
for j in eventList:
eventStr += str(j) + "-"
eventSplit = eventStr.split("-")
for k in eventSplit:
if k in word_dict.keys():
word_dict[k] += 1
else:
word_dict[k] = 1
for i in word_dict.keys():
word_mesh.append([i, word_dict[i]])
# check the list is empty
1 / len(word_mesh)
except:
word_mesh = [("幺鸡", "12"), ("垮", "50"), ("🀍", "7"), ("LOL", "20"),
("🔞", "3"), ("pubg", "15"), ("🤣", "21"), ("杠", "18"),
("🈹", "12"), ("⚅", "7"), ("🤏", "23"), ("蹦子", "18"),
("下棋", "15")]
return DrawWordCloud(word_mesh, backgroundpic="")
"""
function:
绘制一段时间内事件时序图(默认为最近一天事件)
definition:
dailyLine(self, day="today")
params:
day,日期('%Y-%m-%d')
return:
pyecharts-Line
"""
def dailyLine(self, startDay: str = "today", endDay: str = "today", **kw):
try:
startDayIn = startDay
endDayIn = endDay
titleIn = startDay
dataDraw = self.getDateSpecTime(startDayIn, endDayIn)
startTickList = dataDraw['起始'].tolist()
if startDay == "today":
titleIn = date.today().strftime('%Y-%m-%d')
event_x = list()
event_y = list()
for j in startTickList:
curIndex = startTickList.index(j)
event_x.append(
str(dataDraw.iloc[curIndex, 2]) + '\n' +
j.strftime("%H-%M"))
event_y.append(int(dataDraw.iloc[curIndex, 3]))
xDataIn = event_x
yDataIn = event_y
1 / (len(xDataIn) * len(yDataIn))
except:
xDataIn = Faker.choose()
yDataIn = Faker.values()
titleIn = "Test Data"
return DrawLine(xDataIn, yDataIn, title=titleIn)
"""
function:
绘制一段时间内事件柱状图(默认为最近一天事件)
definition:
dailyBar(self, day="today")
params:
day,日期('%Y-%m-%d')
return:
pyecharts-Bar
"""
def dailyBar(self, startDay: str = "today", endDay: str = "today", **kw):
try:
startDayIn = startDay
endDayIn = endDay
titleIn = startDay
dataDraw = self.getDateSpecTime(startDayIn, endDayIn)
startTickList = dataDraw['起始'].tolist()
if startDay == "today":
titleIn = date.today().strftime('%Y-%m-%d')
event_x = list()
event_y = list()
for j in startTickList:
curIndex = startTickList.index(j)
event_x.append(
str(dataDraw.iloc[curIndex, 2]) + '\n' +
j.strftime("%H-%M"))
event_y.append(int(dataDraw.iloc[curIndex, 3]))
xDataIn = event_x
yDataIn = event_y
1 / (len(xDataIn) * len(yDataIn))
except:
xDataIn = Faker.choose()
yDataIn = Faker.values()
titleIn = "Test Data"
return DrawBar(xDataIn, yDataIn, title=titleIn)
"""
函数:
航班信息
定义:
flightMap(self)
输入:
updateData,bool
输出:
pyecharts,geo
"""
def flightMap(self, updateData=True, *k, **kw):
        # delete other flight information data files
if 'removeFlightData' in kw and kw['removeFlightData']:
dataRelPath = './/..//data'
remainData = [
'FlightDeparture-test.xlsx', 'FlightArrival-test.xlsx',
'FlightDeparture-' + datetime.now().strftime('%Y-%m-%d') +
'.xlsx', 'FlightArrival-' +
datetime.now().strftime('%Y-%m-%d') + '.xlsx'
]
a = listdir(dataRelPath)
b = path.abspath(dataRelPath)
for k in a:
if (not k in remainData) and ('Flight' in k):
remove(path.join(b, k))
# path.listdir()
if updateData:
filePostfix = datetime.now().strftime("%Y-%m-%d") + ".xlsx"
ArrivalFile = "..//data//FlightArrival-" + filePostfix
DepartureFile = "..//data//FlightDeparture-" + filePostfix
if not path.isfile(ArrivalFile) or not path.isfile(DepartureFile):
FlightInfo(ArrivalFile, DepartureFile)
else:
ArrivalFile = "..//data//FlightArrival-test.xlsx"
DepartureFile = "..//data//FlightDeparture-test.xlsx"
return DrawMap(FlightArrivalFile=ArrivalFile,
FlightDepartureFile=DepartureFile)
"""
函数:
水平时间线(图)
定义:
horizontalLineImage(self)
输入:
none
输出:
pyecharts,image
"""
def horizontalLineImage(self,
startDay: str = "today",
endDay: str = "today",
**kw):
try:
startDayIn = startDay
endDayIn = endDay
dataDraw = self.getDateSpecTime(startDayIn, endDayIn)
startTickList = dataDraw['起始'].tolist()
startTickIn = [
x.strftime("%Y-%m-%d %H:%M:%S") for x in startTickList
]
eventNameIn = [str(y) for y in dataDraw['事件'].tolist()]
eventLastIn = [int(z) for z in dataDraw['时长'].tolist()]
1 / len(startTickIn) / len(eventNameIn) / len(eventLastIn)
except:
startTickIn = [
'2021-08-09 09:00:00', '2021-08-09 09:45:00',
'2021-08-09 11:11:00', '2021-08-09 14:30:00',
'2021-08-09 15:18:00', '2021-08-09 16:40:00',
'2021-08-09 17:19:00'
]
eventNameIn = [
'开会', '发票', 'visual-code', '舆情分析', 'AOA-Paper', 'AOA-Paper',
'visual-code'
]
eventLastIn = [30, 78, 33, 47, 69, 39, 15]
UpdateTimeLineImage(startTickIn, eventNameIn, eventLastIn)
"""
函数:
将两个list合并为dict,list_name标签列表,list_value值列表
定义:
def mergeListToDict(list_name, list_value)
输入:
list_name,name(list)
list_value,value(list)
输出:
{'list_name',list_value}
"""
def mergeListToDict(list_name, list_value):
    # drop NaN entries
list_name_c = list()
for i in list_name:
if isinstance(i, float):
if not math.isnan(i):
list_name_c.append(i)
else:
list_name_c.append(i)
list_value_c = [x for x in list_value if not math.isnan(x)]
mergeDict = dict()
for k, j in zip(list_name_c, list_value_c):
if k in list(mergeDict.keys()):
mergeDict[k] = j + mergeDict[k]
else:
mergeDict[k] = j
return mergeDict
def modifyMainPage(mainpagefile="..//html//mainpage.html"):
with open(mainpagefile, "r+", encoding='utf-8') as html:
html_bf = BeautifulSoup(html, 'lxml')
# 修改网页背景色
body = html_bf.find("body")
body["style"] = "background-color:#D6D7C5;"
# 修改header标签
header = html_bf.find("header")
if header is None:
header = html_bf.new_tag("header")
html_bf.html.body.insert(1, header)
header.string = datetime.now().strftime("%Y-%m-%d")
header["style"] = "background-color:#D6D7C5;font-size:50px;" + \
"text-align:center;font-family:'Impact';"+"color:#58B4B9;"
html_new = str(html_bf)
html.seek(0, 0)
html.truncate()
html.write(html_new)
html.close()
"""
main page
"""
def mainPage():
# generate module
if False:
Tc_1 = TimeCharts('..//data//gatte-test-1.xlsx')
Tc_1.dailyPie(startDay="2021-09-22", endDay="2021-09-23")
Tc_1.periodWordCloud(endDay="2021-09-23")
Tc_1.dailyLine(startDay="2021-09-22", endDay="2021-09-23")
Tc_1.dailyBar(startDay="2021-09-22", endDay="2021-09-23")
Tc_1.flightMap(updateData=True, removeFlightData=True)
Tc_1.horizontalLineImage(startDay="2021-09-22", endDay="2021-09-22")
modifyMainPage()
else:
Tc_1 = TimeCharts('..//data//gatte-test-1.xlsx')
Tc_1.dailyPie()
Tc_1.periodWordCloud()
Tc_1.dailyLine()
Tc_1.dailyBar()
Tc_1.flightMap(updateData=True, removeFlightData=True)
Tc_1.horizontalLineImage()
modifyMainPage()
if __name__ == "__main__":
mainPage()
print('main page run finished....')
```
#### File: TimeVisual/ToolPy/AutoScript.py
```python
"""
Automatically process pending items in the official-document (OA) system
"""
from selenium import webdriver
# from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.common.by import By
from numpy import random
import time
def testWebdriver():
# _ _ __ _ _ _ _ _
# ___| |__ ___ ___| | __ / _| ___ _ __ | |_ ___ __| | ___ | (_)___| |_
# / __| '_ \ / _ \/ __| |/ / | |_ / _ \| '__| | __/ _ \ / _` |/ _ \ | | / __| __|
#| (__| | | | __/ (__| < | _| (_) | | | || (_) | | (_| | (_) | | | \__ \ |_
# \___|_| |_|\___|\___|_|\_\ |_| \___/|_| \__\___/ \__,_|\___/ |_|_|___/\__|
print(
r" _ _ __ _ _ _ _ _ "
)
print(
r" ___| |__ ___ ___| | __ / _| ___ _ __ | |_ ___ __| | ___ | (_)___| |_"
)
print(
r" / __| '_ \ / _ \/ __| |/ / | |_ / _ \| '__| | __/ _ \ / _` |/ _ \ | | / __| __|"
)
print(
r"| (__| | | | __/ (__| < | _| (_) | | | || (_) | | (_| | (_) | | | \__ \ |_"
)
print(
r" \___|_| |_|\___|\___|_|\_\ |_| \___/|_| \__\___/ \__,_|\___/ |_|_|___/\__|"
)
"""
    embedded helper function
"""
def wait(locator, timeout=2):
WebDriverWait(browser, timeout).until(
expected_conditions.presence_of_all_elements_located(locator))
TargetBaseUrl = r'http://172.18.0.28:8080/cas/login?service=http://172.18.0.29/cas#/portal/index'
# browserOptions = Options()
# browserOptions.add_argument('headless')
# chrome
# browser = webdriver.Chrome(executable_path=r"D:\Code\TimeVisual\ToolPy\driver\msedgedriver.exe",
# options=browserOptions)
# edge
EDGE = {
"browserName": "MicrosoftEdge",
"version": "",
"platform": "WINDOWS",
        # the important part is the ms:edgeOptions block below
"ms:edgeOptions": {
'extensions': [],
'args': [
'--headless'
# '--disable-gpu',
# '--remote-debugging-port=9222',
]
}
}
browser = webdriver.Edge(
executable_path=r"D:\Code\TimeVisual\ToolPy\driver\msedgedriver.exe",
capabilities=EDGE)
# browser.set_window_size(200, 200)
browser.get(TargetBaseUrl)
# login status
longInButtonId = "normalLoginButton"
wait((By.ID, longInButtonId))
browser.find_element_by_id(longInButtonId)
browser.find_element_by_id("username").clear()
browser.find_element_by_id("username").send_keys('wuhao3')
browser.find_element_by_id("password").clear()
browser.find_element_by_id("password").send_keys('<PASSWORD>')
browser.find_element_by_id(longInButtonId).click()
# for k in range(10):
# browser = click_upcoming_item(browser)
while True:
try:
browser = click_upcoming_item(browser)
except:
break
print(r" _ ")
print(r" ___ _ __ ___ _ __ | |_ _ _ ")
print(r" / _ \ '_ ` _ \| '_ \| __| | | |")
print(r" | __/ | | | | | |_) | |_| |_| |")
print(r" \___|_| |_| |_| .__/ \__|\__, |")
print(r" |_| |___/ ")
browser.quit()
def click_upcoming_item(browser, *args, **kwargs):
"""处理待办子功能模块
参数
----------
browser : webdriver
浏览器驱动
返回值
-------
browser : webdriver
浏览器驱动
"""
def wait(locator, timeout=2):
WebDriverWait(browser, timeout).until(
expected_conditions.presence_of_all_elements_located(locator))
# scroll for load element js
browser.execute_script("window.scrollBy(0,3000)")
wait((
By.XPATH,
r'//*[@id="app"]/div/div/div[2]/div[3]/div/div[2]/div[2]/div[2]/div/ul/li[1]/a'
))
elementUpcoming = browser.find_element_by_xpath(
r'//*[@id="app"]/div/div/div[2]/div[3]/div/div[2]/div[2]/div[2]/div/ul/li[1]/a'
)
print(elementUpcoming.text)
browser.execute_script("arguments[0].click();", elementUpcoming)
time.sleep(3 + random.rand())
all_h = browser.window_handles
browser.switch_to.window(all_h[1])
browser.execute_script("window.scrollBy(0,3000)")
time.sleep(3 + random.rand())
wait((By.CSS_SELECTOR, '.submit_btn.form_btn1'))
browser.execute_script(
"arguments[0].click();",
browser.find_element_by_css_selector('.submit_btn.form_btn1'))
time.sleep(1 + random.rand())
# traceback
browser.switch_to.window(all_h[0])
browser.execute_script("window.scrollBy(0,1)")
return browser
if __name__ == "__main__":
# for k in range(0, 20):
testWebdriver()
```
#### File: TimeVisual/ToolPy/passwordblasting.py
```python
import queue
from concurrent.futures import ThreadPoolExecutor
import zipfile
import itertools
class BoundedThreadPoolExecutor(ThreadPoolExecutor):
def __init__(self, max_workers=None, thread_name_prefix=''):
super().__init__(max_workers, thread_name_prefix)
        self._work_queue = queue.Queue(self._max_workers * 2)  # bound the work queue size
def extract(file, password):
if not flag:
return
file.extractall(path='.', pwd=''.join(password).encode('utf-8'))
def result(f):
exception = f.exception()
if not exception:
        # no exception from extract() means the password was found
print('密码为:', f.pwd)
global flag
flag = False
if __name__ == '__main__':
    # flag that records whether the password has been found
    flag = True
    # create a thread pool
pool = ThreadPoolExecutor(100)
nums = [str(i) for i in range(10)]
upper_chrs = [chr(i) for i in range(65, 91)]
lower_chrs = [chr(i) for i in range(97, 123)]
    # try numeric-only passwords, 1 to 10 digits long
    for k in range(1, 11, 1):
        if not flag:
            break
        print('Trying numeric passwords of length: %d' % k)
        # product (not permutations) so that digits can repeat, e.g. "1111"
        password_lst = itertools.product(nums, repeat=k)
        # open a handle on the zip archive
zfile = zipfile.ZipFile(
r"C:\Users\W-H\Desktop\python科学计算第二版_.zip", 'r')
for pwd in password_lst:
# print('try:', pwd)
if not flag:
break
f = pool.submit(extract, zfile, pwd)
f.pwd = pwd
f.pool = pool
f.add_done_callback(result)
    # try 6- to 10-character passwords of digits plus upper/lower-case letters
    for k in range(6, 11, 1):
        if not flag:
            break
        print('Trying digit+letter passwords of length: %d' % k)
        # again use product so characters can repeat
        password_lst = itertools.product(nums + upper_chrs + lower_chrs, repeat=k)
        # open a handle on the zip archive
zfile = zipfile.ZipFile(
r"C:\Users\W-H\Desktop\python科学计算第二版_.zip", 'r')
for pwd in password_lst:
# print('try:', pwd)
if not flag:
break
f = pool.submit(extract, zfile, pwd)
            f.pwd = pwd
f.pool = pool
f.add_done_callback(result)
```
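The loops above enumerate candidates with `itertools.product` rather than `itertools.permutations`; the distinction matters for a brute-force search because permutations never reuses a character, so candidates such as "1111" would be skipped entirely. A two-character comparison:
```python
import itertools

digits = "0123456789"
perms = ["".join(p) for p in itertools.permutations(digits, 2)]
prods = ["".join(p) for p in itertools.product(digits, repeat=2)]
print(len(perms), perms[:3])  # 90  ['01', '02', '03']  (no repeated digits)
print(len(prods), prods[:3])  # 100 ['00', '01', '02']  (full search space)
```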
#### File: TimeVisual/ToolPy/PicVideo.py
```python
import cv2
import numpy as np
import os
# PIL : Python Imaging Library
from PIL import Image
import re
class PicVideo():
def __init__(self):
pass
def VideoToImage(self, videofile):
# 导入所需要的库
# 定义保存图片函数
# image:要保存的图片名字
# addr;图片地址与相片名字的前部分
# num: 相片,名字的后缀。int 类型
def save_image(image, addr, num):
address = addr + str(num) + '.png'
cv2.imwrite(address, image)
# 读取视频文件
videoCapture = cv2.VideoCapture(videofile)
# fps = videoCapture.get(cv2.CAP_PROP_FPS) # 获取帧率
# width = int(videoCapture.get(cv2.CAP_PROP_FRAME_WIDTH)) # 获取宽度
# height = int(videoCapture.get(cv2.CAP_PROP_FRAME_HEIGHT)) # 获取高度
# suc = videoCapture.isOpened() # 是否成功打开
# 读帧
success, frame = videoCapture.read()
i = 0
timeF = 4
j = 0
while success:
i = i + 1
if (i % timeF == 0):
j = j + 1
save_image(frame, './image/', j-1)
print('save image:', i)
success, frame = videoCapture.read()
videoCapture.release()
def Png2Icon():
inputimg = r'D:\Code\TimeVisual\python\image\1.png'
thresh = 255
img1 = cv2.imread(inputimg, 0)
ret, thresh1 = cv2.threshold(img1, 127, 255, cv2.THRESH_BINARY)
img = cv2.imread(inputimg)
b, g, r = cv2.split(img)
b[thresh1 > thresh] = 0
g[thresh1 > thresh] = 0
r[thresh1 > thresh] = 0
alpha_channel = np.zeros(b.shape, dtype=b.dtype)
alpha_channel[thresh1 < thresh] = 255
img = cv2.merge([b, g, r, alpha_channel])
cv2.imwrite("1.png", img)
def Pic2Icon(picfile, tarpath):
size = (32, 32)
# 分离文件名与扩展名
[filepath, filename] = os.path.split(picfile)
tmp = os.path.splitext(filename)
# 因为python文件跟图片在同目录,所以需要判断一下
if tmp[1] == '.png':
outName = tmp[0] + '.ico'
# 打开图片并设置大小
# im = Image.open(picfile).resize(size)
im = Image.open(picfile)
try:
# 图标文件保存至icon目录
path = os.path.join(tarpath, outName)
# im.save(path, format='ICO', sizes=[(32, 32)])
im.save(path, format='ICO')
print('{} --> {}'.format(picfile, outName))
except IOError:
print('connot convert :', picfile)
def CaputerGif(giffile, tarFolder):
if not os.path.exists(tarFolder):
os.mkdir(tarFolder)
# 常用格式图片保存为透明背景图片
def AlphaBackGround(PicFile):
img = cv2.imread(PicFile)
# cv2.imshow('src', img)
# 让我们使用新的宽度和高度缩小图像
down_width = 32
down_height = 32
down_points = (down_width, down_height)
img = cv2.resize(img, down_points, interpolation=cv2.INTER_LINEAR)
# print(img.shape)
result = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)
if True:
for i in range(0, img.shape[0]): # 访问所有行
for j in range(0, img.shape[1]): # 访问所有列
if img[i, j, 0] > 200 and img[i, j, 1] > 200 and img[i, j, 2] > 200:
result[i, j, 3] = 0
splitOut = os.path.split(PicFile)
picName = os.path.splitext(splitOut[-1])
# picName = os.path.join(splitOut[0], picName[0]+'_alpha'+'.png')
newname = os.path.join(
splitOut[0], r'light_monkey_'+str(picName[0])+'.png')
cv2.imwrite(newname, result, [int(cv2.IMWRITE_PNG_COMPRESSION), 0])
Pic2Icon(newname, r'D:\Code\TimeVisual\python\icon')
os.remove(newname)
newname = os.path.join(
splitOut[0], r'dark_monkey_'+str(picName[0])+'.png')
cv2.imwrite(newname, result, [int(cv2.IMWRITE_PNG_COMPRESSION), 0])
Pic2Icon(newname, r'D:\Code\TimeVisual\python\icon')
os.remove(newname)
else:
B, G, R = cv2.split(img)
_, Alpha = cv2.threshold(R, 200, 255, cv2.THRESH_BINARY)
cv2.imshow('thres', Alpha)
B2, G2, R2, A2 = cv2.split(result)
A2 = Alpha
result = cv2.merge([B2, G2, R2, A2]) # 通道合并
splitOut = os.path.split(PicFile)
picName = os.path.splitext(splitOut[-1])
picName = os.path.join(splitOut[0], picName[0]+'_alpha'+'.png')
cv2.imwrite(picName, result)
print(result.shape)
# cv2.imshow('result', result)
if False:
B, G, R, A = cv2.split(result)
cv2.imshow('B', B)
cv2.imshow('G', G)
cv2.imshow('R', R)
cv2.imshow('A', A)
cv2.waitKey()
cv2.destroyAllWindows()
# convert rgb (224,224,3 ) to gray (224,224) image
def rgb2gray(rgb):
return np.dot(rgb[..., :3], [0.299, 0.587, 0.114]) # 分别对应通道 R G B
def BatchRename():
srcDir = r"C:\Users\W-H\Desktop\RunCat_for_windows-master\RunCat\resources\doge"
tarDir = r"C:\Users\W-H\Desktop\RunCat_for_windows-master\RunCat\resources\catnew"
files = os.listdir(srcDir)
for pic in files:
out = re.findall('\d{1,}', pic)
oldname = os.path.join(srcDir, pic)
if re.findall('light.*', pic):
newname = os.path.join(
tarDir, r'light_cat_'+str(int(out[0]))+'.png')
else:
newname = os.path.join(
tarDir, r'dark_cat_'+str(int(out[0]))+'.png')
os.rename(oldname, newname)
print(oldname, out[0])
def BatchAlpha():
pics = os.listdir(r'D:\Code\TimeVisual\python\image')
for k in pics:
AlphaBackGround(os.path.join(r'D:\Code\TimeVisual\python\image', k))
# 灰度图
def TransIntoGray(picfile, *pk, **pkw):
img = cv2.imread(picfile) # 读入图片
Grayimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # 先要转换为灰度图片
ret, thresh = cv2.threshold(
Grayimg, 150, 255, cv2.THRESH_BINARY) # 这里的第二个参数要调,是阈值!!
cv2.imwrite('33.png', thresh) # 存成一张图片!!!
if __name__ == "__main__":
PicVideo().VideoToImage("aysao-e2vn1.gif")
# BatchRename()
BatchAlpha()
# Pic2Icon(r'D:\Code\TimeVisual\python\33.png', r'D:\Code\TimeVisual\python')
# TransIntoGray(r'D:\Code\TimeVisual\python\image\0.png')
``` |
{
"source": "joinik/meiduo_mall",
"score": 2
} |
#### File: apps/verifications/views.py
```python
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render
# Create your views here.
from django.views import View
from django_redis import get_redis_connection
from utils.captcha.captcha import captcha
from celery_tasks.sms.tasks import celery_send_sms_code
"""
图片验证码
前端: 用户点击 图片验证码 生成 UUID,拼接url , 发送 请求
后端:
请求 get 地址里的UUID
路由 get '/inage_codes/<uuid>/'
业务逻辑: 接收uuid ,生成 图片验证码 ,保存到redis 数据库
响应 返回二进制数据
"""
class ImageCodeView(View):
def get(self, request, uuid):
# 1. 获取uuid,
print(uuid)
# 2. 生成验证码图片,二进制数据
text, image = captcha.generate_captcha()
print(text)
# 3. 存到redis UUID作为key
# get_redis_connection 获取redis 数据库
redis_cli = get_redis_connection("image_code")
# uuid 为key 120s是过期时间
redis_cli.setex(uuid,120,text)
# 4. 返回二进制图片数据
return HttpResponse(image,content_type='image/jpeg')
"""
前端:用户输入手机号, 点击获取短信验证码, 发送 axiou 请求
后端:
接收请求; 参数 1,手机号,2图片验证码,3.uuid,
逻辑: 验证参数 图片验证码,生成短信验证码, 存入redis 数据库, 发送短信
响应 路由 "sms_codes/<mobile>/?image_code=xxx&image_code_id =xxxxx"
"""
class MsmCodeView(View):
def get(self, request, mobile):
# 1. 获取参数,
mobile = mobile
image_code = request.GET.get('image_code')
uuid = request.GET.get('image_code_id')
# 2. 验证参数, 是否存在
if not all([image_code, uuid]):
return JsonResponse({'code': 400, 'errmsg': "参数不全"})
# 3. 图片验证码
redis_cli = get_redis_connection("image_code")
redis_image_code = redis_cli.get(uuid)
# 删除图片验证码
try:
redis_cli.delete(uuid)
except Exception as e:
print("删除图片验证码")
# 3.2判断是否过有效期
if redis_image_code is None:
return JsonResponse({'code': 400, 'errmsg': "图片验证码过期"})
# 3.3用户发过来的对比 redis_image_code是二进制 需要decode
if redis_image_code.decode().lower() != image_code.lower():
return JsonResponse({'code': 400, 'errmsg': "图片验证码输入错误"})
# 4. 生成短信验证码
# 0-999999
from random import randint
# "%06d" 让数字保存6位 如果不够 左侧补0
sms_code = "%06d" % randint(0, 999999)
print("sms_code", sms_code)
# 防止发送短信 频繁
send_flag = redis_cli.get("send_flag_%s" % mobile)
if send_flag:
return JsonResponse({'code': 400, 'errmsg': "短信发送过于频繁 "})
# 创建Redis 管道
pl = redis_cli.pipeline()
pl.setex("send_flag_%s" % mobile, 60, 1)
pl.setex("sms_%s" % mobile, 300, sms_code)
# 5. 保存短信验证码到redis
# redis_cli.setex(=
# 执行请求
pl.execute()
# 6. 发送短信
# from celery_tasks.sms.SendMessage import Sms
# Sms().send_message(mobile, (sms_code,2))
# 6. 发送短信 使用celery
print('------->>>')
celery_send_sms_code.delay(mobile, sms_code)
print('>>>>>异步')
# 7 返回响应
return JsonResponse({'code': 0, 'errmsg': "ok"})
```
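A sketch of the consuming side of this flow, i.e. the code that later checks a submitted SMS code against the `sms_<mobile>` key cached above. The helper name and return convention are assumptions; the Redis alias, key format and 300 s TTL come from `MsmCodeView`:
```python
from django_redis import get_redis_connection

def check_sms_code(mobile, submitted_code):
    """Hypothetical helper: compare a submitted code with the cached one."""
    redis_cli = get_redis_connection("image_code")  # same alias used when storing
    cached = redis_cli.get("sms_%s" % mobile)
    if cached is None:
        return False  # expired (300 s TTL) or never sent
    return cached.decode() == submitted_code
```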
#### File: utils/alibabacloud_sample/sample.py
```python
import sys
from typing import List
from Tea.core import TeaCore
from alibabacloud_dysmsapi20170525.client import Client as Dysmsapi20170525Client
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_dysmsapi20170525 import models as dysmsapi_20170525_models
from alibabacloud_tea_console.client import Client as ConsoleClient
from alibabacloud_tea_util.client import Client as UtilClient
class Sample:
def __new__(cls, *args, **kwargs):
# if cls._instance is None:
# cls._instance = super().__new__(cls, *args, **kwargs)
        if not hasattr(Sample, "_instance"):  # has the _instance attribute been created yet?
            cls._instance = super().__new__(cls, *args, **kwargs)
            # create the SmsSDK object once, so only a single instance ever exists
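            # NOTE: SmsSDK, accId, accToken and appId are not defined or imported in this
            # module; they would have to be provided elsewhere for this branch to run.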
cls._instance.sms_sdk = SmsSDK(accId, accToken, appId)
return cls._instance
def __init__(self):
pass
@staticmethod
def create_client(
access_key_id: str,
access_key_secret: str,
) -> Dysmsapi20170525Client:
"""
使用AK&SK初始化账号Client
@param access_key_id:
@param access_key_secret:
@return: Client
@throws Exception
"""
config = open_api_models.Config(
# 您的AccessKey ID,
access_key_id="",
# 您的AccessKey Secret,
access_key_secret=""
)
# 访问的域名
config.endpoint = 'dysmsapi.aliyuncs.com'
return Dysmsapi20170525Client(config)
@staticmethod
def main(
args: List[str],
) -> None:
client = Sample.create_client('ACCESS_KEY_ID', 'ACCESS_KEY_SECRET')
send_sms_request = dysmsapi_20170525_models.SendSmsRequest(
phone_numbers=args[0],
sign_name=args[1],
template_code=args[2]
)
resp = client.send_sms(send_sms_request)
ConsoleClient.log(UtilClient.to_jsonstring(TeaCore.to_map(resp)))
@staticmethod
async def main_async(
args: List[str],
) -> None:
client = Sample.create_client('ACCESS_KEY_ID', 'ACCESS_KEY_SECRET')
send_sms_request = dysmsapi_20170525_models.SendSmsRequest(
phone_numbers=args[0],
sign_name=args[1],
template_code=args[2]
)
resp = await client.send_sms_async(send_sms_request)
print(">>>>>>")
print(resp.body)
# ConsoleClient.log(UtilClient.to_jsonstring(TeaCore.to_map(resp)))
if __name__ == '__main__':
    # main_async is a coroutine, so it has to be driven by an event loop
    import asyncio
    asyncio.run(Sample.main_async(["17708764930", "meiduo个人", "DjangoTest01"]))
# Sample.main(sys.argv[1:])
```
#### File: meiduo_mall/utils/myconverters.py
```python
class UsernameConverter:
regex = '[a-zA-Z0-9_-]{5,20}'
def to_python(self, value):
print(value)
return value
def to_url(self, value):
print("to_url>>>")
return value
"""手机号 转换器"""
class PhoneConverter:
regex = '1[3-9]\d{9}'
def to_python(self, value):
print(value)
return value
def to_url(self, value):
return value
``` |
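These converter classes only take effect once they are registered with Django's URL layer. A hedged wiring sketch follows; `register_converter` and `path` are standard Django APIs, the import paths are assumptions, and the route mirrors the one described in `MsmCodeView`'s docstring:
```python
# urls.py (sketch)
from django.urls import path, register_converter
from utils import myconverters          # assumed import path for this module
from apps.verifications import views    # assumed app layout

register_converter(myconverters.UsernameConverter, "username")
register_converter(myconverters.PhoneConverter, "mobile")

urlpatterns = [
    path("sms_codes/<mobile:mobile>/", views.MsmCodeView.as_view()),
    # a "username"-typed route would be declared the same way
]
```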
{
"source": "Joinn99/UltraGCN",
"score": 2
} |
#### File: Joinn99/UltraGCN/main.py
```python
from IPython import embed
import torch
import torch.nn as nn
import torch.nn.functional as F
import pickle
import numpy as np
import torch.utils.data as data
import scipy.sparse as sp
import os
import gc
import configparser
import time
import argparse
from torch.utils.tensorboard import SummaryWriter
def data_param_prepare(config_file):
config = configparser.ConfigParser()
config.read(config_file)
params = {}
embedding_dim = config.getint('Model', 'embedding_dim')
params['embedding_dim'] = embedding_dim
ii_neighbor_num = config.getint('Model', 'ii_neighbor_num')
params['ii_neighbor_num'] = ii_neighbor_num
model_save_path = config['Model']['model_save_path']
params['model_save_path'] = model_save_path
max_epoch = config.getint('Model', 'max_epoch')
params['max_epoch'] = max_epoch
params['enable_tensorboard'] = config.getboolean('Model', 'enable_tensorboard')
initial_weight = config.getfloat('Model', 'initial_weight')
params['initial_weight'] = initial_weight
dataset = config['Training']['dataset']
params['dataset'] = dataset
train_file_path = config['Training']['train_file_path']
gpu = config['Training']['gpu']
params['gpu'] = gpu
device = torch.device('cuda:'+ params['gpu'] if torch.cuda.is_available() else "cpu")
params['device'] = device
lr = config.getfloat('Training', 'learning_rate')
params['lr'] = lr
batch_size = config.getint('Training', 'batch_size')
params['batch_size'] = batch_size
early_stop_epoch = config.getint('Training', 'early_stop_epoch')
params['early_stop_epoch'] = early_stop_epoch
w1 = config.getfloat('Training', 'w1')
w2 = config.getfloat('Training', 'w2')
w3 = config.getfloat('Training', 'w3')
w4 = config.getfloat('Training', 'w4')
params['w1'] = w1
params['w2'] = w2
params['w3'] = w3
params['w4'] = w4
negative_num = config.getint('Training', 'negative_num')
negative_weight = config.getfloat('Training', 'negative_weight')
params['negative_num'] = negative_num
params['negative_weight'] = negative_weight
gamma = config.getfloat('Training', 'gamma')
params['gamma'] = gamma
lambda_ = config.getfloat('Training', 'lambda')
params['lambda'] = lambda_
sampling_sift_pos = config.getboolean('Training', 'sampling_sift_pos')
params['sampling_sift_pos'] = sampling_sift_pos
test_batch_size = config.getint('Testing', 'test_batch_size')
params['test_batch_size'] = test_batch_size
topk = config.getint('Testing', 'topk')
params['topk'] = topk
test_file_path = config['Testing']['test_file_path']
# dataset processing
train_data, test_data, train_mat, user_num, item_num, constraint_mat = load_data(train_file_path, test_file_path)
train_loader = data.DataLoader(train_data, batch_size=batch_size, shuffle = True, num_workers=5)
test_loader = data.DataLoader(list(range(user_num)), batch_size=test_batch_size, shuffle=False, num_workers=5)
params['user_num'] = user_num
params['item_num'] = item_num
# mask matrix for testing to accelarate testing speed
mask = torch.zeros(user_num, item_num)
interacted_items = [[] for _ in range(user_num)]
for (u, i) in train_data:
mask[u][i] = -np.inf
interacted_items[u].append(i)
# test user-item interaction, which is ground truth
test_ground_truth_list = [[] for _ in range(user_num)]
for (u, i) in test_data:
test_ground_truth_list[u].append(i)
# Compute \Omega to extend UltraGCN to the item-item occurrence graph
ii_cons_mat_path = './' + dataset + '_ii_constraint_mat'
ii_neigh_mat_path = './' + dataset + '_ii_neighbor_mat'
if os.path.exists(ii_cons_mat_path):
ii_constraint_mat = pload(ii_cons_mat_path)
ii_neighbor_mat = pload(ii_neigh_mat_path)
else:
ii_neighbor_mat, ii_constraint_mat = get_ii_constraint_mat(train_mat, ii_neighbor_num)
pstore(ii_neighbor_mat, ii_neigh_mat_path)
pstore(ii_constraint_mat, ii_cons_mat_path)
return params, constraint_mat, ii_constraint_mat, ii_neighbor_mat, train_loader, test_loader, mask, test_ground_truth_list, interacted_items
def get_ii_constraint_mat(train_mat, num_neighbors, ii_diagonal_zero = False):
print('Computing \\Omega for the item-item graph... ')
A = train_mat.T.dot(train_mat) # I * I
n_items = A.shape[0]
res_mat = torch.zeros((n_items, num_neighbors))
res_sim_mat = torch.zeros((n_items, num_neighbors))
if ii_diagonal_zero:
A[range(n_items), range(n_items)] = 0
items_D = np.sum(A, axis = 0).reshape(-1)
users_D = np.sum(A, axis = 1).reshape(-1)
beta_uD = (np.sqrt(users_D + 1) / users_D).reshape(-1, 1)
beta_iD = (1 / np.sqrt(items_D + 1)).reshape(1, -1)
all_ii_constraint_mat = torch.from_numpy(beta_uD.dot(beta_iD))
for i in range(n_items):
row = all_ii_constraint_mat[i] * torch.from_numpy(A.getrow(i).toarray()[0])
row_sims, row_idxs = torch.topk(row, num_neighbors)
res_mat[i] = row_idxs
res_sim_mat[i] = row_sims
if i % 15000 == 0:
print('i-i constraint matrix {} ok'.format(i))
print('Computation \\Omega OK!')
return res_mat.long(), res_sim_mat.float()
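# Informal summary of what the two matrices returned above encode: with A = R^T R the
# item co-occurrence matrix and d_i the item degrees of A, each kept entry is
#     omega_ij = (sqrt(d_i + 1) / d_i) * (1 / sqrt(d_j + 1)) * A_ij
# res_mat[i] holds the indices of item i's `num_neighbors` largest-omega neighbours and
# res_sim_mat[i] the corresponding omega values; cal_loss_I in the model below uses them
# as the per-neighbour coefficients of the item-item loss term.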
def load_data(train_file, test_file):
trainUniqueUsers, trainItem, trainUser = [], [], []
testUniqueUsers, testItem, testUser = [], [], []
n_user, m_item = 0, 0
trainDataSize, testDataSize = 0, 0
with open(train_file, 'r') as f:
for l in f.readlines():
if len(l) > 0:
l = l.strip('\n').split(' ')
items = [int(i) for i in l[1:]]
uid = int(l[0])
trainUniqueUsers.append(uid)
trainUser.extend([uid] * len(items))
trainItem.extend(items)
m_item = max(m_item, max(items))
n_user = max(n_user, uid)
trainDataSize += len(items)
trainUniqueUsers = np.array(trainUniqueUsers)
trainUser = np.array(trainUser)
trainItem = np.array(trainItem)
with open(test_file) as f:
for l in f.readlines():
if len(l) > 0:
l = l.strip('\n').split(' ')
try:
items = [int(i) for i in l[1:]]
except:
items = []
uid = int(l[0])
testUniqueUsers.append(uid)
testUser.extend([uid] * len(items))
testItem.extend(items)
try:
m_item = max(m_item, max(items))
except:
m_item = m_item
n_user = max(n_user, uid)
testDataSize += len(items)
train_data = []
test_data = []
n_user += 1
m_item += 1
for i in range(len(trainUser)):
train_data.append([trainUser[i], trainItem[i]])
for i in range(len(testUser)):
test_data.append([testUser[i], testItem[i]])
train_mat = sp.dok_matrix((n_user, m_item), dtype=np.float32)
for x in train_data:
train_mat[x[0], x[1]] = 1.0
# construct degree matrix for graphmf
items_D = np.sum(train_mat, axis = 0).reshape(-1)
users_D = np.sum(train_mat, axis = 1).reshape(-1)
beta_uD = (np.sqrt(users_D + 1) / users_D).reshape(-1, 1)
beta_iD = (1 / np.sqrt(items_D + 1)).reshape(1, -1)
constraint_mat = {"beta_uD": torch.from_numpy(beta_uD).reshape(-1),
"beta_iD": torch.from_numpy(beta_iD).reshape(-1)}
return train_data, test_data, train_mat, n_user, m_item, constraint_mat
'''
Useful functions
'''
def pload(path):
with open(path, 'rb') as f:
res = pickle.load(f)
print('load path = {} object'.format(path))
return res
def pstore(x, path):
with open(path, 'wb') as f:
pickle.dump(x, f)
print('store object in path = {} ok'.format(path))
def Sampling(pos_train_data, item_num, neg_ratio, interacted_items, sampling_sift_pos):
neg_candidates = np.arange(item_num)
if sampling_sift_pos:
neg_items = []
for u in pos_train_data[0]:
probs = np.ones(item_num)
probs[interacted_items[u]] = 0
probs /= np.sum(probs)
u_neg_items = np.random.choice(neg_candidates, size = neg_ratio, p = probs, replace = True).reshape(1, -1)
neg_items.append(u_neg_items)
neg_items = np.concatenate(neg_items, axis = 0)
else:
neg_items = np.random.choice(neg_candidates, (len(pos_train_data[0]), neg_ratio), replace = True)
neg_items = torch.from_numpy(neg_items)
return pos_train_data[0], pos_train_data[1], neg_items # users, pos_items, neg_items
'''
Model Definition
'''
class UltraGCN(nn.Module):
def __init__(self, params, constraint_mat, ii_constraint_mat, ii_neighbor_mat):
super(UltraGCN, self).__init__()
self.user_num = params['user_num']
self.item_num = params['item_num']
self.embedding_dim = params['embedding_dim']
self.w1 = params['w1']
self.w2 = params['w2']
self.w3 = params['w3']
self.w4 = params['w4']
self.negative_weight = params['negative_weight']
self.gamma = params['gamma']
self.lambda_ = params['lambda']
self.user_embeds = nn.Embedding(self.user_num, self.embedding_dim)
self.item_embeds = nn.Embedding(self.item_num, self.embedding_dim)
self.constraint_mat = constraint_mat
self.ii_constraint_mat = ii_constraint_mat
self.ii_neighbor_mat = ii_neighbor_mat
self.initial_weight = params['initial_weight']
self.initial_weights()
def initial_weights(self):
nn.init.normal_(self.user_embeds.weight, std=self.initial_weight)
nn.init.normal_(self.item_embeds.weight, std=self.initial_weight)
def get_omegas(self, users, pos_items, neg_items):
device = self.get_device()
if self.w2 > 0:
pos_weight = torch.mul(self.constraint_mat['beta_uD'][users], self.constraint_mat['beta_iD'][pos_items]).to(device)
            pow_weight = self.w1 + self.w2 * pos_weight
        else:
            # constant positive weight when the degree-based term is disabled
            pow_weight = self.w1 * torch.ones(len(pos_items)).to(device)
# users = (users * self.item_num).unsqueeze(0)
if self.w4 > 0:
neg_weight = torch.mul(torch.repeat_interleave(self.constraint_mat['beta_uD'][users], neg_items.size(1)), self.constraint_mat['beta_iD'][neg_items.flatten()]).to(device)
neg_weight = self.w3 + self.w4 * neg_weight
else:
neg_weight = self.w3 * torch.ones(neg_items.size(0) * neg_items.size(1)).to(device)
weight = torch.cat((pow_weight, neg_weight))
return weight
def cal_loss_L(self, users, pos_items, neg_items, omega_weight):
device = self.get_device()
user_embeds = self.user_embeds(users)
pos_embeds = self.item_embeds(pos_items)
neg_embeds = self.item_embeds(neg_items)
pos_scores = (user_embeds * pos_embeds).sum(dim=-1) # batch_size
user_embeds = user_embeds.unsqueeze(1)
neg_scores = (user_embeds * neg_embeds).sum(dim=-1) # batch_size * negative_num
neg_labels = torch.zeros(neg_scores.size()).to(device)
neg_loss = F.binary_cross_entropy_with_logits(neg_scores, neg_labels, weight = omega_weight[len(pos_scores):].view(neg_scores.size()), reduction='none').mean(dim = -1)
pos_labels = torch.ones(pos_scores.size()).to(device)
pos_loss = F.binary_cross_entropy_with_logits(pos_scores, pos_labels, weight = omega_weight[:len(pos_scores)], reduction='none')
loss = pos_loss + neg_loss * self.negative_weight
return loss.sum()
def cal_loss_I(self, users, pos_items):
device = self.get_device()
neighbor_embeds = self.item_embeds(self.ii_neighbor_mat[pos_items].to(device)) # len(pos_items) * num_neighbors * dim
sim_scores = self.ii_constraint_mat[pos_items].to(device) # len(pos_items) * num_neighbors
user_embeds = self.user_embeds(users).unsqueeze(1)
loss = -sim_scores * (user_embeds * neighbor_embeds).sum(dim=-1).sigmoid().log()
# loss = loss.sum(-1)
return loss.sum()
def norm_loss(self):
loss = 0.0
for parameter in self.parameters():
loss += torch.sum(parameter ** 2)
return loss / 2
def forward(self, users, pos_items, neg_items):
omega_weight = self.get_omegas(users, pos_items, neg_items)
loss = self.cal_loss_L(users, pos_items, neg_items, omega_weight)
loss += self.gamma * self.norm_loss()
loss += self.lambda_ * self.cal_loss_I(users, pos_items)
return loss
def test_foward(self, users):
items = torch.arange(self.item_num).to(users.device)
user_embeds = self.user_embeds(users)
item_embeds = self.item_embeds(items)
return user_embeds.mm(item_embeds.t())
def get_device(self):
return self.user_embeds.weight.device
'''
Train
'''
########################### TRAINING #####################################
def train(model, optimizer, train_loader, test_loader, mask, test_ground_truth_list, interacted_items, params):
device = params['device']
best_epoch, best_recall, best_ndcg = 0, 0, 0
early_stop_count = 0
early_stop = False
batches = len(train_loader.dataset) // params['batch_size']
if len(train_loader.dataset) % params['batch_size'] != 0:
batches += 1
print('Total training batches = {}'.format(batches))
if params['enable_tensorboard']:
writer = SummaryWriter()
for epoch in range(params['max_epoch']):
model.train()
start_time = time.time()
for batch, x in enumerate(train_loader): # x: tensor:[users, pos_items]
users, pos_items, neg_items = Sampling(x, params['item_num'], params['negative_num'], interacted_items, params['sampling_sift_pos'])
users = users.to(device)
pos_items = pos_items.to(device)
neg_items = neg_items.to(device)
model.zero_grad()
loss = model(users, pos_items, neg_items)
if params['enable_tensorboard']:
writer.add_scalar("Loss/train_batch", loss, batches * epoch + batch)
loss.backward()
optimizer.step()
train_time = time.strftime("%H: %M: %S", time.gmtime(time.time() - start_time))
if params['enable_tensorboard']:
writer.add_scalar("Loss/train_epoch", loss, epoch)
need_test = True
if epoch < 50 and epoch % 5 != 0:
need_test = False
if need_test:
start_time = time.time()
F1_score, Precision, Recall, NDCG = test(model, test_loader, test_ground_truth_list, mask, params['topk'], params['user_num'])
if params['enable_tensorboard']:
writer.add_scalar('Results/recall@20', Recall, epoch)
writer.add_scalar('Results/ndcg@20', NDCG, epoch)
test_time = time.strftime("%H: %M: %S", time.gmtime(time.time() - start_time))
print('The time for epoch {} is: train time = {}, test time = {}'.format(epoch, train_time, test_time))
print("Loss = {:.5f}, F1-score: {:5f} \t Precision: {:.5f}\t Recall: {:.5f}\tNDCG: {:.5f}".format(loss.item(), F1_score, Precision, Recall, NDCG))
if Recall > best_recall:
best_recall, best_ndcg, best_epoch = Recall, NDCG, epoch
early_stop_count = 0
torch.save(model.state_dict(), params['model_save_path'])
else:
early_stop_count += 1
if early_stop_count == params['early_stop_epoch']:
early_stop = True
if early_stop:
print('##########################################')
print('Early stop is triggered at {} epochs.'.format(epoch))
print('Results:')
print('best epoch = {}, best recall = {}, best ndcg = {}'.format(best_epoch, best_recall, best_ndcg))
print('The best model is saved at {}'.format(params['model_save_path']))
break
    if params['enable_tensorboard']:
        writer.flush()
print('Training end!')
# The below 7 functions (hit, ndcg, RecallPrecision_ATk, MRRatK_r, NDCGatK_r, test_one_batch, getLabel) follow this license.
# MIT License
# Copyright (c) 2020 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
########################### TESTING #####################################
'''
Test and metrics
'''
def hit(gt_item, pred_items):
if gt_item in pred_items:
return 1
return 0
def ndcg(gt_item, pred_items):
if gt_item in pred_items:
index = pred_items.index(gt_item)
return np.reciprocal(np.log2(index+2))
return 0
def RecallPrecision_ATk(test_data, r, k):
"""
    test_data : list of per-user ground-truth item lists (users may have different numbers of positive items)
    r : hit matrix of shape (test_batch, k); NOTE: rows should be pre-sorted by predicted score
k : top-k
"""
right_pred = r[:, :k].sum(1)
precis_n = k
recall_n = np.array([len(test_data[i]) for i in range(len(test_data))])
recall_n = np.where(recall_n != 0, recall_n, 1)
recall = np.sum(right_pred / recall_n)
precis = np.sum(right_pred) / precis_n
return {'recall': recall, 'precision': precis}
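# (Added worked example, illustrative only) For a single user with 3 ground-truth
# items and a top-3 hit vector r = [[1, 0, 1]]:
#   right_pred   = 2
#   recall@3     = 2 / 3      (2 of the 3 relevant items were retrieved)
#   precision@3  = 2 / 3      (2 of the 3 recommended items are relevant)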
def MRRatK_r(r, k):
"""
Mean Reciprocal Rank
"""
pred_data = r[:, :k]
scores = np.log2(1. / np.arange(1, k + 1))
pred_data = pred_data / scores
pred_data = pred_data.sum(1)
return np.sum(pred_data)
def NDCGatK_r(test_data, r, k):
"""
Normalized Discounted Cumulative Gain
rel_i = 1 or 0, so 2^{rel_i} - 1 = 1 or 0
"""
assert len(r) == len(test_data)
pred_data = r[:, :k]
test_matrix = np.zeros((len(pred_data), k))
for i, items in enumerate(test_data):
length = k if k <= len(items) else len(items)
test_matrix[i, :length] = 1
max_r = test_matrix
idcg = np.sum(max_r * 1. / np.log2(np.arange(2, k + 2)), axis=1)
dcg = pred_data * (1. / np.log2(np.arange(2, k + 2)))
dcg = np.sum(dcg, axis=1)
idcg[idcg == 0.] = 1.
ndcg = dcg / idcg
ndcg[np.isnan(ndcg)] = 0.
return np.sum(ndcg)
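# (Added worked example, illustrative only) With k = 3, one user holding 2
# ground-truth items and hit vector r = [[1, 0, 1]]:
#   DCG  = 1/log2(2) + 0/log2(3) + 1/log2(4) = 1 + 0 + 0.5 = 1.5
#   IDCG = 1/log2(2) + 1/log2(3)             ≈ 1 + 0.631   ≈ 1.631
#   NDCG ≈ 1.5 / 1.631 ≈ 0.92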
def test_one_batch(X, k):
sorted_items = X[0].numpy()
groundTrue = X[1]
r = getLabel(groundTrue, sorted_items)
ret = RecallPrecision_ATk(groundTrue, r, k)
return ret['precision'], ret['recall'], NDCGatK_r(groundTrue,r,k)
def getLabel(test_data, pred_data):
r = []
for i in range(len(test_data)):
groundTrue = test_data[i]
predictTopK = pred_data[i]
pred = list(map(lambda x: x in groundTrue, predictTopK))
pred = np.array(pred).astype("float")
r.append(pred)
return np.array(r).astype('float')
def test(model, test_loader, test_ground_truth_list, mask, topk, n_user):
users_list = []
rating_list = []
groundTrue_list = []
with torch.no_grad():
model.eval()
for idx, batch_users in enumerate(test_loader):
batch_users = batch_users.to(model.get_device())
rating = model.test_foward(batch_users)
rating = rating.cpu()
rating += mask[batch_users]
_, rating_K = torch.topk(rating, k=topk)
rating_list.append(rating_K)
groundTrue_list.append([test_ground_truth_list[u] for u in batch_users])
X = zip(rating_list, groundTrue_list)
Recall, Precision, NDCG = 0, 0, 0
for i, x in enumerate(X):
precision, recall, ndcg = test_one_batch(x, topk)
Recall += recall
Precision += precision
NDCG += ndcg
Precision /= n_user
Recall /= n_user
NDCG /= n_user
F1_score = 2 * (Precision * Recall) / (Precision + Recall)
return F1_score, Precision, Recall, NDCG
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--config_file', type=str, help='config file path')
args = parser.parse_args()
print('###################### UltraGCN ######################')
print('1. Loading Configuration...')
params, constraint_mat, ii_constraint_mat, ii_neighbor_mat, train_loader, test_loader, mask, test_ground_truth_list, interacted_items = data_param_prepare(args.config_file)
print('Load Configuration OK, show them below')
print('Configuration:')
print(params)
ultragcn = UltraGCN(params, constraint_mat, ii_constraint_mat, ii_neighbor_mat)
ultragcn = ultragcn.to(params['device'])
optimizer = torch.optim.Adam(ultragcn.parameters(), lr=params['lr'])
train(ultragcn, optimizer, train_loader, test_loader, mask, test_ground_truth_list, interacted_items, params)
print('END')
``` |
{
"source": "joinnector/joinnector-merchant-python-server-example",
"score": 3
} |
#### File: joinnector/joinnector-merchant-python-server-example/app.py
```python
import os
from flask import Flask, request, jsonify, make_response
from flask_cors import CORS
from joinnector import SDK
# through the custom helperclient
from src.client.nector_client import NectorClient
client_sdk = NectorClient(os.environ.get("API_KEY"), os.environ.get(
"API_SECRET"), os.environ.get("API_MODE"))
# through the sdk helper client
sdk = SDK(os.environ.get("API_KEY"), os.environ.get(
"API_SECRET"), os.environ.get("API_MODE"))
delegate_client = SDK.get_delegate_client()
'''
For security purposes, these methods cannot be triggered by client calls.
To whitelist a call made directly from the client side, remove the method name from the array.
It is recommended to call the "not_allowed_controller_method_names" only from other backend functions (ideally while performing business operations), since they consume quota on nector.
not_allowed_controller_method_names = [
"reward_deals", "create_leads", "save_leads", "get_subscriptions",
"create_taskactivities", "create_wallets", "create_wallettransactions"
];
By default, the methods in not_allowed_controller_method_names are blocked from being called directly from the frontend app or website for security reasons; passing an empty list whitelists all of them.
'''
delegatesdk = delegate_client(sdk, [])
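# (Added note, illustrative only) To keep e.g. "create_wallets" and
# "create_wallettransactions" callable from the backend only while whitelisting
# everything else, pass them explicitly as the blocklist:
#   delegatesdk = delegate_client(sdk, ["create_wallets", "create_wallettransactions"])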
app = Flask(__name__)
CORS(app)
def make_json_response(json_data, status=200):
response = make_response(
jsonify(json_data),
status
)
response.headers["Content-Type"] = "application/json"
return response
@app.route('/', methods=['GET'])
def health():
return make_json_response({"message": "Server is running"})
@app.route('/nector-delegate', methods=['POST'])
def delegate():
try:
response = client_sdk.delegate_method()
if response.json() is not None:
return make_json_response(response.json(), response.status_code)
except Exception as ex:
print(ex)
return make_json_response({"message": "Something went wrong, please try after sometime"}, 422)
@app.route('/nector-direct-delegate', methods=['POST'])
def direct_delegate():
try:
response = delegatesdk.delegate_method(request.get_json())
if response.json() is not None:
return make_json_response(response.json(), response.status_code)
except Exception as ex:
print(ex)
return make_json_response({"message": "Something went wrong, please try after sometime"}, 422)
``` |
{
"source": "joinnector/rewardpythonsdk",
"score": 2
} |
#### File: joinnector/client/security_client.py
```python
import hmac
import hashlib
class SecurityClient(object):
def process_hmac_signature(self, value, password):
return hmac.new(password.encode('utf8'), value.encode('utf8'), hashlib.sha256).hexdigest()
```
#### File: joinnector/helper/constant_helper.py
```python
from joinnector.helper.sub_contant_helper import setting_constant_helper
class ConstantHelper(object):
@staticmethod
def get_setting_constant():
return setting_constant_helper
```
#### File: rewardpythonsdk/joinnector/__init__.py
```python
from joinnector.client.delegate_client import DelegateClient
from joinnector.wrapper.security_wrapper import sucurity_wrapper
from joinnector.wrapper.logging_wrapper import logging_wrapper
from joinnector.wrapper.request_wrapper import request_wrapper
# service
from joinnector.service.coupon_service import coupon_service
from joinnector.service.currency_service import currency_service
from joinnector.service.deal_service import deal_service
from joinnector.service.offer_service import offer_service
from joinnector.service.store_service import store_service
from joinnector.service.lead_service import lead_service
from joinnector.service.notification_service import notification_service
from joinnector.service.review_service import review_service
from joinnector.service.setting_service import setting_service
from joinnector.service.swap_service import swap_service
from joinnector.service.task_service import task_service
from joinnector.service.taskactivity_service import taskactivity_service
from joinnector.service.surprise_service import surprise_service
from joinnector.service.surpriseactivity_service import surpriseactivity_service
from joinnector.service.wallet_service import wallet_service
from joinnector.service.wallettransaction_service import wallettransaction_service
class SDK(object):
def __init__(self, key, secret, mode="prod"):
self.init_wrappers(key=key, secret=secret, mode=mode)
def init_wrappers(self, key, secret, mode):
sucurity_wrapper.init()
logging_wrapper.init()
request_wrapper.init(key=key, secret=secret, mode=mode)
@staticmethod
def get_delegate_client():
return DelegateClient
def get_coupon_service(self):
return coupon_service
def get_currency_service(self):
return currency_service
def get_deal_service(self):
return deal_service
def get_offer_service(self):
return offer_service
def get_store_service(self):
return store_service
def get_lead_service(self):
return lead_service
def get_notification_service(self):
return notification_service
def get_review_service(self):
return review_service
def get_setting_service(self):
return setting_service
def get_swap_service(self):
return swap_service
def get_task_service(self):
return task_service
def get_taskactivity_service(self):
return taskactivity_service
def get_surprise_service(self):
return surprise_service
def get_surpriseactivity_service(self):
return surpriseactivity_service
def get_wallet_service(self):
return wallet_service
def get_wallettransaction_service(self):
return wallettransaction_service
```
#### File: joinnector/service/lead_service.py
```python
from joinnector.service.base_sdk_service import BaseSDKService
class LeadService(BaseSDKService):
def __init__(self, name):
super().__init__(name)
def get_by_customer_id(self, customer_id, swap_id=None):
return super().get_by("customer_id", customer_id, swap_id)
def get_by_email(self, email, swap_id=None):
return super().get_by("email", email, swap_id)
def get_by_mobile(self, mobile, swap_id=None):
return super().get_by("mobile", mobile, swap_id)
lead_service = LeadService("lead")
```
#### File: joinnector/service/setting_service.py
```python
from joinnector.service.base_sdk_service import BaseSDKService
class SettingService(BaseSDKService):
def __init__(self, name):
super().__init__(name)
setting_service = SettingService("setting")
```
#### File: joinnector/wrapper/security_wrapper.py
```python
from joinnector.client.security_client import SecurityClient
class SecurityWrapper(object):
def init(self):
self.process_common_wrapper()
def process_common_wrapper(self):
self.security_client = SecurityClient()
def get_wrapper(self):
return self.security_client
sucurity_wrapper = SecurityWrapper()
``` |
{
"source": "joinourtalents/django-popup-forms",
"score": 2
} |
#### File: django-popup-forms/popup_forms/context_processors.py
```python
from django.utils.importlib import import_module
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
def popup_forms(request):
"""Puts all popup forms to 'popup_forms' context variable."""
popup_forms = {}
for popup_form in settings.POPUP_FORMS:
module_name, sep, form_name = popup_form.rpartition('.')
try:
mod = import_module(module_name)
        except ImportError as e:
raise ImproperlyConfigured('Error importing popup form '
'from module {0}: "{1}"'.format(module_name, e))
try:
popup_forms[form_name] = getattr(mod, form_name)
except AttributeError:
raise ImproperlyConfigured('Module "{0}" does not define'
' a "{1}" form class'.format(module_name, form_name))
return {'popup_forms': popup_forms}
``` |
{
"source": "joinquanter/jqfactor_analyzer",
"score": 3
} |
#### File: jqfactor_analyzer/jqfactor_analyzer/prepare.py
```python
from __future__ import division
import pandas as pd
import numpy as np
from .exceptions import MaxLossExceededError, non_unique_bin_edges_error
from .utils import get_forward_returns_columns
@non_unique_bin_edges_error
def quantize_factor(
factor_data, quantiles=5, bins=None, by_group=False, no_raise=False, zero_aware=False,
):
"""
    Compute the factor quantile for each period
    Parameters
    ----------
    factor_data : pd.DataFrame - MultiIndex
        A DataFrame indexed by a MultiIndex of date (level 0) and asset (level 1),
        whose values include the factor value, the forward returns for each period,
        the factor quantile, the factor group (optional) and the factor weights (optional)
    quantiles : int or sequence[float]
        Number of equal-count buckets (by factor value) to split the factor into,
        or a sequence of quantiles allowing non-uniform buckets,
        e.g. [0, .10, .5, .90, 1.] or [.05, .5, .95]
        Exactly one of 'quantiles' and 'bins' must be not None
    bins : int or sequence[float]
        Number of equal-width (by factor value) intervals to use when grouping the factor,
        or a sequence of bin edges allowing non-uniform interval widths,
        e.g. [-4, -2, -0.5, 0, 10]
        Exactly one of 'quantiles' and 'bins' must be not None
    by_group : bool
        If True, compute quantiles separately for each group
    no_raise: bool, optional
        If True, no exceptions are raised and values that would have raised are set to np.NaN
    zero_aware : bool, optional
        If True, compute quantiles separately for positive and negative factor values.
        Useful when your signal is clustered and zero separates positive and negative values.
    Returns
    -------
    factor_quantile : pd.Series
        Factor quantiles indexed by date (level 0) and asset (level 1)
"""
if not ((quantiles is not None and bins is None) or
(quantiles is None and bins is not None)):
        raise ValueError('Exactly one of quantiles or bins must be provided')
if zero_aware and not (isinstance(quantiles, int)
or isinstance(bins, int)):
        msg = ("'zero_aware' can only be True when quantiles or bins is an int")
raise ValueError(msg)
def quantile_calc(x, _quantiles, _bins, _zero_aware, _no_raise):
try:
if _quantiles is not None and _bins is None and not _zero_aware:
return pd.qcut(x, _quantiles, labels=False) + 1
elif _quantiles is not None and _bins is None and _zero_aware:
pos_quantiles = pd.qcut(x[x >= 0], _quantiles // 2,
labels=False) + _quantiles // 2 + 1
neg_quantiles = pd.qcut(x[x < 0], _quantiles // 2,
labels=False) + 1
return pd.concat([pos_quantiles, neg_quantiles]).sort_index()
elif _bins is not None and _quantiles is None and not _zero_aware:
return pd.cut(x, _bins, labels=False) + 1
elif _bins is not None and _quantiles is None and _zero_aware:
pos_bins = pd.cut(x[x >= 0], _bins // 2,
labels=False) + _bins // 2 + 1
neg_bins = pd.cut(x[x < 0], _bins // 2,
labels=False) + 1
return pd.concat([pos_bins, neg_bins]).sort_index()
except Exception as e:
if _no_raise:
return pd.Series(index=x.index)
raise e
grouper = [factor_data.index.get_level_values('date')]
if by_group:
if 'group' not in factor_data.columns:
            raise ValueError('binning_by_group can only be True when the groupby parameter is provided')
grouper.append('group')
factor_quantile = factor_data.groupby(grouper)['factor'] \
.apply(quantile_calc, quantiles, bins, zero_aware, no_raise)
factor_quantile.name = 'factor_quantile'
return factor_quantile.dropna()
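# --- Added illustrative sketch (not part of the original module) -------------
# quantiles=N buckets each day's factor values into N equal-count groups via
# pd.qcut, while bins=N cuts the value *range* into N equal-width intervals via
# pd.cut. The helper below is never called by the library itself.
def _quantile_vs_bins_sketch():
    s = pd.Series([1., 2., 3., 4., 100.])
    by_count = pd.qcut(s, 5, labels=False) + 1   # -> 1, 2, 3, 4, 5
    by_width = pd.cut(s, 5, labels=False) + 1    # -> 1, 1, 1, 1, 5
    return by_count, by_width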
def compute_forward_returns(factor,
prices,
periods=(1, 5, 10)):
"""
    Compute the N-period forward returns corresponding to each factor value
    Parameters
    ----------
    factor : pd.Series - MultiIndex
        A Series indexed by a MultiIndex of date (level 0) and asset (level 1),
        whose values are the factor values
    prices : pd.DataFrame
        Price data used to compute the factor forward returns.
        Columns are assets, index is the date.
        The price data must cover the factor analysis period plus the maximum
        expected number of periods needed for the forward return calculation.
    periods : sequence[int]
        The forward return periods
    Returns
    -------
    forward_returns : pd.DataFrame - MultiIndex
        Factor forward returns,
        indexed by a MultiIndex of date (level 0) and asset (level 1),
        with one column per forward return period
"""
factor_dateindex = factor.index.levels[0]
factor_dateindex = factor_dateindex.intersection(prices.index)
if len(factor_dateindex) == 0:
raise ValueError("Factor and prices indices don't match: make sure "
"they have the same convention in terms of datetimes "
"and symbol-names")
prices = prices.filter(items=factor.index.levels[1])
forward_returns = pd.DataFrame(
index=pd.MultiIndex
.from_product([prices.index, prices.columns], names=['date', 'asset'])
)
for period in periods:
delta = prices.pct_change(period).shift(-period).reindex(factor_dateindex)
forward_returns['period_{p}'.format(p=period)] = delta.stack()
forward_returns.index = forward_returns.index.rename(['date', 'asset'])
return forward_returns
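# (Added note) prices.pct_change(period).shift(-period) aligns each date t with
# the return realised over the *next* `period` bars, i.e.
#   forward_return_t = price_{t+period} / price_t - 1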
def demean_forward_returns(factor_data, grouper=None):
"""
    Demean the factor forward returns within the relevant groups.
    Group-wise demeaning encodes the assumption of a group-neutral portfolio
    constraint and therefore allows the factor to be evaluated across groups.
    Parameters
    ----------
    factor_data : pd.DataFrame - MultiIndex
        Factor forward returns,
        indexed by a MultiIndex of date (level 0) and asset (level 1),
        with one column per forward return period
    grouper : list
        If None, demean by date only;
        otherwise demean within the groups provided in the list
    Returns
    -------
    adjusted_forward_returns : pd.DataFrame - MultiIndex
        A DataFrame with the same shape as factor_data, but with every return
        demeaned within its group
"""
factor_data = factor_data.copy()
if not grouper:
grouper = factor_data.index.get_level_values('date')
cols = get_forward_returns_columns(factor_data.columns)
factor_data[cols] = factor_data.groupby(
grouper, as_index=False
)[cols.append(pd.Index(['weights']))].apply(
lambda x: x[cols].subtract(
np.average(x[cols], axis=0, weights=x['weights'].fillna(0.0).values),
axis=1
)
)
return factor_data
def get_clean_factor(factor,
forward_returns,
groupby=None,
weights=None,
binning_by_group=False,
quantiles=5,
bins=None,
max_loss=0.35,
zero_aware=False):
"""
    Format the factor values, factor forward returns, factor group data and
    factor weight data into a DataFrame indexed by a MultiIndex of date and asset.
    Parameters
    ----------
    factor : pd.Series - MultiIndex
        A Series indexed by a MultiIndex of date (level 0) and asset (level 1),
        whose values are the factor values
    forward_returns : pd.DataFrame - MultiIndex
        A DataFrame indexed by a MultiIndex of date (level 0) and asset (level 1),
        whose values are the factor forward returns and whose columns are the forward return periods.
    groupby : pd.Series - MultiIndex or dict
        A Series indexed by date and asset giving each asset's group on each day,
        or a dict mapping asset to group.
        If a dict is passed, the group mapping is assumed to be constant over the whole period.
    weights : pd.Series - MultiIndex or dict
        A Series indexed by date and asset giving each asset's weight on each day,
        or a dict mapping asset to weight.
        If a dict is passed, the weight mapping is assumed to be constant over the whole period.
    binning_by_group : bool
        If True, compute quantiles separately for each group.
        Useful when the factor value range varies considerably across groups.
        If you want to analyse a group (industry) neutral portfolio, you should set this to True
    quantiles : int or sequence[float]
        Number of equal-count buckets (by factor value) to split the factor into,
        or a sequence of quantiles allowing non-uniform buckets,
        e.g. [0, .10, .5, .90, 1.] or [.05, .5, .95]
        Exactly one of 'quantiles' and 'bins' must be not None
    bins : int or sequence[float]
        Number of equal-width (by factor value) intervals to use when grouping the factor,
        or a sequence of bin edges allowing non-uniform interval widths,
        e.g. [-4, -2, -0.5, 0, 10]
        Exactly one of 'quantiles' and 'bins' must be not None
    max_loss : float, optional
        Maximum percentage (0.00 to 1.00) of factor data that may be dropped,
        computed by comparing the number of items in the input factor index with
        the number of items in the output DataFrame index.
        Factor data can be partially dropped because the factor data itself is
        flawed (e.g. NaN), because not enough price data is available to compute
        forward returns for every factor value, or because grouping failed.
        Set max_loss = 0 to disable this exception handling.
    zero_aware : bool, optional
        If True, compute quantiles separately for positive and negative factor values.
        Useful when your signal is clustered and zero separates positive and negative values.
    Returns
    -------
    merged_data : pd.DataFrame - MultiIndex
        A DataFrame indexed by a MultiIndex of date (level 0) and asset (level 1),
        whose values include the factor value, the forward returns for each period,
        the factor quantile, the factor group (optional) and the factor weights (optional)
        - the forward return columns are named following the 'period_1', 'period_5' pattern
"""
initial_amount = float(len(factor.index))
factor_copy = factor.copy()
factor_copy.index = factor_copy.index.rename(['date', 'asset'])
merged_data = forward_returns.copy()
merged_data['factor'] = factor_copy
if groupby is not None:
if isinstance(groupby, dict):
diff = set(factor_copy.index.get_level_values(
'asset')) - set(groupby.keys())
if len(diff) > 0:
raise KeyError(
"Assets {} not in group mapping".format(
list(diff)))
ss = pd.Series(groupby)
groupby = pd.Series(index=factor_copy.index,
data=ss[factor_copy.index.get_level_values(
'asset')].values)
elif isinstance(groupby, pd.DataFrame):
groupby = groupby.stack()
merged_data['group'] = groupby
if weights is not None:
if isinstance(weights, dict):
diff = set(factor_copy.index.get_level_values(
'asset')) - set(weights.keys())
if len(diff) > 0:
raise KeyError(
"Assets {} not in weights mapping".format(
list(diff)))
ww = pd.Series(weights)
weights = pd.Series(index=factor_copy.index,
data=ww[factor_copy.index.get_level_values(
'asset')].values)
elif isinstance(weights, pd.DataFrame):
weights = weights.stack()
merged_data['weights'] = weights
merged_data = merged_data.dropna()
quantile_data = quantize_factor(
merged_data,
quantiles,
bins,
binning_by_group,
True,
zero_aware
)
merged_data['factor_quantile'] = quantile_data
merged_data = merged_data.dropna()
merged_data['factor_quantile'] = merged_data['factor_quantile'].astype(int)
if 'weights' in merged_data.columns:
merged_data['weights'] = merged_data.set_index(
'factor_quantile', append=True
).groupby(level=['date', 'factor_quantile'])['weights'].apply(
lambda s: s.divide(s.sum())
).reset_index('factor_quantile', drop=True)
binning_amount = float(len(merged_data.index))
tot_loss = (initial_amount - binning_amount) / initial_amount
no_raise = True if max_loss == 0 else False
if tot_loss > max_loss and not no_raise:
message = ("max_loss (%.1f%%) 超过 %.1f%%"
% (tot_loss * 100, max_loss * 100))
raise MaxLossExceededError(message)
return merged_data
def get_clean_factor_and_forward_returns(factor,
prices,
groupby=None,
weights=None,
binning_by_group=False,
quantiles=5,
bins=None,
periods=(1, 5, 10),
max_loss=0.35,
zero_aware=False):
"""
    Format the factor data, price data, group mapping and weight mapping into
    a DataFrame indexed by a MultiIndex of date and asset
    Parameters
    ----------
    factor : pd.Series - MultiIndex
        A Series indexed by a MultiIndex of date (level 0) and asset (level 1),
        whose values are the factor values
    prices : pd.DataFrame
        Price data used to compute the factor forward returns.
        Columns are assets, index is the date.
        The price data must cover the factor analysis period plus the maximum
        expected number of periods needed for the forward return calculation.
    groupby : pd.Series - MultiIndex or dict
        A Series indexed by date and asset giving each asset's group on each day,
        or a dict mapping asset to group.
        If a dict is passed, the group mapping is assumed to be constant over the whole period.
    weights : pd.Series - MultiIndex or dict
        A Series indexed by date and asset giving each asset's weight on each day,
        or a dict mapping asset to weight.
        If a dict is passed, the weight mapping is assumed to be constant over the whole period.
    binning_by_group : bool
        If True, compute quantiles separately for each group.
        Useful when the factor value range varies considerably across groups.
        If you want to analyse a group (industry) neutral portfolio, you should set this to True
    quantiles : int or sequence[float]
        Number of equal-count buckets (by factor value) to split the factor into,
        or a sequence of quantiles allowing non-uniform buckets,
        e.g. [0, .10, .5, .90, 1.] or [.05, .5, .95]
        Exactly one of 'quantiles' and 'bins' must be not None
    bins : int or sequence[float]
        Number of equal-width (by factor value) intervals to use when grouping the factor,
        or a sequence of bin edges allowing non-uniform interval widths,
        e.g. [-4, -2, -0.5, 0, 10]
        Exactly one of 'quantiles' and 'bins' must be not None
    periods : sequence[int]
        The forward return periods
    max_loss : float, optional
        Maximum percentage (0.00 to 1.00) of factor data that may be dropped,
        computed by comparing the number of items in the input factor index with
        the number of items in the output DataFrame index.
        Factor data can be partially dropped because the factor data itself is
        flawed (e.g. NaN), because not enough price data is available to compute
        forward returns for every factor value, or because grouping failed.
        Set max_loss = 0 to disable this exception handling.
    zero_aware : bool, optional
        If True, compute quantiles separately for positive and negative factor values.
        Useful when your signal is clustered and zero separates positive and negative values.
    Returns
    -------
    merged_data : pd.DataFrame - MultiIndex
        A DataFrame indexed by a MultiIndex of date (level 0) and asset (level 1),
        whose values include the factor value, the forward returns for each period,
        the factor quantile, the factor group (optional) and the factor weights (optional)
        - the forward return columns are named following the 'period_1', 'period_5' pattern
"""
forward_returns = compute_forward_returns(factor, prices, periods)
factor_data = get_clean_factor(factor, forward_returns, groupby=groupby,
weights=weights,
quantiles=quantiles, bins=bins,
binning_by_group=binning_by_group,
max_loss=max_loss, zero_aware=zero_aware)
return factor_data
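# --- Added minimal usage sketch (illustrative; `my_factor` and `my_prices` are
# placeholders supplied by the caller, not objects defined in this module) ----
#   factor_data = get_clean_factor_and_forward_returns(
#       my_factor, my_prices, quantiles=5, periods=(1, 5, 10))
#   # factor_data then feeds the quantile / IC analyses elsewhere in the package.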
def common_start_returns(
factor,
prices,
before,
after,
cumulative=False,
mean_by_date=False,
demean_by=None
):
if cumulative:
returns = prices
else:
returns = prices.pct_change(axis=0)
all_returns = []
for timestamp, df in factor.groupby(level='date'):
equities = df.index.get_level_values('asset')
try:
day_zero_index = returns.index.get_loc(timestamp)
except KeyError:
continue
starting_index = max(day_zero_index - before, 0)
ending_index = min(day_zero_index + after + 1, len(returns.index))
equities_slice = set(equities)
if demean_by is not None:
demean_equities = demean_by.loc[timestamp] \
.index.get_level_values('asset')
equities_slice |= set(demean_equities)
series = returns.loc[returns.
index[starting_index:ending_index], equities_slice]
series.index = range(
starting_index - day_zero_index, ending_index - day_zero_index
)
if cumulative:
series = (series / series.loc[0, :]) - 1
if demean_by is not None:
mean = series.loc[:, demean_equities].mean(axis=1)
series = series.loc[:, equities]
series = series.sub(mean, axis=0)
if mean_by_date:
series = series.mean(axis=1)
all_returns.append(series)
return pd.concat(all_returns, axis=1)
def rate_of_return(period_ret):
"""
    Convert returns to "per-period" returns: the per-period return that, compounded at a steady rate, would produce the observed return
"""
period = int(period_ret.name.replace('period_', ''))
return period_ret.add(1).pow(1. / period).sub(1)
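# (Added numeric example) a cumulative 5-day return of ~10.41% converts to
# (1 + 0.1041) ** (1 / 5) - 1 ≈ 0.02, i.e. roughly 2% per period.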
def std_conversion(period_std):
"""
    Convert the standard deviation of returns to a "per-period" standard deviation
"""
period_len = int(period_std.name.replace('period_', ''))
return period_std / np.sqrt(period_len)
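# (Added numeric example) a 5-day return standard deviation of ~2.24% converts
# to 0.0224 / sqrt(5) ≈ 0.01, i.e. roughly 1% per period.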
```
#### File: jqfactor_analyzer/jqfactor_analyzer/utils.py
```python
import re
import six
import warnings
from functools import wraps
try:
    from collections.abc import Iterable
except ImportError:  # Python 2 fallback
    from collections import Iterable
import pandas as pd
def get_forward_returns_columns(columns):
syntax = re.compile("^period_\\d+$")
return columns[columns.astype('str').str.contains(syntax, regex=True)]
def convert_to_forward_returns_columns(period):
try:
return 'period_{:d}'.format(period)
except ValueError:
return period
def ignore_warning(message='', category=Warning, module='', lineno=0, append=False):
"""过滤 warnings"""
def decorator(func):
@wraps(func)
def func_wrapper(*args, **kwargs):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message=message, category=category,
module=module, lineno=lineno, append=append)
return func(*args, **kwargs)
return func_wrapper
return decorator
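# Added usage example (illustrative only):
#   @ignore_warning(category=FutureWarning)
#   def load_data():
#       ...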
def ensure_tuple(x):
if isinstance(x, six.string_types) or not isinstance(x, Iterable):
return (x,)
else:
return tuple(x)
``` |
{
"source": "JoinQuant/gunicorn_thrift",
"score": 2
} |
#### File: gunicorn_thrift/gunicorn_thrift/config.py
```python
from gunicorn import six
from gunicorn.config import Setting, validate_string, validate_pos_int,\
WorkerClass, validate_callable, validate_bool, validate_dict
from .six import DEFAULT_WORKER, DEFAULT_TRANSPORT, DEFAULT_PROTOCOL
WorkerClass.default = DEFAULT_WORKER
class ThriftTransportFactoryClass(Setting):
name = "thrift_transport_factory"
section = "Thrift"
cli = ["--thrift-transport-factory"]
validator = validate_string
default = DEFAULT_TRANSPORT
desc = """\
The factory class for thrift transport.
"""
class ThriftProtocolFactoryClass(Setting):
name = "thrift_protocol_factory"
section = "Thrift"
cli = ["--thrift-protocol-factory"]
validator = validate_string
default = DEFAULT_PROTOCOL
desc = """\
    The factory class for the thrift protocol.
"""
class ThriftClientTimeout(Setting):
name = "thrift_client_timeout"
section = "Thrift"
cli = ["--thrift-client-timeout"]
validator = validate_pos_int
default = None
desc = """\
    Number of seconds of client silence after which the client connection is timed out.
"""
class ProcessorAsFactory(Setting):
name = "thrift_processor_as_factory"
section = "Thrift"
cli = ["--thrift-processor-as-factory"]
validator = validate_bool
default = False
desc = """\
Treat app as processor factory instead of a single processor.
"""
class WorkerTerm(Setting):
name = "worker_term"
section = "Server Hooks"
validator = validate_callable(1)
type = six.callable
def worker_term(worker):
pass
default = staticmethod(worker_term)
desc = """\
Called just after a worker received SIGTERM, and about to gracefully
shutdown.
The callable needs to accept one instance variable for the initialized
Worker.
"""
class ClientConnected(Setting):
name = "on_connected"
section = "Server Hooks"
validator = validate_callable(2)
type = six.callable
def on_connected(worker, addr):
pass
default = staticmethod(on_connected)
desc = """\
Called just after a connection is made.
The callable needs to accept two instance variable for the worker and
the connected client address.
"""
class TDecodeExceptionRaised(Setting):
name = "on_tdecode_exception"
section = "Server Hooks"
validator = validate_callable(1)
type = six.callable
def on_tdecode_exception(err):
pass
default = staticmethod(on_tdecode_exception)
desc = """\
Called if tdecode exception is raised
The callable needs to accept one variable for the exception raised.
"""
class ClientConnectClosed(Setting):
name = "post_connect_closed"
section = "Server Hooks"
validator = validate_callable(1)
type = six.callable
def post_connect_closed(worker):
pass
default = staticmethod(post_connect_closed)
desc = """\
Called just after a connection is closed.
The callable needs to accept one instance variable for the worker.
"""
class ServiceRegisterConf(Setting):
name = "service_register_conf"
section = "Service Register Conf"
default = {}
validator = validate_dict
desc = """\
Config used to connect to service register watcher
"""
class ServiceRegisterClass(Setting):
name = "service_register_cls"
section = "Service Register Class"
cli = ["--service-register-cls"]
validator = validate_string
default = ''
desc = """\
The class used for register service
"""
class GeventCheckInterval(Setting):
name = "gevent_check_interval"
section = "Thrift"
cli = ["--gevent-check-interval"]
validator = validate_pos_int
default = 0
desc = """\
    The interval at which to check whether the gevent ioloop is blocked.
"""
class WorkerTimeout(Setting):
name = "worker_timeout"
section = "Worker Processes"
cli = ["--worker-timeout"]
validator = validate_pos_int
default = None
desc = """\
    Workers will be restarted gracefully after they have been working for this period of time.
"""
class MaxMemoryPercent(Setting):
name = "max_memory_percent"
section = "Max Memory Percent"
cli = ["--max-memory-percent"]
validator = validate_pos_int
default = 90
desc = """\
    Workers will be restarted gracefully when a worker's memory usage percentage exceeds this value.
"""
``` |
{
"source": "joinself/NFCPassportReader",
"score": 3
} |
#### File: NFCPassportReader/scripts/extract.py
```python
import os
import sys
import base64
import subprocess
import re
fingerprints = []
totalCerts = 0
certNr = 1
def main( filename ):
global certNr
# First, make sure that the version of openssl we are using supports the cms command
# the version supplied with OS X doesn't - you would need to get a different one in this case
# e.g. through Homebrew
out, err = execute( "openssl cms" )
if err.decode().find( "'cms' is an invalid command" ) != -1:
print( "The version of OpenSSL you are using doesn't support the CMS command" )
print( "You need to get a version that does (e.g. from Homebrew)" )
exit( 1 )
# remove old master list
if os.path.exists( "masterList.pem" ):
os.remove( "masterList.pem" )
# Identify Type of file - either LDIF or MasterList (CMS)
    if filename.lower().endswith( ".ldif" ):
        # Read and parse LDIF File
        (cns, masterLists) = readAndExtractLDIFFile( filename )
    elif filename.lower().endswith( ".ml" ):
        masterLists = readInMasterListFile( filename )
        # A raw masterlist file carries no CN metadata - label it with its filename
        cns = [os.path.basename( filename )] * len( masterLists )
    else:
        print( "Unsupported file type - expected a .ldif or .ml file" )
        exit( 1 )
    print( f"Read in {len(masterLists)} masterlist files" )
    print( f"Read in {len(cns)} CNs" )
for index, ml in enumerate(masterLists):
certNr = 1
print( "-----------------------------------" )
print(f"Verifying and extracting MasterList {index} - {cns[index]}")
try:
extractCertsFromMasterlist( ml )
except Exception as e:
print( "Error extracting certs from masterlist")
print( "Skipping this masterlist - certs from this list will not be included.")
print( "====================================" )
print( f"Created MasterList.pem containing {totalCerts} certificates")
def readAndExtractLDIFFile( file ):
adding = False
certs = []
cns = []
cn = ""
with open(file, "r") as inf:
for line in inf:
if line.startswith( "cn: "):
cn = line[4:]
elif line.startswith( "CscaMasterListData:: "):
cert = line[21:]
adding = True
elif not line.startswith(" ") and adding == True:
adding = False
certs.append( cert )
cns.append( cn )
cert = ""
elif adding == True:
cert += line
if cert != "":
certs.append( cert )
cns.append( cn )
print( f"Read {len(certs)} certs" )
masterLists = []
for index, cert in enumerate(certs):
data = base64.b64decode(cert)
masterLists.append( data )
return (cns,masterLists)
def readInMasterListFile( file ):
with open(file, "rb") as inf:
data = inf.read()
return [data]
def extractCertsFromMasterlist( masterList ):
global totalCerts
# Run openssl cms to verify and extract the signed data
cmd = f"openssl cms -inform der -noverify -verify"
(signedData, err) = execute( cmd, masterList )
if err.decode("utf8").strip() != "Verification successful":
print( f"[{err.decode('utf8')}]" )
raise Exception( "Verification of Masterlist data failed" )
print( "MasterList Verification successful" )
certList = extractPEMCertificates( signedData )
print( "Removing duplicates")
uniqueCerts = [x for x in certList if uniqueHash(x)]
print( f"Removed {len(certList)-len(uniqueCerts)} duplicate certificates")
totalCerts += len(uniqueCerts)
# Append to masterList.pem
with open("masterList.pem", "ab") as f:
for c in uniqueCerts:
f.write(c)
def extractPEMCertificates( signedData ):
global certNr
print( "Extracting all certificates from payload" )
cmd = f"openssl asn1parse -inform der -i"
(data, err) = execute( cmd, signedData )
lines = data.decode("utf8").strip().split( "\n" )
valid = False
certs = []
certCount = len([i for i in lines if "d=2" in i])
for line in lines:
if re.search( r":d=1", line ):
valid = False
if re.search( r"d=1.*SET", line ):
valid = True
if re.search( r"d=2", line ) and valid:
# Parse line
match = re.search( r"^ *([0-9]*).*hl= *([0-9]*).*l= *([0-9]*).*", line)
if match:
print( f"Extracting cert {certNr} of {certCount}", end="\r" )
certNr += 1
offset = int(match.group(1))
header_len = int(match.group(2))
octet_len = int(match.group(3))
# Extract PEM certificate
data = signedData[offset : offset+header_len+octet_len]
(cert,err) = execute( f"openssl x509 -inform der -outform pem", data )
certs.append(cert)
else:
print( "Failed match")
print( f"\nExtracted {len(certs)} certs")
return certs
def uniqueHash( cert ):
(data,err) = execute( "openssl x509 -hash -fingerprint -inform PEM -noout", cert )
items = data.decode("utf8").split("\n")
hash = items[0].strip()
fingerprint = items[1].strip()
if fingerprint not in fingerprints:
fingerprints.append( fingerprint )
return True
#print( f"Found duplicate hash - {hash}")
return False
def writeToDisk(name, data):
with open( name, "wb" ) as f:
f.write(data)
def removeFromDisk(name):
try:
os.remove(name)
except:
pass
def execute(cmd, data = None, empty=False):
res = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if data != None:
res.stdin.write(data)
res.stdin.close()
out = res.stdout.read()
err = res.stderr.read()
return (out, err)
if __name__ == "__main__":
if len(sys.argv) != 2:
print( "Invalid number of parameters: ")
print( "" )
print( "Usage - python extract.py [masterlist ml file|icao ldif file]")
print( "" )
exit(1)
main( sys.argv[1] )
``` |
{
"source": "joinsion/google-api-python-client",
"score": 4
} |
#### File: samples/searchforshopping/crowding.py
```python
import pprint
from googleapiclient.discovery import build
SHOPPING_API_VERSION = 'v1'
DEVELOPER_KEY = '<KEY>'
def main():
"""Get and print a feed of public products in the United States mathing a
text search query for 'digital camera' and grouped by the 8 top brands.
The list method of the resource should be called with the "crowdBy"
parameter. Each parameter should be designed as <attribute>:<occurence>,
where <occurrence> is the number of that <attribute> that will be used. For
example, to crowd by the 5 top brands, the parameter would be "brand:5". The
possible rules for crowding are currently:
account_id:<occurrence> (eg account_id:5)
brand:<occurrence> (eg brand:5)
condition:<occurrence> (eg condition:3)
gtin:<occurrence> (eg gtin:10)
price:<occurrence> (eg price:10)
Multiple crowding rules should be specified by separating them with a comma,
for example to crowd by the top 5 brands and then condition of those items,
the parameter should be crowdBy="brand:5,condition:3"
"""
client = build('shopping', SHOPPING_API_VERSION, developerKey=DEVELOPER_KEY)
resource = client.products()
# The crowdBy parameter to the list method causes the results to be grouped,
# in this case by the top 8 brands.
request = resource.list(source='public', country='US', q=u'digital camera',
crowdBy='brand:8')
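  # (Added note, illustrative only) To crowd by the top 5 brands and then by the
  # condition of those items, the same call could instead pass
  # crowdBy='brand:5,condition:3'.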
response = request.execute()
pprint.pprint(response)
if __name__ == '__main__':
main()
``` |
{
"source": "joinssmith/zongmutech-car-wheel-object-detection",
"score": 2
} |
#### File: zongmutech-car-wheel-object-detection/datasets/fasterrcnn_dataset.py
```python
import os, random
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms.functional as FT
from PIL import Image
import torchvision.transforms as transforms
from utils import *
class mydateset(Dataset): # MARK
def __init__(self, root='data', mode='train', transform=False):
assert mode == 'train' or mode == 'test', f'Expect mode to be train or test, but got {mode}'
self.root = root
self.transform = transform
self.mode = mode
self.path = f'/{self.mode}'
self.img_files = os.listdir(self.root + self.path)
def __getitem__(self, idx):
img = self.img_files[idx]
img_name = img.split('.')[0]
img = Image.open(self.root + '/' + self.path + '/' + img)
img = img.convert('RGB')
# mask = Image.open(self.root + f'/{self.mode}_mask/' + img_name + '_mask.png')
if self.mode == 'train' or self.mode == 'test':
if self.mode == 'train':
# print(img_name)
classes, xymin, xymax = get_inf(self.root + self.path + '_label/' + img_name + '.xml')
else:
classes, xymin, xymax, difficulties = get_inf(self.root + self.path + '_label/' + img_name + '.xml',
get_diff=True)
label = [label_map[i] for i in classes]
label = torch.FloatTensor(label)
boxes = [list(xymin[i] + xymax[i]) for i in range(len(label))]
boxes = torch.FloatTensor(boxes)
if self.transform and self.mode == 'train':
# filp
if random.random() < 0.5:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
filp_boxes = boxes
filp_boxes[:, 0] = img.width - boxes[:, 0] - 1
filp_boxes[:, 2] = img.width - boxes[:, 2] - 1
filp_boxes = filp_boxes[:, [2, 1, 0, 3]]
boxes = filp_boxes
# photometric distort
img = self.photometric_distort(img)
# random crop
img, boxes, label = self.random_crop(img, boxes, label)
img = transforms.ToPILImage()(img)
# resize
new_boxes = self.box_resize(boxes, img)
img = transforms.Resize((800, 1280))(img)
# totensor && norm
img = transforms.ToTensor()(img)
# img = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(img)
return img, new_boxes, label
elif self.mode == 'test':
# resize
new_boxes = self.box_resize(boxes, img)
img = transforms.Resize((800, 1280))(img)
# totensor && norm
img = transforms.ToTensor()(img)
# img = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(img)
return img, new_boxes, label, difficulties
def __len__(self):
return len(self.img_files)
def random_crop(self, image, boxes, labels):
image = transforms.ToTensor()(image)
original_h = image.size(1)
original_w = image.size(2)
# Keep choosing a minimum overlap until a successful crop is made
while True:
# Randomly draw the value for minimum overlap
min_overlap = random.choice([0., .1, .3, .5, .7, .9, None]) # 'None' refers to no cropping
# If not cropping
if min_overlap is None:
return image, boxes, labels
# Try up to 50 times for this choice of minimum overlap
# This isn't mentioned in the paper, of course, but 50 is chosen in paper authors' original Caffe repo
max_trials = 50
for _ in range(max_trials):
# Crop dimensions must be in [0.3, 1] of original dimensions
# Note - it's [0.1, 1] in the paper, but actually [0.3, 1] in the authors' repo
min_scale = 0.3
scale_h = random.uniform(min_scale, 1)
scale_w = random.uniform(min_scale, 1)
new_h = int(scale_h * original_h)
new_w = int(scale_w * original_w)
# Aspect ratio has to be in [0.5, 2]
aspect_ratio = new_h / new_w
if not 0.5 < aspect_ratio < 2:
continue
# Crop coordinates (origin at top-left of image)
left = random.randint(0, original_w - new_w)
right = left + new_w
top = random.randint(0, original_h - new_h)
bottom = top + new_h
crop = torch.FloatTensor([left, top, right, bottom]) # (4)
# Calculate Jaccard overlap between the crop and the bounding boxes
overlap = find_jaccard_overlap(crop.unsqueeze(0),
boxes) # (1, n_objects), n_objects is the no. of objects in this image
overlap = overlap.squeeze(0) # (n_objects)
# If not a single bounding box has a Jaccard overlap of greater than the minimum, try again
if overlap.max().item() < min_overlap:
continue
# Crop image and mask
new_image = image[:, top:bottom, left:right] # (3, new_h, new_w)
# Find centers of original bounding boxes
bb_centers = (boxes[:, :2] + boxes[:, 2:]) / 2. # (n_objects, 2)
# Find bounding boxes whose centers are in the crop
centers_in_crop = (bb_centers[:, 0] > left) * (bb_centers[:, 0] < right) * (bb_centers[:, 1] > top) * (
bb_centers[:,
1] < bottom) # (n_objects), a Torch uInt8/Byte tensor, can be used as a boolean index
# If not a single bounding box has its center in the crop, try again
if not centers_in_crop.any():
continue
# Discard bounding boxes that don't meet this criterion
new_boxes = boxes[centers_in_crop, :]
new_labels = labels[centers_in_crop]
# Calculate bounding boxes' new coordinates in the crop
new_boxes[:, :2] = torch.max(new_boxes[:, :2], crop[:2]) # crop[:2] is [left, top]
new_boxes[:, :2] -= crop[:2]
new_boxes[:, 2:] = torch.min(new_boxes[:, 2:], crop[2:]) # crop[2:] is [right, bottom]
new_boxes[:, 2:] -= crop[:2]
return new_image, new_boxes, new_labels
def photometric_distort(self, image):
"""
Distort brightness, contrast, saturation, and hue, each with a 50% chance, in random order.
:param image: image, a PIL Image
:return: distorted image
"""
new_image = image
distortions = [FT.adjust_brightness,
FT.adjust_contrast,
FT.adjust_saturation,
FT.adjust_hue]
random.shuffle(distortions)
for d in distortions:
if random.random() < 0.5:
                if d.__name__ == 'adjust_hue':
# Caffe repo uses a 'hue_delta' of 18 - we divide by 255 because PyTorch needs a normalized value
adjust_factor = random.uniform(-18 / 255., 18 / 255.)
else:
# Caffe repo uses 'lower' and 'upper' values of 0.5 and 1.5 for brightness, contrast, and saturation
adjust_factor = random.uniform(0.5, 1.5)
# Apply this distortion
new_image = d(new_image, adjust_factor)
return new_image
def box_resize(self, boxes, img, dims=(800, 1280), return_percent_coords=False):
new_boxes = []
old_dims = torch.FloatTensor([img.width, img.height, img.width, img.height]).unsqueeze(0)
for box in boxes:
new_box = box / old_dims
if not return_percent_coords:
new_dims = torch.FloatTensor([dims[1], dims[0], dims[1], dims[0]]).unsqueeze(0)
new_box = new_box * new_dims
new_boxes.append(new_box)
return new_boxes
def collate_fn(self, batch):
if self.mode == 'train':
images = list()
boxes = list()
labels = list()
for b in batch:
images.append(b[0])
boxes.append(b[1])
labels.append(b[2])
images = torch.stack(images, dim=0)
return images, boxes, labels
elif self.mode == 'test':
images = list()
boxes = list()
labels = list()
difficulties = list()
for b in batch:
images.append(b[0])
boxes.append(b[1])
labels.append(b[2])
difficulties.append(b[3])
images = torch.stack(images, dim=0)
return images, boxes, labels, difficulties
def dataloader_test():
ds = mydateset('../data', mode='train', transform=True)
dataloader = DataLoader(
ds, batch_size=2, shuffle=True, num_workers=1, collate_fn=ds.collate_fn
)
img, boxes, labels = next(iter(dataloader))
print(img.shape)
targets = []
for i in range(img.shape[0]):
t = {}
t['boxes'] = torch.cat([j.cuda() for j in boxes[i]])
t['labels'] = torch.cat([j.unsqueeze(0).cuda() for j in labels[i]])
targets.append(t)
print(targets)
if __name__ == '__main__':
dataloader_test()
```
#### File: zongmutech-car-wheel-object-detection/detects/detect_wheel.py
```python
import torch
from PIL import Image, ImageDraw
from torchvision import transforms
import matplotlib.pyplot as plt
import cv2
import numpy as np
from utils import *
from nets.seg_model import DeepLabv3_plus
model = DeepLabv3_plus(nInputChannels=3, n_classes=3, os=16,
pretrained=False, _print=False, fpn=False)
print(f'Load DeepLabv3...')
checkpoint = torch.load('checkpoints/resnet101_seg.pth.tar')
state_dict = checkpoint['model']
model = model.cuda()
model.load_state_dict(state_dict)
model.eval()
print(f'DeepLabv3 Done\n')
label_color_map = {'rear': 'blue', 'front': 'red'}
def detect(img, vis=False):
draw_img = img.copy()
tsfm = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
img = tsfm(img).unsqueeze(0).cuda()
pred = model(img)
val, decode_mask = torch.max(pred, 1)
index = decode_mask.cpu().detach().numpy().squeeze().astype('int32')
_map = decode_segmap(index)
if vis:
plt.imshow(_map)
plt.show()
imgray = cv2.cvtColor(np.asarray(_map, dtype=np.uint8), cv2.COLOR_RGB2GRAY)
ret, thresh=cv2.threshold(imgray, 1, 255, cv2.THRESH_BINARY_INV)
countours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
pred_dict = {}
pred_dict['labels'] = []
pred_dict['boxes'] = []
pred_dict['confidence'] = []
n_wheels = len(countours)-1
if n_wheels == 0:
return draw_img, pred_dict, decode_mask.squeeze()
# print('wheel numbers: ', n_wheels)
xymin = []
xymax = []
for i in range(1, len(countours)):
x, y, w, h = cv2.boundingRect(countours[i])
xymin.append((x, y))
xymax.append((x+w, y+h))
xymin = np.array(xymin)
xymax = np.array(xymax)
boxes = np.concatenate([xymin, xymax], 1)
clses = []
new_boxes = []
for i in range(n_wheels):
box = list(map(lambda x:int(x), boxes[i]))
cls = decode_mask.squeeze()[box[1]:box[3], box[0]:box[2]].cpu()
if (cls==1).sum() + (cls==2).sum() <= 20:
continue
elif (cls == 1).sum() >= (cls == 2).sum():
clses.append('rear')
elif (cls == 1).sum() < (cls == 2).sum():
clses.append('front')
new_boxes.append(box)
draw = ImageDraw.Draw(draw_img)
for i in range(len(clses)):
location = new_boxes[i]
draw.rectangle(xy=location, outline=label_color_map[clses[i]])
draw.rectangle(xy=[l + 1. for l in location], outline=label_color_map[clses[i]])
pred_dict['labels'].append(clses[i])
pred_dict['boxes'].append(location)
pred_dict['confidence'].append(1)
del draw
return draw_img, pred_dict, decode_mask.squeeze()
if __name__ == '__main__':
from pprint import pprint
img_path = '../data/sub_test/test/000000.jpg'
img = Image.open(img_path, mode='r').convert('RGB')
draw, pred_dict, _ = detect(img)
draw.show()
pprint(pred_dict)
```
#### File: zongmutech-car-wheel-object-detection/detects/detect_yolo.py
```python
from __future__ import division
from yolov3.models import *
from yolov3.yolo_utils.utils import *
from yolov3.yolo_utils.datasets import *
from soft_nms import py_cpu_softnms
import os
import sys
import time
import datetime
import argparse
import numpy as np
from PIL import Image, ImageDraw, ImageFont
import torch
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from torch.autograd import Variable
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.ticker import NullLocator
parser = argparse.ArgumentParser()
parser.add_argument("--image_folder", type=str, default="samples/", help="path to dataset")
parser.add_argument("--model_def", type=str, default="yolov3/config/yolov3-custom.cfg", help="path to model definition file")
parser.add_argument("--weights_path", type=str, default="yolov3/weights/yolov3.weights", help="path to weights file")
parser.add_argument("--class_path", type=str, default="yolov3/classes.names", help="path to class label file")
parser.add_argument("--conf_thres", type=float, default=0.5, help="object confidence threshold")
parser.add_argument("--nms_thres", type=float, default=0.3, help="iou thresshold for non-maximum suppression")
parser.add_argument("--batch_size", type=int, default=1, help="size of the batches")
parser.add_argument("--n_cpu", type=int, default=0, help="number of cpu threads to use during batch generation")
parser.add_argument("--img_size", type=int, default=608, help="size of each image dimension")
parser.add_argument("--checkpoint_model", type=str, default="checkpoints/yolov3_ckpt_140.pth",
help="path to checkpoint model")
opt = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f'Load YOLOv3...')
model = Darknet(opt.model_def, img_size=opt.img_size).to(device)
model.load_state_dict(torch.load(opt.checkpoint_model))
model.eval() # Set in evaluation mode
print(f'YOLOv3 Done\n')
def detect(img, save=False, vis=False, flip=False):
classes = load_classes(opt.class_path) # Extracts class labels from file
Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
orig_img = img.copy()
img = transforms.ToTensor()(img)
img, _ = pad_to_square(img, 0)
img = resize(img, opt.img_size)
img = img.unsqueeze(0)
# print("\nPerforming object detection:")
prev_time = time.time()
# Configure input
input_imgs = Variable(img.type(Tensor))
# Get detections
with torch.no_grad():
detections = model(input_imgs) # (center x, center y, width, height, ...)
# detections = non_max_suppression(detections, opt.conf_thres, opt.nms_thres)
if flip:
flip_img = orig_img.transpose(Image.FLIP_LEFT_RIGHT)
flip_img = transforms.ToTensor()(flip_img)
flip_img, _ = pad_to_square(flip_img, 0)
flip_img = resize(flip_img, opt.img_size)
flip_img = flip_img.unsqueeze(0)
input_imgs = Variable(flip_img.type(Tensor))
with torch.no_grad():
flip_detections = model(input_imgs)
flip_anno = flip_detections[..., :4].squeeze() # (cx, cy, w, h)
flip_anno[:, 0] = opt.img_size - flip_anno[:, 0]
flip_detections[..., :4] = flip_anno.unsqueeze(0)
detections = torch.cat([detections, flip_detections], dim=1)
# Log progress
current_time = time.time()
inference_time = datetime.timedelta(seconds=current_time - prev_time)
prev_time = current_time
# print("\t+ Batch Inference Time: %s" % (inference_time))
detections = non_max_suppression(detections, opt.conf_thres, opt.nms_thres)
detections = detections[0]
total_pred_dict = {}
total_pred_dict['boxes'] = []
total_pred_dict['confidence'] = []
total_pred_dict['labels'] = []
colors_ids = []
pred_dict = {}
pred_dict['boxes'] = []
pred_dict['confidence'] = []
pred_dict['labels'] = []
if detections is not None:
# Rescale boxes to original image
detections = rescale_boxes(detections, opt.img_size, orig_img.size)
if vis or save:
draw = ImageDraw.Draw(orig_img)
font = ImageFont.truetype("./calibril.ttf", 15)
distinct_colors = ['#e6194b', '#3cb44b', '#ffe119', '#0082c8', '#f58231', '#911eb4', '#46f0f0', '#f032e6']
for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
            # cls_conf threshold
if cls_conf < 0.8:
continue
if classes[int(cls_pred)] == 'car':
continue
x1, x2 = [overbound(i, 1280) for i in [x1, x2]]
y1, y2 = [overbound(i, 720) for i in [y1, y2]]
box_location = [int(i) for i in [x1, y1, x2, y2]]
total_pred_dict['boxes'].append(box_location)
total_pred_dict['confidence'].append(conf.item())
total_pred_dict['labels'].append(classes[int(cls_pred)])
colors_ids.append(int(cls_pred))
if len(total_pred_dict['boxes']) == 0:
return orig_img, pred_dict
keep = py_cpu_softnms(np.array(total_pred_dict['boxes']), np.array(total_pred_dict['confidence']),
Nt=0.7)
for i in keep:
box_location = total_pred_dict['boxes'][i]
conf = total_pred_dict['confidence'][i]
label = total_pred_dict['labels'][i]
pred_dict['boxes'].append(box_location)
pred_dict['confidence'].append(conf)
pred_dict['labels'].append(label)
if vis or save:
# Box
draw.rectangle(xy=box_location, outline=distinct_colors[colors_ids[i]])
draw.rectangle(xy=[l + 1. for l in box_location], outline=distinct_colors[colors_ids[i]])
# Text
text_size = font.getsize(label.upper())
text_location = [box_location[0] + 2., box_location[1] - text_size[1]]
textbox_location = [box_location[0], box_location[1] - text_size[1], box_location[0] + text_size[0] + 4.,
box_location[1]]
draw.rectangle(xy=textbox_location, fill=distinct_colors[colors_ids[i]])
draw.text(xy=text_location, text=label.upper(), fill='white',
font=font)
text_conf = [box_location[0] + 2., box_location[1]]
textbox_conf = [box_location[0], box_location[1] + text_size[1], box_location[0] + text_size[0] + 4.,
box_location[1]]
draw.rectangle(xy=textbox_conf, fill=distinct_colors[colors_ids[i]])
draw.text(xy=text_conf, text='%.2f'%(conf), fill='white',
font=font)
return orig_img, pred_dict
if __name__ == '__main__':
from pprint import pprint
from tqdm import tqdm
# path = opt.image_folder
path = '../data/sub_test/test/'
save = False
vis = True
flip = True
for file in tqdm(os.listdir(path)):
if not save:
file = '003220.jpg'
img = Image.open(path + file)
# img = img.transpose(Image.FLIP_LEFT_RIGHT)
draw, pred_dict = detect(img, save, vis, flip)
if not save:
pprint(pred_dict)
draw.show()
break
else:
draw.save(f'output/{file.split(".")[0]}.png')
```
#### File: joinssmith/zongmutech-car-wheel-object-detection/inference_ens.py
```python
import torch
from torchvision import transforms
from utils import *
# from detects.detect_yolo import detect as detect_yolo
from detects.detect_wheel import detect as detect_wheel
from detects.detect_fastrcnn import detect as detect_fsrcnn
from detects.detect_fastrcnn2 import detect as detect_fsrcnn2
from soft_nms import py_cpu_softnms as nms
import os
from tqdm import tqdm
from PIL import Image, ImageDraw
import numpy as np
from pprint import pprint
import argparse
opj = os.path.join
parser = argparse.ArgumentParser()
parser.add_argument("--image_folder", type=str, default="../data/sub_test/test", help="path to test images folder")
parser.add_argument("--vis", type=bool, default=False, help="output visiable result")
opt = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICE'] = '0'
num2name = {1:'car', 2:'person', 3:'truck', 4:'bus', 5:'rider', 6:'wheel', 7:'rear', 8:'front'}
name2num = {'car':1, 'person':2, 'truck':3, 'bus':4, 'rider':5, 'wheel':6, 'rear':7, 'front':8}
def is_in_keep(ids, keep):
for i, flag in enumerate(ids):
if flag:
if i not in keep:
ids[i] = False
return ids
def car2wheel(pred_dict, keep):
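    # Map each kept car box (label 1) to the kept wheel/rear/front boxes (labels 6-8)
    # whose centre point falls inside it.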
wheel_ids = (pred_dict['labels'] == 6) + (pred_dict['labels'] == 7) + (pred_dict['labels'] == 8)
wheel_ids = is_in_keep(wheel_ids, keep)
car_ids = pred_dict['labels'] == 1
car_ids = is_in_keep(car_ids, keep)
wheel_boxes = pred_dict['boxes'][wheel_ids]
car_boxes = pred_dict['boxes'][car_ids]
wheel_centers = np.array([wheel_boxes[:, 0]+(wheel_boxes[:, 2]-wheel_boxes[:, 0])/2,
wheel_boxes[:, 1]+(wheel_boxes[:, 3]-wheel_boxes[:, 1])/2]).T
c2w = {}
for i, box in enumerate(car_boxes):
c2w[i] = []
for j in range(len(wheel_centers)):
if box[0] <= wheel_centers[j][0] and box[1] <= wheel_centers[j][1] \
and box[2] >= wheel_centers[j][0] and box[3] >= wheel_centers[j][1]:
c2w[i].append(wheel_boxes[j])
return c2w
def ens(img_path):
original_image = Image.open(img_path, mode='r')
original_image = original_image.convert('RGB')
# original_image = transforms.RandomHorizontalFlip(1)(original_image)
# _, pred_dict_yolo = detect_yolo(original_image.copy())
_, pred_dict_wheel, mask = detect_wheel(original_image.copy())
_, pred_dict_fsrcnn = detect_fsrcnn(original_image.copy(), True, True) # 6 -> wheel
_, pred_dict_fsrcnn2 = detect_fsrcnn2(original_image.copy(), True, True)
total_labels = \
pred_dict_wheel['labels'] + \
pred_dict_fsrcnn['labels'] + \
pred_dict_fsrcnn2['labels']
total_boxes = \
pred_dict_wheel['boxes'] + \
pred_dict_fsrcnn['boxes'].astype('int').tolist() + \
pred_dict_fsrcnn2['boxes'].astype('int').tolist()
total_conf = \
pred_dict_wheel['confidence'] + \
pred_dict_fsrcnn['confidence'].tolist() + \
pred_dict_fsrcnn2['confidence'].tolist()
total_pred_dict = {}
pred_dict = {}
pred_dict['boxes'] = []
pred_dict['confidence'] = []
pred_dict['labels'] = []
total_pred_dict['labels'] = np.array([name2num[name] for name in total_labels])
total_pred_dict['boxes'] = np.array(total_boxes)
total_pred_dict['confidence'] = np.array(total_conf)
wheel_ids = \
(total_pred_dict['labels'] == 6) + \
(total_pred_dict['labels'] == 7) + \
(total_pred_dict['labels'] == 8)
wheel_labels = total_pred_dict['labels'][wheel_ids]
wheel_boxes = total_pred_dict['boxes'][wheel_ids]
wheel_conf = total_pred_dict['confidence'][wheel_ids]
wheel_conf[wheel_conf == 1.] = 0.5
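    # Detections with a confidence of exactly 1.0 are down-weighted to 0.5 before NMS; generic
    # 'wheel' boxes (class 6) are then re-labelled rear (7) or front (8) by majority vote over
    # the segmentation mask crop returned by the wheel detector.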
for i, box in enumerate(wheel_boxes):
if wheel_labels[i] == 6:
cls = mask[box[1]:box[3], box[0]:box[2]].cpu()
if cls.sum() == 0:
continue
elif (cls == 1).sum() > (cls == 2).sum():
wheel_labels[i] = 7
elif (cls == 1).sum() < (cls == 2).sum():
wheel_labels[i] = 8
wheel_keep = nms(wheel_boxes.copy(), wheel_conf.copy(), Nt=0.01, thresh=0.01)
wheel_labels = wheel_labels[wheel_keep]
wheel_boxes = wheel_boxes[wheel_keep]
wheel_conf = wheel_conf[wheel_keep]
total_pred_dict['labels'] = total_pred_dict['labels'][~wheel_ids]
total_pred_dict['boxes'] = total_pred_dict['boxes'][~wheel_ids]
total_pred_dict['confidence'] = total_pred_dict['confidence'][~wheel_ids]
total_pred_dict['labels'] = np.concatenate([total_pred_dict['labels'], wheel_labels])
total_pred_dict['boxes'] = np.concatenate([total_pred_dict['boxes'], wheel_boxes])
total_pred_dict['confidence'] = np.concatenate([total_pred_dict['confidence'], wheel_conf])
# total_pred_dict['confidence'][total_pred_dict['labels'] == 6] = 0.5
if len(total_boxes) == 0:
return original_image, pred_dict
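    # With few candidate boxes use plain hard NMS (method=3); with many, switch to Gaussian
    # soft-NMS (method=2) and a stricter final score threshold.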
if len(total_boxes) <= 15:
keep = nms(total_pred_dict['boxes'].copy(), total_pred_dict['confidence'].copy(), Nt=0.3, thresh=0.01, method=3)
else:
keep = nms(total_pred_dict['boxes'].copy(), total_pred_dict['confidence'].copy(), Nt=0.5, thresh=0.75, method=2)
for i, label in enumerate(total_pred_dict['labels']):
if label in [7, 8] and i not in keep:
keep = np.concatenate([keep, np.array([i])])
c2w = car2wheel(total_pred_dict, keep)
for wheel_boxes in c2w.values():
if len(wheel_boxes) == 2:
label1_ids = np.sum(total_pred_dict['boxes'] == wheel_boxes[0], 1) == 4
label2_ids = np.sum(total_pred_dict['boxes'] == wheel_boxes[1], 1) == 4
label1_ids = is_in_keep(label1_ids, keep)
label2_ids = is_in_keep(label2_ids, keep)
labels = total_pred_dict['labels'][label1_ids + label2_ids]
if 6 in labels and 7 in labels:
if total_pred_dict['labels'][label1_ids] == 6:
total_pred_dict['labels'][label1_ids] = 8
elif total_pred_dict['labels'][label2_ids] == 6:
total_pred_dict['labels'][label2_ids] = 8
elif 6 in labels and 8 in labels:
if total_pred_dict['labels'][label1_ids] == 6:
total_pred_dict['labels'][label1_ids] = 7
elif total_pred_dict['labels'][label2_ids] == 6:
total_pred_dict['labels'][label2_ids] = 7
total_pred_dict['labels'] = [num2name[i] for i in total_pred_dict['labels']]
del_wheel_ids = []
for i, label in enumerate(total_pred_dict['labels']):
if label == 'wheel':
del_wheel_ids.append(i)
final_annotated_image = original_image
draw = ImageDraw.Draw(final_annotated_image)
# font = ImageFont.truetype("./calibril.ttf", 15)
for i in keep:
if i in del_wheel_ids:
continue
# Boxes
box_location = [round(j) for j in total_pred_dict['boxes'][i]]
draw.rectangle(xy=box_location, outline=label_color_map[total_pred_dict['labels'][i]])
draw.rectangle(xy=[l + 1. for l in box_location], outline=label_color_map[
total_pred_dict['labels'][i]]) # a second rectangle at an offset of 1 pixel to increase line thickness
# draw.rectangle(xy=[l + 2. for l in box_location], outline=label_color_map[
# det_labels[i]]) # a third rectangle at an offset of 1 pixel to increase line thickness
# draw.rectangle(xy=[l + 3. for l in box_location], outline=label_color_map[
# det_labels[i]]) # a fourth rectangle at an offset of 1 pixel to increase line thickness
# Text
# text_size = font.getsize(total_labels[i].upper())
# text_location = [box_location[0] + 2., box_location[1] - text_size[1]]
# textbox_location = [box_location[0], box_location[1] - text_size[1], box_location[0] + text_size[0] + 4.,
# box_location[1]]
# draw.rectangle(xy=textbox_location, fill=label_color_map[total_labels[i]])
# draw.text(xy=text_location, text=total_labels[i].upper(), fill='white',
# font=font)
pred_dict['boxes'].append(box_location)
pred_dict['confidence'].append(total_pred_dict['confidence'][i])
pred_dict['labels'].append(total_pred_dict['labels'][i])
return final_annotated_image, pred_dict
if __name__ == '__main__':
from pprint import pprint
DEBUG = False
OUTPUT = opt.vis
if DEBUG:
# img_path = opj(opt.image_folder, '003276.jpg') # 000374
img_path = '../1.jpg'
_, total_pred_dict = ens(img_path)
_.show()
pprint(total_pred_dict)
else:
# to submission txt
test_path = opt.image_folder
test_files = os.listdir(test_path)
w = ''
for file in tqdm(test_files):
try:
img_path = opj(test_path, file)
_, total_pred_dict = ens(img_path)
if OUTPUT:
_.save(f'output/{file.split(".")[0]}.png')
pred_dict = to_submission(total_pred_dict)
for i, cls in enumerate(pred_dict['labels']):
anno = pred_dict['boxes'][i]
anno = ' '.join([str(b) for b in anno])
w += f'{file.split(".")[0]} {cls} {anno}\n'
except Exception as e:
print('Error file:', file)
print(e)
exit()
with open('submit.txt', 'w') as f:
f.write(w)
```
#### File: joinssmith/zongmutech-car-wheel-object-detection/loss.py
```python
import torch
from torch import nn
import numpy as np
from utils import *
import torch.nn.functional as F
def cross_entropy2d(logit, target, ignore_index=255, weight=None, size_average=True, batch_average=True):
n, c, h, w = logit.size()
# logit = logit.permute(0, 2, 3, 1)
    target = target.squeeze(1)  # (batchsize, 1, 512, 512) -> (batchsize, 512, 512)
if weight is None:
criterion = nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index, size_average=False)
else:
criterion = nn.CrossEntropyLoss(weight=torch.from_numpy(np.array(weight)).float().cuda(), ignore_index=ignore_index, size_average=False)
loss = criterion(logit, target.long())
if size_average:
loss /= (h * w)
if batch_average:
loss /= n
return loss
class FocalLoss(nn.Module):
def __init__(self, n_cls=21, gamma=2):
super().__init__()
self.gamma = gamma
self.n_cls = n_cls
def forward(self, logit, target):
logit = logit.cpu()
target = target.cpu()
target = torch.eye(self.n_cls)[target.data.cpu()] # to one hot
target = target.float()
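        # Numerically stable binary cross-entropy with logits, then scale each term by the
        # focal modulating factor (1 - p_t)^gamma computed via the logsigmoid term below.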
max_val = (-logit).clamp(min=0)
loss = logit - logit * target + max_val + \
((-max_val).exp() + (-logit - max_val).exp()).log()
invprobs = F.logsigmoid(-logit * (target * 2.0 - 1.0))
loss = (invprobs * self.gamma).exp() * loss
loss = loss.cuda()
if len(loss.size()) == 2:
loss = loss.sum(dim=1)
return loss.mean()
class MultiBoxLoss(nn.Module):
def __init__(self, priors_cxcy, threshold=0.5, neg_pos_ratio=3, alpha=1., beta=1., use_focalloss=False):
super(MultiBoxLoss, self).__init__()
self.priors_cxcy = priors_cxcy
self.priors_xy = cxcy_to_xy(priors_cxcy)
self.threshold = threshold
self.neg_pos_ratio = neg_pos_ratio
self.alpha = alpha
self.beta = beta
self.use_focalloss = use_focalloss
self.smooth_l1 = nn.L1Loss()
self.cross_entropy = nn.CrossEntropyLoss(reduce=False)
self.FL = FocalLoss()
self.ce2d = cross_entropy2d
def forward(self, predicted_locs, predicted_scores, predicted_masks, boxes, labels, masks):
batch_size = predicted_locs.size(0)
n_priors = self.priors_cxcy.size(0)
n_classes = predicted_scores.size(2)
assert n_priors == predicted_locs.size(1) == predicted_scores.size(1)
true_locs = torch.zeros((batch_size, n_priors, 4), dtype=torch.float).to(device) # (N, n_priors, 4)
true_classes = torch.zeros((batch_size, n_priors), dtype=torch.long).to(device) # (N, n_priors)
# For each image
for i in range(batch_size):
n_objects = boxes[i].size(0)
overlap = find_jaccard_overlap(boxes[i],
self.priors_xy) # (n_objects, n_priors)
overlap_for_each_prior, object_for_each_prior = overlap.max(dim=0) # (n_priors)
_, prior_for_each_object = overlap.max(dim=1) # (N_o)
object_for_each_prior[prior_for_each_object] = torch.LongTensor(range(n_objects)).to(device)
overlap_for_each_prior[prior_for_each_object] = 1.
label_for_each_prior = labels[i][object_for_each_prior] # (n_priors)
label_for_each_prior[overlap_for_each_prior < self.threshold] = 0 # (n_priors)
true_classes[i] = label_for_each_prior
true_locs[i] = cxcy_to_gcxgcy(xy_to_cxcy(boxes[i][object_for_each_prior]), self.priors_cxcy) # (n_priors, 4)
# Identify priors that are positive (object/non-background)
positive_priors = true_classes != 0 # (N, 8732)
# LOCALIZATION LOSS
loc_loss = self.smooth_l1(predicted_locs[positive_priors], true_locs[positive_priors]) # (), scalar
# Note: indexing with a torch.uint8 (byte) tensor flattens the tensor when indexing is across multiple dimensions (N & n_priors)
# So, if predicted_locs has the shape (N, 8732, 4), predicted_locs[positive_priors] will have (total positives, 4)
# CONFIDENCE LOSS
if not self.use_focalloss:
# ------------- OHEM ----------------
n_positives = positive_priors.sum(dim=1) # (N)
n_hard_negatives = self.neg_pos_ratio * n_positives # (N)
conf_loss_all = self.cross_entropy(predicted_scores.view(-1, n_classes), true_classes.view(-1)) # (N * n_priors)
conf_loss_all = conf_loss_all.view(batch_size, n_priors) # (N, 8732)
conf_loss_pos = conf_loss_all[positive_priors] # (sum(n_positives))
conf_loss_neg = conf_loss_all.clone() # (N, 8732)
conf_loss_neg[positive_priors] = 0. # (N, 8732), positive priors are ignored (never in top n_hard_negatives)
conf_loss_neg, _ = conf_loss_neg.sort(dim=1, descending=True) # (N, 8732), sorted by decreasing hardness
hardness_ranks = torch.LongTensor(range(n_priors)).unsqueeze(0).expand_as(conf_loss_neg).to(device) # (N, n_priors)
hard_negatives = hardness_ranks < n_hard_negatives.unsqueeze(1) # (N, 8732)
conf_loss_hard_neg = conf_loss_neg[hard_negatives] # (sum(n_hard_negatives))
conf_loss = (conf_loss_hard_neg.sum() + conf_loss_pos.sum()) / n_positives.sum().float() # (), scalar
else:
# ---------- Focal Loss ----------------
conf_loss = self.FL(predicted_scores.view(-1, n_classes), true_classes.view(-1))
        # SEGMENTATION LOSS
mask_loss = self.ce2d(predicted_masks, masks)
# TOTAL LOSS
# print(conf_loss, loc_loss, mask_loss)
return conf_loss + self.alpha * loc_loss + self.beta * mask_loss
```
#### File: zongmutech-car-wheel-object-detection/nets/deeplabv3.py
```python
import math
import torch
import torch.nn.functional as F
from torch import nn
# from groupnorm import GroupNorm
class ASPP_module(nn.Module):
def __init__(self, inplanes, planes, rate):
super(ASPP_module, self).__init__()
if rate == 1:
kernel_size = 1
padding = 0
else:
kernel_size = 3
padding = rate
self.atrous_convolution = nn.Conv2d(inplanes, planes, kernel_size=kernel_size, stride=1, padding=padding, dilation=rate, bias=False)
self.bn = nn.BatchNorm2d(planes)
# self.bn = GroupNorm(planes)
self.relu = nn.ReLU()
self._init_weight()
def forward(self, x):
x = self.atrous_convolution(x)
x = self.bn(x)
return self.relu(x)
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
# elif isinstance(m, GroupNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
class DeepLabv3_plus(nn.Module):
def __init__(self, nInputChannels=3, n_classes=21, os=16, pretrained=False, _print=True):
if _print:
print("Constructing DeepLabv3+ model...")
print("Number of classes : {}".format(n_classes))
print("Output stride : {}".format(os))
print("Number of Input Channels: {}".format(nInputChannels))
print("Input shape : {}".format(f"batchsize, {nInputChannels}, 512, 512"))
print("Output shape : {}".format(f"batchsize, {n_classes}, 512, 512"))
super(DeepLabv3_plus, self).__init__()
self.conv_x = nn.Conv2d(512, 2048, kernel_size=1, stride=1)
self.conv_lower_x = nn.Conv2d(128, 256, kernel_size=1, stride=1)
# Atrous Conv
# self.resnet_features = ResNet101(nInputChannels, os, pretrained=pretrained)
# ASPP
if os == 16:
rates = [1, 6, 12, 18]
elif os == 8:
rates = [1, 12, 24, 36]
else:
raise NotImplementedError
self.aspp1 = ASPP_module(2048, 256, rate=rates[0])
self.aspp2 = ASPP_module(2048, 256, rate=rates[1])
self.aspp3 = ASPP_module(2048, 256, rate=rates[2])
self.aspp4 = ASPP_module(2048, 256, rate=rates[3])
self.relu = nn.ReLU()
self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
nn.Conv2d(2048, 256, 1, stride=1, bias=False),
nn.BatchNorm2d(256),
# GroupNorm(256),
nn.ReLU())
self.conv1 = nn.Conv2d(1280, 256, 1, bias=False)
self.bn1 = nn.BatchNorm2d(256)
# self.bn1 = GroupNorm(256)
# adopt [1x1, 48] for channel reduction.
self.conv2 = nn.Conv2d(256, 48, 1, bias=False)
self.bn2 = nn.BatchNorm2d(48)
# self.bn2 = GroupNorm(48, num_groups=48)
self.last_conv = nn.Sequential(nn.Conv2d(304, 256, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(256),
# GroupNorm(256),
nn.ReLU(),
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(256),
# GroupNorm(256),
nn.ReLU(),
nn.Conv2d(256, n_classes, kernel_size=1, stride=1))
def forward(self, input, features): # input 1, 3, 512, 512
x, low_level_features = features # ([N, 512, 32, 32], [N, 128, 128, 128])
x = self.conv_x(x) # [1, 2048, 32, 32]
low_level_features = self.conv_lower_x(low_level_features) # [1,256, 128, 128]
x1 = self.aspp1(x) # [1, 256, 32, 32]
x2 = self.aspp2(x) # [1, 256, 32, 32]
x3 = self.aspp3(x) # [1, 256, 32, 32]
x4 = self.aspp4(x) # [1, 256, 32, 32]
x5 = self.global_avg_pool(x) # [1, 256, 1, 1]
x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
x = torch.cat((x1, x2, x3, x4, x5), dim=1)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = F.interpolate(x, size=(int(math.ceil(input.size()[-2]/4)), int(math.ceil(input.size()[-1]/4))), mode='bilinear', align_corners=True)
low_level_features = self.conv2(low_level_features)
low_level_features = self.bn2(low_level_features)
low_level_features = self.relu(low_level_features)
x = torch.cat((x, low_level_features), dim=1)
x = self.last_conv(x)
x = F.interpolate(x, size=input.size()[2:], mode='bilinear', align_corners=True)
return x
def freeze_bn(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
# if isinstance(m, GroupNorm):
m.eval()
def __init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
# elif isinstance(m, GroupNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
```
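A minimal smoke test for the decoder above (not part of the repository) could look like the sketch below; it assumes the module lives at `nets/deeplabv3.py` as the file header states, uses an arbitrary class count, and feeds dummy backbone features with the shapes annotated in `forward()`:
```python
# Hypothetical usage sketch -- feature shapes follow the comments in DeepLabv3_plus.forward().
import torch
from nets.deeplabv3 import DeepLabv3_plus

model = DeepLabv3_plus(nInputChannels=3, n_classes=3, os=16, pretrained=False, _print=False)
model.eval()

dummy_input = torch.randn(1, 3, 512, 512)        # only its spatial size is used
dummy_features = (torch.randn(1, 512, 32, 32),   # high-level backbone features
                  torch.randn(1, 128, 128, 128)) # low-level backbone features
with torch.no_grad():
    out = model(dummy_input, dummy_features)
print(out.shape)  # expected: torch.Size([1, 3, 512, 512])
```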
#### File: joinssmith/zongmutech-car-wheel-object-detection/soft_nms.py
```python
import numpy as np
def py_cpu_softnms(dets, sc, Nt=0.3, sigma=0.5, thresh=0.001, method=3):
"""
py_cpu_softnms
:param dets: boexs 坐标矩阵 format [x1, y1, x2, y2]
:param sc: 每个 boxes 对应的分数
:param Nt: iou 交叠门限
:param sigma: 使用 gaussian 函数的方差
:param thresh: 最后的分数门限
:param method: 使用的方法
:return: 留下的 boxes 的 index
"""
# indexes concatenate boxes with the last column
N = dets.shape[0]
indexes = np.array([np.arange(N)])
dets = np.concatenate((dets, indexes.T), axis=1)
    # unpack box coordinates (format: [x1, y1, x2, y2])
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = sc
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
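    # Soft-NMS main loop: for each position i, swap in the highest-scoring remaining box,
    # then decay the scores of all later boxes according to their IoU with it.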
for i in range(N):
        # temporaries used when swapping the current entry with the highest-scoring remaining box
tBD = dets[i, :].copy()
tscore = scores[i].copy()
tarea = areas[i].copy()
pos = i + 1
#
if i != N-1:
maxscore = np.max(scores[pos:], axis=0)
maxpos = np.argmax(scores[pos:], axis=0)
else:
maxscore = scores[-1]
maxpos = 0
if tscore < maxscore:
dets[i, :] = dets[maxpos + i + 1, :]
dets[maxpos + i + 1, :] = tBD
tBD = dets[i, :]
scores[i] = scores[maxpos + i + 1]
scores[maxpos + i + 1] = tscore
tscore = scores[i]
areas[i] = areas[maxpos + i + 1]
areas[maxpos + i + 1] = tarea
tarea = areas[i]
# IoU calculate
xx1 = np.maximum(dets[i, 0], dets[pos:, 0])
yy1 = np.maximum(dets[i, 1], dets[pos:, 1])
xx2 = np.minimum(dets[i, 2], dets[pos:, 2])
yy2 = np.minimum(dets[i, 3], dets[pos:, 3])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[pos:] - inter)
# Three methods: 1.linear 2.gaussian 3.original NMS
if method == 1: # linear
weight = np.ones(ovr.shape)
weight[ovr > Nt] = weight[ovr > Nt] - ovr[ovr > Nt]
elif method == 2: # gaussian
weight = np.exp(-(ovr * ovr) / sigma)
else: # original NMS
weight = np.ones(ovr.shape)
weight[ovr > Nt] = 0
scores[pos:] = weight * scores[pos:]
# select the boxes and keep the corresponding indexes
inds = dets[:, 4][scores > thresh]
keep = inds.astype(int)
return keep
if __name__ == '__main__':
boxes = np.array(
[[200, 200, 400, 400], [220, 220, 420, 420], [200, 240, 400, 440], [240, 200, 440, 400], [1, 1, 2, 2]],
dtype=np.float32)
boxscores = np.array([0.9, 0.8, 0.7, 0.6, 0.5], dtype=np.float32)
keep = py_cpu_softnms(boxes, boxscores)
print(keep)
```
#### File: joinssmith/zongmutech-car-wheel-object-detection/submit_xml.py
```python
from xml.dom.minidom import Document
import os
import os.path
import xml.etree.ElementTree as ET
from tqdm import tqdm
opj = os.path.join
txt_path = "submit.txt"
xml_path = "inference_xml"
img_name=[]
if not os.path.exists(xml_path):
os.mkdir(xml_path)
def indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def txttoxml(txtPath,xmlPath):
dict = {'1':"car",
'2':"person",
'3':"truck",
'4':"bus",
'5':"rider",
'6':"rear",
'7':"front"}
txtFile = open(txtPath)
txtList = txtFile.readlines()
for i in tqdm(txtList):
oneline = i.strip().split(" ")
if oneline[0] not in img_name:
img_name.append(oneline[0])
xmlBuilder = Document()
annotation = xmlBuilder.createElement("annotation")
xmlBuilder.appendChild(annotation)
filename = xmlBuilder.createElement("filename")
filenameContent = xmlBuilder.createTextNode(oneline[0]+".jpg")
filename.appendChild(filenameContent)
annotation.appendChild(filename)
f = open(opj(xmlPath, oneline[0]+".xml"), 'w')
xmlBuilder.writexml(f, newl='\n', addindent=' ')
f.close()
tree = ET.parse(opj(xmlPath, oneline[0]+".xml"))
root = tree.getroot()
obj = ET.Element("object")
name = ET.Element("name")
name.text = dict[oneline[1]]
obj.append(name)
bndbox = ET.Element("bndbox")
xmin = ET.Element("xmin")
xmin.text = oneline[2]
bndbox.append(xmin)
ymin = ET.Element("ymin")
ymin.text = oneline[3]
bndbox.append(ymin)
xmax = ET.Element("xmax")
xmax.text = str(int(oneline[2])+int(oneline[4]))
bndbox.append(xmax)
ymax = ET.Element("ymax")
ymax.text = str(int(oneline[3])+int(oneline[5]))
bndbox.append(ymax)
obj.append(bndbox)
root.append(obj)
indent(root)
tree.write(opj(xmlPath, oneline[0]+".xml"))
txttoxml(txt_path,xml_path)
```
#### File: zongmutech-car-wheel-object-detection/trainer/deepdsod_train.py
```python
import time
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
from nets.deepdsod_model import DSOD
from loss import MultiBoxLoss
from datasets.dsod_dataset import mydateset
from os.path import exists
from utils import *
# {'car': 1, 'person': 2, 'truck': 3, 'bus': 4, 'rider': 5, 'rear': 6, 'front': 7}
# Data parameters
keep_difficult = True # use objects considered difficult to detect?
use_focalloss = False
# Model parameters
# Not too many here since the SSD300 has a very specific structure
n_classes = len(label_map) # number of different types of objects
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(opt):
"""
Training and validation.
"""
    global epochs_since_improvement, start_epoch, label_map, best_loss, epoch, checkpoint, lr_scheduler, grad_clip, print_freq
epochs_since_improvement = opt['epochs_since_improvement']
start_epoch = opt['start_epoch']
best_loss = opt['best_loss']
checkpoint = opt['checkpoint']
lr_scheduler = opt['lr_scheduler']
batch_size = opt['batch_size']
epochs = opt['epochs']
lr = opt['lr']
momentum = opt['momentum']
weight_decay = opt['weight_decay']
grad_clip = opt['grad_clip']
workers = opt['workers']
print_freq = opt['print_freq']
root = opt['root']
# Initialize model or load checkpoint
if checkpoint is None:
model = DSOD(n_classes=n_classes)
# Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
biases = list()
not_biases = list()
for param_name, param in model.named_parameters():
if param.requires_grad:
if param_name.endswith('.bias'):
biases.append(param)
else:
not_biases.append(param)
optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * lr}, {'params': not_biases}],
lr=lr, momentum=momentum, weight_decay=weight_decay)
else:
checkpoint = torch.load(checkpoint)
start_epoch = checkpoint['epoch'] + 1
epochs_since_improvement = checkpoint['epochs_since_improvement']
best_loss = checkpoint['best_loss']
print('\nLoaded checkpoint from epoch %d. Best loss so far is %.3f.\n' % (start_epoch, best_loss))
model = checkpoint['model']
# optimizer = checkpoint['optimizer']
# or
# Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
optimizer = torch.optim.SGD(model.parameters(),
lr=lr, momentum=momentum, weight_decay=weight_decay)
print('Learning Rate: ', optimizer.param_groups[-1]['lr'])
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, 'min', factor=0.5, patience=20, verbose=True
)
# Move to default device
model = model.to(device)
criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy, use_focalloss=use_focalloss).to(device)
# Custom dataloaders
train_dataset = mydateset(root='../data', transform=True)
val_dataset = mydateset(root='../data', mode='test')
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
collate_fn=train_dataset.collate_fn, num_workers=workers,
pin_memory=True) # note that we're passing the collate function here
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=True,
collate_fn=val_dataset.collate_fn, num_workers=workers,
pin_memory=True)
# Epochs
for epoch in range(start_epoch, epochs):
# One epoch's training
train(train_loader=train_loader,
model=model,
criterion=criterion,
optimizer=optimizer,
epoch=epoch)
# One epoch's validation
val_loss = validate(val_loader=val_loader,
model=model,
criterion=criterion)
# Did validation loss improve?
is_best = val_loss < best_loss
best_loss = min(val_loss, best_loss)
if lr_scheduler is not None:
lr_scheduler.step(best_loss)
if not is_best:
epochs_since_improvement += 1
print("\nEpochs since last improvement: %d\n" % (epochs_since_improvement,))
else:
epochs_since_improvement = 0
# Save checkpoint
save_checkpoint(epoch, epochs_since_improvement, model, optimizer, val_loss, best_loss, is_best)
def train(train_loader, model, criterion, optimizer, epoch):
"""
One epoch's training.
:param train_loader: DataLoader for training data
:param model: model
:param criterion: MultiBox loss
:param optimizer: optimizer
:param epoch: epoch number
"""
model.train() # training mode enables dropout
batch_time = AverageMeter() # forward prop. + back prop. time
data_time = AverageMeter() # data loading time
losses = AverageMeter() # loss
start = time.time()
# Batches
for i, (images, boxes, labels, masks) in enumerate(train_loader):
data_time.update(time.time() - start)
# Move to default device
images = images.to(device)
boxes = [torch.cat(b).to(device) for b in boxes]
labels = [l.to(device) for l in labels]
masks = torch.cat([m.unsqueeze(0) for m in masks]).to(device)
# Forward prop.
predicted_locs, predicted_scores, segm_score = model(images)
# print(predicted_locs.shape, predicted_scores.shape, segm_score.shape)
# Loss
loss = criterion(predicted_locs, predicted_scores, segm_score, boxes, labels, masks) # scalar
# Backward prop.
optimizer.zero_grad()
loss.backward()
# Clip gradients, if necessary
if grad_clip is not None:
clip_gradient(optimizer, grad_clip)
# Update model
optimizer.step()
losses.update(loss.item(), images.size(0))
batch_time.update(time.time() - start)
start = time.time()
# Print status
if i % print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data Time {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(epoch, i, len(train_loader),
batch_time=batch_time,
data_time=data_time, loss=losses))
del predicted_locs, predicted_scores, images, boxes, labels # free some memory since their histories may be stored
def validate(val_loader, model, criterion):
"""
One epoch's validation.
:param val_loader: DataLoader for validation data
:param model: model
:param criterion: MultiBox loss
:return: average validation loss
"""
model.eval() # eval mode disables dropout
batch_time = AverageMeter()
losses = AverageMeter()
start = time.time()
    # Prohibit gradient computation explicitly because I had some problems with memory
with torch.no_grad():
# Batches
for i, (images, boxes, labels, masks, difficulties) in enumerate(val_loader):
# Move to default device
images = images.to(device)
boxes = [torch.cat(b).to(device) for b in boxes]
labels = [l.to(device) for l in labels]
masks = torch.cat([m.unsqueeze(0) for m in masks]).to(device)
# Forward prop.
predicted_locs, predicted_scores, segm_score = model(images)
# Loss
loss = criterion(predicted_locs, predicted_scores, segm_score, boxes, labels, masks)
losses.update(loss.item(), images.size(0))
batch_time.update(time.time() - start)
start = time.time()
# Print status
if i % print_freq == 0:
print('[{0}/{1}]\t'
'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(i, len(val_loader),
batch_time=batch_time,
loss=losses))
print('\n * LOSS - {loss.avg:.3f}\n'.format(loss=losses))
return losses.avg
if __name__ == '__main__':
train()
``` |
{
"source": "JointFaaS/applications",
"score": 2
} |
#### File: video-convertor-py/split/index.py
```python
import subprocess
from jfstorage import cloudstorage
import logging
import json
import os
import time
import math
LOGGER = logging.getLogger()
MAX_SPLIT_NUM = 100
ROOT = "tmp"
FFMPEG_BIN = "/tmp/ffmpeg"
FFPROBE_BIN = "/tmp/ffprobe"
class FFmpegError(Exception):
def __init__(self, message, status):
super().__init__(message, status)
self.message = message
self.status = status
def exec_FFmpeg_cmd(cmd_lst):
try:
subprocess.check_call(cmd_lst)
except subprocess.CalledProcessError as exc:
LOGGER.error('returncode:{}'.format(exc.returncode))
LOGGER.error('cmd:{}'.format(exc.cmd))
LOGGER.error('output:{}'.format(exc.output))
# log json to Log Service as db
# or insert record in mysql, etc
raise FFmpegError(exc.output, exc.returncode)
def getVideoDuration(input_video):
cmd = '{0} -i {1} -show_entries format=duration -v quiet -of csv="p=0"'.format(
FFPROBE_BIN, input_video)
raw_result = subprocess.check_output(cmd, shell=True)
result = raw_result.decode().replace("\n", "").strip()
duration = float(result)
return duration
def downloadFFmpeg(cs):
if os.path.exists(FFMPEG_BIN):
return
ffmpeg = open(FFMPEG_BIN, 'wb')
ffmpeg.write(cs.getObj('ffmpeg'))
ffmpeg.close()
os.system("chmod 777 " + FFMPEG_BIN)
ffprobe = open(FFPROBE_BIN, 'wb')
ffprobe.write(cs.getObj('ffprobe'))
ffprobe.close()
os.system("chmod 777 " + FFPROBE_BIN)
def handler(event):
cs = cloudstorage.NewCloudStorage()
downloadFFmpeg(cs)
video_key = event['video_key']
segment_time_seconds = event['segment_time_seconds']
video_path = ("/tmp/video.avi")
video = open(video_path, "wb")
video.write(cs.getObj(video_key))
video_duration = getVideoDuration(video_path)
split_num = math.ceil(video_duration/segment_time_seconds)
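    # Cap the number of segments at MAX_SPLIT_NUM by enlarging the segment length if needed.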
if split_num > MAX_SPLIT_NUM:
segment_time_seconds = int(math.ceil(video_duration/MAX_SPLIT_NUM)) + 1
segment_time_seconds = str(segment_time_seconds)
exec_FFmpeg_cmd([FFMPEG_BIN, '-i', video_path, "-c", "copy", "-f", "segment", "-segment_time",
segment_time_seconds, "-reset_timestamps", "1", ROOT + "/split_piece_" + video_key + "%02d.avi"])
split_keys = []
for filename in os.listdir(ROOT):
if filename.startswith('split_'):
split_keys.append(filename)
f = open(os.path.join(ROOT, filename), 'rb')
cs.setObj(filename, f.read())
return {"split_keys": split_keys}
``` |
{
"source": "JointFaaS/cli",
"score": 3
} |
#### File: python3/detect/index.py
```python
import os
def handler(event):
funcName = os.getenv("funcName")
if funcName == None:
return "hello world from aliyun"
else:
return "hello world from HCloud" + funcName
``` |
{
"source": "JointKush/Houdini",
"score": 2
} |
#### File: Houdini/Events/PluginFileEvent.py
```python
import logging
import sys
import importlib
from os.path import sep as pathSeparator
from watchdog.events import FileSystemEventHandler
import twisted.python.rebuild as rebuild
from Houdini.Handlers import Handlers
from Houdini.Events import Events, evaluateHandlerFileEvent, evaluatePluginFileEvent, \
removeHandlersByModule, removeEventsByInstance, createDeepCopy
class PluginFileEventHandler(FileSystemEventHandler):
def __init__(self, server):
self.logger = logging.getLogger("Houdini")
self.server = server
def on_created(self, event):
pluginModuleDetails = evaluateHandlerFileEvent(event)
if not pluginModuleDetails:
return
pluginModulePath, pluginModule = pluginModuleDetails
self.logger.debug("New handler module detected %s", pluginModule)
try:
pluginModuleObject = importlib.import_module(pluginModule)
pluginClass = pluginModuleObject.__name__.split(".")[2]
pluginObject = getattr(pluginModuleObject, pluginClass)(self.server)
self.server.plugins[pluginClass] = pluginObject
self.logger.info("New plugin '%s' has been loaded." % pluginClass)
except Exception as importError:
self.logger.error("%s detected in %s, not importing.", importError.__class__.__name__, pluginModule)
def on_deleted(self, event):
pluginModulePath = event.src_path[2:]
pluginModule = pluginModulePath.replace(pathSeparator, ".")
if pluginModule not in sys.modules:
return
self.logger.debug("Deleting listeners registered by %s..", pluginModule)
pluginModuleObject = sys.modules[pluginModule]
pluginClass = pluginModuleObject.__name__.split(".")[2]
del self.server.plugins[pluginClass]
removeEventsByInstance(pluginModuleObject)
pluginModulePath += "{}__init__.py".format(pathSeparator)
removeHandlersByModule(pluginModulePath)
def on_modified(self, event):
pluginModuleDetails = evaluatePluginFileEvent(event)
if not pluginModuleDetails:
return
pluginModulePath, pluginModule = pluginModuleDetails
if pluginModule not in sys.modules:
return
self.logger.info("Reloading %s", pluginModule)
xtHandlersCollection, xmlHandlersCollection = removeHandlersByModule(pluginModulePath)
eventHandlersCollection = createDeepCopy(Events.EventHandlers)
pluginModuleObject = sys.modules[pluginModule]
pluginClass = pluginModuleObject.__name__.split(".")[2]
try:
removeEventsByInstance(pluginModuleObject)
newPluginModule = rebuild.rebuild(pluginModuleObject)
newPluginObject = getattr(newPluginModule, pluginClass)(self.server)
self.server.plugins[pluginClass] = newPluginObject
self.logger.info("Successfully reloaded %s!", pluginModule)
except LookupError as lookupError:
self.logger.warn("Did not reload plugin '%s': %s." % (pluginClass, lookupError.message))
except Exception as rebuildError:
self.logger.error("%s detected in %s, not reloading.", rebuildError.__class__.__name__, pluginModule)
self.logger.info("Restoring handler references...")
Handlers.XTHandlers = xtHandlersCollection
Handlers.XMLHandlers = xmlHandlersCollection
Events.EventHandlers = eventHandlersCollection
self.logger.info("Handler references restored. Phew!")
```
#### File: Handlers/Games/CardFire.py
```python
import random, itertools
from twisted.internet import reactor
from Houdini.Handlers import Handlers, XT
from Houdini.Handlers.Games.CardJitsu import CardEventHandler, sendStampsEarned
from Houdini.Handlers.Games.Waddle import WaddleHandler
class FireOpponent(object):
def __init__(self, seatId, penguin):
self.seatId, self.penguin = seatId, penguin
self.energy = 6
self.state = 0
self.cardChosen = None
self.deck = []
self.ready = False
self.battleTimeout = None
self.energyWon = 0
class CardFire(object):
def __init__(self, penguins, seats):
self.penguins, self.seats = penguins, seats
self.board = ["b", "s", "w", "f", "c",
"s", "f", "w", "b", "s",
"w", "f", "c", "w", "s", "f"]
self.currentPlayer = None
self.spinAmount = 1
self.moveClockwise = 0
self.moveAnticlockwise = 0
self.tabId = 0
self.currentBattleState = 0
self.currentBattleElement = None
self.currentBattleType = "bt"
self.highestBattleCard = 0
self.isBattleTie = False
self.finishPositions = [0 for _ in xrange(seats)]
self.finishPosition = seats
self.boardIds = [0, 8, 4, 12][:seats]
self.opponents = []
self.battleOpponents = []
self.rankSpeed = 2
for seatId, penguin in enumerate(self.penguins):
penguin.joinRoom(997)
penguin.waddle = self
fireOpponent = FireOpponent(seatId, penguin)
self.opponents.append(fireOpponent)
for _ in xrange(5):
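                # Only draw a card the player owns more copies of than are already in this
                # 5-card hand, so the dealt hand never exceeds their real collection.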
usableDeck = [card for card in penguin.cards if sum(find.Id == card.Id for find in penguin.cards) >
sum(find.Id == card.Id for find in fireOpponent.deck)]
fireOpponent.deck.append(random.choice(usableDeck))
self.playerCycle = itertools.cycle(self.opponents)
self.getNextTurn()
self.boardTimeout = reactor.callLater(22, self.boardTimeoutCallback)
self.spin()
def boardTimeoutCallback(self):
self.tabId = 1
self.currentPlayer.penguin.sendXt("zm", "tb")
chooseBoardId(self.currentPlayer.penguin, self.moveAnticlockwise, True)
def startBattleTimeouts(self):
for opponent in self.battleOpponents:
opponent.battleTimeout = reactor.callLater(22, self.battleTimeoutCallback, opponent)
def battleTimeoutCallback(self, opponent):
self.currentPlayer.penguin.sendXt("zm", "tc")
playableCards = self.getPlayableCards(opponent)
cardIndex = random.choice(playableCards)
chooseCard(opponent.penguin, cardIndex)
def getWinnerSeatId(self, firstCard, secondCard):
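        # Element rules: fire beats snow, water beats fire, snow beats water;
        # matching elements fall back to comparing card values.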
if firstCard.Element != secondCard.Element:
ruleSet = {"f": "s", "w": "f", "s": "w"}
return 0 if ruleSet[firstCard.Element] == secondCard.Element else 1
elif firstCard.Value > secondCard.Value:
return 0
elif secondCard.Value > firstCard.Value:
return 1
return -1
def resolveBattle(self):
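        # "be" is a one-on-one element battle between two opponents; "bt" is a trump battle
        # where every player's card is judged against the chosen battle element.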
if self.currentBattleType == "be":
firstOpponent, secondOpponent = self.battleOpponents[:2]
firstCard = firstOpponent.deck[firstOpponent.cardChosen]
secondCard = secondOpponent.deck[secondOpponent.cardChosen]
winnerSeatId = self.getWinnerSeatId(firstCard, secondCard)
if winnerSeatId == 0:
firstOpponent.state, secondOpponent.state = (4, 1)
firstOpponent.energy += 1
secondOpponent.energy -= 1
firstOpponent.energyWon += 1
elif winnerSeatId == 1:
firstOpponent.state, secondOpponent.state = (1, 4)
firstOpponent.energy -= 1
secondOpponent.energy += 1
secondOpponent.energyWon += 1
else:
firstOpponent.state, secondOpponent.state = (2, 2)
self.currentBattleElement = firstCard.Element if winnerSeatId == 0 else secondCard.Element
elif self.currentBattleType == "bt":
for opponent in self.battleOpponents:
card = opponent.deck[opponent.cardChosen]
if card.Element != self.currentBattleElement:
opponent.state = 1
opponent.energy -= 1
elif card.Value == self.highestBattleCard and self.isBattleTie:
opponent.state = 2
elif card.Value == self.highestBattleCard:
opponent.state = 3
else:
opponent.state = 1
opponent.energy -= 1
def getNextTurn(self):
self.currentPlayer = next(self.playerCycle)
while self.currentPlayer not in self.opponents:
self.currentPlayer = next(self.playerCycle)
return self.currentPlayer
def spin(self):
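        # Roll 1-6 and work out the clockwise / anti-clockwise destination squares
        # on the 16-space board.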
self.spinAmount = random.randrange(1, 7)
playerPosition = self.boardIds[self.currentPlayer.seatId]
self.moveClockwise = (playerPosition + self.spinAmount) % 16
self.moveAnticlockwise = (playerPosition - self.spinAmount) % 16
def remove(self, penguin, isQuit=True):
penguinIndex = self.penguins.index(penguin)
opponent = self.opponents[penguinIndex]
if isQuit:
self.finishPosition -= 1
for player in self.penguins:
player.sendXt("zm", "cz", opponent.seatId)
if opponent == self.currentPlayer and 0 <= self.currentBattleState <= 2:
self.boardTimeout.cancel()
if len(self.opponents) > 2:
self.boardTimeoutCallback()
if opponent.battleTimeout is not None:
opponent.battleTimeout.cancel()
if len(self.opponents) > 2:
self.battleTimeoutCallback(opponent)
self.boardIds[penguinIndex] = -1
self.opponents.remove(opponent)
self.penguins.remove(penguin)
if len(self.opponents) == 1:
opponent = self.opponents[0]
if self.finishPositions[opponent.seatId] == 0:
self.finishPositions[opponent.seatId] = 1
finishPositions = ",".join(str(position) for position in self.finishPositions)
opponent.penguin.sendXt("zm", "zo", finishPositions)
self.remove(opponent.penguin, False)
penguin.waddle = None
self.seats -= 1
sendStampsEarned(penguin, 32)
def getPlayableCards(self, opponent):
if self.currentBattleType == "bt":
playableCards = [cardIndex for cardIndex, card in enumerate(opponent.deck)
if card.Element == self.currentBattleElement]
if playableCards:
return playableCards
return range(5)
def getSeatId(self, penguin):
return self.penguins.index(penguin)
def sendXt(self, *data):
for penguin in self.penguins:
penguin.sendXt(*data)
class FireMat(CardFire):
def __init__(self, penguins, seats):
super(FireMat, self).__init__(penguins, seats)
self.rankSpeed = 3
@Handlers.Handle(XT.GetGame)
@WaddleHandler(CardFire, FireMat)
def handleGetGame(self, data):
mySeatId = self.waddle.getSeatId(self)
self.sendXt("gz", self.waddle.seats, len(self.waddle.penguins))
self.sendXt("jz", mySeatId)
nicknames = ",".join([player.user.Nickname for player in self.waddle.penguins])
colors = ",".join([str(player.user.Color) for player in self.waddle.penguins])
energy = ",".join([str(opponent.energy) for opponent in self.waddle.opponents])
boardIds = ",".join(map(str, self.waddle.boardIds))
playerRanks = ",".join([str(player.user.FireNinjaRank) for player in self.waddle.penguins])
myCardIds = ",".join([str(card.Id) for card in self.waddle.opponents[mySeatId].deck])
spinResult = ",".join(map(str, [self.waddle.spinAmount,
self.waddle.moveClockwise, self.waddle.moveAnticlockwise]))
self.sendXt("sz", self.waddle.currentPlayer.seatId, nicknames, colors, energy,
boardIds, myCardIds, spinResult, playerRanks)
@Handlers.Handle(XT.SendMove)
@WaddleHandler(CardFire, FireMat)
@CardEventHandler("is")
def handleInfoClickSpinner(self, data):
event, seatId, tabId = data.Move
if not 0 <= int(tabId) <= 6:
return
self.waddle.tabId = int(tabId)
self.waddle.sendXt("zm", "is", seatId, tabId)
def chooseBoardId(self, boardId, isAutoPlay=False):
mySeatId = self.waddle.getSeatId(self)
externalSeatId = self.waddle.opponents[mySeatId].seatId
if not isAutoPlay or self.waddle.currentBattleState == 0:
self.waddle.boardIds[externalSeatId] = boardId
boardIds = ",".join(map(str, self.waddle.boardIds))
element = self.waddle.board[boardId]
self.waddle.sendXt("zm", "ub", externalSeatId, boardIds, self.waddle.tabId)
self.waddle.currentBattleType = "bt"
self.waddle.battleOpponents = self.waddle.opponents
else:
boardId = self.waddle.currentPlayer.boardId
element = self.waddle.board[boardId]
opponentsOnTab = []
for opponent in self.waddle.opponents:
if opponent.seatId != externalSeatId and self.waddle.boardIds[opponent.seatId] == boardId:
opponentsOnTab.append(str(opponent.seatId))
if len(opponentsOnTab) > 0:
if isAutoPlay:
self.waddle.currentBattleType = "be"
opponent = [opponent for opponent in self.waddle.opponents if opponent.seatId != externalSeatId
and self.waddle.boardIds[opponent.seatId] == boardId][0]
self.waddle.battleOpponents = [self.waddle.opponents[mySeatId], opponent]
self.waddle.sendXt("zm", "sb", self.waddle.currentBattleType, str(externalSeatId) + "," +
str(opponent.seatId), self.waddle.currentBattleElement)
self.waddle.currentBattleState = 3
self.waddle.startBattleTimeouts()
else:
self.waddle.currentBattleState = 2
self.waddle.currentBattleElement = element
opponents = ",".join(opponentsOnTab)
self.sendXt("zm", "co", 0, opponents)
elif element in self.waddle.board[1:4]:
seatIds = ",".join([str(opponent.seatId) for opponent in self.waddle.opponents])
self.waddle.currentBattleElement = element
self.waddle.currentBattleState = 3
self.waddle.sendXt("zm", "sb", self.waddle.currentBattleType, seatIds, element)
self.waddle.startBattleTimeouts()
elif element == "c":
if isAutoPlay:
self.waddle.currentBattleElement = random.choice(self.waddle.board[1:4])
seatIds = ",".join([str(opponent.seatId) for opponent in self.waddle.opponents])
self.waddle.sendXt("zm", "sb", self.waddle.currentBattleType, seatIds, self.waddle.currentBattleElement)
self.waddle.currentBattleState = 3
self.waddle.startBattleTimeouts()
else:
self.waddle.currentBattleState = 1
self.sendXt("zm", "ct")
elif element == "b":
if isAutoPlay:
self.waddle.currentBattleType = "be"
opponent = [opponent for opponent in self.waddle.opponents if opponent.seatId != externalSeatId][0]
self.waddle.battleOpponents = [self.waddle.opponents[mySeatId], opponent]
self.waddle.sendXt("zm", "sb", self.waddle.currentBattleType, str(externalSeatId) + "," +
str(opponent.seatId), self.waddle.currentBattleElement)
self.waddle.currentBattleState = 3
self.waddle.startBattleTimeouts()
else:
self.waddle.currentBattleState = 2
self.waddle.currentBattleElement = element
opponents = ",".join([str(opponent.seatId) for opponent in self.waddle.opponents if opponent.seatId != externalSeatId])
self.sendXt("zm", "co", 0, opponents)
@Handlers.Handle(XT.SendMove)
@WaddleHandler(CardFire, FireMat)
@CardEventHandler("cb")
def handleChooseBoardId(self, data):
event, boardId = data.Move
mySeatId = self.waddle.getSeatId(self)
if mySeatId == self.waddle.opponents.index(self.waddle.currentPlayer) and self.waddle.currentBattleState == 0:
if int(boardId) != self.waddle.moveClockwise and int(boardId) != self.waddle.moveAnticlockwise:
return
self.waddle.boardTimeout.cancel()
chooseBoardId(self, int(boardId))
@Handlers.Handle(XT.SendMove)
@WaddleHandler(CardFire, FireMat)
@CardEventHandler("co")
def handleChooseOpponent(self, data):
event, opponentSeatId = data.Move
mySeatId = self.waddle.getSeatId(self)
externalSeatId = self.waddle.opponents[mySeatId].seatId
seatIds = [opponent.seatId for opponent in self.waddle.opponents if opponent.seatId != externalSeatId]
if not int(opponentSeatId) in seatIds:
return
if mySeatId == self.waddle.opponents.index(self.waddle.currentPlayer) and self.waddle.currentBattleState == 2:
self.waddle.currentBattleType = "be"
self.waddle.battleOpponents = [self.waddle.opponents[mySeatId]]
for opponent in self.waddle.opponents:
if opponent.seatId == int(opponentSeatId):
self.waddle.battleOpponents.append(opponent)
self.waddle.sendXt("zm", "sb", self.waddle.currentBattleType, str(externalSeatId) + "," +
opponentSeatId, self.waddle.currentBattleElement)
self.waddle.currentBattleState = 3
self.waddle.startBattleTimeouts()
@Handlers.Handle(XT.SendMove)
@WaddleHandler(CardFire, FireMat)
@CardEventHandler("ct")
def handleChooseTrump(self, data):
event, element = data.Move
if element not in self.waddle.board[1:4]:
return
mySeatId = self.waddle.getSeatId(self)
if mySeatId == self.waddle.opponents.index(self.waddle.currentPlayer) and self.waddle.currentBattleState == 1:
self.waddle.currentBattleElement = element
seatIds = ",".join([str(opponent.seatId) for opponent in self.waddle.opponents])
self.waddle.sendXt("zm", "sb", self.waddle.currentBattleType, seatIds, element)
self.waddle.currentBattleState = 3
self.waddle.startBattleTimeouts()
def chooseCard(self, cardIndex):
seatId = self.waddle.getSeatId(self)
waddle = self.waddle
if cardIndex not in waddle.getPlayableCards(waddle.opponents[seatId]):
return
waddle.opponents[seatId].cardChosen = cardIndex
waddle.opponents[seatId].battleTimeout = None
for opponent in self.waddle.opponents:
if opponent.penguin != self:
opponent.penguin.sendXt("zm", "ic", waddle.opponents[seatId].seatId)
card = waddle.opponents[seatId].deck[cardIndex]
if card.Value == waddle.highestBattleCard:
self.waddle.isBattleTie = True
if card.Value > waddle.highestBattleCard:
waddle.highestBattleCard, waddle.isBattleTie = card.Value, False
cardsChosen = None not in [opponent.cardChosen for opponent in waddle.battleOpponents]
if cardsChosen:
seatIds = ",".join([str(opponent.seatId) for opponent in waddle.battleOpponents])
cardIds = ",".join([str(opponent.deck[opponent.cardChosen].Id) for opponent in waddle.battleOpponents])
waddle.resolveBattle()
energy = ",".join([str(opponent.energy) for opponent in waddle.battleOpponents])
states = ",".join([str(opponent.state) for opponent in waddle.battleOpponents])
for opponent in waddle.opponents:
if not opponent.energy:
waddle.finishPositions[opponent.seatId] = waddle.finishPosition
waddle.finishPosition -= 1
if waddle.finishPositions.count(0) == 1:
winnerIndex = waddle.finishPositions.index(0)
waddle.finishPositions[winnerIndex] = 1
finishPositions = ",".join(str(position) for position in waddle.finishPositions)
for opponent in list(waddle.opponents):
myCardIds = ",".join([str(card.Id) for card in opponent.deck])
opponent.penguin.sendXt("zm", "rb", seatIds, cardIds, energy, states, waddle.currentBattleType + ","
+ waddle.currentBattleElement, myCardIds, finishPositions)
if not opponent.energy or not waddle.finishPositions.count(0):
if 0 in waddle.finishPositions:
finishPositions = [1]*len(waddle.finishPositions)
finishPositions[opponent.seatId] = waddle.finishPositions[opponent.seatId]
finishPositions = ",".join(str(position) for position in finishPositions)
playerFinishPosition = waddle.finishPositions[opponent.seatId]
if playerFinishPosition == 1:
opponent.penguin.user.FireMatchesWon += 1
if opponent.penguin.user.FireMatchesWon >= 10:
opponent.penguin.addStamp(252, True)
if opponent.penguin.user.FireMatchesWon >= 50:
opponent.penguin.addStamp(268, True)
if opponent.energy >= 6:
opponent.penguin.addStamp(260, True)
if opponent.energyWon >= 1:
opponent.penguin.addStamp(254, True)
if opponent.energyWon >= 3:
opponent.penguin.addStamp(266, True)
if opponent.penguin.user.FireNinjaRank < 4:
currentRank = opponent.penguin.user.FireNinjaRank
progressPoints = 100 / waddle.rankSpeed / (currentRank + 1) / playerFinishPosition
opponent.penguin.user.FireNinjaProgress += progressPoints
if opponent.penguin.user.FireNinjaProgress >= 100:
awardItems = [6025, 4120, 2013, 1086]
rankStamps = {2: 256, 4: 262}
opponent.penguin.user.FireNinjaRank += 1
if opponent.penguin.user.FireNinjaRank in rankStamps:
opponent.penguin.addStamp(rankStamps[opponent.penguin.user.FireNinjaRank], True)
opponent.penguin.sendXt("zm", "nr", 0, opponent.penguin.user.FireNinjaRank)
opponent.penguin.addItem(awardItems[opponent.penguin.user.FireNinjaRank - 1], sendXt=False)
opponent.penguin.user.FireNinjaProgress %= 100
opponent.penguin.sendXt("zm", "zo", finishPositions)
waddle.remove(opponent.penguin, False)
waddle.currentBattleState = 0
waddle.highestBattleCard, waddle.isBattleTie = 0, False
@Handlers.Handle(XT.SendMove)
@WaddleHandler(CardFire, FireMat)
@CardEventHandler("ir")
def handleInfoReadySync(self, data):
seatId = self.waddle.getSeatId(self)
self.waddle.opponents[seatId].ready = True
if self.waddle.currentBattleState != 0:
return
if all([opponent.ready for opponent in self.waddle.opponents]):
nextPlayer = self.waddle.getNextTurn()
self.waddle.spin()
spinResult = ",".join(map(str, [self.waddle.spinAmount,
self.waddle.moveClockwise, self.waddle.moveAnticlockwise]))
for opponent in self.waddle.opponents:
if opponent in self.waddle.battleOpponents:
usableDeck = [card for card in opponent.penguin.cards if
sum(find.Id == card.Id for find in opponent.penguin.cards) >
sum(find.Id == card.Id for find in opponent.deck)]
opponent.deck[opponent.cardChosen] = random.choice(usableDeck)
myCardIds = ",".join([str(card.Id) for card in opponent.deck])
opponent.penguin.sendXt("zm", "nt", nextPlayer.seatId, spinResult, myCardIds)
self.waddle.boardTimeout = reactor.callLater(22, self.waddle.boardTimeoutCallback)
for opponent in self.waddle.opponents:
opponent.cardChosen = None
opponent.ready = False
@Handlers.Handle(XT.SendMove)
@WaddleHandler(CardFire, FireMat)
@CardEventHandler("cc")
def handleSendChooseCard(self, data):
event, cardIndex = data.Move
if self.waddle.currentBattleState != 3:
return
if not 0 <= int(cardIndex) <= 4:
return
seatId = self.waddle.getSeatId(self)
if self.waddle.opponents[seatId].cardChosen is not None:
return
self.waddle.opponents[seatId].battleTimeout.cancel()
chooseCard(self, int(cardIndex))
@Handlers.Handle(XT.LeaveGame)
@WaddleHandler(CardFire, FireMat)
def handleLeaveGame(self, data):
sendStampsEarned(self, 32)
class FireSensei(CardFire):
def __init__(self, penguin):
super(FireSensei, self).__init__([penguin], 2)
self.penguin = penguin
penguin.waddle = self
self.senseiOpponent = FireOpponent(1, penguin)
self.opponents.append(self.senseiOpponent)
def boardTimeoutCallback(self):
pass
def startBattleTimeouts(self):
pass
def battleTimeoutCallback(self, opponent):
pass
def remove(self, penguin, isQuit=False):
penguin.waddle = None
```
#### File: Handlers/Games/MatchMaking.py
```python
from itertools import izip
from twisted.internet import task, reactor
from Houdini.Handlers import Handlers, XT
from Houdini.Handlers.Games.CardJitsu import CardJitsu, CardSensei
from Houdini.Handlers.Games.CardFire import CardFire, FireSensei
MatchMakers = {
951: (CardJitsu, CardSensei, 2, "NinjaRank", 2),
953: (CardFire, FireSensei, 4, "FireNinjaRank", 4)
}
class MatchMaking(object):
def __init__(self):
self.penguins = []
self.ticker = task.LoopingCall(self.tick)
def tick(self):
for roomId, matchMaker in MatchMakers.iteritems():
cardGame, senseiGame, maxPlayers, sortBy, delay = matchMaker
penguins = [penguin for penguin in self.penguins if penguin.room.Id == roomId]
penguins.sort(key=lambda player: getattr(player.user, sortBy))
matchCount = len(penguins) % maxPlayers
matchSize = max(2, maxPlayers if matchCount == 0 else matchCount)
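            # izip(*[iter(penguins)] * matchSize) walks the sorted queue in consecutive
            # chunks of matchSize players, pairing penguins of similar rank.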
for matchedPenguins in izip(*[iter(penguins)] * matchSize):
nicknames = "%".join([penguin.user.Nickname + "|" + str(penguin.user.Color) if maxPlayers > 2
else penguin.user.Nickname for penguin in matchedPenguins])
for penguin in matchedPenguins:
penguin.tick -= 1
if penguin.tick < -1:
for removePenguin in matchedPenguins:
removePenguin.sendXt("scard", 0, 0, len(matchedPenguins), 0, nicknames)
# delay on the server because start waddle is unreliable!
if penguin.tick == -delay:
cardGame(list(matchedPenguins), matchSize)
matchedPenguins = []
break
for matchedPenguin in matchedPenguins:
if maxPlayers > 2:
matchedPenguin.sendXt("tmm", len(matchedPenguins), matchedPenguin.tick, nicknames)
else:
matchedPenguin.sendXt("tmm", matchedPenguin.tick, nicknames)
def add(self, penguin):
if penguin not in self.penguins:
self.penguins.append(penguin)
penguin.tick = 10
if len(self.penguins) == 2:
self.ticker.start(1)
def remove(self, penguin):
if penguin in self.penguins:
self.penguins.remove(penguin)
if len(self.penguins) == 1:
self.ticker.stop()
@Handlers.Handle(XT.JoinMatchMaking)
def handleJoinMatchMaking(self, data):
if self.room.Id in MatchMakers:
self.server.matchMaker.add(self)
self.sendXt("jmm", self.user.Username)
@Handlers.Handle(XT.LeaveMatchMaking)
def handleLeaveMatchMaking(self, data):
self.server.matchMaker.remove(self)
@Handlers.Handle(XT.JoinSensei)
def handleJoinSensei(self, data):
if self.room.Id in MatchMakers:
cardGame, senseiGame, maxPlayers, sortBy, delay = MatchMakers[self.room.Id]
if maxPlayers > 2:
self.sendXt("scard", 0, 0, 1, 0, self.user.Nickname + "|" + str(self.user.Color))
reactor.callLater(delay, senseiGame, self)
else:
senseiGame(self)
```
#### File: Handlers/Play/Ignore.py
```python
from Houdini.Handlers import Handlers, XT
from Houdini.Data.Penguin import Penguin, IgnoreList
@Handlers.Handle(XT.GetIgnoreList)
@Handlers.Throttle(-1)
def handleGetIgnoreList(self, data):
ignoreString = "%".join(["{}|{}".format(ignoreId, ignoreUsername) for ignoreId, ignoreUsername in self.ignore.items()])
self.sendXt("gn", ignoreString)
@Handlers.Handle(XT.AddIgnore)
def handleAddIgnore(self, data):
if data.PlayerId in self.buddies:
return
if data.PlayerId in self.ignore:
return
ignoreUser = self.session.query(Penguin.Username, Penguin.ID).\
filter(Penguin.ID == data.PlayerId).first()
self.ignore[data.PlayerId] = ignoreUser.Username
ignore = IgnoreList(PenguinID=self.user.ID, IgnoreID=ignoreUser.ID)
self.session.add(ignore)
@Handlers.Handle(XT.RemoveIgnore)
def handleRemoveIgnore(self, data):
if data.PlayerId not in self.ignore:
return
del self.ignore[data.PlayerId]
self.session.query(IgnoreList).filter_by(PenguinID=self.user.ID, IgnoreID=data.PlayerId).delete()
```
#### File: Handlers/Redemption/__init__.py
```python
from datetime import datetime
from Houdini.Handlers import Handlers, XT
from Houdini.Data.Redemption import RedemptionCode, RedemptionAward, PenguinRedemption
@Handlers.Handle(XT.JoinRedemption)
@Handlers.Throttle(-1)
def handleJoinRedemption(self, data):
if int(data.ID) != self.user.ID:
return self.transport.loseConnection()
if data.LoginKey == "":
return self.transport.loseConnection()
if data.LoginKey != self.user.LoginKey:
self.user.LoginKey = ""
return self.sendErrorAndDisconnect(101)
self.sendXt("rjs", "", 1)
@Handlers.Handle(XT.SendCode)
@Handlers.Throttle(2)
def handleSendCode(self, data):
code = self.session.query(RedemptionCode).filter(RedemptionCode.Code == data.Code).first()
if code is None:
return self.sendError(720)
redeemed = self.session.query(PenguinRedemption).filter_by(PenguinID=self.user.ID, CodeID=code.ID).scalar()
if redeemed is not None:
return self.sendError(721)
if code.Expires is not None and code.Expires < datetime.now():
return self.sendError(726)
awards = self.session.query(RedemptionAward.Award).filter_by(CodeID=code.ID)
awardIds = [awardId for awardId, in awards]
if code.Type == "GOLDEN":
return self.sendXt("rsc", "GOLDEN", self.user.NinjaRank, self.user.FireNinjaRank, self.user.WaterNinjaRank,
int(self.user.FireNinjaRank > 0), int(self.user.WaterNinjaRank > 0))
if code.Type == "CARD":
self.addCards(*awardIds)
else:
for itemId in awardIds:
self.addItem(itemId)
self.session.add(PenguinRedemption(PenguinID=self.user.ID, CodeID=code.ID))
self.user.Coins += code.Coins
self.sendXt("rsc", code.Type, ",".join(map(str, awardIds)), code.Coins)
@Handlers.Handle(XT.SendGoldenChoice)
@Handlers.Throttle(2)
def handleSendGoldenChoice(self, data):
awards = self.session.query(RedemptionCode, RedemptionAward.Award)\
.join(RedemptionAward, RedemptionAward.CodeID == RedemptionCode.ID)\
.filter(RedemptionCode.Code == data.Code)
if awards is None:
return self.transport.loseConnection()
if awards.count() < 6:
return self.transport.loseConnection()
cardIds = [awardId for code, awardId in awards]
redeemed = self.session.query(PenguinRedemption).filter_by(CodeID=code.ID).scalar()
if redeemed is not None:
return self.transport.loseConnection()
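    # Choice 1 awards the first four cards plus a ninja rank-up; choice 2 awards all six cards.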
if data.Choice == 1:
cardIds = cardIds[:4]
self.ninjaRankUp()
self.sendXt("rsgc", ",".join(map(str, cardIds)) + "|" + str(self.user.NinjaRank))
elif data.Choice == 2:
self.sendXt("rsgc", ",".join(map(str, cardIds[:4])) + "|" + ",".join(map(str, cardIds[-2:])))
self.addCards(*cardIds)
self.session.add(PenguinRedemption(PenguinID=self.user.ID, CodeID=code.ID))
```
#### File: Houdini/Houdini/Penguin.py
```python
import time
from beaker.cache import region_invalidate as Invalidate
from Houdini.Spheniscidae import Spheniscidae
from Houdini.Data.Penguin import Inventory, IglooInventory, FurnitureInventory
from Houdini.Data.Puffle import Puffle
from Houdini.Data.Postcard import Postcard
from Houdini.Data.Stamp import Stamp
from Houdini.Data.Deck import Deck
from Houdini.Data import retryableTransaction
from Houdini.Handlers.Games.Table import leaveTable
from Houdini.Handlers.Games.Waddle import leaveWaddle
from Houdini.Handlers.Play.Stampbook import getStampsString
class Penguin(Spheniscidae):
def __init__(self, session, spirit):
super(Penguin, self).__init__(session, spirit)
self.user = None
self.throttle = {}
self.frame = 1
self.x, self.y = (0, 0)
self.age = 0
self.muted = False
self.playerString = None
self.table = None
self.waddle = None
self.gameFinished = True
self.logger.info("Penguin class instantiated")
def addItem(self, itemId, itemCost=0, sendXt=True):
if itemId in self.inventory:
return False
self.inventory.append(itemId)
self.session.add(Inventory(PenguinID=self.user.ID, ItemID=itemId))
self.user.Coins -= itemCost
if sendXt:
self.sendXt("ai", itemId, self.user.Coins)
def addIgloo(self, iglooId, iglooCost=0):
if iglooId in self.igloos:
return False
self.igloos.append(iglooId)
self.session.add(IglooInventory(PenguinID=self.user.ID, IglooID=iglooId))
self.user.Coins -= iglooCost
self.sendXt("au", iglooId, self.user.Coins)
def addFurniture(self, furnitureId, furnitureCost=0):
furnitureQuantity = 1
if furnitureId in self.furniture:
furnitureQuantity = self.furniture[furnitureId]
furnitureQuantity += 1
if furnitureQuantity >= 100:
return False
self.session.query(FurnitureInventory).filter_by(PenguinID=self.user.ID, FurnitureID=furnitureId) \
.update({"Quantity": furnitureQuantity})
else:
self.session.add(FurnitureInventory(PenguinID=self.user.ID, FurnitureID=furnitureId))
self.furniture[furnitureId] = furnitureQuantity
self.user.Coins -= furnitureCost
self.sendXt("af", furnitureId, self.user.Coins)
def addFlooring(self, floorId, floorCost=0):
self.user.Coins -= floorCost
self.igloo.Floor = floorId
self.sendXt("ag", floorId, self.user.Coins)
def addStamp(self, stampId, sendXt=False):
if stampId in self.stamps:
return False
self.stamps.append(stampId)
self.recentStamps.append(stampId)
self.session.add(Stamp(PenguinID=self.user.ID, Stamp=stampId))
if sendXt:
self.sendXt("aabs", stampId)
Invalidate(getStampsString, 'houdini', 'stamps', self.user.ID)
def addCards(self, *args):
for cardId in args:
cardQuantity = 1
if cardId in self.deck:
cardQuantity = self.deck[cardId]
cardQuantity += 1
self.session.query(Deck).filter_by(PenguinID=self.user.ID, CardID=cardId) \
.update({"Quantity": cardQuantity})
else:
self.session.add(Deck(PenguinID=self.user.ID, CardID=cardId))
self.deck[cardId] = cardQuantity
self.cards.append(self.server.cards[cardId])
def ninjaRankUp(self, levels=1):
rankAwards = [4025, 4026, 4027, 4028, 4029, 4030, 4031, 4032, 4033, 104]
beltPostcards = {1: 177, 5: 178, 9: 179}
beltStamps = {1: 230, 5: 232, 9: 234, 10: 236}
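# rankAwards holds the item id granted at each rank (index = rank - 1); postcards and
# stamps are only sent when the ranks listed in beltPostcards/beltStamps are reached.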
for i in xrange(levels):
if self.user.NinjaRank == 10:
return False
self.user.NinjaRank += 1
self.user.NinjaProgress = 0
self.addItem(rankAwards[self.user.NinjaRank - 1], sendXt=False)
if self.user.NinjaRank in beltPostcards:
self.receiveSystemPostcard(beltPostcards[self.user.NinjaRank])
if self.user.NinjaRank in beltStamps:
self.addStamp(beltStamps[self.user.NinjaRank], True)
def joinRoom(self, roomId):
self.room.remove(self)
self.server.rooms[roomId].add(self)
@retryableTransaction()
def receiveSystemPostcard(self, postcardId, details=""):
postcard = Postcard(RecipientID=self.user.ID, SenderID=None, Details=details, Type=postcardId)
self.session.add(postcard)
self.session.commit()
self.sendXt("mr", "sys", 0, postcardId, details, int(time.time()), postcard.ID)
def sendCoins(self, coinAmount):
self.user.Coins = coinAmount
self.sendXt("zo", self.user.Coins, "", 0, 0, 0)
def getPlayerString(self):
playerArray = (
self.user.ID,
self.user.Nickname,
self.user.Approval,
self.user.Color,
self.user.Head,
self.user.Face,
self.user.Neck,
self.user.Body,
self.user.Hand,
self.user.Feet,
self.user.Flag,
self.user.Photo,
self.x, self.y,
self.frame,
1, self.age
)
playerStringArray = map(str, playerArray)
self.playerString = "|".join(playerStringArray)
return self.playerString
def connectionLost(self, reason):
if hasattr(self, "room") and self.room is not None:
self.room.remove(self)
puffleId = self.session.query(Puffle.ID) \
.filter(Puffle.PenguinID == self.user.ID, Puffle.Walking == 1).scalar()
if puffleId is not None:
self.user.Hand = 0
self.session.query(Puffle).filter(Puffle.ID == puffleId).update({"Walking": 0})
for buddyId in self.buddies.keys():
if buddyId in self.server.players:
self.server.players[buddyId].sendXt("bof", self.user.ID)
loginUnix = time.mktime(self.login.Date.timetuple())
minutesPlayed = int(time.time() - loginUnix) / 60
self.user.MinutesPlayed += minutesPlayed
self.session.add(self.login)
self.server.redis.srem("%s.players" % self.server.serverName, self.user.ID)
self.server.redis.decr("%s.population" % self.server.serverName)
super(Penguin, self).connectionLost(reason)
```
#### File: Plugins/Example/__init__.py
```python
import zope.interface, logging
from Houdini.Plugins import Plugin
from Houdini.Handlers import Handlers
from Houdini.Events import Events
class Example(object):
zope.interface.implements(Plugin)
author = "<NAME>"
version = 0.1
description = "A plugin to verify plugin system functionality and demonstrate implementation"
def __init__(self, server):
self.logger = logging.getLogger("Houdini")
self.server = server
Handlers.Login += self.handleLogin
# Only do this if the server is a world server
if self.server.server["World"]:
Handlers.JoinWorld += self.handleJoinWorld
Handlers.JoinWorld -= self.handleJoinWorld
Events.Connected += self.handleConnection
Events.Disconnected += self.handleDisconnection
def handleJoinWorld(self, player, data):
self.logger.info("[Example] Holy smokes!")
def handleLogin(self, player, data):
self.logger.info("[Example] %s is trying to login" % data.Username)
def handleConnection(self, player):
self.logger.info("[Example] New player connected, woohoo!")
def handleDisconnection(self, player):
self.logger.info("[Example] Aw, that sucks :-(")
def ready(self):
self.logger.info("Example plugin is ready!")
```
#### File: Houdini/Plugins/__init__.py
```python
import zope.interface
class Plugin(zope.interface.Interface):
"""
Plugin interface which all plugins *must* implement.
"""
author = zope.interface.Attribute("""The plugin's author which is usually a nickname and an e-mail address.""")
version = zope.interface.Attribute("""The version of the plugin.""")
description = zope.interface.Attribute("""Short summary of the plugin's intended purpose.""")
def ready():
"""
Called when the plugin is ready to function.
:return:
"""
``` |
{
"source": "joint-online-judge/horse",
"score": 2
} |
#### File: horse/apis/auth.py
```python
from datetime import datetime, timezone
from typing import Any, List, Literal, Optional, Tuple
from urllib.parse import quote_plus
from fastapi import Depends, HTTPException, Query, Request, Response, status
from fastapi.security import OAuth2PasswordRequestForm
from fastapi_jwt_auth import AuthJWT
from loguru import logger
from joj.horse import models, schemas
from joj.horse.config import settings
from joj.horse.schemas.auth import (
AuthParams,
JWTAccessToken,
JWTToken,
auth_jwt_decode_access_token,
auth_jwt_decode_access_token_optional,
auth_jwt_decode_oauth_state,
auth_jwt_decode_refresh_token,
auth_jwt_encode_oauth_state,
auth_jwt_encode_user,
auth_jwt_raw_access_token,
auth_jwt_raw_refresh_token,
)
from joj.horse.services.oauth import BaseOAuth2, OAuth2Dependency, OAuth2Token
from joj.horse.services.oauth.github import GitHubOAuth2
from joj.horse.services.oauth.jaccount import JaccountOAuth2
from joj.horse.utils.errors import BizError, ErrorCode
from joj.horse.utils.router import MyRouter
from joj.horse.utils.url import get_base_url
router = MyRouter()
router_name = "auth"
router_tag = "auth"
router_prefix = "/api/v1"
# @camelcase_parameters
def auth_parameters_dependency(
cookie: bool = Query(True, description="Add Set/Delete-Cookie on response header"),
response_type: Literal["redirect", "json"] = Query(...),
redirect_url: Optional[str] = Query(
None, description="The redirect url after the operation"
),
) -> AuthParams:
return AuthParams(
cookie=cookie, response_type=response_type, redirect_url=redirect_url
)
def set_redirect_response(response: Response, redirect_url: Optional[str]) -> bool:
if redirect_url:
response.status_code = 302
response.headers["location"] = quote_plus(
str(redirect_url), safe=":/%#?&=@[]!$&'()*+,;"
)
return True
return False
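# get_login_response / get_logout_response: depending on the AuthParams, the JWT
# cookies are set or cleared and the result is returned either as a JSON
# StandardResponse or as a 302 redirect to the requested (or base) URL.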
async def get_login_response(
request: Request,
response: Response,
auth_jwt: AuthJWT,
parameters: AuthParams,
access_token: str,
refresh_token: str,
) -> Any:
if parameters.cookie:
if access_token:
auth_jwt.set_access_cookies(access_token, response)
if refresh_token:
auth_jwt.set_refresh_cookies(refresh_token, response)
if parameters.response_type == "json":
return schemas.StandardResponse(
schemas.AuthTokens(
access_token=access_token,
refresh_token=refresh_token,
token_type="bearer",
)
)
if parameters.response_type == "redirect":
redirect_url = parameters.redirect_url or str(get_base_url(request))
if set_redirect_response(response, redirect_url):
return None
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST)
async def get_logout_response(
request: Request,
response: Response,
auth_jwt: AuthJWT,
parameters: AuthParams,
oauth_name: Optional[str],
) -> Any:
if parameters.cookie:
auth_jwt.unset_jwt_cookies(response)
if parameters.response_type == "json":
return schemas.StandardResponse()
if parameters.response_type == "redirect":
for oauth_client in _oauth_clients:
if oauth_client.name == oauth_name:
pass # TODO: oauth logout
redirect_url = parameters.redirect_url or str(get_base_url(request))
if set_redirect_response(response, redirect_url):
return None
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST)
def get_oauth_router(
oauth_clients: List[BaseOAuth2[Any]],
# backend: BaseAuthentication,
callback_redirect_url: Optional[str] = None,
) -> MyRouter:
oauth_router = MyRouter()
authorize_route_name = "oauth_authorize"
callback_route_name = "oauth_callback"
if callback_redirect_url is not None:
oauth2_dependency = OAuth2Dependency(
oauth_clients,
redirect_url=callback_redirect_url,
)
else:
oauth2_dependency = OAuth2Dependency(
oauth_clients,
route_name=callback_route_name,
)
@oauth_router.get("")
async def list_oauth2() -> schemas.StandardListResponse[schemas.OAuth2Client]:
result = [
schemas.OAuth2Client(
oauth_name=oauth_client.name,
display_name=oauth_client.display_name,
icon=oauth_client.icon,
)
for oauth_client in oauth_clients
]
return schemas.StandardListResponse(result)
@oauth_router.get("/{oauth2}/authorize", name=authorize_route_name)
async def authorize(
request: Request,
oauth_client: BaseOAuth2[Any] = Depends(oauth2_dependency.oauth_client()),
auth_parameters: AuthParams = Depends(auth_parameters_dependency),
auth_jwt: AuthJWT = Depends(AuthJWT),
scopes: List[str] = Query(None),
) -> schemas.StandardResponse[schemas.Redirect]:
if callback_redirect_url is not None:
authorize_redirect_url = callback_redirect_url
else:
authorize_redirect_url = request.url_for(
callback_route_name, oauth2=oauth_client.name
)
state_data = {"auth_parameters": auth_parameters.dict()}
state = auth_jwt_encode_oauth_state(auth_jwt, oauth_client.name, state_data)
authorization_url = await oauth_client.get_authorization_url(
authorize_redirect_url,
state,
scopes,
)
return schemas.StandardResponse(
schemas.Redirect(redirect_url=authorization_url)
)
@oauth_router.get(
"/{oauth2}/callback", name=callback_route_name, include_in_schema=False
)
async def callback(
request: Request,
response: Response,
oauth_client: BaseOAuth2[Any] = Depends(oauth2_dependency.oauth_client()),
auth_jwt: AuthJWT = Depends(AuthJWT),
access_token_state: Tuple[OAuth2Token, Optional[str]] = Depends(
oauth2_dependency.access_token_state()
),
) -> schemas.StandardResponse[schemas.AuthTokens]:
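# OAuth callback flow: the dependency has already exchanged the code for a token; fetch
# the provider profile, decode the signed state, upsert the UserOAuthAccount, then
# either log the linked user in or issue an oauth-category token so the client can
# finish registration.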
try:
token, state = access_token_state
logger.info(token)
oauth_profile, _ = await oauth_client.get_profile(token)
state_data = auth_jwt_decode_oauth_state(auth_jwt, state)
except Exception as e:
logger.exception(e)
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST)
if not state_data:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST)
oauth_account = await models.UserOAuthAccount.create_or_update(
oauth_client.name, token, oauth_profile
)
logger.info(oauth_account)
if not oauth_account.user_id:
access_token, refresh_token = auth_jwt_encode_user(
auth_jwt, oauth=oauth_profile
)
else:
user = await models.User.get_or_none(id=oauth_account.user_id)
if user is not None:
user.login_at = datetime.now(tz=timezone.utc)
user.login_ip = request.client.host
await user.save_model()
logger.info(f"user oauth login: {user}")
access_token, refresh_token = auth_jwt_encode_user(
auth_jwt, user=user, oauth_name=oauth_profile.oauth_name
)
return await get_login_response(
request,
response,
auth_jwt,
state_data.auth_parameters,
access_token,
refresh_token,
)
return oauth_router
@router.post("/login")
async def login(
request: Request,
response: Response,
auth_parameters: AuthParams = Depends(auth_parameters_dependency),
auth_jwt: AuthJWT = Depends(AuthJWT),
credentials: OAuth2PasswordRequestForm = Depends(),
) -> schemas.StandardResponse[schemas.AuthTokens]:
user = await models.User.get_or_none(username=credentials.username)
if not user:
raise BizError(ErrorCode.UsernamePasswordError, "user not found")
if not user.verify_password(credentials.password):
raise BizError(ErrorCode.UsernamePasswordError, "incorrect password")
user.login_at = datetime.now(tz=timezone.utc)
user.login_ip = request.client.host
await user.save_model()
logger.info(f"user login: {user}")
access_token, refresh_token = auth_jwt_encode_user(auth_jwt, user=user)
return await get_login_response(
request, response, auth_jwt, auth_parameters, access_token, refresh_token
)
# raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST)
@router.post("/logout")
async def logout(
request: Request,
response: Response,
auth_parameters: AuthParams = Depends(auth_parameters_dependency),
auth_jwt: AuthJWT = Depends(AuthJWT),
jwt_access_token: JWTAccessToken = Depends(auth_jwt_decode_access_token),
) -> Any:
oauth = jwt_access_token.oauth_name
return await get_logout_response(
request, response, auth_jwt, auth_parameters, oauth
)
@router.post("/register")
async def register(
request: Request,
response: Response,
user_create: schemas.UserCreate,
auth_parameters: AuthParams = Depends(auth_parameters_dependency),
auth_jwt: AuthJWT = Depends(AuthJWT),
jwt_access_token: Optional[JWTAccessToken] = Depends(
auth_jwt_decode_access_token_optional
),
) -> schemas.StandardResponse[schemas.AuthTokens]:
if jwt_access_token is not None and jwt_access_token.category == "user":
jwt_access_token = None
# raise BizError(
# ErrorCode.UserRegisterError,
# "user already login, please logout before register",
# )
user_model = await models.User.create(
user_create=user_create,
jwt_access_token=jwt_access_token,
register_ip=request.client.host,
)
access_token, refresh_token = auth_jwt_encode_user(
auth_jwt, user=user_model, oauth_name=user_create.oauth_name
)
return await get_login_response(
request, response, auth_jwt, auth_parameters, access_token, refresh_token
)
@router.get("/token")
async def get_token(
request: Request,
response: Response,
auth_parameters: AuthParams = Depends(auth_parameters_dependency),
auth_jwt: AuthJWT = Depends(AuthJWT),
access_token: str = Depends(auth_jwt_raw_access_token),
refresh_token: str = Depends(auth_jwt_raw_refresh_token),
) -> schemas.StandardResponse[schemas.AuthTokens]:
return await get_login_response(
request,
response,
auth_jwt,
auth_parameters,
access_token,
refresh_token,
)
@router.post("/refresh")
async def refresh(
request: Request,
response: Response,
auth_parameters: AuthParams = Depends(auth_parameters_dependency),
auth_jwt: AuthJWT = Depends(AuthJWT),
jwt_refresh_token: JWTToken = Depends(auth_jwt_decode_refresh_token),
) -> schemas.StandardResponse[schemas.AuthTokens]:
user = await models.User.get_or_none(id=jwt_refresh_token.id)
if user is None:
access_token, refresh_token = "", ""
else:
access_token, refresh_token = auth_jwt_encode_user(
auth_jwt, user=user, fresh=False
)
return await get_login_response(
request, response, auth_jwt, auth_parameters, access_token, refresh_token
)
_oauth_clients: List[BaseOAuth2[Any]] = []
if settings.oauth_jaccount:
_oauth_clients.append(
JaccountOAuth2(settings.oauth_jaccount_id, settings.oauth_jaccount_secret)
)
if settings.oauth_github:
_oauth_clients.append(
GitHubOAuth2(settings.oauth_github_id, settings.oauth_github_secret)
)
for _oauth_client in _oauth_clients:
router.include_router(
get_oauth_router(_oauth_clients),
prefix="/oauth2",
)
```
#### File: horse/apis/__init__.py
```python
from typing import Any
from joj.horse.apis import (
admin as admin,
auth as auth,
domains as domains,
judge as judge,
misc as misc,
problem_configs as problem_configs,
problem_groups as problem_groups,
problem_sets as problem_sets,
problems as problems,
records as records,
user as user,
users as users,
)
from joj.horse.apis.auth import login
from joj.horse.apis.problem_configs import (
update_problem_config_by_archive,
upload_file_to_problem_config,
upload_file_to_root_in_problem_config,
)
from joj.horse.apis.problem_sets import submit_solution_to_problem_set
from joj.horse.apis.problems import submit_solution_to_problem
from joj.horse.app import app
from joj.horse.utils.router import copy_schema, update_schema_name
def include_router(module: Any) -> None:
app.include_router(
module.router,
prefix="/" + module.router_name if module.router_name else "",
tags=[module.router_tag],
)
include_router(domains)
include_router(problem_sets)
include_router(problems)
include_router(problem_configs)
include_router(problem_groups)
include_router(records)
include_router(user)
include_router(users)
include_router(auth)
include_router(misc)
include_router(admin)
include_router(judge)
update_schema_name(app, submit_solution_to_problem, "ProblemSolutionSubmit")
copy_schema(app, submit_solution_to_problem, submit_solution_to_problem_set)
update_schema_name(app, update_problem_config_by_archive, "FileUpload")
copy_schema(
app,
update_problem_config_by_archive,
upload_file_to_problem_config,
upload_file_to_root_in_problem_config,
)
update_schema_name(app, login, "OAuth2PasswordRequestForm")
```
#### File: horse/apis/problem_groups.py
```python
from fastapi import Depends
from sqlmodel import select
from joj.horse import models, schemas
from joj.horse.schemas import StandardListResponse
from joj.horse.schemas.auth import Authentication
from joj.horse.utils.parser import parse_ordering_query, parse_pagination_query
from joj.horse.utils.router import MyRouter
router = MyRouter()
router_name = "problem_groups"
router_tag = "problem group"
router_prefix = "/api/v1"
@router.get("")
async def list_problem_groups(
ordering: schemas.OrderingQuery = Depends(parse_ordering_query()),
pagination: schemas.PaginationQuery = Depends(parse_pagination_query),
auth: Authentication = Depends(),
) -> StandardListResponse[schemas.ProblemGroup]:
statement = select(models.ProblemGroup)
problem_groups, count = await models.ProblemGroup.execute_list_statement(
statement, ordering, pagination
)
return StandardListResponse(problem_groups, count)
```
#### File: horse/apis/problems.py
```python
from typing import List, Optional
from uuid import UUID
from celery import Celery
from fastapi import BackgroundTasks, Depends
from loguru import logger
from sqlmodel.ext.asyncio.session import AsyncSession
from joj.horse import models, schemas
from joj.horse.schemas import Empty, StandardListResponse, StandardResponse
from joj.horse.schemas.auth import Authentication
from joj.horse.schemas.permission import Permission
from joj.horse.services.celery_app import celery_app_dependency
from joj.horse.services.db import db_session_dependency
from joj.horse.services.lakefs import LakeFSProblemConfig
from joj.horse.utils.parser import (
parse_domain_from_auth,
parse_ordering_query,
parse_pagination_query,
parse_problem,
parse_problem_set,
parse_problem_without_validation,
parse_user_from_auth,
parse_view_hidden_problem,
)
from joj.horse.utils.router import MyRouter
router = MyRouter()
router_name = "domains/{domain}/problems"
router_tag = "problem"
router_prefix = "/api/v1"
@router.get("", permissions=[Permission.DomainProblem.view])
async def list_problems(
domain: models.Domain = Depends(parse_domain_from_auth),
ordering: schemas.OrderingQuery = Depends(parse_ordering_query()),
pagination: schemas.PaginationQuery = Depends(parse_pagination_query),
include_hidden: bool = Depends(parse_view_hidden_problem),
user: models.User = Depends(parse_user_from_auth),
) -> StandardListResponse[schemas.ProblemWithLatestRecord]:
statement = domain.find_problems_statement(include_hidden)
problems, count = await models.Problem.execute_list_statement(
statement, ordering, pagination
)
result = await models.Problem.get_problems_with_record_states(
result_cls=schemas.ProblemWithLatestRecord,
problem_set_id=None,
problems=problems,
user_id=user.id,
)
return StandardListResponse(result, count)
@router.post("", permissions=[Permission.DomainProblem.create])
async def create_problem(
problem_create: schemas.ProblemCreate,
background_tasks: BackgroundTasks,
domain: models.Domain = Depends(parse_domain_from_auth),
user: models.User = Depends(parse_user_from_auth),
session: AsyncSession = Depends(db_session_dependency),
) -> StandardResponse[schemas.Problem]:
try:
problem_group = models.ProblemGroup()
session.sync_session.add(problem_group)
logger.info(f"problem group created: {problem_group}")
problem = models.Problem(
**problem_create.dict(),
domain_id=domain.id,
owner_id=user.id,
problem_group_id=problem_group.id,
)
session.sync_session.add(problem)
logger.info(f"problem created: {problem}")
await session.commit()
await session.refresh(problem)
except Exception as e:
logger.exception(f"problem creation failed: {problem_create}")
raise e
lakefs_problem_config = LakeFSProblemConfig(problem)
background_tasks.add_task(lakefs_problem_config.ensure_branch)
return StandardResponse(problem)
@router.get("/{problem}", permissions=[Permission.DomainProblem.view])
async def get_problem(
problem: models.Problem = Depends(parse_problem),
user: models.User = Depends(parse_user_from_auth),
) -> StandardResponse[schemas.ProblemDetailWithLatestRecord]:
record = await models.Record.get_user_latest_record(
problem_set_id=None, problem_id=problem.id, user_id=user.id
)
result = schemas.ProblemDetailWithLatestRecord(
**problem.dict(), latest_record=record
)
return StandardResponse(result)
@router.delete("/{problem}", permissions=[Permission.DomainProblem.edit])
async def delete_problem(
problem: models.Problem = Depends(parse_problem),
) -> StandardResponse[Empty]:
await problem.delete_model()
return StandardResponse()
@router.patch("/{problem}", permissions=[Permission.DomainProblem.edit])
async def update_problem(
problem_edit: schemas.ProblemEdit = Depends(schemas.ProblemEdit.edit_dependency),
problem: models.Problem = Depends(parse_problem),
) -> StandardResponse[schemas.Problem]:
problem.update_from_dict(problem_edit.dict())
await problem.save_model()
return StandardResponse(problem)
# @router.patch(
# "/{problem}/config",
# permissions=[Permission.DomainProblem.view_config],
# )
# async def update_problem_config(
# config: UploadFile = File(...), problem: models.Problem = Depends(parse_problem)
# ) -> StandardResponse[schemas.Problem]:
# return StandardResponse(problem)
@router.post("/clone", permissions=[Permission.DomainProblem.view_config])
async def clone_problem(
problem_clone: schemas.ProblemClone,
domain: models.Domain = Depends(parse_domain_from_auth),
user: models.User = Depends(parse_user_from_auth),
auth: Authentication = Depends(),
session: AsyncSession = Depends(db_session_dependency),
) -> StandardListResponse[schemas.Problem]:
problems: List[models.Problem] = [
parse_problem(await parse_problem_without_validation(oid, domain), auth)
for oid in problem_clone.problems
]
problem_set = await parse_problem_set(problem_clone.problem_set, domain)
new_group = problem_clone.new_group
# FIXME: /root/.venv/lib/python3.8/site-packages/sqlmodel/orm/session.py:60:
# SAWarning: relationship 'Problem.problem_sets' will copy column problems.id
# to column problem_problem_set_links.problem_id, which conflicts with
# relationship(s): 'ProblemProblemSetLink.problem' (copies problems.id to
# problem_problem_set_links.problem_id). If this is not the intention, consider
# if these relationships should be linked with back_populates, or if
# viewonly=True should be applied to one or more if they are read-only.
# For the less common case that foreign key constraints are partially
# overlapping, the orm.foreign() annotation can be used to isolate the
# columns that should be written towards. To silence this warning,
# add the parameter 'overlaps="problem"' to the 'Problem.problem_sets'
# relationship. (Background on this error at: https://sqlalche.me/e/14/qzyx)
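# A possible way to silence the warning above (sketch only; the actual declaration of
# Problem.problem_sets lives in another module and is not shown here, so the kwargs
# below are illustrative) is to pass the suggested hint through sqlmodel:
#
#     problem_sets: List["ProblemSet"] = Relationship(
#         link_model=ProblemProblemSetLink,
#         sa_relationship_kwargs={"overlaps": "problem"},
#     )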
try:
res = []
for problem in problems:
problem_group_id: Optional[UUID]
if new_group:
problem_group = models.ProblemGroup()
# TODO: transaction (since session has already committed here)
await problem_group.save_model()
problem_group_id = problem_group.id
await session.refresh(problem)
await session.refresh(domain)
await session.refresh(problem_set)
else:
problem_group_id = problem.problem_group_id
new_problem = models.Problem(
domain_id=domain.id,
owner_id=user.id,
title=problem.title,
content=problem.content,
problem_group_id=problem_group_id,
problem_set_id=problem_set.id,
)
await new_problem.save_model()
res.append(models.Problem.from_orm(new_problem))
logger.info(f"problem cloned: {new_problem}")
except Exception as e:
logger.exception(f"problems clone to problem set failed: {problem_set}")
raise e
return StandardListResponse(res)
@router.post("/{problem}", permissions=[Permission.DomainProblem.submit])
async def submit_solution_to_problem(
background_tasks: BackgroundTasks,
celery_app: Celery = Depends(celery_app_dependency),
problem_submit: schemas.ProblemSolutionSubmit = Depends(
schemas.ProblemSolutionSubmit.form_dependency
),
problem: models.Problem = Depends(parse_problem),
user: models.User = Depends(parse_user_from_auth),
) -> StandardResponse[schemas.Record]:
record = await models.Record.submit(
background_tasks=background_tasks,
celery_app=celery_app,
problem_submit=problem_submit,
problem_set=None,
problem=problem,
user=user,
)
logger.info("create record: {}", record)
return StandardResponse(record)
```
#### File: joj/horse/app.py
```python
import asyncio
import rollbar
from fastapi import Depends, FastAPI, Request
from fastapi.responses import ORJSONResponse
from fastapi_versioning import VersionedFastAPI
from lakefs_client.exceptions import ApiException as LakeFSApiException
from loguru import logger
from pydantic_universal_settings import init_settings
from rollbar.contrib.fastapi import ReporterMiddleware as RollbarMiddleware
from starlette.responses import RedirectResponse
from starlette_context import plugins
from starlette_context.middleware import RawContextMiddleware
from tenacity import RetryError
import joj.horse.models # noqa: F401
import joj.horse.utils.monkey_patch # noqa: F401
from joj.horse.config import AllSettings
from joj.horse.schemas.cache import try_init_cache
from joj.horse.services.db import db_session_dependency, try_init_db
from joj.horse.services.lakefs import try_init_lakefs
from joj.horse.utils.exception_handlers import register_exception_handlers
from joj.horse.utils.logger import init_logging # noqa: F401
from joj.horse.utils.router import simplify_operation_ids
from joj.horse.utils.url import get_base_url
from joj.horse.utils.version import get_git_version, get_version
settings = init_settings(AllSettings)
app = FastAPI(
title=settings.app_name,
version=get_version(),
description=f"Git version: {get_git_version()}",
dependencies=[Depends(db_session_dependency)],
default_response_class=ORJSONResponse,
)
init_logging()
import joj.horse.apis # noqa: F401
app = VersionedFastAPI(
app,
version_format="{major}",
prefix_format="/api/v{major}",
)
# we temporarily redirect "/" and "/api" to "/api/v1" for debugging
@app.get("/api")
@app.get("/")
async def redirect_to_docs(request: Request) -> RedirectResponse: # pragma: no cover
base_url = get_base_url(request, prefix="api/v1")
redirect_url = app.url_path_for("swagger_ui_html").make_absolute_url(base_url)
logger.info(base_url)
logger.info(redirect_url)
return RedirectResponse(redirect_url + "?docExpansion=none")
@app.on_event("startup")
async def startup_event() -> None: # pragma: no cover
try:
logger.info(f"Using {asyncio.get_running_loop().__module__}.")
initialize_tasks = [
try_init_db(),
try_init_cache(),
]
if settings.lakefs_host:
initialize_tasks.append(try_init_lakefs())
else:
logger.warning("LakeFS not configured! All file features will be disabled.")
await asyncio.gather(*initialize_tasks)
except (RetryError, LakeFSApiException) as e:
logger.error("Initialization failed, exiting.")
logger.error(e)
exit(-1)
# if settings.dsn: # pragma: no cover
# sentry_sdk.init(dsn=settings.dsn, traces_sample_rate=settings.traces_sample_rate)
# app.add_middleware(SentryAsgiMiddleware)
# logger.info("sentry activated")
app.add_middleware(
RawContextMiddleware,
plugins=(plugins.RequestIdPlugin(), plugins.CorrelationIdPlugin()),
)
if settings.rollbar_access_token and not settings.dsn: # pragma: no cover
rollbar.init(
settings.rollbar_access_token,
environment="production" if not settings.debug else "debug",
handler="async",
)
app.add_middleware(RollbarMiddleware)
logger.info("rollbar activated")
for route in app.routes:
sub_app = route.app
if isinstance(sub_app, FastAPI):
register_exception_handlers(sub_app)
simplify_operation_ids(sub_app)
```
#### File: horse/models/base.py
```python
from datetime import datetime
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Type, TypeVar, Union
from uuid import UUID, uuid4
from pydantic.fields import Undefined
from sqlalchemy.engine import Connection, Row
from sqlalchemy.exc import StatementError
from sqlalchemy.orm import Mapper
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.sql.expression import Delete, Select, Update
from sqlalchemy.sql.functions import count
from sqlmodel import Field, SQLModel, delete, select, update
from sqlmodel.engine.result import ScalarResult
from joj.horse.schemas.base import BaseModel, UserInputURL, get_datetime_column, utcnow
from joj.horse.services.db import db_session
from joj.horse.utils.base import is_uuid
if TYPE_CHECKING:
from joj.horse.models.domain import Domain
from joj.horse.schemas.query import OrderingQuery, PaginationQuery
class ORMUtils(SQLModel, BaseModel):
def update_from_dict(self: "BaseORMModel", d: Dict[str, Any]) -> None:
for k, v in d.items():
if v is not Undefined:
setattr(self, k, v)
@classmethod
def sql_select(cls) -> Select:
return select(cls)
@classmethod
def sql_update(cls) -> Update:
return update(cls)
@classmethod
def sql_delete(cls) -> Delete:
return delete(cls)
@classmethod
async def session_exec(cls, statement: Select) -> ScalarResult["BaseORMModelType"]:
async with db_session() as session:
return await session.exec(statement)
@classmethod
async def get_or_none(
__base_orm_model_cls__: Type["BaseORMModelType"], **kwargs: Any
) -> Optional["BaseORMModelType"]:
async with db_session() as session:
statement = __base_orm_model_cls__.apply_filtering(
select(__base_orm_model_cls__), **kwargs
)
try:
results = await session.exec(statement)
except StatementError:
return None
return results.one_or_none()
@classmethod
async def get_many(
__base_orm_model_cls__: Type["BaseORMModelType"], **kwargs: Any
) -> List["BaseORMModelType"]:
async with db_session() as session:
statement = __base_orm_model_cls__.apply_filtering(
select(__base_orm_model_cls__), **kwargs
)
try:
results = await session.exec(statement)
except StatementError:
return []
return results.all()
async def save_model(self, commit: bool = True, refresh: bool = True) -> None:
async with db_session() as session:
session.sync_session.add(self)
if commit:
await session.commit()
if refresh:
await session.refresh(self)
async def delete_model(self, commit: bool = True) -> None:
async with db_session() as session:
session.sync_session.delete(self)
if commit:
await session.commit()
async def refresh_model(self) -> None:
async with db_session() as session:
await session.refresh(self)
async def fetch_related(self, *fields: str) -> None:
def sync_func(_: Any) -> None:
for field in fields:
getattr(self, field)
async with db_session() as session:
await session.run_sync(sync_func)
@classmethod
def apply_ordering(
cls: Type["BaseORMModelType"],
statement: Select,
ordering: Optional["OrderingQuery"],
) -> Select:
if ordering is None or not ordering.orderings:
return statement
order_by_clause = []
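# Ordering convention handled below: a leading "-" sorts descending, a leading "+"
# sorts ascending, no prefix uses the column's default order, and field names
# starting with "_" are skipped.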
for x in ordering.orderings:
asc: Optional[bool] = None
if x.startswith("-"):
asc = False
field = x[1:]
elif x.startswith("+"):
asc = True
field = x[1:]
else:
asc = None
field = x
if field.startswith("_"):
continue
sa_column = getattr(cls, field, None)
if sa_column is not None and isinstance(sa_column, InstrumentedAttribute):
if asc is None:
order_by_clause.append(sa_column)
elif asc:
order_by_clause.append(sa_column.asc())
else:
order_by_clause.append(sa_column.desc())
if len(order_by_clause) > 0:
statement = statement.order_by(*order_by_clause)
return statement
@classmethod
def apply_count(
cls: Type["BaseORMModelType"],
statement: Select,
# alt_cls: Optional[Type["BaseORMModelType"]] = None,
) -> Select:
# if alt_cls is None:
# alt_cls = cls
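# with_only_columns(count(), maintain_column_froms=True) rewrites the select into a
# COUNT query while keeping the original FROM/WHERE clauses, so the count matches
# the filtered statement.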
return statement.with_only_columns(count(), maintain_column_froms=True)
@classmethod
def apply_pagination(
cls: Type["BaseORMModelType"],
statement: Select,
pagination: Optional["PaginationQuery"],
) -> Select:
if pagination is not None:
statement = statement.offset(pagination.offset).limit(pagination.limit)
return statement
@classmethod
def apply_filtering(
__base_orm_model_cls__: Type["BaseORMModelType"],
__statement__: Union[Select, Update, Delete],
**kwargs: Any,
) -> Union[Select, Update, Delete]:
statement = __statement__
for k, v in kwargs.items():
statement = statement.where(getattr(__base_orm_model_cls__, k) == v)
return statement
@classmethod
async def execute_list_statement(
cls: Type["BaseORMModelType"],
statement: Select,
ordering: Optional["OrderingQuery"] = None,
pagination: Optional["PaginationQuery"] = None,
) -> Tuple[Union[List["BaseORMModelType"], List[Row]], int]:
count_statement = cls.apply_count(statement)
statement = cls.apply_ordering(statement, ordering)
statement = cls.apply_pagination(statement, pagination)
async with db_session() as session:
try:
row_count = await session.exec(count_statement)
results = await session.exec(statement)
except StatementError:
return [], 0
row_count_value = row_count.one()
if not isinstance(row_count_value, int):
row_count_value = row_count_value[0]
return results.all(), row_count_value
@staticmethod
def parse_rows(
rows: List[Row], *tables: Type["BaseORMModelType"]
) -> Tuple[List["BaseORMModelType"], ...]:
if len(rows) == 0:
return tuple([] for _ in tables)
return tuple(list(x) for x in zip(*rows))
class BaseORMModel(ORMUtils):
id: UUID = Field(default_factory=uuid4, primary_key=True, nullable=False)
created_at: Optional[datetime] = Field(
None, sa_column=get_datetime_column(index=True, server_default=utcnow())
)
updated_at: Optional[datetime] = Field(
None,
sa_column=get_datetime_column(
index=True, server_default=utcnow(), onupdate=utcnow()
),
)
BaseORMModelType = TypeVar("BaseORMModelType", bound=ORMUtils)
class URLMixin(BaseORMModel):
url: UserInputURL = Field("", description="(unique) url of the domain")
class URLORMModel(BaseORMModel):
url: str = Field(..., index=True, nullable=False, sa_column_kwargs={"unique": True})
@classmethod
async def find_by_url_or_id(
cls: Type["BaseORMModelType"], url_or_id: str
) -> Optional["BaseORMModelType"]:
if is_uuid(url_or_id):
statement = select(cls).where(cls.id == url_or_id)
else:
statement = select(cls).where(cls.url == url_or_id)
async with db_session() as session:
try:
result = await session.exec(statement)
except StatementError:
return None
return result.one_or_none()
class DomainURLORMModel(URLORMModel):
if TYPE_CHECKING:
domain_id: UUID
url: str = Field(..., index=True, nullable=False)
@classmethod
async def find_by_domain_url_or_id(
cls: Type["BaseORMModelType"],
domain: "Domain",
url_or_id: str,
options: Any = None,
) -> Optional["BaseORMModelType"]:
if is_uuid(url_or_id):
statement = (
select(cls).where(cls.id == url_or_id).where(cls.domain_id == domain.id)
)
else:
statement = (
select(cls)
.where(cls.url == url_or_id)
.where(cls.domain_id == domain.id)
)
if options:
if isinstance(options, list):
statement = statement.options(*options)
else:
statement = statement.options(options)
async with db_session() as session:
try:
result = await session.exec(statement)
except StatementError:
return None
return result.one_or_none()
def url_pre_save(mapper: Mapper, connection: Connection, target: URLORMModel) -> None:
if not target.url:
target.url = str(target.id)
```
#### File: horse/models/link_tables.py
```python
from typing import TYPE_CHECKING, Optional
from uuid import UUID
from sqlalchemy.orm import joinedload
from sqlalchemy.schema import Column, ForeignKey
from sqlmodel import Field, Relationship
from sqlmodel.sql.sqltypes import GUID
from joj.horse.models.base import ORMUtils
from joj.horse.utils.base import is_uuid
if TYPE_CHECKING:
from joj.horse.models import Problem, ProblemSet
class ProblemProblemSetLink(ORMUtils, table=True): # type: ignore[call-arg]
__tablename__ = "problem_problem_set_links"
problem_id: UUID = Field(
sa_column=Column(
GUID, ForeignKey("problems.id", ondelete="CASCADE"), primary_key=True
),
)
problem_set_id: UUID = Field(
sa_column=Column(
GUID, ForeignKey("problem_sets.id", ondelete="CASCADE"), primary_key=True
),
)
position: int = Field(
index=True, nullable=False, sa_column_kwargs={"server_default": "0"}
)
problem: "Problem" = Relationship(back_populates="problem_problem_set_links")
problem_set: "ProblemSet" = Relationship(back_populates="problem_problem_set_links")
@classmethod
async def find_by_problem_set_and_problem(
cls, problem_set: str, problem: str
) -> Optional["ProblemProblemSetLink"]:
# this is buggy, do not use!
# not sure how much it's better than three queries (maybe even worse)
from joj.horse import models
statement = cls.sql_select().options(
joinedload(cls.problem_set, innerjoin=True),
joinedload(cls.problem, innerjoin=True),
)
if is_uuid(problem_set):
statement = statement.where(cls.problem_set_id == problem_set)
else:
statement = statement.where(models.ProblemSet.url == problem_set)
if is_uuid(problem):
statement = statement.where(cls.problem_id == problem)
else:
statement = statement.where(models.Problem.url == problem)
from loguru import logger
logger.info(statement)
result = await cls.session_exec(statement)
logger.info(result.all())
return result.one_or_none()
```
#### File: horse/models/problem_config.py
```python
from typing import TYPE_CHECKING, List, Optional
from uuid import UUID
from joj.elephant.errors import ElephantError
# from joj.elephant.manager import Manager
from lakefs_client.models import Commit
from sqlalchemy.schema import Column, ForeignKey
from sqlmodel import Field, Relationship
from sqlmodel.sql.sqltypes import GUID
from starlette.concurrency import run_in_threadpool
from joj.horse.models.base import BaseORMModel
from joj.horse.schemas.problem_config import ProblemConfigCommit, ProblemConfigDetail
from joj.horse.services.lakefs import LakeFSProblemConfig
from joj.horse.utils.errors import BizError, ErrorCode
if TYPE_CHECKING:
from joj.horse.models import Problem, Record, User
class ProblemConfig(BaseORMModel, ProblemConfigDetail, table=True): # type: ignore[call-arg]
__tablename__ = "problem_configs"
problem_id: Optional[UUID] = Field(
sa_column=Column(
GUID, ForeignKey("problems.id", ondelete="SET NULL"), nullable=True
)
)
problem: Optional["Problem"] = Relationship(back_populates="problem_configs")
committer_id: Optional[UUID] = Field(
sa_column=Column(
GUID, ForeignKey("users.id", ondelete="SET NULL"), nullable=True
)
)
committer: Optional["User"] = Relationship(back_populates="problem_configs")
records: List["Record"] = Relationship(back_populates="problem_config")
@classmethod
async def make_commit(
cls, problem: "Problem", committer: "User", commit: ProblemConfigCommit
) -> "ProblemConfig":
def sync_func() -> Commit:
lakefs_problem_config = LakeFSProblemConfig(problem)
# manager = Manager(logger, lakefs_problem_config.storage)
# manager.validate_source()
return lakefs_problem_config.commit(commit.message)
try:
commit_result = await run_in_threadpool(sync_func)
problem_config = cls(
problem_id=problem.id,
committer_id=committer.id,
commit_id=commit_result.id,
)
await problem_config.save_model()
except ElephantError as e:
raise BizError(ErrorCode.ProblemConfigValidationError, e.message)
return problem_config
```
#### File: horse/models/user.py
```python
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
from pydantic import EmailStr, root_validator
from sqlalchemy.sql.expression import Select, or_
from sqlmodel import Field, Relationship
from joj.horse.models.base import BaseORMModel
from joj.horse.models.user_oauth_account import UserOAuthAccount
from joj.horse.schemas.user import UserCreate, UserDetail
from joj.horse.services.db import db_session
from joj.horse.utils.errors import BizError, ErrorCode
if TYPE_CHECKING:
from joj.horse.models import (
Domain,
DomainUser,
Problem,
ProblemConfig,
ProblemSet,
Record,
UserAccessKey,
)
from joj.horse.schemas.auth import JWTAccessToken
class User(BaseORMModel, UserDetail, table=True): # type: ignore[call-arg]
__tablename__ = "users"
hashed_password: str = Field(
"",
nullable=False,
sa_column_kwargs={"server_default": ""},
)
username_lower: str = Field(
index=True,
nullable=False,
sa_column_kwargs={"unique": True},
)
email_lower: EmailStr = Field(
index=True,
nullable=False,
sa_column_kwargs={"unique": True},
)
oauth_accounts: List["UserOAuthAccount"] = Relationship(back_populates="user")
access_keys: List["UserAccessKey"] = Relationship(back_populates="user")
owned_domains: List["Domain"] = Relationship(back_populates="owner")
domain_users: List["DomainUser"] = Relationship(back_populates="user")
owned_problems: List["Problem"] = Relationship(back_populates="owner")
owned_problem_sets: List["ProblemSet"] = Relationship(back_populates="owner")
problem_configs: List["ProblemConfig"] = Relationship(back_populates="committer")
committed_records: List["Record"] = Relationship(
back_populates="committer",
sa_relationship_kwargs={"foreign_keys": "[Record.committer_id]"},
)
judged_records: List["Record"] = Relationship(
back_populates="judger",
sa_relationship_kwargs={"foreign_keys": "[Record.judger_id]"},
)
@root_validator(pre=True)
def validate_lower_name(cls, values: Dict[str, Any]) -> Dict[str, Any]:
if "username" not in values:
raise ValueError("username undefined")
values["username_lower"] = values["username"].lower()
if "email" not in values:
raise ValueError("email undefined")
values["email_lower"] = values["email"].lower()
return values
def verify_password(self, plain_password: str) -> bool:
from joj.horse.schemas.auth import pwd_context
return pwd_context.verify(plain_password, self.hashed_password)
async def reset_password(self, current_password: str, new_password: str) -> None:
if self.hashed_password:
if not self.verify_password(current_password):
raise BizError(ErrorCode.UsernamePasswordError, "incorrect password")
self.hashed_password = self._generate_password_hash(new_password)
await self.save_model()
@classmethod
def _generate_password_hash(cls, password: str) -> str:
from joj.horse.schemas.auth import pwd_context
return pwd_context.hash(password)
@classmethod
def _create_user(cls, user_create: "UserCreate", register_ip: str) -> "User":
if not user_create.password:
raise BizError(ErrorCode.UserRegisterError, "password not provided")
if not user_create.username:
raise BizError(ErrorCode.UserRegisterError, "username not provided")
if not user_create.email:
raise BizError(ErrorCode.UserRegisterError, "email not provided")
hashed_password = cls._generate_password_hash(user_create.password)
user = User(
username=user_create.username,
email=user_create.email,
student_id="",
real_name="",
is_active=False,
hashed_password=hashed_password,
register_ip=register_ip,
login_ip=register_ip,
)
return user
@classmethod
async def _create_user_by_oauth(
cls,
user_create: "UserCreate",
jwt_access_token: "JWTAccessToken",
register_ip: str,
) -> Tuple["User", "UserOAuthAccount"]:
oauth_account = await UserOAuthAccount.get_or_none(
oauth_name=jwt_access_token.oauth_name,
account_id=jwt_access_token.id,
)
if oauth_account is None:
raise BizError(ErrorCode.UserRegisterError, "oauth account not matched")
if not user_create.username:
if not oauth_account.account_name:
raise BizError(ErrorCode.UserRegisterError, "username not provided")
username = oauth_account.account_name
else:
username = user_create.username
email = oauth_account.account_email
if user_create.email and user_create.email != oauth_account.account_email:
raise BizError(
ErrorCode.UserRegisterError,
"email must be same as the primary email of oauth account",
)
if user_create.password:
hashed_password = cls._generate_password_hash(user_create.password)
else:
# registering with oauth may omit the password
hashed_password = "" # pragma: no cover
user = User(
username=username,
email=email,
student_id=jwt_access_token.student_id,
real_name=jwt_access_token.real_name,
is_active=True,
hashed_password=hashed_password,
register_ip=register_ip,
login_ip=register_ip,
)
return user, oauth_account
@classmethod
async def create(
cls,
user_create: "UserCreate",
jwt_access_token: Optional["JWTAccessToken"],
register_ip: str,
) -> "User":
oauth_account: Optional[UserOAuthAccount]
if user_create.oauth_name:
if (
jwt_access_token is None
or jwt_access_token.category != "oauth"
or jwt_access_token.oauth_name != user_create.oauth_name
or jwt_access_token.id != user_create.oauth_account_id
):
raise BizError(ErrorCode.UserRegisterError, "oauth account not matched")
user, oauth_account = await cls._create_user_by_oauth(
user_create, jwt_access_token, register_ip
)
else:
user = cls._create_user(user_create, register_ip)
oauth_account = None
async with db_session() as session:
session.sync_session.add(user)
if oauth_account: # pragma: no cover
oauth_account.user_id = user.id
session.sync_session.add(oauth_account)
await session.commit()
await session.refresh(user)
return user
def find_domains_statement(self, role: Optional[List[str]]) -> Select:
from joj.horse import models
statement = models.Domain.sql_select().outerjoin(models.DomainUser).distinct()
# if user.role != "root":
# # root user can view all domains
statement = statement.where(models.DomainUser.user_id == self.id)
if role is not None:
statement = statement.where(models.DomainUser.role.in_(role)) # type: ignore[attr-defined]
return statement
@classmethod
def apply_search(cls, statement: Select, query: str) -> Select:
looking_for = f"%{query}%"
statement = statement.where(
or_(
cls.username_lower.ilike(looking_for), # type: ignore[attr-defined]
cls.email_lower.ilike(looking_for), # type: ignore[attr-defined]
cls.student_id.ilike(looking_for), # type: ignore[attr-defined]
cls.real_name.ilike(looking_for), # type: ignore[attr-defined]
)
)
return statement
@classmethod
def find_users_statement(cls, query: str) -> Select:
return cls.apply_search(cls.sql_select(), query)
```
#### File: horse/schemas/lakefs.py
```python
from typing import Literal
from pydantic import BaseModel
class FileStats(BaseModel):
path: str
checksum: str
mtime: str
size_bytes: int = 0
LAKEFS_RESET_TYPE_MAPPING = {
"file": "object",
"dir": "common_prefix",
"all": "reset",
}
class LakeFSReset(BaseModel):
type: Literal["file", "dir", "all"] = "all"
path: str = ""
def get_lakefs_type(self) -> str:
return LAKEFS_RESET_TYPE_MAPPING.get(self.type, "")
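# Illustrative usage (values follow the mapping above):
#     LakeFSReset(type="dir", path="some/dir").get_lakefs_type()  # -> "common_prefix"
#     LakeFSReset().get_lakefs_type()                             # -> "reset"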
```
#### File: tests/utils/utils.py
```python
from typing import Any, Dict, Optional, Tuple, Union
from uuid import UUID
import jwt
import pytest
from fastapi.encoders import jsonable_encoder
from httpx import AsyncClient, Response
from loguru import logger
from pydantic import BaseModel
from pytest_lazyfixture import lazy_fixture
from joj.horse import apis, models, schemas
from joj.horse.config import settings
from joj.horse.utils.errors import ErrorCode
# def random_lower_string(length: int = 32) -> str:
# return "".join(random.choices(string.ascii_lowercase, k=length))
# def random_ip() -> str:
# return ".".join(map(str, (random.randint(0, 255) for _ in range(4))))
GLOBAL_DOMAIN_COUNT = 3
GLOBAL_PROBLEM_SET_COUNT = 2
user_access_tokens: Dict[UUID, str] = {}
user_refresh_tokens: Dict[UUID, str] = {}
def validate_response(
response: Response, error_code: ErrorCode = ErrorCode.Success
) -> Dict[str, Any]:
assert response.status_code == 200
res = response.json()
if res["errorCode"] != error_code:
logger.info(res)
assert res["errorCode"] == error_code
if error_code == ErrorCode.Success:
assert res["data"]
return res["data"]
def to_dict(data: Union[Dict[Any, Any], BaseModel]) -> Dict[Any, Any]:
if isinstance(data, dict):
return data
if isinstance(data, models.Domain):
return data.dict(by_alias=True)
assert False
def validate_url(res: Dict[Any, Any], data: Dict[Any, Any]) -> None:
if "url" in data:
assert res["url"] == data["url"]
else:
assert res["url"] == res["id"]
def validate_domain(
res: Dict[Any, Any], data: Dict[Any, Any], domain: models.Domain, in_data: bool
) -> None:
assert res["domainId"] == str(domain.id)
if in_data:
assert res["domainId"] == str(data["domainId"])
def validate_owner(
res: Dict[Any, Any], data: Dict[Any, Any], owner: models.User, in_data: bool
) -> None:
assert res["ownerId"] == str(owner.id)
if in_data:
assert res["ownerId"] == str(data["ownerId"])
def generate_auth_headers(user: models.User) -> Dict[str, str]:
# access_token, _ = auth_jwt_encode_user(auth_jwt=AuthJWT(), user=user)
access_token = user_access_tokens[user.id]
return {"Authorization": f"Bearer {access_token}"}
def get_path_by_url_type(model: Any, url_type: str) -> str:
if url_type == "url":
return model.url
if url_type == "id" or url_type == "pk":
return model.id
assert False
async def do_api_request(
client: AsyncClient,
method: str,
url: str,
user: models.User,
query: Optional[Dict[str, str]] = None,
data: Optional[Dict[str, str]] = None,
headers: Optional[Dict[str, str]] = None,
) -> Response:
if headers is None:
headers = generate_auth_headers(user)
response = await client.request(
method=method,
url=url,
params=query,
json=jsonable_encoder(data),
headers=headers,
)
return response
async def create_test_user(
client: AsyncClient, username: str, password: Optional[str] = None
) -> Response:
if password is None:
password = <PASSWORD>
user_create = schemas.UserCreate(
username=username,
email=username + "@sjtu.edu.cn",
password=password,
)
base_auth_url = get_base_url(apis.auth)
response = await client.post(
f"{base_auth_url}/register",
json=jsonable_encoder(user_create.dict()),
params={"responseType": "json", "cookie": False},
)
return response
async def login_test_user(
client: AsyncClient, username: str, password: Optional[str] = None
) -> Response:
if password is None:
password = <PASSWORD>
base_auth_url = get_base_url(apis.auth)
response = await client.post(
f"{base_auth_url}/login",
data={
"username": username,
"password": password,
},
params={"responseType": "json", "cookie": False},
)
return response
async def validate_test_user(
response: Response,
username: str,
) -> Tuple[models.User, str, str]:
res = validate_response(response)
assert res["accessToken"]
assert res["refreshToken"]
payload = jwt.decode(
res["accessToken"],
key=settings.jwt_secret,
verify=False,
algorithms=[settings.jwt_algorithm],
)
assert payload["username"] == username
assert payload["sub"]
user = await models.User.get_or_none(id=payload["sub"])
assert user
return user, res["accessToken"], res["refreshToken"]
async def create_test_domain(
client: AsyncClient, owner: models.User, data: Dict[str, Any]
) -> Response:
base_domain_url = get_base_url(apis.domains)
headers = generate_auth_headers(owner)
response = await client.post(
f"{base_domain_url}", json=jsonable_encoder(data), headers=headers
)
return response
async def validate_test_domain(
response: Response,
owner: models.User,
domain: Union[Dict[str, str], models.Domain],
) -> models.Domain:
res = validate_response(response)
assert res["id"]
data = to_dict(domain)
validate_url(res, data)
validate_owner(res, data, owner, isinstance(domain, models.Domain))
assert res["name"] == data["name"]
assert res["bulletin"] == data.get("bulletin", "")
assert res["gravatar"] == data.get("gravatar", "")
if isinstance(domain, dict):
domain = await models.Domain.get_or_none(id=res["id"])
return domain
async def create_test_problem_set(
client: AsyncClient, domain: models.Domain, owner: models.User, data: Dict[str, str]
) -> Response:
base_problem_set_url = get_base_url(apis.problem_sets, domain=domain.id)
headers = generate_auth_headers(owner)
response = await client.post(
f"{base_problem_set_url}", json=jsonable_encoder(data), headers=headers
)
return response
async def validate_test_problem_set(
response: Response,
domain: models.Domain,
owner: models.User,
problem_set: Union[Dict[str, str], models.ProblemSet],
) -> models.ProblemSet:
res = validate_response(response)
assert res["id"]
data = to_dict(problem_set)
validate_url(res, data)
validate_domain(res, data, domain, isinstance(problem_set, models.ProblemSet))
validate_owner(res, data, owner, isinstance(problem_set, models.ProblemSet))
assert res["title"] == data["title"]
assert res["content"] == data.get("content", "")
assert res["hidden"] == data.get("hidden", False)
assert res["scoreboardHidden"] == data.get("scoreboardHidden", False)
if isinstance(problem_set, dict):
problem_set = await models.ProblemSet.get_or_none(id=res["id"])
return problem_set
def get_base_url(module: Any, **kwargs: Any) -> str:
s = module.router_prefix + ("/" + module.router_name if module.router_name else "")
return s.format(**kwargs)
def parametrize_global_domains(func: Any) -> Any:
fixtures = [lazy_fixture(f"global_domain_{i}") for i in range(GLOBAL_DOMAIN_COUNT)]
return pytest.mark.parametrize("domain", fixtures)(func)
def parametrize_global_problem_sets(func: Any) -> Any:
fixtures = [
lazy_fixture(f"global_problem_set_{i}") for i in range(GLOBAL_PROBLEM_SET_COUNT)
]
return pytest.mark.parametrize("problem_set", fixtures)(func)
```
#### File: horse/utils/logger.py
```python
import logging
import sys
# if you don't like imports of private modules
# you can move it to typing.py module
from loguru import logger
from uvicorn.logging import AccessFormatter
class InterceptHandler(logging.Handler):
"""
Default handler from examples in loguru documentation.
See https://loguru.readthedocs.io/en/stable/overview.html#entirely-compatible-with-standard-logging
"""
accessFormat = AccessFormatter(
fmt='%(client_addr)s - "%(request_line)s" %(status_code)s'
).formatMessage
def emit(self, record: logging.LogRecord) -> None:
# Get corresponding Loguru level if it exists
try:
level = logger.level(record.levelname).name
except ValueError:
level = record.levelno
# Find the caller from which the logged message originated
frame, depth = logging.currentframe(), 2
while frame.f_code.co_filename == logging.__file__:
frame = frame.f_back # type: ignore
depth += 1
if record.name == "uvicorn.access":
msg = InterceptHandler.accessFormat(record)
else:
msg = record.getMessage()
logger.opt(depth=depth, exception=record.exc_info).log(level, msg)
def init_logging(test: bool = False) -> None:
"""
Replaces the standard logging handlers so that all records are routed through the custom InterceptHandler.
WARNING!
If you call init_logging in a startup event function,
the first logs emitted before the application starts will still be in the old format
>>> app.add_event_handler("startup", init_logging)
stdout:
INFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)
INFO: Started reloader process [11528] using statreload
INFO: Started server process [6036]
INFO: Waiting for application startup.
2020-07-25 02:19:21.357 | INFO | uvicorn.lifespan.on:startup:34 - Application startup complete.
"""
# disable handlers for specific uvicorn loggers
# to redirect their output to the default uvicorn logger
# works with uvicorn==0.11.6
uvicorn_loggers = (
logging.getLogger(name)
for name in logging.root.manager.loggerDict
if name.startswith("uvicorn.")
)
for uvicorn_logger in uvicorn_loggers:
uvicorn_logger.handlers = []
# change handler for default uvicorn logger
logging.getLogger("uvicorn").handlers = [InterceptHandler()]
logging.getLogger("uvicorn.access").handlers = [InterceptHandler()]
logging.getLogger("sqlalchemy").handlers = [InterceptHandler()]
# set logs output, level and format
logger.remove()
if not test:
logger.add(sys.stderr, level="DEBUG", enqueue=True)
logger.add(
"uvicorn.log",
filter=lambda record: record["name"].startswith("uvicorn"),
enqueue=True,
)
logger.add(
"joj.horse.log",
filter=lambda record: record["name"].startswith("joj.horse"),
enqueue=True,
)
logger.add(
"sqlalchemy.log",
filter=lambda record: record["name"].startswith("sqlalchemy"),
enqueue=True,
)
```
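A minimal usage sketch, not from the repository itself: it wires `init_logging` to a FastAPI startup event as the docstring above suggests. The import path `joj.horse.utils.logger` is assumed from the file header.
```python
# Hedged usage sketch; the import path below is assumed from the file header above.
from fastapi import FastAPI

from joj.horse.utils.logger import init_logging  # assumed module path

app = FastAPI()

# Swap the standard/uvicorn handlers for the loguru-backed InterceptHandler at startup.
app.add_event_handler("startup", init_logging)
```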
#### File: horse/utils/router.py
```python
import functools
from inspect import Parameter, signature
from typing import TYPE_CHECKING, Any, Callable, List, get_type_hints
from fastapi import APIRouter, Depends, FastAPI
from fastapi.routing import APIRoute
from loguru import logger
from pydantic.fields import ModelField
from joj.horse.schemas import BaseModel
from joj.horse.schemas.permission import PermissionBase
class Detail(BaseModel):
detail: str
class MyRouter(APIRouter):
"""
Overrides the route decorator logic to use the annotated return type as the `response_model` if unspecified.
Parse the permissions in endpoints args and add them to the dependencies.
"""
def _parse_permissions(func: Callable[..., Any]) -> Callable[..., Any]:
sig = signature(func)
parameters = [
Parameter(
name="permissions",
kind=Parameter.POSITIONAL_ONLY,
default=None,
annotation=List[PermissionBase],
)
]
sig = sig.replace(parameters=parameters)
func.__signature__ = sig
func.__annotations__["permissions"] = List[PermissionBase]
@functools.wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Any:
from joj.horse.schemas.auth import ensure_permission
permissions = kwargs.pop("permissions", None)
if permissions:
new_dependencies = Depends(ensure_permission(permissions))
kwargs["dependencies"] = list(kwargs.get("dependencies", []))
kwargs["dependencies"].append(new_dependencies)
return func(*args, **kwargs)
return wrapper
get = _parse_permissions(APIRouter.get)
put = _parse_permissions(APIRouter.put)
post = _parse_permissions(APIRouter.post)
delete = _parse_permissions(APIRouter.delete)
options = _parse_permissions(APIRouter.options)
head = _parse_permissions(APIRouter.head)
patch = _parse_permissions(APIRouter.patch)
trace = _parse_permissions(APIRouter.trace)
def add_api_route(
self, path: str, endpoint: Callable[..., Any], **kwargs: Any
) -> None:
if kwargs.get("response_model") is None:
kwargs["response_model"] = get_type_hints(endpoint).get("return")
kwargs["responses"] = {403: {"model": Detail}}
return super().add_api_route(path, endpoint, **kwargs)
def simplify_operation_ids(app: FastAPI) -> None:
"""
Simplify operation IDs so that generated clients have simpler api function names
"""
version = f"v{app.version}"
logger.info("Simplify operation ids: {}", version)
for route in app.routes:
if isinstance(route, APIRoute):
route.operation_id = f"{version}_{route.name}"
def _get_schema(_app: FastAPI, function: Callable[..., Any]) -> ModelField:
"""
Get the Pydantic schema of a FastAPI function.
"""
for route in _app.routes:
if route.endpoint is function:
return route.body_field
assert False
def update_schema_name(_app: FastAPI, function: Callable[..., Any], name: str) -> None:
"""
Updates the Pydantic schema name for a FastAPI function that takes
in a fastapi.UploadFile = File(...) or bytes = File(...).
This is a known issue that was reported on FastAPI#1442 in which
the schema for file upload routes were auto-generated with no
customization options. This renames the auto-generated schema to
something more useful and clear.
Args:
_app: The FastAPI application to modify.
function: The function object to modify.
name: The new name of the schema.
"""
if not TYPE_CHECKING:
schema = _get_schema(_app, function)
schema.type_.__name__ = name
def copy_schema(
_app: FastAPI, function_src: Callable[..., Any], *function_dest: Callable[..., Any]
) -> None:
"""
Copy the Pydantic schema from a FastAPI function to some other functions.
This is useful because if update_schema_name is called for two functions
with the same schema, two schemas (same but not merged) will be generated
and some openapi client generator will provide weird model names.
Args:
_app: The FastAPI application to modify.
function_src: The function object to copy the schema from.
function_dest: The function objects to copy the schema to.
"""
if not TYPE_CHECKING:
for func in function_dest:
schema_src = _get_schema(_app, function_src)
schema_dest = _get_schema(_app, func)
schema_dest.type_ = schema_src.type_
```
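A hedged sketch of how `MyRouter` might be used. The endpoint and the import path are illustrative assumptions; only behaviour described above (the return annotation becoming the `response_model`) is relied on.
```python
# Hedged usage sketch; the module path and the endpoint are illustrative assumptions.
from joj.horse.utils.router import Detail, MyRouter  # assumed module path

router = MyRouter()


@router.get("/ping")  # hypothetical endpoint
async def ping() -> Detail:
    # The annotated return type is picked up as response_model by add_api_route above.
    return Detail(detail="pong")
```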
#### File: horse/utils/url.py
```python
from fastapi import Request
from starlette.datastructures import URL, URLPath
from joj.horse.config import settings
# def get_prefix(protocol: str, netloc: str) -> str:
# # if protocol == "http":
# # if settings.https:
# # protocol += "s"
# # else:
# # protocol = "ws"
# return f"{protocol}://{settings.domain}"
def get_base_url(request: Request, prefix: str = "") -> URL:
url = f"{request.url.scheme}://{request.url.netloc}/{settings.root_path}{prefix}"
return URL(url)
def generate_url(request: Request, *args: str, protocol: str = "http") -> str:
assert protocol in ["http", "ws"]
path = "/".join(args)
if path and path[0] != "/":
path = "/" + path
if not protocol:
protocol = request.url.scheme
return URLPath(path, protocol).make_absolute_url(get_base_url(request))
``` |
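A hedged sketch of `generate_url` inside an endpoint; the route is hypothetical and the import path is assumed.
```python
# Hedged usage sketch; the endpoint and module path are assumptions.
from fastapi import FastAPI, Request

from joj.horse.utils.url import generate_url  # assumed module path

app = FastAPI()


@app.get("/where")  # hypothetical endpoint
async def where(request: Request) -> str:
    # Builds an absolute URL under settings.root_path, e.g. "http://127.0.0.1:8000/<root_path>/foo/bar".
    return generate_url(request, "foo", "bar")
```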
{
"source": "joinvalle/Joinville-Smart-Mobility",
"score": 2
} |
#### File: Joinville-Smart-Mobility/tests/test_functions.py
```python
import os
import sys
project_dir = os.path.join(os.path.dirname(__file__), os.pardir)
sys.path.append(project_dir)
import unittest
import dotenv
import pandas as pd
import json
import time
from io import StringIO
import pytz
from sqlalchemy import create_engine, exc, MetaData
from sqlalchemy.engine.url import URL
from sqlalchemy.types import JSON as typeJSON
import datetime
import math
from bson.objectid import ObjectId
from pymongo import MongoClient
from shapely.geometry import Point
from src.data.processing_func import (connect_database, collect_records, tabulate_records, json_to_df,
tabulate_jams, lon_lat_to_UTM, UTM_to_lon_lat,
prep_jams_tosql, prep_rawdata_tosql, extract_geo_sections,
prep_section_tosql, store_jps)
from src.data.load_func import (extract_jps)
dotenv_path = os.path.join(project_dir, '.env')
dotenv.load_dotenv(dotenv_path)
class TestProcessingFunc(unittest.TestCase):
def test_collect_records(self):
uri = os.environ.get("mongo_uri")
client = MongoClient(uri)
db = client.ccp
collection = db.ccp_collection
records = collect_records(collection, limit=1)
self.assertEqual(type(records), list)
self.assertEqual(len(records), 1)
client.close()
def test_tabulate_records(self):
"""
1 - Date is in the correct timezone
2 - Numeric and nested columns have the expected dtypes
"""
doc = open(project_dir + "/tests/test_data/test_records.txt", "r")
json_string = json.load(doc)
json_io = StringIO(json_string)
records = json.load(json_io)
raw_data = tabulate_records(records)
self.assertEqual(type(raw_data), pd.DataFrame)
self.assertEqual(type(raw_data['startTime'][0]), pd.Timestamp)
self.assertEqual(raw_data['startTime'][0].tz.zone, pytz.timezone("America/Sao_Paulo").zone)
self.assertEqual(raw_data['startTimeMillis'].dtype, int)
self.assertEqual(raw_data['endTimeMillis'].dtype, int)
if 'jams' in raw_data:
self.assertEqual(raw_data['jams'].dtype, dict)
if 'alerts' in raw_data:
self.assertEqual(raw_data['alerts'].dtype, dict)
if 'irregularities' in raw_data:
self.assertEqual(raw_data['irregularities'].dtype, dict)
doc.close()
def test_tabulate_jams(self):
sample_json = { "_id" : ObjectId("59cc0811d34a9512bab73343"),
"startTime" : "2017-09-27 20:17:00:000",
"endTimeMillis" : 1506543480000,
"startTimeMillis" : 1506543420000,
"endTime" : "2017-09-27 20:18:00:000",
"jams" : [{ "turnType" : "NONE",
"delay" : 82,
"roadType" : 1,
"street" : "R. Alm. Jaceguay",
"uuid" : 1174570,
"line" : [{"y" : -26.273961,"x" : -48.879597},
{"x" : -48.878684, "y" : -26.273931}],
"pubMillis" : 1506541721537,
"country" : "BR",
"speed" : 4.55277777777778,
"length" : 743,
"segments" : [{},],
"type" : "NONE",
"city" : "Joinville"
},
{"turnType" : "NONE",
"level" : 2,
"delay" : 96,
"roadType" : 2,
"street" : "R. Timbó",
"endNode" : "R. Dr. <NAME>",
"uuid" : 3246489,
"line" : [{"y" : -26.293511,"x" : -48.852581},
{"y" : -26.293693, "x" : -48.850126},
],
"pubMillis" : 1506542575993,
"country" : "BR",
"speed" : 2.96388888888889,
"length" : 454,
"segments" : [{},{}],
"type" : "NONE",
"city" : "Joinville"
},
],
}
sample_df = pd.DataFrame(sample_json)
test_df = tabulate_jams(sample_df)
self.assertEqual(test_df.shape, (2, 21))
self.assertEqual(test_df['_id'].iloc[0], test_df['_id'].iloc[1])
self.assertTrue(pd.isnull(test_df['jams_level'].iloc[0]))
def test_json_to_df(self):
test_jam = [{'uid':'Jam 1',
'coluna1.1': 'conteúdo A',
'coluna1.2': 'conteúdo B',
'coluna1.3': 'conteúdo C',
'coluna1.4':[{'1.4.1': 'conteúdo D',
'1.4.2': 'conteúdo E',
'1.4.3': 'conteúdo F',
}],
},
{'uid': 'Jam 2',
'coluna1.1': 'conteúdo G'},
{'uid': 'Jam 3',
'coluna1.1': 'conteúdo H',
'coluna1.2': 'conteúdo I',
'coluna1.3': 'conteúdo J',
}
]
data = {'coluna A': 'a',
'coluna B': 'b',
'coluna C': test_jam,
}
test_row = pd.Series(data)
df = json_to_df(test_row, 'coluna C')
self.assertEqual(type(df), pd.DataFrame)
self.assertEqual(df.shape, (3, 8))
self.assertEqual(df['coluna A'][0], 'a')
self.assertEqual(df['coluna B'][0], 'b')
self.assertEqual(df[df['coluna C_uid'] == 'Jam 1']['coluna C_coluna1.1'].iloc[0], 'conteúdo A')
self.assertEqual(type(df[df['coluna C_uid'] == 'Jam 1']['coluna C_coluna1.4'].iloc[0]), list)
self.assertTrue(pd.isnull(df[df['coluna C_uid'] == 'Jam 3']['coluna C_coluna1.4'].iloc[0]))
self.assertEqual(df['coluna A'].iloc[0], df['coluna A'].iloc[1])
def test_lon_lat_to_UTM(self):
l = [(-48.85777, -26.31254), (-48.84572, -26.30740)]
UTM_list = lon_lat_to_UTM(l)
self.assertTrue(math.isclose(UTM_list[0][1], 7087931, rel_tol=1e-7))
self.assertTrue(math.isclose(UTM_list[1][0], 715062, rel_tol=1e-6))
def test_prep_jams_tosql(self):
test_df_jams = pd.read_csv(project_dir + "/tests/test_data/test_df_jams.csv")
test_jams_tosql = prep_jams_tosql(test_df_jams)
self.assertEqual(len(test_jams_tosql.columns),16)
self.assertEqual(test_jams_tosql["JamTimeDelayInSeconds"].dtype, int)
self.assertEqual(test_jams_tosql["JamDateEnd"].dtype, datetime.datetime)
self.assertEqual(type(test_jams_tosql["JamDscStreet"].iloc[0]), str)
self.assertEqual(type(test_jams_tosql["JamDscCoordinatesLonLat"].iloc[0]), str)
def test_UTM_to_lon_lat(self):
l = [(713849, 7087931), (715062, 7088480)]
lon_lat_list = UTM_to_lon_lat(l)
self.assertTrue(math.isclose(lon_lat_list[0][0], -48.85777, rel_tol=1e-5))
self.assertTrue(math.isclose(lon_lat_list[1][1], -26.30740, rel_tol=1e-5))
def test_extract_geo_sections(self):
#Connection and initial setup
DATABASE = {
'drivername': os.environ.get("db_drivername"),
'host': os.environ.get("db_host"),
'port': os.environ.get("db_port"),
'username': os.environ.get("db_username"),
'password': os.environ.get("db_password"),
'database': os.environ.get("db_database"),
}
meta = connect_database(DATABASE)
test_geo_sections = extract_geo_sections(meta)
self.assertEqual(test_geo_sections.shape, (16148, 16))
self.assertEqual(test_geo_sections.geometry.name, "section_polygon")
self.assertFalse((test_geo_sections.min_x > test_geo_sections.max_x).any())
self.assertFalse((test_geo_sections.min_y > test_geo_sections.max_y).any())
class TestLoadFunc(unittest.TestCase):
DATABASE = {
'drivername': os.environ.get("db_drivername"),
'host': os.environ.get("db_host"),
'port': os.environ.get("db_port"),
'username': os.environ.get("db_username"),
'password': <PASSWORD>("db_password"),
'database': os.environ.get("db_database"),
}
db_url = URL(**DATABASE)
engine = create_engine(db_url)
meta = MetaData()
meta.bind = engine
meta.reflect()
def test_extract_jps_datefilter(self):
"""
Date filter works properly
"""
date_begin = datetime.date(day=27, month=9, year=2017)
date_end = datetime.date(day=29, month=9, year=2017)
df_jps = extract_jps(self.meta, date_begin, date_end)
self.assertEqual(df_jps["MgrcDateStart"].dt.date.nunique(), 2)
def test_extract_jps_timefilter(self):
"""
2 - Time filter works properly
"""
date_begin = datetime.date(day=28, month=9, year=2017)
date_end = datetime.date(day=29, month=9, year=2017)
periods = [(7,9), (17,19)]
df_jps = extract_jps(self.meta, date_begin, date_end, periods=periods)
self.assertEqual(df_jps["MgrcDateStart"].dt.hour.nunique(), 4)
def test_extract_jps_weekendsfilter(self):
"""
3 - Weekends filter works properly
"""
date_begin = datetime.date(day=28, month=9, year=2017)
date_end = datetime.date(day=10, month=10, year=2017)
periods = [(17,19)]
df_jps_wkTrue = extract_jps(self.meta, date_begin, date_end, periods=periods, weekends=True)
df_jps_wkFalse = extract_jps(self.meta, date_begin, date_end, periods=periods, weekends=False)
self.assertEqual(df_jps_wkTrue["MgrcDateStart"].dt.dayofweek.nunique(), 7)
self.assertEqual(df_jps_wkFalse["MgrcDateStart"].dt.dayofweek.nunique(), 5)
def test_extract_jps_binsdivision(self):
"""
Bins are set properly
"""
date_begin = datetime.date(day=27, month=9, year=2017)
date_end = datetime.date(day=28, month=9, year=2017)
df_jps = extract_jps(self.meta, date_begin, date_end)
df_jps['minute_bin_check'] = (df_jps["MgrcDateStart"].dt.minute < df_jps["minute_bin"].str[0:2].astype(int)) \
| (df_jps["MgrcDateStart"].dt.minute > df_jps["minute_bin"].str[-2:].astype(int))
self.assertEqual(df_jps["minute_bin_check"].sum(), 0)
``` |
{
"source": "joirneto/mqqt_test_DEA",
"score": 3
} |
#### File: joirneto/mqqt_test_DEA/pub_sub.py
```python
import random
import time
from paho.mqtt import client as mqtt_client
broker = '172.16.17.32'
port = 1883
topic = "info/0000000000123"
# generate client ID with pub prefix randomly
client_id = f'python-mqtt-{random.randint(0, 1000)}'
# username = 'emqx'
# password = '<PASSWORD>'
def connect_mqtt():
now0 = time.time()
def on_connect(client, userdata, flags, rc):
if rc == 0:
now1 = time.time()
now = now1 - now0
print(f"Connected to MQTT Broker! `{now}` ")
time.sleep(10)
else:
print("Failed to connect, return code %d\n", rc)
client = mqtt_client.Client(client_id)
client.on_connect = on_connect
client.connect(broker, port)
return client
def publish(client):
msg_count = 0
while True:
msg = f"messages: {msg_count}"
result = client.publish(topic, msg)
# result: [0, 1]
status = result[0]
if status == 0:
print(f"Send `{msg}` to topic `{topic}`")
else:
print(f"Failed to send message to topic {topic}")
msg_count += 1
def subscribe(client: mqtt_client):
def on_message(client, userdata, msg):
print(f"Received `{msg.payload.decode()}` from `{msg.topic}` topic and time `")
client.subscribe(topic)
client.on_message = on_message
def run():
client = connect_mqtt()
subscribe(client)
client.loop_start()
publish(client)
if __name__ == '__main__':
run()
```
#### File: joirneto/mqqt_test_DEA/sub.py
```python
import random
import base64
from paho.mqtt import client as mqtt_client
broker = '172.16.58.3'
port = 1883
topic = "info/12340000000000"
# generate client ID with pub prefix randomly
client_id = f'python-mqtt-{random.randint(0, 100)}'
# username = 'emqx'
# password = '<PASSWORD>'
def ecg(mensagem):
import matplotlib.pyplot
from numpy.core.fromnumeric import size
# Convert the message string into an array of characters
arrayCaracteres = []
for palavra in mensagem:
for letra in palavra:
arrayCaracteres.append(letra)
# Strip the header
payloadCaracteres= arrayCaracteres[32:2532]
# Regroup into an array of hex character pairs
i=0
payloadParesHexa=[]
while i < size(payloadCaracteres)-1:
aux = payloadCaracteres[i] + '' + payloadCaracteres[i+1]
payloadParesHexa.append(aux)
i +=2
# Convert from hex to int - values from 0 to 255
payloadInt = []
for val in payloadParesHexa:
payloadInt.append(int(val,16))
# Generate the time step
tempo = 5/size(payloadInt)
# Build an array with all the time values
aux = tempo
arrayTempo = []
while tempo <= 5:
arrayTempo.append(tempo)
tempo+= aux
# Check that the data to be plotted have matching dimensions
if(size(payloadInt)>size(arrayTempo)):
del(payloadInt[size(payloadInt)-1])
# Plot the data
matplotlib.pyplot.plot(arrayTempo, payloadInt)
matplotlib.pyplot.xlabel('time in seconds')
matplotlib.pyplot.ylabel('Amplitude (normalised)')
matplotlib.pyplot.title('Heart beat signal Template')
matplotlib.pyplot.show()
def connect_mqtt() -> mqtt_client:
def on_connect(client, userdata, flags, rc):
if rc == 0:
print("Connected to MQTT Broker!")
else:
print("Failed to connect, return code %d\n", rc)
client = mqtt_client.Client(client_id)
client.on_connect = on_connect
client.connect(broker, port)
return client
def subscribe(client: mqtt_client):
def on_message(client, userdata, msg):
print(f"Received `{msg.payload}` from `{msg.topic}` topic and time `")
res = ''.join(format(x, '02x') for x in msg.payload)
print(f"Received `{res}` from `{msg.topic}` topic and time `")
ecg(res)
client.subscribe(topic)
client.on_message = on_message
def run():
client = connect_mqtt()
subscribe(client)
client.loop_forever()
if __name__ == '__main__':
run()
``` |
{
"source": "Joisha02/DataStructure-in-C",
"score": 4
} |
#### File: DataStructure-in-C/Recursion/sum_natural.py
```python
def sum_Natural(n):
if n == 0:
return 0
else:
return sum_Natural(n - 1) + n
n = int(input())
result = sum_Natural(n)
print(f"Sum of first {n} natural numbers -> {result}")
```
#### File: DataStructure-in-C/Recursion/taylor_using_recur.py
```python
import math


def taylor_Recr(x, n):
    # e^x Taylor series: recursively accumulate the x^k / k! terms up to k = n.
    if n == 0:
        return 1.0
    return taylor_Recr(x, n - 1) + (x ** n) / math.factorial(n)
x, n = list(map(int, input().split()))
res = taylor_Recr(x, n)
print(res)
``` |
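A quick sanity check for the recursive Taylor approximation above; a sketch that assumes `taylor_Recr` is defined in the same scope (e.g. pasted alongside) and compares it against `math.exp`.
```python
import math

# Hedged sanity check; assumes taylor_Recr from above is available in this scope.
for x in (0, 1, 2):
    approx = taylor_Recr(x, 10)
    assert abs(approx - math.exp(x)) < 1e-3, (x, approx, math.exp(x))
print("taylor_Recr(x, 10) matches math.exp(x) to within 1e-3 for x in {0, 1, 2}")
```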
{
"source": "joishbader/test",
"score": 2
} |
#### File: lib/generate_labels/generate_labels.py
```python
import time
import sys
from collections import defaultdict
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from pycocotools import mask as COCOmask
import json
import numpy as np
# Percentage movement on (x,y,w,h).
# Each movement will generate a json file, consisting of 'dious', 'act', 'image_id', 'bbox', 'score', 'category_id'.
acts = [[-0.02, 0, 0, 0], [0, -0.02, 0, 0], [0, 0, -0.02, 0], [0, 0, 0, -0.02]]
# gtpath is the ground-truth annotation path; pdpath is the detection-result path.
# gtpath = '/S2/MI/data/human_pose/mscoco/annotations/instances_minival2014.json'
# pdpath = '/S2/MI/jbr/RLObjectDetection/output/res101/coco_2014_minival/faster_rcnn_10_validation/detections_minival2014_results.json'
gtpath = '/S2/MI/data/human_pose/mscoco/annotations/instances_train2014.json'
pdpath = '/S2/MI/jbr/RLObjectDetection/output/res101/coco_2014_train/faster_rcnn_10/detections_train2014_results.json'
jsonname = 'train2014'
f = open(pdpath, 'r')
dts = json.load(f)
f.close()
imageidlist = [x['image_id'] for x in dts]
imageidlist = sorted(np.unique(imageidlist))
print('---------------------------')
print('len imageidlist:', len(imageidlist))
print('imageidlist[0]:', imageidlist[0])
print('---------------------------')
_dts = defaultdict(list)
for dt in dts:
_dts[dt['image_id'], dt['category_id']].append(dt)
print('------------------------')
cocoGt = COCO(gtpath)
imgIDs = sorted(cocoGt.getImgIds())
catIDs = sorted(cocoGt.getCatIds())
print('len imgIDs:', len(imgIDs))
gts = cocoGt.loadAnns(cocoGt.getAnnIds(imgIds=imgIDs, catIds=catIDs))
print('len gts:', len(gts))
_gts = defaultdict(list)
for gt in gts:
_gts[gt['image_id'], gt['category_id']].append(gt)
print('-------------------------')
def computeIoU(imgId, catId, useCats=False, maxDets=100000):
if useCats:
gt = _gts[imgId, catId]
dt = _dts[imgId, catId]
else:
gt = [_ for cId in catIDs for _ in _gts[imgId, cId]]
dt = [_ for cId in catIDs for _ in _dts[imgId, cId]]
if len(gt) == 0 or len(dt) == 0:
return [], 1, 1, 1
dt = sorted(dt, key=lambda x: -x['score'])
if len(dt) > maxDets:
dt = dt[0:maxDets]
g = [g['bbox'] for g in gt]
d = [d['bbox'] for d in dt]
# print('d len:', len(d))
# print('g len:', len(g))
iscrowd = [int(o['iscrowd']) for o in gt]
ious = COCOmask.iou(d, g, iscrowd)
ious = np.array(ious)
if ious.ndim == 0:
ious = np.array([ious])
if ious.ndim == 1:
if len(g) > 1:
ious = np.array([ max(ious) ])
if ious.ndim == 2:
ious = np.amax(ious, axis=1)
if ious.ndim > 2:
print('Wrong in ious dim!')
sys.exit(-1)
return ious, g, dt, iscrowd
# action is 4-tuple floats, [0.02, 0, 0, 0]
def computeNewIoU(g, d, iscrowd, action):
if g == 1:
return []
for i in range(len(d)):
x, y, w, h = d[i]
dx = w * action[0]
dy = h * action[1]
dw = w * action[2]
dh = h * action[3]
x += dx
y += dy
w += dw
h += dh
d[i] = [x, y, w, h]
ious = COCOmask.iou(d, g, iscrowd)
ious = np.array(ious)
if ious.ndim == 0:
ious = np.array([ious])
return ious
if ious.ndim == 1:
if len(d) > 1:
return ious
else:
ious = np.array([ max(ious) ])
return ious
if ious.ndim == 2:
ious = np.amax(ious, axis=1)
return ious
if ious.ndim > 2:
print('Wrong in newious dim!')
sys.exit(-1)
# ious, g, dt, iscrowd = computeIoU(139, 86)
# # print('iou sample:', ious)
#
# d = [d['bbox'] for d in dt]
# act = [0.02, 0, 0, 0]
# newious = computeNewIoU(g, d, iscrowd, act)
# # print('newious:', newious)
#
# for i in range(len(dt)):
# dious = newious[i] - ious[i]
# dt[i]['dious'] = dious
# dt[i]['act'] = act
# f = open('1.json', 'w')
# json.dump(dt, f)
# f.close()
for act in acts:
tic = time.time()
dtlist = []
cnt = -1
for image_id in imgIDs:
cnt += 1
if cnt % 10000 == 0:
print('num: {:8d} | image_id:{:10d}'.format(cnt, image_id))
# for category_id in catIDs:
ious, g, dt, iscrowd = computeIoU(image_id, 0)
if g == 1:
continue
d = [d['bbox'] for d in dt]
newious = computeNewIoU(g, d, iscrowd, act)
if len(dt) == 0:
print('ious:', ious)
if len(ious) == 0:
print('ious::', ious, 'g::', g)
print('len(ious):', len(ious))
for i in range(len(dt)):
dious = newious[i] - ious[i]
dt[i]['dious'] = dious
dt[i]['act'] = act
dtlist += dt
# break
f = open('{}{}.json'.format(jsonname, act), 'w')
json.dump(dtlist, f)
f.close()
toc = time.time()
print('--- time: {:.4f} seconds ---'.format(toc - tic))
```
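A self-contained sketch of the labelling idea in this script: shift one detection box by 2% of its width and record the IoU change ("dious") against a ground-truth box. The boxes below are made up; only `pycocotools` is assumed to be installed.
```python
import numpy as np
from pycocotools import mask as COCOmask

# Made-up boxes in COCO [x, y, w, h] format.
gt_box = [[10.0, 10.0, 100.0, 50.0]]
dt_box = [[12.0, 11.0, 100.0, 50.0]]
iscrowd = [0]

iou_before = np.array(COCOmask.iou(dt_box, gt_box, iscrowd)).max()

# Apply one action from `acts`, e.g. [-0.02, 0, 0, 0]: shift x left by 2% of the width.
act = [-0.02, 0, 0, 0]
x, y, w, h = dt_box[0]
dt_moved = [[x + w * act[0], y + h * act[1], w * (1 + act[2]), h * (1 + act[3])]]
iou_after = np.array(COCOmask.iou(dt_moved, gt_box, iscrowd)).max()

print("dious =", iou_after - iou_before)  # this delta is what gets stored per detection
```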
#### File: model/Reinforcement/action.py
```python
import numpy as np
def Identify(x):
return x
class Action:
def __init__(self, delta, alpha=1., iou_thres=0, wtrans=None):
self.delta = delta
self.alpha = alpha
self.iou_thres = iou_thres
self.num_acts = 4 * len(delta) * 2
self.actDeltas = np.zeros((self.num_acts, 4), dtype=np.float32)
self.wtrans = Identify if wtrans is None else wtrans
idx = 0
for i in range(4): # bbox dimension
for j in range(len(delta)):
self.actDeltas[idx, i] = delta[j] * alpha
idx += 1
self.actDeltas[idx, i] = -delta[j] * alpha
idx += 1
def move_from_act(self, bboxes, preds, targets, maxk):
"""
input:
bboxes: np.array of shape b * n * 4
preds: np.array of shape b * n * num_acts
targets: np.array of shape b * n * num_acts
maxk: int, max number of boxes to be moved
"""
batch_size, num_boxes, _ = bboxes.shape
assert(preds.shape == targets.shape)
assert(bboxes.ndim == 3 and preds.ndim == 3)
assert(preds.shape[0] == batch_size)
assert(preds.shape[1] == num_boxes)
correct = 0
for bid in range(batch_size):
cnt = 0
vis = [None] * num_boxes
pred, target = preds[bid], targets[bid]
inds = np.flip(np.argsort(pred.reshape(-1)), axis=0)
for num in inds:
idx = num // self.num_acts
act_id = num % self.num_acts
assert(pred.reshape(-1)[num] == pred[idx][act_id])
x, y, w, h = bboxes[bid][idx]
delta = self.actDeltas[act_id]
if vis[idx] is None:
cnt += 1
vis[idx] = 1
if target[idx][act_id] == 1:
correct += 1
bboxes[bid][idx] += delta * np.array([w,h,w,h])
if cnt >= maxk:
break
return bboxes, correct * 100. / (batch_size * maxk)
```
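A small sketch exercising the `Action` class above with dummy predictions; the numbers are made up and the class is assumed to be importable from this module.
```python
import numpy as np

# Hedged sketch; assumes Action (defined above) is importable in this scope.
action_space = Action(delta=[0.02], alpha=1.0)
print(action_space.num_acts)  # 4 coordinates * 1 delta * 2 signs = 8 actions

bboxes = np.array([[[10.0, 10.0, 100.0, 50.0]]], dtype=np.float32)   # shape (b=1, n=1, 4)
preds = np.zeros((1, 1, action_space.num_acts), dtype=np.float32)
preds[0, 0, 0] = 1.0                                                  # most confident action
targets = np.ones((1, 1, action_space.num_acts), dtype=np.float32)   # dummy "good move" labels

moved, acc = action_space.move_from_act(bboxes, preds, targets, maxk=1)
print(moved[0, 0], acc)  # the shifted box and the accuracy in percent
```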
#### File: model/Reinforcement/utils.py
```python
import logging
import os
import numpy as np
logs = set()
def init_log(name, level = logging.INFO):
if (name, level) in logs: return
logs.add((name, level))
logger = logging.getLogger(name)
logger.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
if 'SLURM_PROCID' in os.environ:
rank = int(os.environ['SLURM_PROCID'])
logger.addFilter(lambda record: rank == 0)
else:
rank = 0
format_str = '%(asctime)s-rk{}-%(filename)s#%(lineno)d:%(message)s'.format(rank)
formatter = logging.Formatter(format_str)
ch.setFormatter(formatter)
logger.addHandler(ch)
class AveMeter():
def __init__(self, size):
self.size = size
self.opr = 0
self.val = 0.0
self.avg = 0.0
self.elems = list()
def add(self, x):
self.val = x
if self.opr >= self.size:
pos = self.opr % self.size
self.elems[pos] = x
self.avg = sum(self.elems) / self.size
else:
self.elems.append(x)
self.avg = sum(self.elems) / (self.opr+1)
self.opr += 1
def accuracy(output, target, k=1):
output, target = output.reshape(-1), target.reshape(-1)
inds = np.argsort(output)[-k:]
output = output[inds]
target = target[inds]
correct = np.sum(target == 1)
return correct * 100.0 / k
def adjust_learning_rate(optimizer, epoch,
learning_rate=None, interval=None, epochs=None, decay=.1):
if interval is not None:
learning_rate *= (decay ** (epoch // interval))
else:
for decay_epoch in epochs:
if decay_epoch <= epoch:
learning_rate *= decay
for param_group in optimizer.param_groups:
param_group['lr'] = learning_rate
return
def ensure_file(filename):
assert os.path.isfile(filename), '{} is not a valid file.'.format(filename)
def ensure_dir(dirpath):
if not os.path.exists(dirpath):
os.makedirs(dirpath)
def cocoval(ann_file, res_file, ann_type='bbox'):
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
coco_gt = COCO(ann_file)
coco_dt = coco_gt.loadRes(res_file)
imgIds = sorted(coco_gt.getImgIds())
coco_eval = COCOeval(coco_gt, coco_dt)
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
``` |
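A hedged sketch of the two small helpers above, `AveMeter` and `accuracy` (assumed importable from this module).
```python
import numpy as np

# Hedged sketch; assumes AveMeter and accuracy (above) are importable here.
meter = AveMeter(size=3)
for v in [1.0, 2.0, 3.0, 4.0]:
    meter.add(v)
print(meter.val, meter.avg)            # 4.0 and the mean of the last 3 values (3.0)

scores = np.array([0.9, 0.1, 0.8, 0.3])
labels = np.array([1, 0, 0, 1])
print(accuracy(scores, labels, k=2))   # 1 of the top-2 scored items is a positive -> 50.0
```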
{
"source": "Joish/data_mining",
"score": 3
} |
#### File: Joish/data_mining/app.py
```python
from config import TwitterCredentials, FacebookCredentials
from twitter.main import TwitterStream
from flask import Flask,render_template,request,send_file
from os import remove
try:
remove("twitter/twitter_stream.csv")
except:
pass
twc = TwitterCredentials()
app = Flask(__name__)
app.debug = True
@app.route('/')
def index():
return render_template('index.html')
@app.route('/getStream')
def getStream():
try:
remove("twitter/twitter_stream.csv")
except:
pass
search_words=[]
search_words.append(request.args.get('word',None,str))
limit = request.args.get('limit',500,int)
tws = TwitterStream(
CONSUMER_KEY=twc.CONSUMER_KEY,
CONSUMER_SECRET=twc.CONSUMER_SECRET,
ACCESS_TOKEN=twc.ACCESS_TOKEN,
ACCESS_SECRET=twc.ACCESS_SECRET)
tws.run_stream(search_words,limit)
return send_file(
'twitter/twitter_stream.csv',
attachment_filename="stream.csv")
@app.route('/getTweets')
def getTweets():
try:
remove("twitter/twitter_previous.csv")
except:
pass
search_words=[]
search_words.append(request.args.get('word',None,str))
from_date=request.args.get('from_date',None,str)
to_date=request.args.get('to_date',None,str)
count_per_day=request.args.get('count_per_day',1,int)
total_count=request.args.get('total_count',2,int)
print(from_date,to_date,count_per_day,total_count)
tws = TwitterStream(
CONSUMER_KEY=twc.CONSUMER_KEY,
CONSUMER_SECRET=twc.CONSUMER_SECRET,
ACCESS_TOKEN=twc.ACCESS_TOKEN,
ACCESS_SECRET=twc.ACCESS_SECRET)
tws.get_previous_tweet(search_words,from_date,to_date,count_per_day,total_count)
return send_file(
'twitter/twitter_previous.csv',
attachment_filename=from_date+".csv")
if __name__ == "__main__":
app.run()
``` |
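A hedged client-side sketch for the endpoints above; it assumes the app is running locally on Flask's default port and uses `requests` as an extra dependency.
```python
import requests

# Hedged sketch; assumes the Flask app above is running on http://127.0.0.1:5000.
resp = requests.get(
    "http://127.0.0.1:5000/getStream",
    params={"word": "python", "limit": 100},
)
with open("stream.csv", "wb") as f:
    f.write(resp.content)
```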
{
"source": "Joish/FeatureSelection",
"score": 2
} |
#### File: Joish/FeatureSelection/ForwardFeatureSelection.py
```python
import logging
from helpers import get_final_model_results, return_X_y, remove_from_list, get_features_list, \
get_max_no_features_count, get_result, intial_check, get_current_log_file_name, file_logger
class ForwardFeatureSelection:
def __init__(self, classifier, dataframe, target_name, metric_obj=None, scale_obj=None, test_size=0.3,
min_no_features=0, max_no_features=None, log=False, variation='soft', verbose=1,
random_state=42, selected=[]):
self.classifier = classifier
self.dataframe = dataframe
self.target_name = target_name
self.scale_obj = scale_obj
self.metric_obj = metric_obj
self.test_size = test_size
self.min_no_features = min_no_features
self.max_no_features = max_no_features
self.log = log
self.variation = variation
self.verbose = verbose
self.random_state = random_state
self.selected = selected
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%d-%b-%y %H:%M:%S')
self.current_log_file_name = get_current_log_file_name()
def core_algoritm(self, X, y, feature_list, sel, max_no_features, sco):
feature_list_len = len(feature_list)
selected = sel
score = sco
temp_selected = ''
stop = False
min_no_features = self.min_no_features
for iteration in range(feature_list_len):
logging.warning("##### {} out of {} #####".format(
iteration+1, feature_list_len))
features = selected + [feature_list[iteration]]
metric_score = get_result(X, y, features, self.scale_obj, self.classifier,
self.test_size, self.random_state, self.metric_obj,
self.verbose)
# print(features, metric_score)
# print('\n')
if self.variation == 'soft':
if metric_score > score:
score = metric_score
selected.append(feature_list[iteration])
if self.log:
content = "{} - {} \n".format(selected, score)
file_logger(self.current_log_file_name, content)
# print(max_no_features, min_no_features, len(selected))
if len(selected) >= max_no_features:
break
elif self.variation == 'hard':
if metric_score >= score:
score = metric_score
temp_selected = feature_list[iteration]
elif self.variation == 'hard+':
if metric_score > score:
score = metric_score
temp_selected = feature_list[iteration]
if self.variation == 'hard' or self.variation == 'hard+':
if temp_selected:
selected.append(temp_selected)
if self.log:
content = "{} - {} \n".format(selected, score)
file_logger(self.current_log_file_name, content)
else:
stop = True
if len(selected) >= max_no_features:
stop = True
return selected, score, stop
def soft_forward_feature_selection(self):
X, y = return_X_y(self.dataframe, self.target_name)
feature_list = get_features_list(self.dataframe, self.target_name)
feature_list = remove_from_list(feature_list, self.selected)
max_no_features = get_max_no_features_count(
self.dataframe, self.target_name, self.max_no_features)
score = 0
self.selected, score, stop = self.core_algoritm(
X, y, feature_list, self.selected, max_no_features, score)
def hard_forward_feature_selection(self):
X, y = return_X_y(self.dataframe, self.target_name)
feature_list = get_features_list(self.dataframe, self.target_name)
feature_list = remove_from_list(feature_list, self.selected)
feature_list_len = len(feature_list)
max_no_features = get_max_no_features_count(
self.dataframe, self.target_name, self.max_no_features)
score = 0
cnt = 0
while len(feature_list):
logging.warning("{} out of {}".format(
cnt+1, feature_list_len))
temp_selected, score, stop = self.core_algoritm(
X, y, feature_list, self.selected, max_no_features, score)
# print(temp_selected, score, stop)
# print(temp_selected, score)
# print(feature_list)
if stop:
break
self.selected = temp_selected
feature_list = remove_from_list(feature_list, self.selected)
cnt += 1
# print(self.selected, score, feature_list)
print('\n')
def run(self):
intial_check(self.min_no_features, self.max_no_features,
self.scale_obj, self.dataframe, self.target_name, self.selected)
logging.warning('STARTING {} FORWARD FEATURE SELECTION'.format(
self.variation.upper()))
if self.variation == 'soft':
self.soft_forward_feature_selection()
elif self.variation == 'hard' or self.variation == 'hard+':
self.hard_forward_feature_selection()
else:
logging.error('INVALID VARIATION PASSED')
if self.verbose > 1:
get_final_model_results(self.dataframe, self.target_name, self.selected, self.scale_obj,
self.classifier, self.test_size, self.random_state,
self.metric_obj, self.verbose)
return (self.selected)
```
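A hedged usage sketch, not from the original repository: running the selector on a small scikit-learn dataset. It assumes `ForwardFeatureSelection` and its `helpers` module live side by side so the imports at the top of the file resolve.
```python
import pandas as pd
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler

# Hedged sketch; assumes ForwardFeatureSelection (above) is importable.
data = load_breast_cancer()
df = pd.DataFrame(data.data, columns=data.feature_names)
df["target"] = data.target

ffs = ForwardFeatureSelection(
    classifier=LogisticRegression(max_iter=1000),
    dataframe=df,
    target_name="target",
    scale_obj=StandardScaler(),
    max_no_features=5,
    variation="soft",
    verbose=1,
)
print(ffs.run())  # list of selected column names
```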
#### File: Joish/FeatureSelection/helpers.py
```python
import os
import datetime
import logging
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
def intial_check(min_no_features, max_no_features, scale_obj, dataframe, target_name, selected):
feature_list = get_features_list(dataframe, target_name)
if max_no_features is not None and min_no_features > max_no_features:
    logging.error('MINIMUM NUMBER OF FEATURES PARAMETER SHOULD '
                  'BE LESS THAN MAXIMUM NUMBER OF FEATURES PARAMETER')
exit(0)
if scale_obj != None and not isinstance(scale_obj, object):
logging.error('INVALID SCALER OBJECT')
exit(0)
for feat in selected:
if feat not in feature_list:
logging.error("FEATURE '{}' MISSING IN DATAFRAME".format(feat))
exit(0)
def remove_from_list(master_list=[], remove_list=[]):
# This function is used to remove a list of
# values from another list
for items in remove_list:
if items in master_list:
master_list.remove(items)
return master_list
def get_features_list(dataframe, target_name):
# This function returns the feature list of
# the dataframe after removing the target feature
feature_list = list(dataframe)
feature_list = remove_from_list(feature_list, [target_name])
return feature_list
def get_max_no_features_count(dataframe, target_name, max_no_features):
# This function returns the column count
# when max_no_features is None
feature_list = get_features_list(dataframe, target_name)
if max_no_features == None:
column_count = dataframe[feature_list].shape[1]
return column_count
return max_no_features
def return_X_y(dataframe, target_name):
# This function return,
# X - In-dependent variable dataframe
# y - Dependent variable dataframe
feature_list = get_features_list(dataframe, target_name)
X = dataframe[feature_list]
y = dataframe[target_name]
return X, y
def build_model(classifier, X, y, test_size, random_state):
# This function is used to build the ML model
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=test_size, random_state=random_state, stratify=y)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
return y_test, y_pred
def calcuate_prefered_metric(y_test, y_pred, metric_obj, verbose, features):
# This function calculates the preferred metric.
# A custom metric callback is planned but not
# implemented yet (see the else branch below)
if metric_obj == None:
report = classification_report(y_test, y_pred, output_dict=True)
if verbose > 2:
print("FEATURES : {}".format(features))
print(classification_report(y_test, y_pred))
return report['accuracy']
else:
logging.error('WORKING ON IT')
def get_result(X, y, features, scale_obj, classifier, test_size, random_state,
metric_obj, verbose):
# Return the prefered Metric score
X_ffs = X[features]
if scale_obj:
X_ffs = scale_obj.fit_transform(X_ffs)
y_test, y_pred = build_model(classifier, X_ffs, y, test_size, random_state)
metric_score = calcuate_prefered_metric(
y_test, y_pred, metric_obj, verbose, features)
return metric_score
def get_current_log_file_name():
# Return the file name for logging
current_time = datetime.datetime.now()
return "Log::{}.txt".format(current_time)
def file_logger(filename, content):
cwd = os.getcwd()
directory = "Log"
dir_path = os.path.join(cwd, directory)
file_path = os.path.join(dir_path, filename)
if not os.path.isdir(dir_path):
os.mkdir(dir_path)
if not os.path.isfile(file_path):
open(file_path, 'a').close()
f = open(file_path, "a")
f.write(str(content))
f.close()
def get_final_model_results(dataframe, target_name, selected, scale_obj, classifier,
test_size, random_state, metric_obj, verbose):
X, y = return_X_y(dataframe, target_name)
logging.warning("FINAL MODEL RESULTS WITH FEATURES - {}".format(selected))
metric_score = get_result(X, y, selected, scale_obj, classifier, test_size,
random_state, metric_obj, verbose)
logging.warning("RESULT : {}".format(metric_score))
``` |
{
"source": "Joish/JsonParser",
"score": 3
} |
#### File: lib/jsonj/JsonJ.py
```python
class JsonJ:
# Constructor
def __init__(self,data):
self.data = {}
self.identity = 0
self.identity2 = 0
self.localIdentity = 0
self.mpath = ''
self.identityArray = []
self.path = ""
self.typeof = ""
self.children = []
self.isRoot = ''
self.temp = ""
self.objects = [dict,list]
self.objects2 = [str,int,bool]
self.traverse(data)
# function used to clear the saved variable data
def clean(self):
self.data = {}
self.identity = 0
self.identity2 = 0
self.localIdentity = 0
self.mpath = ''
self.identityArray = []
self.path = ""
self.typeof = ""
self.children = []
self.isRoot = ''
self.temp = ""
# function responsible for getting the children of the given node
def get_childrens(self, data, path):
self.children = []
for key in data:
self.children.append(self.path + '/' + str(key))
return self.children
# part of a recursive function to create the required data
def process(self, key, value):
if (value == None or type(value) in self.objects2) :
self.path = self.identityArray[self.identity2 - 1] + '/' + str(key)
# self.typeof = 'file'
self.isRoot = True if self.identity == 0 else False
self.children = []
self.data[self.path] = {
'path': self.path,
# 'type': self.typeof,
'isRoot': self.isRoot,
'children': self.children,
}
else:
self.path = str(key) if self.identity == 0 else self.mpath + '/' + str(key)
# self.typeof = 'folder'
self.isRoot = True if self.identity == 0 else False
self.children = self.get_childrens(value, self.path)
self.data[self.path] = {
'path': self.path,
# 'type': self.typeof,
'isRoot': self.isRoot,
'children': self.children,
}
self.mpath = str(key) if self.identity == 0 else self.mpath + '/' + str(key)
self.identityArray.append(self.mpath)
self.identity += 1
self.identity2 += 1
# recursive function to go through all the nodes of a JSON tree
def traverse(self, o):
if (type(o) == list):
for key, val in enumerate(o):
if type(val) in self.objects:
self.process(key, val)
else:
self.process(val, val)
if (val != None and type(val) in self.objects):
self.traverse(val)
else:
for key in o:
self.process(key, o[key])
if (o[key] != None and type(o[key]) in self.objects):
self.traverse(o[key])
self.localIdentity = self.identity
self.identity -= 1
self.identity2 += 1
self.temp = self.identityArray[len(self.identityArray) - 1].split('/')
if (len(self.temp) > 1):
self.temp.pop()
self.temp = "/".join(self.temp)
self.identityArray.append(self.temp)
self.mpath = self.temp;
# function to return parsed data - RESULT
def get_parsed_data(self):
return self.data
``` |
{
"source": "Joish/Natural-Language-to-SQL",
"score": 3
} |
#### File: Joish/Natural-Language-to-SQL/sqlupload.py
```python
from sqlalchemy import create_engine
import pandas as pd
from multiprocessing import Process
def replace(filename):
engine = create_engine("mysql://root:joish@123@localhost/nlp2sql")
con = engine.connect()
df = pd.read_csv(filename,encoding="latin-1")
#print ("-------------")
#print (df)
df.to_sql(name='data',con=con,if_exists='replace',index=False)
#print ("After df")
con.close()
#return "sucess"
if __name__ == '__main__':
# NOTE: replace() expects a CSV filename, e.g. Process(target=replace, args=("data.csv",))
p = Process(target=replace)
p.start()
p.join()
``` |
{
"source": "joisig/legacy-icelandic-financial",
"score": 2
} |
#### File: joisig/legacy-icelandic-financial/skattskil.py
```python
import csv
import sys
def padFront(num, char, msg):
numSpaces = num - len(msg)
return (char * numSpaces) + msg
def toChar(num, msg):
return padFront(num, ' ', msg)
def toNumber(num, number):
if isinstance(number, str):
return padFront(num, '0', number)
else:
return toNumber(num, str(number))
def makeLine(bankanumer, hofudbnr, skuldabnr, kennitala, gjalddagi,
greidsludag, afborgun, vextir, verdbaetur, drvextir,
kostnadur, eftirstodvar, athugasemdir):
lineParts = [
toChar(4, bankanumer),
toChar(2, hofudbnr),
toChar(6, skuldabnr),
toNumber(10, kennitala),
#dateToChar(gjalddagi),
#dateToChar(greidsludag),
toNumber(8, gjalddagi),
toNumber(8, greidsludag),
toNumber(9, afborgun.replace('.', '')),
toNumber(9, vextir.replace('.', '')),
toNumber(9, verdbaetur.replace('.', '')),
toNumber(9, drvextir),
toNumber(9, kostnadur),
toNumber(9, eftirstodvar.replace('.', '')),
toChar(10, athugasemdir)
]
return "".join(lineParts)
def makeLineFromDict(dict):
return makeLine(
dict["BANKANUMER"], dict["HOFUDBNR"], dict["SKULDABNR"], dict["KENNITALA"],
dict["GJALDDAGI"], dict["GREIDSLUDAG"], dict["AFBORGUN"], dict["VEXTIR"],
dict["VERDBAETUR"], dict["DRVEXTIR"], dict["KOSTNADUR"],
dict["EFTIRSTODVAR"], dict["ATHUGASEMDIR"]
)
if __name__ == '__main__':
reader = csv.DictReader(sys.stdin, dialect='excel', delimiter=';')
dictList = []
for line in reader:
print(makeLineFromDict(line))
``` |
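A hedged sketch of the fixed-width record produced above; every value is made up purely to show the padding.
```python
# Hedged sketch; all values below are made up.
line = makeLine(
    bankanumer="0301", hofudbnr="22", skuldabnr="123456", kennitala="0000000000",
    gjalddagi="20200101", greidsludag="20200105",
    afborgun="10.000", vextir="1.234", verdbaetur="0",
    drvextir=0, kostnadur=0, eftirstodvar="90.000",
    athugasemdir="",
)
print(len(line))   # fixed record length from the padded fields
print(repr(line))
```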
{
"source": "joisino/chainer-ETTTS",
"score": 3
} |
#### File: chainer-ETTTS/Text2Mel/generate.py
```python
import string
import argparse
import sys
import numpy as np
import chainer
from pykakasi import kakasi
kakasi = kakasi()
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import network
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', '-g', type=int, default=-1)
parser.add_argument('--text', type=str, default='icassp stands for the international conference on acoustics, speech and signal processing.')
parser.add_argument('--len', type=int, default=100)
parser.add_argument('--model', '-m', type=str, required=True)
parser.add_argument('--ja', '-j', action='store_true')
args = parser.parse_args()
chars = string.ascii_lowercase + ',.- \"'
model = network.SynthesisNetwork()
print('loading model from ' + args.model)
chainer.serializers.load_npz(
args.model,
model,
)
print('model loaded')
xp = np
if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu()
xp = chainer.cuda.cupy
ctext = args.text
if args.ja:
kakasi.setMode("H","a")
kakasi.setMode("K","a")
kakasi.setMode("J","a")
kakasi.setMode("r","Hepburn")
kakasi.setMode("C", True)
kakasi.setMode("c", False)
conv = kakasi.getConverter()
ctext = conv.do(ctext).lower()
ctext = ctext.replace('、', ', ')
ctext = ctext.replace('。', '.')
print(ctext)
li = []
for c in ctext:
li.append(chars.index(c))
text = xp.array(li).astype('i')
text = xp.expand_dims(text, 0)
x = xp.zeros((1, 80, 1)).astype('f')
cnt = args.len
for i in range(args.len):
sys.stdout.write('\r%d' % i)
sys.stdout.flush()
with chainer.using_config('train', False):
y, a = model.gen(text, x)
x = xp.concatenate((xp.zeros((1, 80, 1)).astype('f'), y), axis=2)
cnt -= 1
if xp.argmax(a[0, :, -1]) >= len(ctext) - 3:
cnt = min(cnt, 10)
if cnt <= 0:
break
sys.stdout.write('\n')
sys.stdout.flush()
img = chainer.cuda.to_cpu(y[0])
plt.pcolor(img)
plt.savefig('./results/gen.png')
plt.close()
img = chainer.cuda.to_cpu(a[0])
plt.pcolor(img)
plt.savefig('./results/gen_a.png')
plt.close()
y = chainer.cuda.to_cpu(y)
np.save('./results/res.npy', y)
if __name__ == '__main__':
main()
```
#### File: chainer-ETTTS/Text2Mel/train.py
```python
import argparse
import os
import subprocess
import chainer
from chainer import iterators, optimizers, serializers
from chainer import training
from chainer.training import extensions
import matplotlib
matplotlib.use('Agg')
import dataset
import network
from updater import SynthesisUpdater
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', '-g', type=int, default=-1)
parser.add_argument('--dump', '-d', type=str, default=None)
parser.add_argument('--epoch', '-e', type=int, default=100)
parser.add_argument('--alpha', type=float, default=0.0001)
parser.add_argument('--beta1', type=float, default=0.9)
parser.add_argument('--beta2', type=float, default=0.999)
parser.add_argument('--batch', '-b', type=int, default=32)
parser.add_argument('--base', type=str, default='.')
parser.add_argument('--data', type=str, required=True)
args = parser.parse_args()
train = dataset.SynthesisDataset(args.data)
train_iter = iterators.SerialIterator(train, batch_size=args.batch)
model = network.SynthesisNetwork()
if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu()
opt = optimizers.Adam(alpha=args.alpha, beta1=args.beta1, beta2=args.beta2)
opt.setup(model)
opt.add_hook(chainer.optimizer.GradientClipping(1.0))
updater = SynthesisUpdater(
net=model,
iterator={'main': train_iter},
optimizer={'opt': opt},
device=args.gpu,
)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=os.path.join(args.base, 'results'))
trainer.extend(extensions.LogReport(trigger=(1, 'epoch')))
trainer.extend(extensions.snapshot(filename='dump'), trigger=(100, 'epoch'))
trainer.extend(extensions.PlotReport(['loss_bin'], trigger=(1, 'epoch'), file_name='loss_bin.png'))
trainer.extend(extensions.PlotReport(['loss_l1'], trigger=(1, 'epoch'), file_name='loss_l1.png'))
trainer.extend(extensions.PlotReport(['loss_att'], trigger=(1, 'epoch'), file_name='loss_att.png'))
trainer.extend(extensions.PrintReport(['epoch', 'loss_bin', 'loss_l1', 'loss_att']))
trainer.extend(extensions.ProgressBar(update_interval=1))
if args.dump:
print('loading dump from ' + args.dump)
serializers.load_npz(args.dump, trainer)
trainer.run()
if __name__ == '__main__':
main()
``` |
{
"source": "joisino/HiSampler",
"score": 2
} |
#### File: joisino/HiSampler/reinforce.py
```python
import os
import time
import matplotlib
matplotlib.use('Agg')
import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L
import matplotlib.pyplot as plt
import yaml
from util import calc_reward, makedir, output_graph, output_distribution, load_conf
class MLP(chainer.Chain):
def __init__(self, channels, bias_final):
super(MLP, self).__init__()
self.n_layers = len(channels) - 1
self.channels = channels
bias = [0 for i in range(self.n_layers)]
bias[-1] = bias_final
for i in range(self.n_layers):
self.add_link('l{}'.format(i), L.Linear(channels[i], channels[i+1], initial_bias=bias[i]))
def z(self, batch):
return self.xp.random.randn(batch, self.channels[0]).astype('f')
def __call__(self, x):
for i in range(self.n_layers):
x = self['l{}'.format(i)](x)
if i + 1 == self.n_layers:
x = F.sigmoid(x)
else:
x = F.relu(x)
return x
def gen_edges(n, p, xp):
EPS = 1e-6
a = xp.random.binomial(1, p.data, n * (n-1) // 2)
lp = F.sum(a * F.log(p + EPS) + (1 - a) * F.log(1 - p + EPS))
a_cpu = chainer.cuda.to_cpu(a)
edges = np.array(np.tril_indices(n, -1)).T[np.where(a_cpu == 1)]
return a, edges, lp
def calc_lp(n, p, a, xp):
EPS = 1e-6
lp = F.sum(a * F.log(p + EPS) + (1 - a) * F.log(1 - p + EPS))
return lp
def train():
conf = load_conf()
no_replay = conf['noreplay']
solver = conf['solver']
n = conf['n']
eps = conf['eps']
savedir = conf['dirname']
makedir(savedir)
tmpdir = os.path.join(savedir, 'tmp')
makedir(tmpdir)
np.random.seed(conf['seed'])
logfile = os.path.join(savedir, 'log')
ave = 0
aves = []
ma = 0
global_ma = 0
channels = [10, 100, 500, n*(n-1)//2]
if 'channels' in conf:
channels = conf['channels']
channels.append(n*(n-1)//2)
bias = - np.log(1.0 / conf['p'] - 1)
net = MLP(channels, bias)
if conf['gpu'] != -1:
chainer.cuda.get_device_from_id(conf['gpu']).use()
net.to_gpu()
if conf['opt'] == 'SGD':
opt = chainer.optimizers.SGD(lr=conf['lr'])
elif conf['opt'] == 'Adam':
opt = chainer.optimizers.Adam(alpha=conf['lr'])
opt.setup(net)
stop = 0
pool_size = 10
start_training = 20
r_bests = []
edges_bests = []
z_bests = []
if no_replay:
pool_size = 1
start_training = 1e9
iteration = 0
from_restart = 0
start_time = time.time()
while True:
iteration += 1
from_restart += 1
z = net.z(1)
x = net(z)[0]
edges_li, edges, lp = gen_edges(n, x, net.xp)
r = calc_reward(n, edges, solver, tmpdir)
entropy = F.mean(x * F.log(x + 1e-6) + (1 - x) * F.log(1 - x + 1e-6))
if no_replay:
loss = - r * lp
net.cleargrads()
loss.backward()
opt.update()
if r > ma:
ma = r
stop = 0
else:
stop += 1
if r > global_ma:
global_ma = r
output_graph(os.path.join(savedir, 'output_{}.txt'.format(r)), n, edges)
output_distribution(os.path.join(savedir, 'distribution_{}.txt'.format(r)), n, x.data)
chainer.serializers.save_npz(os.path.join(savedir, 'snapshot_at_reward_{}'.format(r)), net)
elapsed = time.time() - start_time
ave = ave * (1 - conf['eps']) + r * conf['eps']
aves.append(ave)
with open(logfile, 'a') as f:
print(savedir, iteration, elapsed, r, len(edges), entropy.data, global_ma, ma, ave, flush=True)
print(iteration, elapsed, r, len(edges), entropy.data, global_ma, ma, ave, flush=True, file=f)
f = False
for es in edges_bests:
if (es == edges_li).all():
f = True
if not f:
r_bests.append(r)
edges_bests.append(edges_li)
z_bests.append(z)
while len(r_bests) > pool_size:
mi = 0
for j in range(len(r_bests)):
if r_bests[j] < r_bests[mi]:
mi = j
r_bests.pop(mi)
edges_bests.pop(mi)
z_bests.pop(mi)
if from_restart >= start_training:
ind = np.random.randint(len(r_bests))
x = net(z_bests[ind])[0]
lp = calc_lp(n, x, edges_bests[ind], net.xp)
loss = - r_bests[ind] * lp
net.cleargrads()
loss.backward()
opt.update()
if stop >= conf['restart']:
stop = 0
ma = 0
r_bests = []
edges_bests = []
z_bests = []
from_restart = 0
net = MLP(channels, bias)
if conf['gpu'] != -1:
chainer.cuda.get_device_from_id(conf['gpu']).use()
net.to_gpu()
if conf['opt'] == 'SGD':
opt = chainer.optimizers.SGD(lr=conf['lr'])
elif conf['opt'] == 'Adam':
opt = chainer.optimizers.Adam(alpha=conf['lr'])
opt.setup(net)
continue
if iteration % 100 == 0:
plt.clf()
plt.plot(range(len(aves)), aves)
plt.savefig(os.path.join(savedir, 'graph.png'))
if iteration % 1000 == 0:
plt.savefig(os.path.join(savedir, 'graph_{}.png'.format(iteration)))
plt.savefig(os.path.join(savedir, 'graph_{}.eps'.format(iteration)))
chainer.serializers.save_npz(os.path.join(savedir, 'snapshot_{}'.format(iteration)), net)
chainer.serializers.save_npz(os.path.join(savedir, 'opt_{}'.format(iteration)), opt)
if __name__ == '__main__':
train()
``` |
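A tiny sketch of the edge-sampling step used in `train()` above, run on CPU with NumPy as `xp`; it assumes Chainer is installed and `gen_edges` is importable from this module.
```python
import numpy as np
import chainer

# Hedged sketch; assumes gen_edges (defined above) is importable here.
n = 4
p = chainer.Variable(np.full(n * (n - 1) // 2, 0.5, dtype=np.float32))  # edge probabilities

a, edges, lp = gen_edges(n, p, np)
print(a)        # 0/1 sample for each of the n*(n-1)/2 candidate edges
print(edges)    # sampled edges as (row, col) index pairs
print(lp.data)  # log-probability of the sample, the REINFORCE weight
```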
{
"source": "joisino/kirara-slack",
"score": 3
} |
#### File: joisino/kirara-slack/notify.py
```python
import yaml
import os
import sys
import datetime
import slackweb
import config
def main():
os.chdir(os.path.dirname(os.path.abspath(__file__)))
slack = slackweb.Slack(url=config.slack_url)
filename = './holiday_jp/holidays.yml'
if not os.path.exists(filename):
print('holidays.yml not found. Please try "git submodule update -i"', file=sys.stderr)
sys.exit(1)
with open(filename) as f:
holidays = yaml.load(f.read())
def is_holiday(date):
return date.weekday() == 6 or date in holidays
cur = datetime.date.today()
# Holidays are not release days.
if is_holiday(cur):
sys.exit(0)
# append succeeding holidays
delta = datetime.timedelta(days=1)
li = []
while True:
li.append(cur.day)
cur += delta
if not is_holiday(cur):
break
for magazine in config.magazines:
if magazine[1] in li:
text = config.message % magazine[0]
print(text)
try:
slack.notify(text=text)
except ValueError:
print('Slackweb raised ValueError. Please configure `config.py` correctly.')
if __name__ == '__main__':
main()
``` |
{
"source": "joisino/reeval-wmd",
"score": 3
} |
#### File: joisino/reeval-wmd/util.py
```python
import numpy as np
import scipy.io
from scipy.sparse import csr_matrix, coo_matrix
from gensim.matutils import Sparse2Corpus, corpus2csc
from gensim.models import TfidfModel
from sklearn.metrics import pairwise_distances
from sklearn.preprocessing import normalize
from sklearn.model_selection import train_test_split
def compute_tfidf(X_train, X_test):
"""
Compute TF-IDF vectors
It uses only training samples to compute IDF weights
Parameters
----------
X_train : numpy.array
BOW vectors of training samples
Shape: (n, d), where n is the number of training documents, d is the size of the vocabulary
X[i, j] is the number of occurrences of word j in document i
X_test : numpy.array
BOW vectors of test samples
Shape: (m, d), where m is the number of test documents, d is the size of the vocabulary
X[i, j] is the number of occurrences of word j in document i
Returns
-------
X_train : numpy.array
TF-IDF vectors of training samples
Shape: (n, d), where n is the number of training documents, d is the size of the vocabulary
X_test : numpy.array
TF-IDF vectors of test samples
Shape: (m, d), where m is the number of test documents, d is the size of the vocabulary
"""
corpus = Sparse2Corpus(X_train, documents_columns=False)
model = TfidfModel(corpus, normalize=False)
X_train = csr_matrix(corpus2csc(model[corpus], num_terms=X_train.shape[1]).T)
corpus = Sparse2Corpus(X_test, documents_columns=False)
X_test = csr_matrix(corpus2csc(model[corpus], num_terms=X_train.shape[1]).T)
return X_train, X_test
#####################
# #
# our re-evaluation #
# #
#####################
def knn_evaluation(y_train, y_test, D, k):
"""
Compute kNN accuracy
Parameters
----------
y_train : numpy.array
Labels of training samples
Shape: (n,), where n is the number of training documents
y[i] is the label of document i
y_test : numpy.array
Labels of test samples
Shape: (m,), where m is the number of test documents
y[i] is the label of document i
D : numpy.array
Distance matrix between test and training samples
Shape: (m, n), where m is the number of test documents, n is the number of training documents
D[i, j] is the distance between test document i and training document j
k : int
Size of neighborhood in kNN classification
Returns
-------
acc : float
Accuracy
"""
acc = 0
for i in range(y_test.shape[0]):
rank = np.argsort(D[i])
if np.bincount(y_train[rank[:k]]).argmax() == y_test[i]:
acc += 1
acc = acc / y_test.shape[0]
return acc
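# Worked example (illustrative, not from the original experiments): with
# y_train = np.array([0, 0, 1]), y_test = np.array([0]) and
# D = np.array([[0.1, 0.2, 0.9]]) (distances from the single test document to
# the three training documents), knn_evaluation(y_train, y_test, D, k=1)
# returns 1.0: the nearest neighbour (distance 0.1) has label 0, matching y_test.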
def select_k(y_train, D_train):
"""
Select the hyperparameter k using validation data
Parameters
----------
y_train : numpy.array
Labels of training samples
Shape: (n,), where n is the number of training documents
y[i] is the label of document i
D_train : numpy.array
Distance matrix of training samples
Shape: (n, n), where n is the number of training documents
D[i, j] is the distance between training documents i and j
Returns
-------
best_k : int
Chosen hyperparameter k
"""
train, validation = train_test_split(np.arange(len(y_train)), test_size=0.2, random_state=0)
best_score = None
best_k = None
for k in range(1, 20):
score = knn_evaluation(y_train[train], y_train[validation], D_train[validation][:, train], k)
if best_score is None or score > best_score:
best_score = score
best_k = k
return best_k
def evaluate_D(y_train, y_test, D, D_train):
"""
Evaluation using distance metrices
Parameters
----------
y_train : numpy.array
Labels of training samples
Shape: (n,), where n is the number of training documents
y[i] is the label of document i
y_test : numpy.array
Labels of test samples
Shape: (m,), where m is the number of test documents
y[i] is the label of document i
D : numpy.array
Distance matrix between test and training samples
Shape: (m, n), where m is the number of test documents, n is the number of training documents
D[i, j] is the distance between test document i and training document j
D_train : numpy.array
Distance matrix of training samples
Shape: (n, n), where n is the number of training documents
D[i, j] is the distance between training documents i and j
Returns
-------
acc : float
Accuracy
"""
k = select_k(y_train, D_train)
return knn_evaluation(y_train, y_test, D, k)
def evaluate_onehot(X_train, y_train, X_test, y_test, tfidf=False, norm='l1', metric='l1'):
"""
Evaluation using onehot vectors
Parameters
----------
X_train : numpy.array
BOW vectors of training samples
Shape: (n, d), where n is the number of training documents, d is the size of the vocabulary
X[i, j] is the number of occurences of word j in document i
y_train : numpy.array
Labels of training samples
Shape: (n,), where n is the number of training documents
y[i] is the label of document i
X_test : numpy.array
BOW vectors of test samples
Shape: (m, d), where m is the number of test documents, d is the size of the vocabulary
X[i, j] is the number of occurrences of word j in document i
y_test : numpy.array
Labels of test samples
Shape: (m,), where m is the number of test documents
y[i] is the label of document i
tfidf : bool
TF-IDF (True) or BOW (False)
norm : {None, 'l1', 'l2'}
Norm to normalize vectors
If norm is None, vectors are not normalized.
Otherwise, this argument is passed to `norm` argument of `sklearn.preprocessing.normalize`.
metric : {'l1', 'l2'}
Norm to compare vectors
This argument is passed to the `metric` argument of `sklearn.metrics.pairwise_distances`.
Returns
-------
acc : float
Accuracy
"""
if tfidf:
X_train, X_test = compute_tfidf(X_train, X_test)
if norm:
X_train = normalize(X_train, axis=1, norm=norm)
X_test = normalize(X_test, axis=1, norm=norm)
D = pairwise_distances(X_test, X_train, metric=metric)
D_train = pairwise_distances(X_train, metric=metric)
return evaluate_D(y_train, y_test, D, D_train)
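# --- Illustrative sketch (not part of the original module) ---
# A tiny synthetic run of the pipeline above (BOW counts -> optional TF-IDF ->
# normalization -> pairwise distances -> kNN with validated k). It relies on the
# module's existing imports (numpy as np, sklearn helpers); the counts and labels
# below are made up purely for illustration.
def _demo_evaluate_onehot():
    rng = np.random.RandomState(0)
    X_train = rng.poisson(1.0, size=(20, 30)).astype(float)  # 20 training docs, 30-word vocabulary
    y_train = rng.randint(0, 2, size=20)                      # binary labels
    X_test = rng.poisson(1.0, size=(10, 30)).astype(float)    # 10 test docs
    y_test = rng.randint(0, 2, size=10)
    # BOW with L1 normalization and L1 distance (the defaults of evaluate_onehot)
    return evaluate_onehot(X_train, y_train, X_test, y_test, tfidf=False)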
############################
# #
# weighted k-NN evaluation #
# #
############################
def knn_evaluation_smooth(y_train, y_test, D, gamma, k=19):
"""
Compute wkNN accuracy
Parameters
----------
y_train : numpy.array
Labels of training samples
Shape: (n,), where n is the number of training documents
y[i] is the label of document i
y_test : numpy.array
Labels of test samples
Shape: (m,), where m is the number of test documents
y[i] is the label of document i
D : numpy.array
Distance matrix of training and test samples
Shape: (m, n), where m is the number of test documents, n is the number of training documents
D[i, j] is the distance between test document i and training document j
gamma : float
Smoothness
k : int
Size of neighborhood in kNN classification
Returns
-------
acc : float
Accuracy
"""
acc = 0
for i in range(y_test.shape[0]):
rank = np.argsort(D[i])
if np.bincount(y_train[rank[:k]], np.exp(-D[i]/gamma)[rank[:k]]).argmax() == y_test[i]:
acc += 1
acc = acc / y_test.shape[0]
return acc
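# --- Illustrative sketch (not part of the original module) ---
# The weighted vote above uses np.bincount's `weights` argument: each of the k
# nearest labels contributes exp(-distance / gamma) instead of a unit vote.
# A minimal standalone example of that single step (values are made up):
def _demo_weighted_vote():
    labels = np.array([0, 1, 1])        # labels of the 3 nearest neighbours
    dists = np.array([0.1, 0.4, 0.5])   # their distances to the query document
    gamma = 0.05
    weights = np.exp(-dists / gamma)    # closer neighbours get larger weights
    return np.bincount(labels, weights).argmax()  # predicted label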
def select_gamma(y_train, D_train):
"""
Select the hyperparameter gamma in wkNN using validation data
Parameters
----------
y_train : numpy.array
Labels of training samples
Shape: (n,), where n is the number of training documents
y[i] is the label of document i
D_train : numpy.array
Distance matrix of training samples
Shape: (n, n), where n is the number of training documents
D[i, j] is the distance between training documents i and j
Returns
-------
best_gamma : float
Chosen hyperparameter gamma
"""
train, validation = train_test_split(np.arange(len(y_train)), test_size=0.3, random_state=0)
best_score = None
best_gamma = None
for gamma in [(i+1)/200 for i in range(20)]:
score = knn_evaluation_smooth(y_train[train], y_train[validation], D_train[validation][:, train], gamma)
if best_score is None or score > best_score:
best_score = score
best_gamma = gamma
return best_gamma
def evaluate_D_smooth(y_train, y_test, D, D_train):
"""
Evaluation using wkNN and distance matrices
Parameters
----------
y_train : numpy.array
Labels of training samples
Shape: (n,), where n is the number of training documents
y[i] is the label of document i
y_test : numpy.array
Labels of test samples
Shape: (m,), where m is the number of test documents
y[i] is the label of document i
D : numpy.array
Distance matrix of training and test samples
Shape: (m, n), where m is the number of test documents, n is the number of training documents
D[i, j] is the distance between test document i and training document j
D_train : numpy.array
Distance matrix of training samples
Shape: (n, n), where n is the number of training documents
D[i, j] is the distance between training documents i and j
Returns
-------
acc : float
Accuracy
"""
gamma = select_gamma(y_train, D_train)
return knn_evaluation_smooth(y_train, y_test, D, gamma)
def evaluate_onehot_smooth(X_train, y_train, X_test, y_test, tfidf=False):
"""
Evaluation using wkNN and one-hot vectors
Parameters
----------
X_train : numpy.array
BOW vectors of training samples
Shape: (n, d), where n is the number of training documents, d is the size of the vocabulary
X[i, j] is the number of occurrences of word j in document i
y_train : numpy.array
Labels of training samples
Shape: (n,), where n is the number of training documents
y[i] is the label of document i
X_test : numpy.array
BOW vectors of test samples
Shape: (m, d), where m is the number of test documents, d is the size of the vocabulary
X[i, j] is the number of occurrences of word j in document i
y_test : numpy.array
Labels of test samples
Shape: (m,), where m is the number of test documents
y[i] is the label of document i
tfidf : bool
TF-IDF (True) or BOW (False)
Returns
-------
acc : float
Accuracy
"""
if tfidf:
X_train, X_test = compute_tfidf(X_train, X_test)
X_train = normalize(X_train, axis=1, norm='l1')
X_test = normalize(X_test, axis=1, norm='l1')
D = pairwise_distances(X_test, X_train, metric='manhattan')
D_train = pairwise_distances(X_train, metric='manhattan')
return evaluate_D_smooth(y_train, y_test, D, D_train)
###########
# #
# Loading #
# #
###########
def load(filename):
data = scipy.io.loadmat(filename)
X = np.vstack([x.T for x in data['X'][0]])
_, inverse = np.unique(X, axis=0, return_inverse=True)
docid = [[i for w in enumerate(x.T)] for i, x in enumerate(data['X'][0])]
docid = sum(docid, [])
freq = np.hstack([x[0] for x in data['BOW_X'][0]])
X = csr_matrix(coo_matrix((freq, (docid, inverse))))
y = data['Y'][0]
return data, X, y
def load_one(filename):
data = scipy.io.loadmat(filename)
n_train = len(data['xtr'][0])
X = np.vstack([x.T for x in data['xtr'][0]] + [x.T for x in data['xte'][0] if len(x.T) > 0])
_, inverse = np.unique(X, axis=0, return_inverse=True)
docid = [[i for w in enumerate(x.T)] for i, x in enumerate(data['xtr'][0])] + [[n_train + i for w in enumerate(x.T)] for i, x in enumerate(data['xte'][0])]
docid = sum(docid, [])
freq = np.hstack([x[0] for x in data['BOW_xtr'][0]] + [x[0] for x in data['BOW_xte'][0] if len(x.T) > 0])
X = csr_matrix(coo_matrix((freq, (docid, inverse))))
X_train = X[:n_train]
y_train = data['ytr'][0].astype(int)
X_test = X[n_train:]
y_test = data['yte'][0].astype(int)
return X_train, y_train, X_test, y_test
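# --- Illustrative sketch (not part of the original module) ---
# Both loaders build a sparse document-term matrix from ragged per-document word
# lists: np.unique(..., return_inverse=True) assigns a column index to each distinct
# word vector, and coo_matrix((freq, (docid, inverse))) scatters the frequencies into
# a (documents x vocabulary) matrix. A toy version of that step, reusing the module's
# numpy/scipy.sparse imports (the numbers are made up):
def _demo_sparse_bow():
    words = np.array([[1], [2], [1]])  # doc 0 contains words 1 and 2, doc 1 contains word 1
    docid = np.array([0, 0, 1])        # which document each row of `words` belongs to
    freq = np.array([3, 1, 2])         # frequency of each word in its document
    _, inverse = np.unique(words, axis=0, return_inverse=True)
    return csr_matrix(coo_matrix((freq, (docid, inverse)))).toarray()  # [[3, 1], [2, 0]]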
``` |
{
"source": "joisino/tiara",
"score": 2
} |
#### File: joisino/tiara/environments.py
```python
import pickle
import time
import os
import json
import urllib
from PIL import Image
import numpy as np
import fasteners
import torch
import torch.nn as nn
import torchvision.models as models
import torchvision.transforms as transforms
import flickrapi
class OpenImageClassifier():
def __init__(self, class_id, initial_tag_size=None, seed=0):
np.random.seed(seed)
self.class_id = class_id
with open('openimage_output.pickle', 'rb') as f:
self.output = pickle.load(f)
with open('openimage_image_to_tag.pickle', 'rb') as f:
self.item_to_tag_dict = pickle.load(f)
with open('openimage_tag_to_image.pickle', 'rb') as f:
self.tag_to_item_dict = pickle.load(f)
self.tags = list(self.tag_to_item_dict.keys())
if initial_tag_size is not None:
self.tags = np.random.choice(self.tags, size=initial_tag_size, replace=False).tolist()
def item_to_tag(self, item):
return self.item_to_tag_dict[item]
def tag_to_item(self, tag):
return self.tag_to_item_dict[tag]
def f(self, item):
return self.output[item][self.class_id]
def get_image(self, item):
return Image.open('imgs/' + item + '.jpg').convert('RGB')
class FlickerClassifier():
def __init__(self, api_key, api_secret, class_id, seed=0):
np.random.seed(seed)
self.flickr = flickrapi.FlickrAPI(api_key, api_secret, format='json')
self.item_to_tag_pickle = 'flickr_objects/cache_image_to_tag.pickle'
self.tag_to_item_pickle = 'flickr_objects/cache_tag_to_image.pickle'
self.item_to_url_pickle = 'flickr_objects/cache_image_to_url.pickle'
self.results_pickle = 'flickr_objects/cache_results.pickle'
self.initial_tags = 'flickr_objects/initial_tags.txt'
self.cache_lock = 'flickr_objects/cache_lock'
self.api_log = 'flickr_objects/api_log_{}'.format(api_key)
self.api_lock = 'flickr_objects/api_lock_{}'.format(api_key)
with fasteners.InterProcessLock(self.cache_lock):
self.cache_item_to_tag = {}
if os.path.exists(self.item_to_tag_pickle):
with open(self.item_to_tag_pickle, 'rb') as f:
self.cache_item_to_tag = pickle.load(f)
self.cache_tag_to_item = {}
if os.path.exists(self.tag_to_item_pickle):
with open(self.tag_to_item_pickle, 'rb') as f:
self.cache_tag_to_item = pickle.load(f)
self.item_to_url = {}
if os.path.exists(self.item_to_url_pickle):
with open(self.item_to_url_pickle, 'rb') as f:
self.item_to_url = pickle.load(f)
self.cache_results = {}
if os.path.exists(self.results_pickle):
with open(self.results_pickle, 'rb') as f:
self.cache_results = pickle.load(f)
with open(self.initial_tags) as f:
self.tags = [r.strip() for r in f]
self.class_id = class_id
self.preprocess = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
self.model = models.resnet18(pretrained=True)
self.model.eval()
if not os.path.exists('flickr_images'):
os.makedirs('flickr_images')
def wait_until_flickr_rate(self):
with fasteners.InterProcessLock(self.api_lock):
if os.path.exists(self.api_log):
with open(self.api_log, 'r') as f:
times = f.readlines()
else:
times = []
if len(times) == 3000:
t = max(0, 3600 - (time.time() - float(times[0])))
time.sleep(t)
times.pop(0)
times.append(time.time())
assert(len(times) <= 3000)
with open(self.api_log, 'w') as f:
for r in times:
print(float(r), file=f)
def item_to_tag(self, item):
if item not in self.cache_item_to_tag:
self.wait_until_flickr_rate()
try:
tags = self.flickr.tags.getListPhoto(photo_id=item)
self.cache_item_to_tag[item] = [t['raw'] for t in json.loads(tags.decode('utf-8'))['photo']['tags']['tag']]
except BaseException:
self.cache_item_to_tag[item] = []
return self.cache_item_to_tag[item]
def tag_to_item(self, tag):
if tag not in self.cache_tag_to_item:
self.wait_until_flickr_rate()
try:
res = self.flickr.photos.search(tags=tag, license='9,10', extras='url_l', per_page=500)
res = [r for r in json.loads(res.decode('utf-8'))['photos']['photo'] if 'url_l' in r and 'id' in r]
except BaseException:
res = []
self.cache_tag_to_item[tag] = [i['id'] for i in res]
for i in res:
self.item_to_url[i['id']] = i['url_l']
return self.cache_tag_to_item[tag]
def get_image(self, item):
filename = 'flickr_images/{}.jpg'.format(item)
try:
if not os.path.exists(filename):
urllib.request.urlretrieve(self.item_to_url[item], filename)
return Image.open(filename).convert('RGB')
except BaseException:
return Image.new('RGB', (256, 256))
def f(self, item):
key = (item, self.class_id)
if key not in self.cache_results:
input_item = self.get_image(item)
input_tensor = self.preprocess(input_item)
input_batch = input_tensor.unsqueeze(0)
with torch.no_grad():
self.cache_results[key] = self.model(input_batch).reshape(-1).numpy()[self.class_id]
return self.cache_results[key]
def merge_save(self, filename, dict):
old_dict = {}
if os.path.exists(filename):
with open(filename, 'rb') as f:
old_dict = pickle.load(f)
for key, value in dict.items():
old_dict[key] = value
with open(filename, 'wb') as f:
pickle.dump(old_dict, f)
def save_cache(self):
with fasteners.InterProcessLock(self.cache_lock):
self.merge_save(self.item_to_tag_pickle, self.cache_item_to_tag)
self.merge_save(self.tag_to_item_pickle, self.cache_tag_to_item)
self.merge_save(self.item_to_url_pickle, self.item_to_url)
self.merge_save(self.results_pickle, self.cache_results)
class FlickerSimilarity(FlickerClassifier):
def __init__(self, api_key, api_secret, class_id, seed=0):
super(FlickerSimilarity, self).__init__(api_key, api_secret, class_id, seed)
modules = list(self.model.children())[:-1]
self.extractor = nn.Sequential(*modules)
input_item = Image.open('flickr_objects/source/{}'.format(class_id)).convert('RGB')
input_tensor = self.preprocess(input_item)
input_batch = input_tensor.unsqueeze(0)
with torch.no_grad():
self.source_feature = self.extractor(input_batch).reshape(-1).numpy()
def f(self, item):
key = (item, self.class_id)
if key not in self.cache_results:
input_item = self.get_image(item)
input_tensor = self.preprocess(input_item)
input_batch = input_tensor.unsqueeze(0)
with torch.no_grad():
target_feature = self.extractor(input_batch).reshape(-1).numpy()
self.cache_results[key] = np.exp(-np.linalg.norm(target_feature - self.source_feature) ** 2 / 100)
return self.cache_results[key]
def get_class_ids(env_str):
if env_str in ['open', 'flickr']:
return [i * 100 for i in range(10)]
elif env_str == 'flickrsim':
return os.listdir('flickr_objects/source')
assert(False)
def get_env(env_str, class_id, seed, api_key, api_secret):
if env_str == 'open':
return OpenImageClassifier(class_id=class_id, initial_tag_size=100, seed=seed)
elif env_str == 'flickr':
return FlickerClassifier(class_id=class_id, api_key=api_key, api_secret=api_secret, seed=seed)
elif env_str == 'flickrsim':
return FlickerSimilarity(class_id=class_id, api_key=api_key, api_secret=api_secret, seed=seed)
assert(False)
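# --- Illustrative usage sketch (not part of the original file) ---
# The environments share a small interface: `tags` to start from, tag_to_item() to
# expand a tag into items, and f() to score a single item. A hypothetical call for
# the offline OpenImage environment (the pickle files must exist locally; the other
# environments additionally need Flickr API credentials):
# env = get_env('open', class_id=0, seed=0, api_key=None, api_secret=None)
# items = env.tag_to_item(env.tags[0])
# score = env.f(items[0])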
```
#### File: joisino/tiara/evaluate.py
```python
import os
import numpy as np
import matplotlib.pyplot as plt
import argparse
from wordcloud import WordCloud
from methods import RandomQuery, Tiara, TiaraS, EPSGreedy, UCB
from environments import get_class_ids, get_env
from utils import load_glove
def save_array(opt, budget, env_name, method_name, class_id, seed):
scores = np.array([opt.history[i][1] for i in range(budget)])
np.save('outputs/{}_{}_{}_{}_scores.npy'.format(env_name, method_name, class_id, seed), scores)
def update_pics(fig, opt, env, ts, num_methods, method_ind):
history = [opt.history[i - 1] for i in ts]
for ind, (loop, score, i) in enumerate(history):
ax = fig.add_subplot(num_methods, len(ts), len(ts) * method_ind + ind + 1)
img = env.get_image(i)
ax.imshow(img)
ax.text(0, img.size[1] + 100, 'i: {}\ns: {:.4f}\n{}'.format(loop + 1, score, i), size=16, color='red')
ax.axis('off')
def savefig(fig, basename):
fig.savefig('outputs/{}.png'.format(basename), bbox_inches='tight')
fig.savefig('outputs/{}.svg'.format(basename), bbox_inches='tight')
def save_curve(scores, methods, env_name, class_id):
fig, ax = plt.subplots()
for method_name, _, _ in methods:
ax.plot(scores[method_name].mean(0), label=method_name)
ax.legend()
fig.savefig('outputs/{}_{}_curve.png'.format(env_name, class_id), bbox_inches='tight')
def wordcloud_col(word, font_size, position, orientation, font_path, random_state):
lam = (font_size - 6) / (48 - 6)
red = np.array([255, 75, 0])
grey = np.array([132, 145, 158])
res = lam * red + (1 - lam) * grey
res = res.astype(int)
return (res[0], res[1], res[2])
def save_wordcloud(opt, env_name, class_id, seed, method_name, font_path):
tag_scores = opt.tag_scores()
score_dict = {tag: tag_scores[tag_id] for tag_id, tag in enumerate(opt.tags)}
x, y = np.ogrid[:300, :300]
mask = (x - 150) ** 2 + (y - 150) ** 2 > 150 ** 2
mask = 255 * mask.astype(int)
wc = WordCloud(font_path=font_path, background_color='white', mask=mask, random_state=0, prefer_horizontal=1.0, max_font_size=48, min_font_size=6)
wc.generate_from_frequencies(score_dict)
wc.recolor(random_state=0, color_func=wordcloud_col)
wc.to_file('outputs/{}_{}_{}_{}_wordcloud.png'.format(env_name, class_id, seed, method_name))
with open('outputs/{}_{}_{}_{}_wordcloud.svg'.format(env_name, class_id, seed, method_name), 'w') as f:
f.write(wc.to_svg().replace('fill:(', 'fill:rgb('))
if not os.path.exists('outputs'):
os.makedirs('outputs')
parser = argparse.ArgumentParser()
parser.add_argument('--tuning', action='store_true')
parser.add_argument('--extra', action='store_true')
parser.add_argument('--env', choices=['open', 'flickr', 'flickrsim'])
parser.add_argument('--num_seeds', type=int, default=10)
parser.add_argument('--budget', type=int, default=500)
parser.add_argument('--api_key', type=str, help='API key for Flickr.')
parser.add_argument('--api_secret', type=str, help='API secret key for Flickr.')
parser.add_argument('--font_path', type=str, help='Font path for wordclouds.')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('-c', '--classes', type=int, nargs='*')
args = parser.parse_args()
glove = load_glove(300, 6)
if args.tuning:
glove50 = load_glove(50, 6)
glove100 = load_glove(100, 6)
glove200 = load_glove(200, 6)
budget = args.budget
budget_ini = 1
class_ids = get_class_ids(args.env)
num_seeds = args.num_seeds
ts = [10, 50, 100, 200, 300, 400, 500] # checkpoints
print(args.classes)
if args.classes:
class_ids = [class_ids[c] for c in args.classes]
print('classes:', class_ids)
methods = [
('Tiara_1_0.01', Tiara, {'word_embedding': glove, 'lam': 1, 'alpha': 0.01, 'uncase': True}),
('UCB_1', UCB, {'alpha': 1.0}),
('random', RandomQuery, {})
]
if args.extra:
methods += [
('TiaraS_1_0.01', TiaraS, {'word_embedding': glove, 'lam': 1, 'alpha': 0.01}),
('eps_0.01', EPSGreedy, {'eps': 0.01}),
('eps_0.1', EPSGreedy, {'eps': 0.1}),
('eps_0.5', EPSGreedy, {'eps': 0.5}),
('UCB_0.1', UCB, {'alpha': 0.1}),
('UCB_10', UCB, {'alpha': 10.0}),
('adaeps_0.1', EPSGreedy, {'eps': 0.1, 'adaptive': True}),
('adaUCB_1', UCB, {'alpha': 1.0, 'adaptive': True}),
]
if args.tuning:
methods += [
('Tiara_1_0.001', Tiara, {'word_embedding': glove, 'lam': 1, 'alpha': 0.001}),
('Tiara_1_0.1', Tiara, {'word_embedding': glove, 'lam': 1, 'alpha': 0.1}),
('Tiara_1_1', Tiara, {'word_embedding': glove, 'lam': 1, 'alpha': 1}),
('Tiara_1_10', Tiara, {'word_embedding': glove, 'lam': 1, 'alpha': 10}),
('Tiara_1_100', Tiara, {'word_embedding': glove, 'lam': 1, 'alpha': 100}),
('Tiara_0.01_0.01', Tiara, {'word_embedding': glove, 'lam': 0.01, 'alpha': 0.01}),
('Tiara_0.1_0.01', Tiara, {'word_embedding': glove, 'lam': 0.1, 'alpha': 0.01}),
('Tiara_10_0.01', Tiara, {'word_embedding': glove, 'lam': 10, 'alpha': 0.01}),
('Tiara_100_0.01', Tiara, {'word_embedding': glove, 'lam': 100, 'alpha': 0.01}),
('Tiara_1000_0.01', Tiara, {'word_embedding': glove, 'lam': 1000, 'alpha': 0.01}),
('Tiara_50dim', Tiara, {'word_embedding': glove50, 'lam': 1, 'alpha': 0.01}),
('Tiara_100dim', Tiara, {'word_embedding': glove100, 'lam': 1, 'alpha': 0.01}),
('Tiara_200dim', Tiara, {'word_embedding': glove200, 'lam': 1, 'alpha': 0.01}),
]
for class_ind, class_id in enumerate(class_ids):
scores = {method_name: np.zeros((num_seeds, budget)) for method_name, _, _ in methods}
for seed in range(num_seeds):
fig_pics = plt.figure(figsize=(len(ts) * 4, len(methods) * 3))
for method_ind, (method_name, Opt, config) in enumerate(methods):
if args.verbose:
print(method_name, class_ind, seed)
env = get_env(args.env, class_id, seed, args.api_key, args.api_secret)
opt = Opt(env, budget, seed, budget_ini=budget_ini, verbose=args.verbose, **config)
opt.optimize()
scores[method_name][seed] = [opt.history[i][1] for i in range(budget)]
update_pics(fig_pics, opt, env, ts, len(methods), method_ind)
if hasattr(opt, 'tag_scores'):
save_wordcloud(opt, args.env, class_id, seed, method_name, args.font_path)
if hasattr(env, 'save_cache'):
env.save_cache()
savefig(fig_pics, '{}_{}_{}_figures'.format(args.env, class_id, seed))
plt.close()
save_curve(scores, methods, args.env, class_id)
for method_name, _, _ in methods:
np.save('outputs/{}_{}_{}_scores.npy'.format(args.env, class_id, method_name), scores[method_name])
```
#### File: joisino/tiara/utils.py
```python
import numpy as np
def load_glove(dim=300, token=6):
glove = {}
with open('glove/glove.{}B.{}d.txt'.format(token, dim), 'r') as f:
for r in f:
split = r.split()
glove[''.join(split[:-dim])] = np.array(list(map(float, split[-dim:])))
return glove
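# --- Illustrative usage sketch (not part of the original file) ---
# load_glove() keys the dict by token and keeps the trailing `dim` floats as the
# vector; joining split[:-dim] also handles the rare GloVe tokens that contain
# spaces. A hypothetical call (requires the glove/ files to be present locally):
# glove = load_glove(dim=300, token=6)
# vec = glove['cat']  # numpy array of shape (300,)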
``` |
{
"source": "joisino/treegkr",
"score": 3
} |
#### File: joisino/treegkr/sample.py
```python
from treegkr import treegkr
def main():
n = int(input())
vs = []
us = []
cost = []
for i in range(n-1):
l = input().split()
vs.append(int(l[0]) - 1)
us.append(int(l[1]) - 1)
cost.append(int(l[2]))
creation = []
destruction = []
for i in range(n):
c, d = map(float, input().split())
creation.append(c)
destruction.append(d)
x = []
y = []
for i in range(n):
a, b = map(float, input().split())
x.append(a)
y.append(b)
print(treegkr(vs, us, cost, creation, destruction, x, y))
if __name__ == '__main__':
main()
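# --- Input format sketch (inferred from main() above; the example values are made up) ---
# line 1:          n                      number of tree nodes
# next n-1 lines:  v u cost               1-indexed edge endpoints and edge cost
# next n lines:    creation destruction   per-node creation/destruction costs
# next n lines:    x y                    the two distributions to compare
# Example for a 2-node tree:
#   2
#   1 2 3
#   1.0 1.0
#   1.0 1.0
#   0.5 0.5
#   0.3 0.7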
``` |
{
"source": "joismar/Python-FastAPI-MongoDB-API",
"score": 2
} |
#### File: app/controllers/pessoa.py
```python
from bson.objectid import ObjectId
from ..config.database import pessoa_collection
from ..helpers.pessoa import pessoa_helper
from ..controllers.endereco import get_endereco
# Return all people
async def get_all_pessoas():
pessoas = []
async for pessoa in pessoa_collection.find():
pessoas.append(pessoa_helper(pessoa))
return pessoas
# Add a new person
async def add_pessoa(pessoa_data: dict) -> dict:
endereco = {}
if 'cep' in pessoa_data:
endereco = await get_endereco(pessoa_data['cep'])
if not endereco:
return 404
pessoa_data['endereco'] = endereco
pessoa = await pessoa_collection.insert_one(pessoa_data)
new_pessoa = await pessoa_collection.find_one({"_id": pessoa.inserted_id})
return pessoa_helper(new_pessoa)
# Return a person by ID
async def get_pessoa(id: str) -> dict:
pessoa = await pessoa_collection.find_one({"_id": ObjectId(id)})
if pessoa:
return pessoa_helper(pessoa)
# Update a person by ID
async def update_pessoa(id: str, data: dict):
# Return False if no data was provided
if len(data) < 1:
return False
pessoa = await pessoa_collection.find_one({"_id": ObjectId(id)})
if pessoa:
if 'cep' in data:
endereco = await get_endereco(data['cep'])
if not endereco:
return 404
data['endereco'] = endereco
updated_pessoa = await pessoa_collection.update_one(
{"_id": ObjectId(id)}, {"$set": data}
)
if updated_pessoa:
pessoa = await pessoa_collection.find_one({"_id": ObjectId(id)})
return pessoa_helper(pessoa)
return False
# Delete a person by ID
async def delete_pessoa(id: str):
pessoa = await pessoa_collection.find_one({"_id": ObjectId(id)})
if pessoa:
await pessoa_collection.delete_one({"_id": ObjectId(id)})
return True
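# --- Illustrative usage sketch (not part of the original file) ---
# These async helpers are meant to be awaited from FastAPI path operations. The
# router and paths below are assumptions for illustration only, not the app's
# actual routing:
# from fastapi import APIRouter
# router = APIRouter()
#
# @router.get("/pessoas")
# async def list_pessoas():
#     return await get_all_pessoas()
#
# @router.get("/pessoas/{id}")
# async def retrieve_pessoa(id: str):
#     return await get_pessoa(id)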
```
#### File: app/models/pessoa.py
```python
from typing import Optional
from pydantic import BaseModel, Field
from .endereco import EnderecoSchema
from fastapi import HTTPException
class PessoaSchema(BaseModel):
id: str = Field(...)
nome: str = Field(...)
idade: int = Field(...)
endereco: EnderecoSchema = {}
class Config:
schema_extra = {
"example": {
"id" : "hash",
"nome" : "Example",
"idade" : 42,
"endereco" : {}
}
}
class CreatePessoaModel(BaseModel):
nome: str = Field(...)
idade: int = Field(...)
cep: str = Field(...)
class Config:
schema_extra = {
"example": {
"nome" : "Example",
"idade" : 42,
"cep" : "15046250"
}
}
class UpdatePessoaModel(BaseModel):
nome: Optional[str]
idade: Optional[int]
cep: Optional[str]
class Config:
schema_extra = {
"example": {
"nome" : "Example",
"idade" : 42,
"cep" : "15046250"
}
}
def ErrorResponseModel(code, message):
raise HTTPException(status_code=code, detail=message)
``` |
{
"source": "joismar/session",
"score": 3
} |
#### File: session/tests/chrome.py
```python
from unittest.case import TestCase
from unittest import main
import webdriver_session.utils.chrome as utils
import os
from webdriver_session import ChromeSession
from selenium import webdriver
import tracemalloc
from time import sleep
tracemalloc.start()
# python -m unittest tests.chrome.TestChromeUtils
class TestChromeUtils(TestCase):
chrome_version = '93'
# python -m unittest tests.chrome.TestChromeUtils.test_get_chrome_version
def test_get_chrome_version(self):
actual = utils.get_chrome_version()
self.assertRegex(actual, r'\d+')
self.assertEqual(self.__class__.chrome_version, actual)
# python -m unittest tests.chrome.TestChromeUtils.test_download_chromedriver
def test_download_chromedriver(self):
self.assertTrue(utils.download_chromedriver(
os.getcwd(), self.__class__.chrome_version))
# python -m unittest tests.chrome.TestChromeUtils.test_get_chromedriver_version
def test_get_chromedriver_version(self):
self.assertRegex(utils.get_chromedriver_version(os.getcwd()), r'\d+')
self.assertEqual(utils.get_chromedriver_version(
os.getcwd()), self.__class__.chrome_version)
# python -m unittest tests.chrome.TestChromeSession
class TestChromeSession(TestCase):
# python -m unittest tests.chrome.TestChromeSession.test_get_browser
def test_get_browser(self):
session = ChromeSession()
browser = session.get_browser()
self.assertIsNotNone(browser)
self.assertIsNotNone(session.session_id)
self.assertIsNotNone(session.executor_url)
self.assertIsInstance(browser, webdriver.Chrome)
session.close()
self.assertIsNone(session.browser)
# python -m unittest tests.chrome.TestChromeSession.test_profile_folder
def test_profile_folder(self):
session = ChromeSession(profile_folder=True)
session.get_browser()
self.assertTrue(os.path.isdir('ChromeProfile'))
# python -m unittest tests.chrome.TestChromeSession.test_download_path
def test_download_path(self):
session = ChromeSession(download_path=os.getcwd())
browser = session.get_browser()
browser.get('http://speedtest.tele2.net/')
browser.find_element_by_link_text('1MB').click()
sleep(2)
self.assertTrue(os.path.isfile('1MB.zip'))
# python -m unittest tests.chrome.TestChromeSession.test_add_pref
def test_add_pref(self):
session = ChromeSession()
session.add_pref('test', False)
self.assertFalse(session.prefs['test'])
if __name__ == '__main__':
main(verbosity=1)
```
#### File: session/webdriver_session/session.py
```python
from .utils.logger import Logger
from selenium import webdriver
class Session:
'''Store a webdriver session
'''
def __init__(self):
self.webdriver = webdriver
self.log = Logger('session').log
self.browser = None
self.session_id = None
self.executor_url = None
def get_browser(self) -> webdriver.Remote:
'''Configure and return a webdriver session
:returns: A webdriver session
'''
try:
self.browser = self.setup_browser()
except AttributeError as e:
self.log.error(e)
self.log.warning('Method setup_browser should be implemented.')
self.session_id = self.browser.session_id
self.executor_url = self.browser.command_executor._url
self.log.info(
f'Session started - ID: {self.session_id} EXECUTOR_URL: {self.executor_url}')
return self.browser
def get_remote_browser(self, session_id, executor_url) -> webdriver.Remote:
# Code by <EMAIL>
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
# Save the original function, so we can revert our patch
org_command_execute = RemoteWebDriver.execute
def new_command_execute(self, command, params=None):
if command == "newSession":
# Mock the response
return {'success': 0, 'value': None, 'sessionId': session_id}
else:
return org_command_execute(self, command, params)
# Patch the function before creating the driver object
RemoteWebDriver.execute = new_command_execute
new_driver = webdriver.Remote(
command_executor=executor_url, desired_capabilities={})
new_driver.session_id = session_id
# Replace the patched function with original function
RemoteWebDriver.execute = org_command_execute
self.browser = new_driver
self.session_id = self.browser.session_id
self.executor_url = self.browser.command_executor._url
return self.browser
def close(self):
self.browser.quit()
self.browser = None
self.log.info('Exited successfully.')
def __del__(self):
Logger('session').destroy()
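# --- Illustrative usage sketch (not part of the original file) ---
# get_remote_browser() re-attaches to a webdriver started elsewhere by replaying its
# session_id/executor_url instead of spawning a new browser. A hypothetical flow,
# assuming a Session subclass (e.g. ChromeSession from this package) that implements
# setup_browser():
# first = ChromeSession()
# first.get_browser()
# sid, url = first.session_id, first.executor_url
# ...
# second = ChromeSession()
# browser = second.get_remote_browser(sid, url)  # controls the same browser window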
```
#### File: webdriver_session/utils/logger.py
```python
import logging
import sys
class LoggerMeta(type):
_instances = {}
def __call__(self, *args, **kwargs):
if self not in self._instances:
instance = super().__call__(*args, **kwargs)
self._instances[self] = instance
return self._instances[self]
class Logger(metaclass=LoggerMeta):
def __init__(self, name):
self.__handler = None
self.__log = None
self.name = name
self.__setup()
def __setup(self):
self.__log = logging.getLogger(self.name)
self.__log.level = logging.DEBUG
self.__handler = logging.StreamHandler(sys.stdout)
self.__log.addHandler(self.__handler)
@property
def log(self):
return self.__log
def destroy(self):
# Detach the stream handler from the wrapped logger instance.
self.__log.removeHandler(self.__handler)
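# --- Illustrative note (not part of the original file) ---
# LoggerMeta keys its instance registry by the class object, so every Logger(...)
# call returns the same instance regardless of the `name` argument:
# assert Logger('session') is Logger('anything-else')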
``` |
{
"source": "joisse1101/reverb",
"score": 3
} |
#### File: client/modules/hello.py
```python
import random
import re
from client import jasperpath
import RPi.GPIO as GPIO
import time
import sys
import vibrate
WORDS = ["HELLO"]
def handle(text, mic, profile):
vibrate.retrieve_from_DOA('low')
print("hello module")
mic.say('hello')
def isValid(text):
"""
Returns True if the input contains a greeting (e.g. 'hello').
Arguments:
text -- user-input, typically transcribed speech
"""
return bool(re.search(r'\bhello\b', text, re.IGNORECASE))
"""
Responds to user-input, typically speech text, by telling a joke.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
try:
global motorLeft
global motorRight
GPIO.setmode(GPIO.BCM)
#GPIO.setwarnings(False)
GPIO.setup(18,GPIO.OUT)
GPIO.setup(23,GPIO.OUT)
motorLeft = GPIO.PWM(18,100) #motor left = yellow
motorLeft.start(0)
motorRight = GPIO.PWM(23,100) #motor right = blue
motorRight.start(0)
print("motor vibrating")
mic.say("vibration")
retrieve_from_DOA()
#direction = retrieve_from_DOA()
#print("direction: " + direction)
#if direction == 'left':
#print("left motor running")
#for x in range(num):
#motorLeft.ChangeDutyCycle(80)
#time.sleep(0.5)
#motorLeft.ChangeDutyCycle(0)
#time.sleep(0.2)
#if direction == 'right':
#print("right motor running")
except KeyboardInterrupt: # If CTRL+C is pressed, exit cleanly:
print("Keyboard interrupt")
finally:
motorLeft.stop()
motorRight.stop()
motorLeft = None
motorRight = None
GPIO.cleanup() # cleanup all GPIO
print("clean up")
def retrieve_from_DOA():
sys.path.append('/home/pi/reverb/usb_4_mic_array')
import DOA
doa = DOA.main()
if (doa < 90 and doa >= 0) or (doa >= 270):
print("motor DOA <90 or >= 270: " + str(doa))
#return ("right")
#vibrate_motor("right")
vibrate.start_vibrate('right', 50, 3)
elif (doa >= 90 and doa < 270):
print ("motor DOA >90: " + str(doa))
#return("left")
#vibrate_motor("left")
vibrate.start_vibrate('left', 50, 3)
def vibrate_motor(direction):
if direction == 'left':
print("left motor running")
#GPIO.output(18,True)
#time.sleep(2)
motorLeft_Pulse(50,3)
if direction == 'right':
print("right motor running")
#GPIO.output(23,True)
#time.sleep(2)
motorRight_Pulse(100,2)
def motorLeft_Pulse(intensity, num):
for x in range(num):
print("motor left pulse")
motorLeft.ChangeDutyCycle(intensity)
time.sleep(1)
motorLeft.ChangeDutyCycle(0)
time.sleep(1)
def motorRight_Pulse(intensity, num):
for x in range(num):
print("motor right pulse")
motorRight.ChangeDutyCycle(intensity)
time.sleep(1)
motorRight.ChangeDutyCycle(0)
time.sleep(1)
"""
```
#### File: client/modules/Name.py
```python
import random
import re
from client import jasperpath
import sys
import vibrate
WORDS = ["JOYCE", "TIFFANY", "PROF"]
def handle(text, mic, profile):
vibrate.retrieve_from_DOA('low')
print("Name module")
mic.say('Name')
def isValid(text):
"""
Returns True if the input contains one of the recognized names.
Arguments:
text -- user-input, typically transcribed speech
"""
return bool(re.search(r'JOYCE|TIFFANY|PROF', text, re.IGNORECASE))
``` |
{
"source": "joizhang/sifdnet",
"score": 2
} |
#### File: sifdnet/test/sifdnet_test.py
```python
import os
import unittest
import torch
from torch import hub
from torch.backends import cudnn
from torchsummary import summary
from config import Config
from training.models import sifdnet, sifdnet_1, sifdnet_2, sifdnet_3
from training.models.aspp import ASPP
from training.models.sifdnet import ALAM, Decoder
CONFIG = Config()
hub.set_dir(CONFIG['TORCH_HOME'])
os.environ["CUDA_VISIBLE_DEVICES"] = CONFIG['CUDA_VISIBLE_DEVICES']
torch.backends.cudnn.benchmark = True
class SifdNetTestCase(unittest.TestCase):
def test_sifdnet(self):
self.assertTrue(torch.cuda.is_available())
model = sifdnet(pretrained=True)
model = model.cuda()
input_size = model.default_cfg['input_size']
summary(model, input_size=input_size)
def test_aspp(self):
self.assertTrue(torch.cuda.is_available())
model = ASPP(in_channels=448, atrous_rates=[12, 24, 36], out_channels=448)
model = model.cuda()
input_size = (448, 8, 8)
summary(model, input_size=input_size)
def test_alam1(self):
self.assertTrue(torch.cuda.is_available())
model = ALAM(in_channels=448, out_channels=160, size=(16, 16))
model = model.cuda()
input_size = [(160, 16, 16), (448, 8, 8)]
summary(model, input_size=input_size)
def test_alam2(self):
self.assertTrue(torch.cuda.is_available())
model = ALAM(in_channels=32, out_channels=24, size=(128, 128))
model = model.cuda()
input_size = [(24, 128, 128), (32, 64, 64)]
summary(model, input_size=input_size)
def test_decoder(self):
self.assertTrue(torch.cuda.is_available())
model = Decoder(encoder_num_chs=[24, 32, 56, 160, 448], out_channels=2)
model = model.cuda()
input_size = [(448, 8, 8), (160, 16, 16), (56, 32, 32), (24, 128, 128)]
summary(model, input_size=input_size)
def test_sifdnet_1(self):
self.assertTrue(torch.cuda.is_available())
model = sifdnet_1(pretrained=True)
model = model.cuda()
input_size = model.default_cfg['input_size']
summary(model, input_size=input_size)
def test_sifdnet_2(self):
self.assertTrue(torch.cuda.is_available())
model = sifdnet_2(pretrained=True)
model = model.cuda()
input_size = model.default_cfg['input_size']
summary(model, input_size=input_size)
def test_sifdnet_3(self):
self.assertTrue(torch.cuda.is_available())
model = sifdnet_3(pretrained=True)
model = model.cuda()
input_size = model.default_cfg['input_size']
summary(model, input_size=input_size)
if __name__ == '__main__':
unittest.main()
```
#### File: training/models/gbb.py
```python
import torch
from torch import nn as nn
from timm.models.layers import create_conv2d
__all__ = ['GradientBoostNet']
SOBEL_X = [[1., 0., -1.], [2., 0., -2.], [1., 0., -1.]]
SOBEL_Y = [[1., 2., 1.], [0., 0., 0.], [-1., -2., -1.]]
SCHARR_X = [[3., 0., -3.], [10., 0., -10.], [3., 0., -3.]]
SCHARR_Y = [[3., 10., 3.], [0., 0., 0.], [-3., -10., -3.]]
class GradientBoostBlock(nn.Module):
def __init__(self, in_channels, out_channels, pwl_stride, pwl_padding, filter_type='sobel'):
super(GradientBoostBlock, self).__init__()
self.in_channels = in_channels
if filter_type == 'sobel':
filter_x = torch.tensor(SOBEL_X)
filter_y = torch.tensor(SOBEL_Y)
else:
filter_x = torch.tensor(SCHARR_X)
filter_y = torch.tensor(SCHARR_Y)
self.conv_grad_x = create_conv2d(in_channels, in_channels, kernel_size=3, stride=1, depthwise=True)
self.conv_grad_x.weight = nn.Parameter(filter_x.repeat(in_channels, 1, 1, 1), requires_grad=False)
self.conv_grad_y = create_conv2d(in_channels, in_channels, kernel_size=3, stride=1, depthwise=True)
self.conv_grad_y.weight = nn.Parameter(filter_y.repeat(in_channels, 1, 1, 1), requires_grad=False)
self.bn_grad = nn.BatchNorm2d(in_channels)
# Depth-wise convolution
self.conv_dw = create_conv2d(in_channels, in_channels, kernel_size=3, stride=2, depthwise=True)
self.bn1 = nn.BatchNorm2d(in_channels)
self.act1 = nn.ReLU(inplace=True)
# Point-wise linear projection
self.conv_pw = create_conv2d(in_channels, in_channels, kernel_size=1)
self.bn2 = nn.BatchNorm2d(in_channels)
self.act2 = nn.ReLU(inplace=True)
# Point-wise linear projection
self.conv_pwl = create_conv2d(in_channels, out_channels, kernel_size=3, stride=pwl_stride, padding=pwl_padding)
self.bn3 = nn.BatchNorm2d(out_channels)
def forward(self, x):
# Depth-wise expansion
out = self.conv_dw(x)
out = self.bn1(out)
out = self.act1(out)
# Gradient boost
with torch.no_grad():
grad_x = self.conv_grad_x(out)
grad_y = self.conv_grad_y(out)
grad = torch.sqrt(torch.square(grad_x) + torch.square(grad_y))
grad = self.bn_grad(grad)
out = out + grad
# Point-wise convolution
out = self.conv_pw(out)
out = self.bn2(out)
out = self.act2(out)
# Point-wise linear projection
out = self.conv_pwl(out)
out = self.bn3(out)
return out
class GradientBoostNet(nn.Module):
def __init__(self, num_chs, filter_type):
super(GradientBoostNet, self).__init__()
self.block1 = GradientBoostBlock(num_chs[0], num_chs[2], pwl_stride=2, pwl_padding=1, filter_type=filter_type)
self.block2 = GradientBoostBlock(num_chs[2], num_chs[3], pwl_stride=1, pwl_padding=1, filter_type=filter_type)
self.block3 = GradientBoostBlock(num_chs[3], num_chs[4], pwl_stride=1, pwl_padding=1, filter_type=filter_type)
def forward(self, feat_0, feat_2, feat_3):
x = self.block1(feat_0) + feat_2
x = self.block2(x) + feat_3
x = self.block3(x)
return x
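# --- Illustrative sketch (not part of the original module) ---
# The core of GradientBoostBlock is a fixed depthwise Sobel/Scharr pair whose
# responses are combined into a gradient magnitude and added back onto the expanded
# features. A toy shape check with a single block (the sizes are made up):
def _demo_gradient_boost_block():
    block = GradientBoostBlock(in_channels=8, out_channels=16, pwl_stride=2, pwl_padding=1)
    x = torch.randn(1, 8, 32, 32)
    out = block(x)      # depthwise stride-2 conv then stride-2 projection: 32 -> 16 -> 8
    return out.shape    # expected: torch.Size([1, 16, 8, 8])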
``` |
{
"source": "JOJ0/BeetsPluginXtractor",
"score": 2
} |
#### File: beetsplug/xtractor/__init__.py
```python
import os
from beets.plugins import BeetsPlugin
from beets.util.confit import ConfigSource, load_yaml
from beetsplug.xtractor.command import XtractorCommand
class XtractorPlugin(BeetsPlugin):
_default_plugin_config_file_name_ = 'config_default.yml'
def __init__(self):
super(XtractorPlugin, self).__init__()
config_file_path = os.path.join(os.path.dirname(__file__), self._default_plugin_config_file_name_)
source = ConfigSource(load_yaml(config_file_path) or {}, config_file_path)
self.config.add(source)
# @todo: activate this to store the attributes in media files
# field = mediafile.MediaField(
# mediafile.MP3DescStorageStyle(u'danceability'), mediafile.StorageStyle(u'danceability')
# )
# self.add_media_field('danceability', field)
#
# field = mediafile.MediaField(
# mediafile.MP3DescStorageStyle(u'beats_count'), mediafile.StorageStyle(u'beats_count')
# )
# self.add_media_field('beats_count', field)
def commands(self):
return [XtractorCommand(self.config)]
``` |
{
"source": "JOJ0/BeetsPluginYearFixer",
"score": 2
} |
#### File: beetsplug/yearfixer/__init__.py
```python
import os
from beets.plugins import BeetsPlugin
from beets.util.confit import ConfigSource, load_yaml
from beetsplug.yearfixer.command import YearFixerCommand
class YearFixerPlugin(BeetsPlugin):
_default_plugin_config_file_name_ = 'config_default.yml'
def __init__(self):
super(YearFixerPlugin, self).__init__()
config_file_path = os.path.join(os.path.dirname(__file__), self._default_plugin_config_file_name_)
source = ConfigSource(load_yaml(config_file_path) or {}, config_file_path)
self.config.add(source)
def commands(self):
return [YearFixerCommand(self.config)]
``` |
{
"source": "joj0s/trait-curation",
"score": 2
} |
#### File: traits/datasources/dummy.py
```python
from django.db import transaction
from ..models import OntologyTerm, MappingSuggestion, Trait, Mapping, User, Review, Status, Comment
@transaction.atomic
def import_dummy_data():
Comment.objects.all().delete()
Review.objects.all().delete()
MappingSuggestion.objects.all().delete()
Mapping.objects.all().delete()
Trait.objects.all().delete()
OntologyTerm.objects.all().delete()
User.objects.exclude(email="<EMAIL>").delete()
# ONTOLOGY TERMS
term1 = OntologyTerm(label='/ Diabetes mellitus /', curie='EFO:0000400',
iri='http://www.ebi.ac.uk/efo/EFO_0000400', status=Status.CURRENT)
term2 = OntologyTerm(label='/ digestive system disease /', curie='EFO:0000405',
iri='http://www.ebi.ac.uk/efo/EFO_0000405', status=Status.CURRENT)
# Current term falsely registered as awaiting import, to test ols updates
term3 = OntologyTerm(label='/ Hereditary breast cancer - INCORRECT/', curie='Orphanet:227535',
iri='http://www.orpha.net/ORDO/Orphanet_227535', status=Status.AWAITING_IMPORT)
term4 = OntologyTerm(label='/ breast-ovarian cancer, familial, susceptibility to, 3 /', curie='MONDO:0013253',
iri='http://purl.obolibrary.org/obo/MONDO_0013253', status=Status.NEEDS_IMPORT)
term5 = OntologyTerm(label='/ pancreatic cancer, susceptibility to, 4 /', curie='MONDO:0013685',
iri='http://purl.obolibrary.org/obo/MONDO_0013685', status=Status.AWAITING_IMPORT)
term6 = OntologyTerm(label='/ Hypogonadism, diabetes mellitus, alopecia, mental retardation and \
electrocardiographic abnormalities', description='The description for Hypogonadism, diabetes \
mellitus, alopecia, mental retardation and electrocardiographic abnormalities ',
cross_refs='MONDO:0013685,HP:0000400', status=Status.AWAITING_CREATION)
term7 = OntologyTerm(label='/ Familial cancer of breast, 2 /',
description='Description for familial cancer of breast, 2',
cross_refs="Orphanet:0000405", status=Status.NEEDS_CREATION)
term8 = OntologyTerm(label='/ Diastrophic dysplasia /',
description='Description for Diastrophic dysplasia',
cross_refs="", status=Status.AWAITING_CREATION)
# Obsolete term falsely registered as current, to test ols updates
term9 = OntologyTerm(label='/ obsolete_adrenocortical carcinoma /', curie='EFO:0003093',
iri='http://www.ebi.ac.uk/efo/EFO_0003093', status=Status.CURRENT)
term10 = OntologyTerm(label='/ Spastic paraplegia /', curie='HP:999999999',
iri=' http://purl.obolibrary.org/obo/HP_999999999', status=Status.DELETED)
for term in (term1, term2, term3, term4, term5, term6, term7, term8, term9, term10):
term.save()
# TRAITS
trait1 = Trait(name='/ Diabetes mellitus /', status=Status.UNMAPPED, number_of_source_records=9)
trait2 = Trait(name='/ digestive system disease /', status=Status.UNMAPPED, number_of_source_records=4)
trait3 = Trait(name='/ Familial cancer of breast /', status=Status.NEEDS_IMPORT, number_of_source_records=5)
trait4 = Trait(name='/ Insulin-resistant diabetes mellitus /', status=Status.UNMAPPED, number_of_source_records=1)
trait5 = Trait(name='/ pancreatic cancer, susceptibility to, 4 /',
status=Status.UNMAPPED, number_of_source_records=5)
trait6 = Trait(name='/ Hypogonadism, diabetes mellitus, alopecia, mental retardation and \
electrocardiographic abnormalities /', status=Status.UNMAPPED, number_of_source_records=12)
trait7 = Trait(name='/ Pancreatic cancer 4 /', status=Status.UNMAPPED, number_of_source_records=1)
trait8 = Trait(name='/ Familial cancer of breast /', status=Status.NEEDS_CREATION, number_of_source_records=4)
trait9 = Trait(name='/ Diastrophic dysplasia /', status=Status.UNMAPPED, number_of_source_records=7)
trait10 = Trait(name='/ Spastic paraplegia /', status=Status.UNMAPPED, number_of_source_records=7)
for trait in (trait1, trait2, trait3, trait4, trait5, trait6, trait7, trait8, trait9, trait10):
trait.save()
# USERS
user1 = User(email='<EMAIL>', first_name="John", last_name="Doe")
user2 = User(email='<EMAIL>', first_name="Jane", last_name="Doe")
user3 = User(email='<EMAIL>', first_name="Jack", last_name="Doe")
user4 = User(email="<EMAIL>", first_name="ZOOMA")
for user in (user1, user2, user3, user4):
user.save()
# MAPPINGS
m1 = Mapping(mapped_trait=trait1, mapped_term=term1, curator=user1, is_reviewed=True)
m2 = Mapping(mapped_trait=trait2, mapped_term=term2, curator=user2, is_reviewed=False)
m3 = Mapping(mapped_trait=trait3, mapped_term=term3, curator=user3, is_reviewed=True)
m4 = Mapping(mapped_trait=trait4, mapped_term=term4, curator=user1, is_reviewed=False)
m5 = Mapping(mapped_trait=trait5, mapped_term=term5, curator=user2, is_reviewed=True)
m6 = Mapping(mapped_trait=trait6, mapped_term=term6, curator=user3, is_reviewed=True)
m7 = Mapping(mapped_trait=trait7, mapped_term=term7, curator=user1, is_reviewed=False)
m8 = Mapping(mapped_trait=trait8, mapped_term=term8, curator=user2, is_reviewed=True)
m9 = Mapping(mapped_trait=trait9, mapped_term=term9, curator=user2, is_reviewed=True)
m10 = Mapping(mapped_trait=trait10, mapped_term=term10, curator=user2, is_reviewed=True)
for mapping in (m1, m2, m3, m4, m5, m6, m7, m8, m9, m10):
mapping.save()
# MAPPING SUGGESTIONS
ms1 = MappingSuggestion(mapped_trait=trait1, mapped_term=term1, made_by=user4)
ms2 = MappingSuggestion(mapped_trait=trait2, mapped_term=term2, made_by=user4)
ms3 = MappingSuggestion(mapped_trait=trait3, mapped_term=term3, made_by=user4)
ms4 = MappingSuggestion(mapped_trait=trait4, mapped_term=term4, made_by=user4)
ms5 = MappingSuggestion(mapped_trait=trait5, mapped_term=term5, made_by=user4)
ms6 = MappingSuggestion(mapped_trait=trait6, mapped_term=term6, made_by=user4)
ms7 = MappingSuggestion(mapped_trait=trait7, mapped_term=term7, made_by=user4)
ms8 = MappingSuggestion(mapped_trait=trait8, mapped_term=term8, made_by=user4)
ms9 = MappingSuggestion(mapped_trait=trait9, mapped_term=term9, made_by=user4)
ms10 = MappingSuggestion(mapped_trait=trait10, mapped_term=term10, made_by=user4)
for mapping_suggestion in (ms1, ms2, ms3, ms4, ms5, ms6, ms7, ms8, ms9, ms10):
mapping_suggestion.save()
# SAVE CURRENT MAPPINGS
for i in range(1, 10):
trait = eval('trait' + str(i))
mapping = eval('m' + str(i))
trait.current_mapping = mapping
trait.save()
# REVIEWS
reviews = list()
reviews.append(Review(mapping_id=m1, reviewer=user2))
reviews.append(Review(mapping_id=m1, reviewer=user3))
reviews.append(Review(mapping_id=m2, reviewer=user3))
reviews.append(Review(mapping_id=m3, reviewer=user1))
reviews.append(Review(mapping_id=m3, reviewer=user2))
reviews.append(Review(mapping_id=m4, reviewer=user3))
reviews.append(Review(mapping_id=m5, reviewer=user1))
reviews.append(Review(mapping_id=m5, reviewer=user3))
reviews.append(Review(mapping_id=m6, reviewer=user1))
reviews.append(Review(mapping_id=m6, reviewer=user2))
reviews.append(Review(mapping_id=m7, reviewer=user3))
reviews.append(Review(mapping_id=m8, reviewer=user1))
reviews.append(Review(mapping_id=m8, reviewer=user3))
reviews.append(Review(mapping_id=m9, reviewer=user1))
reviews.append(Review(mapping_id=m9, reviewer=user3))
reviews.append(Review(mapping_id=m10, reviewer=user1))
reviews.append(Review(mapping_id=m10, reviewer=user3))
for review in reviews:
review.save()
```
#### File: traits/datasources/ols.py
```python
import requests
import logging
from retry import retry
from django.db import transaction
from django_admin_conf_vars.global_vars import config
from ..models import Status, OntologyTerm
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
BASE_URL = config.OLS_BASE_URL
@retry(tries=3, delay=5, backoff=1.2, jitter=(1, 3), logger=logger)
def make_ols_query(identifier_value, ontology_id, identifier_type='iri'):
"""
Takes in an identifier type (iri or curie), the value for that identifier to query for, and the ontology id to
search against. Returns a dictionary for that term with the fields 'curie', 'iri', 'label' and
'is_obsolete' (True or False).
"""
response = requests.get(f"{BASE_URL}ontologies/{ontology_id}/terms?{identifier_type}={identifier_value}")
# 404 errors are expected. In any other case, raise an exception and retry the query
if response.status_code == 404:
return None
response.raise_for_status()
results = response.json()
return parse_ols_results(results)
def parse_ols_results(results):
term_info = results["_embedded"]["terms"][0]
term_iri = term_info["iri"] # E.g. http://www.ebi.ac.uk/efo/EFO_0000400
term_curie = term_info["obo_id"] # E.g. EFO:0000400
term_label = term_info["label"] # E.g. Diabetes mellitus
term_is_obsolete = term_info["is_obsolete"] # True or False value on whether the term is obsolete or not
info_dict = {"curie": term_curie, "iri": term_iri, "label": term_label, "is_obsolete": term_is_obsolete}
return info_dict
def get_ontology_id(term_iri):
"""
Extracts the ontology id from the term iri, to be used for OLS queries by reading the last part of an iri and
reading the ontology id using the term prefix
E.g. extracts 'mondo' from http://purl.obolibrary.org/obo/MONDO_0019482
"""
ontology_id = term_iri.split('/')[-1].split('_')[0].lower()
# Orphanet terms use Orphanet_XXXXXXX syntax, but their OLS id is 'ordo'
if ontology_id == 'orphanet':
ontology_id = 'ordo'
return ontology_id
def ols_update():
"""
Query OLS for all terms with 'current', 'awaiting_import' and 'needs_import' status and update their status
"""
terms_to_query = OntologyTerm.objects.filter(
status__in=[Status.CURRENT, Status.AWAITING_IMPORT, Status.NEEDS_IMPORT])
for term in terms_to_query:
ols_query_term(term)
@transaction.atomic
def ols_query_term(term):
logger.info(f"Querying OLS for term {term}")
efo_response = make_ols_query(term.iri, 'efo')
term_ontology_id = get_ontology_id(term.iri)
parent_ontology_response = None
if term_ontology_id != 'efo':
parent_ontology_response = make_ols_query(term.iri, term_ontology_id)
term.status = get_term_status(efo_response, parent_ontology_response, term.status)
if term.status == Status.CURRENT:
term.label = efo_response['label']
elif term.status in [Status.NEEDS_IMPORT, Status.AWAITING_IMPORT]:
term.label = parent_ontology_response['label']
term.save()
def get_term_status(efo_response, parent_ontology_response=None, previous_status=None):
if not efo_response and not parent_ontology_response:
logger.info("FOUND DELETED")
return Status.DELETED
if efo_response and efo_response.get('is_obsolete') is True or \
parent_ontology_response and parent_ontology_response.get('is_obsolete') is True:
logger.info("FOUND OBSOLETE")
return Status.OBSOLETE
if efo_response is not None:
return Status.CURRENT
if not previous_status:
return Status.NEEDS_IMPORT
return previous_status
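# --- Illustrative sketch (not part of the original module) ---
# How get_term_status() combines the two OLS lookups (the responses below are
# fabricated stubs; only `is_obsolete` and truthiness matter here):
def _demo_get_term_status():
    efo_hit = {'label': 'some term', 'is_obsolete': False}
    parent_hit = {'label': 'some term', 'is_obsolete': False}
    assert get_term_status(None, None) == Status.DELETED             # found in neither ontology
    assert get_term_status(efo_hit) == Status.CURRENT                # already in EFO
    assert get_term_status(None, parent_hit) == Status.NEEDS_IMPORT  # only in its parent ontology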
```
#### File: traits/datasources/zooma.py
```python
import requests
import logging
from django.db import transaction
from django_admin_conf_vars.global_vars import config
from ..models import Trait, MappingSuggestion, OntologyTerm, User, Status, Mapping
from .ols import make_ols_query, get_ontology_id, get_term_status
from .oxo import make_oxo_query
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
BASE_URL = config.ZOOMA_BASE_URL
def run_zooma_for_all_traits():
"""
Retrieves zooma mapping suggestions for all traits and creates terms and suggestions in the app's database for each
of them.
"""
traits = Trait.objects.all()
for trait in traits:
run_zooma_for_single_trait(trait)
def run_zooma_for_single_trait(trait):
logger.info(f"Retrieving ZOOMA suggestions for trait: {trait.name}")
datasource_suggestion_list = get_suggestions_from_datasources(trait)
ols_suggestion_list = get_suggestions_from_ols(trait)
high_confidence_term_iris = list() # A list of terms with 'HIGH' mapping confidence
final_suggestion_iri_set = set() # A set of the term iris which are kept from the ZOOMA queries
for suggestion in datasource_suggestion_list + ols_suggestion_list:
if len(suggestion["semanticTags"]) > 1:
logger.warning(f"Suggestion with combined terms found: Suggestions:{suggestion['semanticTags']} for {trait}")
continue
suggested_term_iri = suggestion["semanticTags"][0] # E.g. http://purl.obolibrary.org/obo/HP_0004839
if suggestion['confidence'] == 'HIGH':
high_confidence_term_iris.append(suggested_term_iri)
if get_ontology_id(suggested_term_iri) in ["efo", "ordo", "hp", "mondo"]:
final_suggestion_iri_set.add(suggested_term_iri)
created_terms = set()
for suggested_iri in final_suggestion_iri_set:
suggested_term = create_local_term(suggested_iri)
created_terms.add(suggested_term)
create_mapping_suggestion(trait, suggested_term)
find_automatic_mapping(trait, created_terms, high_confidence_term_iris)
delete_unused_suggestions(trait, created_terms)
def get_suggestions_from_datasources(trait):
"""
Takes in a trait object as its argument and returns the zooma response of a suggestion list as a dictionary.
"""
formatted_trait_name = requests.utils.quote(trait.name)
response = requests.get(
f"{BASE_URL}/services/annotate?propertyValue={formatted_trait_name}"
"&filter=required:[cttv,sysmicro,atlas,ebisc,uniprot,gwas,cbi,clinvar-xrefs]")
return response.json()
def get_suggestions_from_ols(trait):
"""
Takes in a trait object as its argument and returns the zooma response of a suggestion list as a dictionary.
"""
formatted_trait_name = trait.name.replace(' ', '+')
response = requests.get(
f"{BASE_URL}/services/annotate?propertyValue={formatted_trait_name}"
"&filter=required:[none],ontologies:[efo,mondo,hp,ordo]")
return response.json()
@transaction.atomic
def create_local_term(suggested_term_iri):
"""
Takes in a single zooma suggestion result and creates an ontology term for it in the app's database. If that term
already exists, returns the existing term
"""
logger.info(f"Retrieving info for suggested term: {suggested_term_iri}")
if OntologyTerm.objects.filter(iri=suggested_term_iri).exists():
logger.info(f"Term {suggested_term_iri} already exists in the database")
return OntologyTerm.objects.filter(iri=suggested_term_iri).first()
# Search for the term in EFO and return its information.
efo_response = make_ols_query(suggested_term_iri, 'efo')
term_ontology_id = get_ontology_id(suggested_term_iri)
# If it is a non-EFO term, also query its parent ontology and calculate its status.
if term_ontology_id != 'efo':
parent_ontology_response = make_ols_query(suggested_term_iri, term_ontology_id)
term_status = get_term_status(efo_response, parent_ontology_response)
else:
term_status = get_term_status(efo_response)
# Finally create a dict holding the term info that was found in the queries
if term_status == Status.DELETED:
term_info = {'curie': 'Not Found', 'iri': suggested_term_iri, 'label': 'Not Found'}
elif term_status == Status.CURRENT:
term_info = {'curie': efo_response['curie'], 'iri': suggested_term_iri, 'label': efo_response['label']}
else:
term_info = {'curie': parent_ontology_response['curie'],
'iri': suggested_term_iri, 'label': parent_ontology_response['label']}
# Create an ontology term in the database
term = OntologyTerm(curie=term_info['curie'], iri=suggested_term_iri, label=term_info['label'], status=term_status)
term.save()
return term
@transaction.atomic
def create_mapping_suggestion(trait, term, user_email='<EMAIL>'):
"""
Creates a mapping suggestion in the app's database, if it doesn't exist already.
"""
user = User.objects.filter(email=user_email).first()
if user_email == "<EMAIL>" and user is None:
zooma = User(first_name="ZOOMA", email="<EMAIL>")
zooma.save()
if MappingSuggestion.objects.filter(mapped_trait=trait, mapped_term=term).exists():
logger.info(f"Mapping suggestion {trait} - {term} already exists in the database")
return
suggestion = MappingSuggestion(mapped_trait=trait, mapped_term=term, made_by=user)
suggestion.save()
logger.info(f"Created mapping suggestion {suggestion}")
def find_automatic_mapping(trait, created_terms, high_confidence_term_iris):
"""
If a trait is unmapped, attempts to find an automatic mapping: first looks for a ZOOMA suggestion with 'HIGH'
confidence, then attempts to find an exact text match.
"""
if trait.status != Status.UNMAPPED:
return
# Check if a created term suggestion has 'HIGH" confidence, and map it to the trait if it does
for term in created_terms:
if term.iri in high_confidence_term_iris:
create_mapping(trait, term)
logger.info(f"CREATED HIGH CONFIDENCE MAPPING {trait.current_mapping}")
return
# Check if a created term suggestion is an exact text match with the trait, and map it if it does
for term in created_terms:
if trait.name.lower() == term.label.lower():
create_mapping(trait, term)
logger.info(f"CREATED EXACT TEXT MATCH MAPPING {trait.current_mapping}")
return
# For high confidence term suggestions that weren't in EFO compatible ontologies
for term_iri in high_confidence_term_iris:
# Skip medgen terms since info about them can't be retrieved through OLS
if 'medgen' in term_iri:
continue
# Create term suggestion from OxO cross references
ontology_id = get_ontology_id(term_iri)
term_curie = make_ols_query(identifier_value=term_iri, ontology_id=ontology_id)['curie']
oxo_results = make_oxo_query([term_curie])
for result in oxo_results['_embedded']['searchResults'][0]['mappingResponseList']:
ontology_id = result['targetPrefix'].lower() # E.g. 'efo'
if ontology_id == "orphanet":
ontology_id = "ordo"
result_iri = make_ols_query(identifier_value=result['curie'],
ontology_id=ontology_id, identifier_type='obo_id')['iri']
suggested_term = create_local_term(result_iri)
created_terms.add(suggested_term)
create_mapping_suggestion(trait, suggested_term)
@transaction.atomic
def create_mapping(trait, term):
zooma_user = User.objects.filter(email='<EMAIL>').first()
if Mapping.objects.filter(mapped_trait=trait, mapped_term=term).exists():
return
mapping = Mapping(mapped_trait=trait, mapped_term=term, curator=zooma_user, is_reviewed=False)
mapping.save()
trait.current_mapping = mapping
trait.save()
def delete_unused_suggestions(trait, created_terms):
"""
Takes in a trait and a set of created_terms, found in the previously executed ZOOMA query. This function gets all
the mapping suggestions for that trait that are NOT found in the created_terms list or in any previous mappings
for that trait, and deletes them
"""
trait_mappings = trait.mapping_set.all()
for mapping in trait_mappings:
created_terms.add(mapping.mapped_term)
deleted_suggestions = trait.mappingsuggestion_set.exclude(mapped_term__in=list(created_terms))
deleted_suggestions.delete()
logger.info(f"Deleted mapping suggestions {deleted_suggestions.all()}")
``` |
{
"source": "jojacobsen/coding_challenge",
"score": 2
} |
#### File: coding_challenge/coding_challenge/conftest.py
```python
import pytest
from coding_challenge.users.models import User
from coding_challenge.users.tests.factories import UserFactory
from coding_challenge.ship_manager.models import Ship
from coding_challenge.ship_manager.tests.factories import ShipFactory
@pytest.fixture(autouse=True)
def media_storage(settings, tmpdir):
settings.MEDIA_ROOT = tmpdir.strpath
@pytest.fixture
def user() -> User:
return UserFactory()
@pytest.fixture
def ship() -> Ship:
return ShipFactory()
```
#### File: ship_manager/tests/test_drf_views.py
```python
import pytest
from django.urls import reverse
from rest_framework.authtoken.models import Token
from rest_framework.test import APIClient
from rest_framework import status
from coding_challenge.users.models import User
from coding_challenge.ship_manager.models import Ship
pytestmark = pytest.mark.django_db
def test_ship_view_auth(user: User, ship: Ship):
"""
Ensure that the endpoints are only accessible to authenticated users.
"""
client = APIClient()
url = reverse("api:ship-list")
data: dict = {}
response = client.post(url, data, format="json")
# Test unauthorized API call (get, post, delete, put)
assert response.status_code == status.HTTP_403_FORBIDDEN
response = client.get(url)
assert response.status_code == status.HTTP_403_FORBIDDEN
url = reverse("api:ship-detail", kwargs={"code": ship.code})
response = client.delete(url)
assert response.status_code == status.HTTP_403_FORBIDDEN
url = reverse("api:ship-detail", kwargs={"code": ship.code})
response = client.put(url)
assert response.status_code == status.HTTP_403_FORBIDDEN
def test_list_ship(user: User, ship: Ship):
"""
Ensure we can get the list of ship objects.
"""
client = APIClient()
token, created = Token.objects.get_or_create(user=user)
client.credentials(HTTP_AUTHORIZATION="Token " + token.key)
url = reverse("api:ship-list")
response = client.get(url)
assert response.status_code == status.HTTP_200_OK
assert isinstance(response.data, list)
def test_detail_ship_view(user: User, ship: Ship):
"""
Ensure we can get a single of ship object.
"""
client = APIClient()
token, created = Token.objects.get_or_create(user=user)
client.credentials(HTTP_AUTHORIZATION="Token " + token.key)
url = reverse("api:ship-detail", kwargs={"code": ship.code})
response = client.get(url)
assert response.status_code == status.HTTP_200_OK
assert isinstance(response.data, dict)
def test_create_ship(user: User, ship: Ship):
"""
Ensure we can create a new ship object.
"""
client = APIClient()
url = reverse("api:ship-list")
data: dict = {}
token, created = Token.objects.get_or_create(user=user)
client.credentials(HTTP_AUTHORIZATION="Token " + token.key)
# Call API with invalid data
response = client.post(url, data, format="json")
assert response.status_code == status.HTTP_400_BAD_REQUEST
# Test ship creating with invalid code
data = {
"code": "test-ship",
"name": "test-ship",
"width": 31,
"length": 10,
}
response = client.post(url, data, format="json")
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert "code" in response.data
# Test ship creating with no unique code
data["code"] = "AAAA-1111-A1"
response = client.post(url, data, format="json")
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert "code" in response.data
# Test ship creating with valid data
data["code"] = "AAAA-1111-A9"
response = client.post(url, data, format="json")
assert response.status_code == status.HTTP_201_CREATED
assert Ship.objects.count() == 2
def test_ship_serializer(user: User, ship: Ship):
"""
    Ensure the ship serializer validates and normalizes the ship code.
"""
client = APIClient()
url = reverse("api:ship-list")
token, created = Token.objects.get_or_create(user=user)
client.credentials(HTTP_AUTHORIZATION="Token " + token.key)
# Test ship creating with valid data
data = {
"code": "AAAA-1111-A9",
"name": "<NAME>",
"width": 31,
"length": 10,
}
response = client.post(url, data, format="json")
assert response.status_code == status.HTTP_201_CREATED
assert Ship.objects.count() == 2
# Test ship creating with duplicate but lowercase data
data["code"] = "aaaa-1111-a9"
response = client.post(url, data, format="json")
assert response.status_code == status.HTTP_400_BAD_REQUEST
# Test ship creating with lowercase code that should get
# transformed into uppercase
data["code"] = "aaaa-4444-a9"
response = client.post(url, data, format="json")
assert response.status_code == status.HTTP_201_CREATED
assert response.data["code"] == "AAAA-4444-A9"
# Test ship updating with lowercase code that should get
# transformed into uppercase
url = reverse("api:ship-detail", kwargs={"code": ship.code})
response = client.patch(url, data={"code": "bbbb-4444-a9"})
assert response.status_code == status.HTTP_200_OK
assert response.data["code"] == "BBBB-4444-A9"
def test_update_ship(user: User, ship: Ship):
"""
Ensure we can update a ship object.
"""
client = APIClient()
token, created = Token.objects.get_or_create(user=user)
client.credentials(HTTP_AUTHORIZATION="Token " + token.key)
url = reverse("api:ship-detail", kwargs={"code": ship.code})
response = client.patch(url, data={"name": "Aida"})
assert response.status_code == status.HTTP_200_OK
ship_instance = Ship.objects.get(code=ship.code)
assert ship_instance.name == "Aida"
def test_delete_ship(user: User, ship: Ship):
"""
Ensure we can delete a ship object.
"""
client = APIClient()
token, created = Token.objects.get_or_create(user=user)
client.credentials(HTTP_AUTHORIZATION="Token " + token.key)
url = reverse("api:ship-detail", kwargs={"code": ship.code})
response = client.delete(url)
assert response.status_code == status.HTTP_204_NO_CONTENT
assert Ship.objects.count() == 0
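# NOTE: the serializer behaviour exercised above (rejecting "test-ship", enforcing a unique
# code and upper-casing lowercase input) is implemented elsewhere in the repo. A hypothetical
# sketch of such a validator, shown only as commented pseudocode so it does not affect this
# test module:
#
#     import re
#
#     def validate_code(self, value):
#         value = value.upper()
#         if not re.fullmatch(r"[A-Z]{4}-\d{4}-[A-Z]\d", value):
#             raise serializers.ValidationError("Invalid ship code format.")
#         return value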
``` |
{
"source": "Jojain/jupyter-cadquery",
"score": 2
} |
#### File: examples/ide/2-hexapod.py
```python
import numpy as np
import cadquery as cq
from cadquery_massembly import MAssembly, relocate
from jupyter_cadquery import set_defaults
from jupyter_cadquery.viewer.client import show
from jupyter_cadquery.cad_animation import Animation
set_defaults(zoom=3.5)
# Parts
thickness = 2
height = 40
width = 65
length = 100
diam = 4
tol = 0.05
def create_base(rotate=False):
x1, x2 = 0.63, 0.87
base_holes = {
"right_back": (-x1 * length, -x1 * width),
"right_middle": (0, -x2 * width),
"right_front": (x1 * length, -x1 * width),
"left_back": (-x1 * length, x1 * width),
"left_middle": (0, x2 * width),
"left_front": (x1 * length, x1 * width),
}
stand_holes = {"front_stand": (0.75 * length, 0), "back_stand": (-0.8 * length, 0)}
workplane = cq.Workplane()
if rotate:
workplane = workplane.transformed(rotate=(30, 45, 60))
base = (
workplane.ellipseArc(length, width, 25, -25, startAtCurrent=False)
.close()
.pushPoints(list(base_holes.values()))
.circle(diam / 2 + tol)
.moveTo(*stand_holes["back_stand"])
.rect(thickness + 2 * tol, width / 2 + 2 * tol)
.moveTo(*stand_holes["front_stand"])
.rect(thickness + 2 * tol, width / 2 + 2 * tol)
.extrude(thickness)
)
base
# tag mating points
if rotate:
l_coord = lambda vec2d: workplane.plane.toWorldCoords(vec2d).toTuple()
l_nps = lambda vec2d: cq.NearestToPointSelector(l_coord(vec2d))
base.faces(f"<{l_coord((0,0,1))}").tag("bottom")
base.faces(f">{l_coord((0,0,1))}").tag("top")
for name, hole in base_holes.items():
base.faces(f"<{l_coord((0,0,1))}").edges(l_nps(hole)).tag(name)
for name, hole in stand_holes.items():
base.faces(f"<{l_coord((0,0,1))}").wires(l_nps(hole)).tag(name)
else:
base.faces("<Z").tag("bottom")
base.faces(">Z").tag("top")
for name, hole in base_holes.items():
base.faces("<Z").wires(cq.NearestToPointSelector(hole)).tag(name)
for name, hole in stand_holes.items():
base.faces("<Z").wires(cq.NearestToPointSelector(hole)).tag(name)
return base
base_holes_names = {
"right_back",
"right_middle",
"right_front",
"left_back",
"left_middle",
"left_front",
}
def create_stand():
stand = cq.Workplane().box(height, width / 2 + 10, thickness)
inset = cq.Workplane().box(thickness, width / 2, thickness)
backing = cq.Workplane("ZX").polyline([(10, 0), (0, 0), (0, 10)]).close().extrude(thickness)
stand = (
stand.union(inset.translate(((height + thickness) / 2, 0, 0)))
.union(inset.translate((-(height + thickness) / 2, 0, 0)))
.union(backing.translate((-height / 2, -thickness / 2, thickness / 2)))
.union(backing.rotate((0, 0, 0), (0, 1, 0), -90).translate((height / 2, -thickness / 2, thickness / 2)))
)
return stand
stand_names = ("front_stand", "back_stand")
def create_upper_leg():
l1, l2 = 50, 80
pts = [(0, 0), (0, height / 2), (l1, height / 2 - 5), (l2, 0)]
upper_leg_hole = (l2 - 10, 0)
upper_leg = (
cq.Workplane()
.polyline(pts)
.mirrorX()
.pushPoints([upper_leg_hole])
.circle(diam / 2 + tol)
.extrude(thickness)
.edges("|Z and (not <X)")
.fillet(4)
)
axle = (
cq.Workplane("XZ", origin=(0, height / 2 + thickness + tol, thickness / 2))
.circle(diam / 2)
.extrude(2 * (height / 2 + thickness + tol))
)
upper_leg = upper_leg.union(axle)
# tag mating points
upper_leg.faces(">Z").edges(cq.NearestToPointSelector(upper_leg_hole)).tag("top")
upper_leg.faces("<Z").edges(cq.NearestToPointSelector(upper_leg_hole)).tag("bottom")
return upper_leg
def create_lower_leg():
w, l1, l2 = 15, 20, 120
pts = [(0, 0), (l1, w), (l2, 0)]
lower_leg_hole = (l1 - 10, 0)
lower_leg = (
cq.Workplane()
.polyline(pts)
.mirrorX()
.pushPoints([lower_leg_hole])
.circle(diam / 2 + tol)
.extrude(thickness)
.edges("|Z")
.fillet(5)
)
# tag mating points
lower_leg.faces(">Z").edges(cq.NearestToPointSelector(lower_leg_hole)).tag("top"),
lower_leg.faces("<Z").edges(cq.NearestToPointSelector(lower_leg_hole)).tag("bottom")
return lower_leg
leg_angles = {
"right_back": -105,
"right_middle": -90,
"right_front": -75,
"left_back": 105,
"left_middle": 90,
"left_front": 75,
}
leg_names = list(leg_angles.keys())
base = create_base(rotate=False)
stand = create_stand()
upper_leg = create_upper_leg()
lower_leg = create_lower_leg()
# Assembly
def create_hexapod():
# Some shortcuts
L = lambda *args: cq.Location(cq.Vector(*args))
C = lambda *args: cq.Color(*args)
# Leg assembly
leg = MAssembly(upper_leg, name="upper", color=C("orange")).add(
lower_leg, name="lower", color=C("orange"), loc=L(80, 0, 0)
)
# Hexapod assembly
hexapod = (
MAssembly(base, name="bottom", color=C("gray"), loc=L(0, 1.1 * width, 0))
.add(base, name="top", color=C(0.9, 0.9, 0.9), loc=L(0, -2.2 * width, 0))
.add(stand, name="front_stand", color=C(0.5, 0.8, 0.9), loc=L(40, 100, 0))
.add(stand, name="back_stand", color=C(0.5, 0.8, 0.9), loc=L(-40, 100, 0))
)
for i, name in enumerate(leg_names):
hexapod.add(leg, name=name, loc=L(100, -55 * (i - 1.7), 0))
return hexapod
# Mates
from collections import OrderedDict as odict
hexapod = create_hexapod()
show(hexapod)
hexapod.mate("bottom?top", name="bottom", origin=True)
hexapod.mate("top?bottom", name="top", origin=True, transforms=odict(rx=180, tz=-(height + 2 * tol)))
for name in stand_names:
hexapod.mate(f"bottom?{name}", name=f"{name}_bottom", transforms=odict(rz=-90 if "f" in name else 90))
hexapod.mate(f"{name}@faces@<X", name=name, origin=True, transforms=odict(rx=180))
for name in base_holes_names:
hexapod.mate(f"bottom?{name}", name=f"{name}_hole", transforms=odict(rz=leg_angles[name]))
for name in leg_names:
lower, upper, angle = ("top", "bottom", -75) if "left" in name else ("bottom", "top", -75)
hexapod.mate(f"{name}?{upper}", name=f"leg_{name}_hole", transforms=odict(rz=angle))
hexapod.mate(f"{name}@faces@<Y", name=f"leg_{name}_hinge", origin=True, transforms=odict(rx=180, rz=-90))
hexapod.mate(f"{name}/lower?{lower}", name=f"leg_{name}_lower_hole", origin=True)
# show(hexapod, reset_camera=False)
relocate(hexapod)
# Assemble the parts
for leg in leg_names:
hexapod.assemble(f"leg_{leg}_lower_hole", f"leg_{leg}_hole")
hexapod.assemble(f"leg_{leg}_hinge", f"{leg}_hole")
hexapod.assemble("top", "bottom")
for stand_name in stand_names:
hexapod.assemble(f"{stand_name}", f"{stand_name}_bottom")
show(hexapod)
# Animation
horizontal_angle = 25
def intervals(count):
r = [min(180, (90 + i * (360 // count)) % 360) for i in range(count)]
return r
def times(end, count):
return np.linspace(0, end, count + 1)
def vertical(count, end, offset, reverse):
ints = intervals(count)
heights = [round(35 * np.sin(np.deg2rad(x)) - 15, 1) for x in ints]
heights.append(heights[0])
return times(end, count), heights[offset:] + heights[1 : offset + 1]
def horizontal(end, reverse):
factor = 1 if reverse else -1
return times(end, 4), [0, factor * horizontal_angle, 0, -factor * horizontal_angle, 0]
leg_group = ("left_front", "right_middle", "left_back")
animation = Animation(viewer=True)
for name in leg_names:
# move upper leg
animation.add_track(f"bottom/{name}", "rz", *horizontal(4, "middle" in name))
# move lower leg
animation.add_track(f"bottom/{name}/lower", "rz", *vertical(8, 4, 0 if name in leg_group else 4, "left" in name))
# lift hexapod to run on grid
# animation.add_track(f"bottom", "tz", [0, 4], [61.25] * 2)
animation.animate(speed=3)
```
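To make the animation setup above easier to follow, this is roughly what the helper functions return for one track (values recomputed by hand from the code, so treat them as illustrative):
```python
# intervals(8)             -> [90, 135, 180, 180, 180, 180, 0, 45]
# times(4, 8)              -> array([0. , 0.5, 1. , 1.5, 2. , 2.5, 3. , 3.5, 4. ])
# vertical(8, 4, 0, False) -> (times(4, 8),
#                              [20.0, 9.7, -15.0, -15.0, -15.0, -15.0, -15.0, 9.7, 20.0])
# horizontal(4, False)     -> (times(4, 4), [0, -25, 0, 25, 0])
```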
#### File: jupyter_cadquery/cadquery/cad_objects.py
```python
import html
try:
from cadquery_massembly import MAssembly
HAS_MASSEMBLY = True
except:
HAS_MASSEMBLY = False
import numpy as np
from cadquery.occ_impl.shapes import Face, Edge, Wire
from cadquery import Workplane, Shape, Compound, Vector, Vertex, Location, Assembly as CqAssembly
try:
from cadquery import Sketch
except ImportError:
pass
from jupyter_cadquery.cad_objects import (
_PartGroup,
_Part,
_Edges,
_Faces,
_Vertices,
_show,
)
from jupyter_cadquery.cad_display import get_default
from .cqparts import is_cqparts, convert_cqparts
from ..utils import Color
from ..ocp_utils import get_rgb
from ..defaults import get_default
class Part(_Part):
def __init__(self, shape, name="Part", color=None, show_faces=True, show_edges=True):
if color is None:
color = get_default("default_color")
super().__init__(_to_occ(shape), name, color, show_faces, show_edges)
def to_assembly(self):
return PartGroup([self])
def show(self, grid=False, axes=False):
return show(self, grid=grid, axes=axes)
class Faces(_Faces):
def __init__(self, faces, name="Faces", color=None, show_faces=True, show_edges=True):
super().__init__(_to_occ(faces.combine()), name, color, show_faces, show_edges)
def to_assembly(self):
return PartGroup([self])
def show(self, grid=False, axes=False):
return show(self, grid=grid, axes=axes)
class Edges(_Edges):
def __init__(self, edges, name="Edges", color=None):
super().__init__(_to_occ(edges), name, color)
def to_assembly(self):
return PartGroup([self])
def show(self, grid=False, axes=False):
return show(self, grid=grid, axes=axes)
class Vertices(_Vertices):
def __init__(self, vertices, name="Vertices", color=None):
super().__init__(_to_occ(vertices), name, color)
def to_assembly(self):
return PartGroup([self])
def show(self, grid=False, axes=False):
return show(self, grid=grid, axes=axes)
class PartGroup(_PartGroup):
def to_assembly(self):
return self
def show(self, grid=False, axes=False):
return show(self, grid=grid, axes=axes)
def add(self, cad_obj):
self.objects.append(cad_obj)
def add_list(self, cad_objs):
self.objects += cad_objs
class Assembly(PartGroup):
def __init__(self, *args, **kwargs):
import warnings
super().__init__(*args, **kwargs)
warnings.warn(
"Class 'Assembly' is deprecated (too many assemblies ...). Please use class 'PartGroup' instead",
RuntimeWarning,
)
def _to_occ(cad_obj):
# special case Wire, must be handled before Workplane
if _is_wirelist(cad_obj):
all_edges = []
for edges in cad_obj.objects:
all_edges += edges.Edges()
return [edge.wrapped for edge in all_edges]
elif isinstance(cad_obj, Workplane):
return [obj.wrapped for obj in cad_obj.objects]
elif isinstance(cad_obj, Shape):
return [cad_obj.wrapped]
elif isinstance(cad_obj, Sketch):
return [cad_obj._faces.wrapped]
else:
raise NotImplementedError(type(cad_obj))
def _parent(cad_obj, obj_id):
if cad_obj.parent is not None:
if isinstance(cad_obj.parent.val(), Vector):
return _from_vectorlist(
cad_obj.parent,
obj_id,
name="Parent",
color=Color((0.8, 0.8, 0.8)),
show_parents=False,
)
elif isinstance(cad_obj.parent.val(), Vertex):
return _from_vertexlist(
cad_obj.parent,
obj_id,
name="Parent",
color=Color((0.8, 0.8, 0.8)),
show_parents=False,
)
elif isinstance(cad_obj.parent.val(), Edge):
return _from_edgelist(
cad_obj.parent,
obj_id,
name="Parent",
color=Color((0.8, 0.8, 0.8)),
show_parents=False,
)
elif isinstance(cad_obj.parent.val(), Wire):
return [_from_wirelist(cad_obj.parent, obj_id, name="Parent", color=Color((0.8, 0.8, 0.8)))]
else:
return [
Part(
cad_obj.parent,
"Parent_%d" % obj_id,
show_edges=True,
show_faces=False,
)
]
else:
return []
def _from_facelist(cad_obj, obj_id, name="Faces", show_parents=True):
result = [Faces(cad_obj, "%s_%d" % (name, obj_id), color=Color((0.8, 0.0, 0.8)))]
if show_parents:
result = _parent(cad_obj, obj_id) + result
return result
def _from_edgelist(cad_obj, obj_id, name="Edges", color=None, show_parents=True):
result = [Edges(cad_obj, "%s_%d" % (name, obj_id), color=Color(color or (1.0, 0.0, 1.0)))]
if show_parents:
result = _parent(cad_obj, obj_id) + result
return result
def _from_vector(vec, obj_id, name="Vector"):
tmp = Workplane()
obj = tmp.newObject([vec])
return _from_vectorlist(obj, obj_id, name)
def _from_vectorlist(cad_obj, obj_id, name="Vertices", color=None, show_parents=True):
if cad_obj.vals():
vectors = cad_obj.vals()
else:
vectors = [cad_obj.val()]
obj = cad_obj.newObject([Vertex.makeVertex(v.x, v.y, v.z) for v in vectors])
result = [Vertices(obj, "%s_%d" % (name, obj_id), color=Color(color or (1.0, 0.0, 1.0)))]
if show_parents:
result = _parent(cad_obj, obj_id) + result
return result
def _from_vertexlist(cad_obj, obj_id, name="Vertices", color=None, show_parents=True):
result = [Vertices(cad_obj, "%s_%d" % (name, obj_id), color=Color(color or (1.0, 0.0, 1.0)))]
if show_parents:
result = _parent(cad_obj, obj_id) + result
return result
def _from_wirelist(cad_obj, obj_id, name="Edges", color=None):
return Edges(cad_obj, "%s_%d" % (name, obj_id), color=Color(color or (1.0, 0.0, 1.0)))
def to_edge(mate, loc=None, scale=1) -> Workplane:
w = Workplane()
for d in (mate.x_dir, mate.y_dir, mate.z_dir):
edge = Edge.makeLine(mate.origin, mate.origin + d * scale)
w.objects.append(edge if loc is None else edge.moved(loc))
return w
def from_assembly(cad_obj, top, loc=None, render_mates=False, mate_scale=1, default_color=None):
loc = Location()
render_loc = cad_obj.loc
if cad_obj.color is None:
if default_color is None:
color = Color(get_default("default_color"))
else:
color = Color(default_color)
else:
color = Color(get_rgb(cad_obj.color))
parent = [
Part(
Workplane(shape),
"%s_%d" % (cad_obj.name, i),
color=color,
)
for i, shape in enumerate(cad_obj.shapes)
]
if render_mates and cad_obj.mates is not None:
RGB = (Color((255, 0, 0)), Color((0, 128, 0)), Color((0, 0, 255)))
parent.append(
PartGroup(
[
Edges(to_edge(mate_def.mate, scale=mate_scale), name=name, color=RGB)
for name, mate_def in top.mates.items()
if mate_def.assembly == cad_obj
],
name="mates",
loc=Location(), # mates inherit the parent location, so actually add a no-op
)
)
children = [from_assembly(c, top, loc, render_mates, mate_scale) for c in cad_obj.children]
return PartGroup(parent + children, cad_obj.name, loc=render_loc)
def _from_workplane(cad_obj, obj_id, name="Part", default_color=None):
return Part(cad_obj, "%s_%d" % (name, obj_id), color=Color(default_color))
def _from_sketch(cad_obj, obj_id, name="Sketch", default_color=None):
return Part(cad_obj, "%s_%d" % (name, obj_id), color=Color(default_color))
def _is_facelist(cad_obj):
return (
hasattr(cad_obj, "objects")
and cad_obj.objects != []
and all([isinstance(obj, Face) for obj in cad_obj.objects])
)
def _is_vertexlist(cad_obj):
return (
hasattr(cad_obj, "objects")
and cad_obj.objects != []
and all([isinstance(obj, Vertex) for obj in cad_obj.objects])
)
def _is_edgelist(cad_obj):
return (
hasattr(cad_obj, "objects")
and cad_obj.objects != []
and all([isinstance(obj, Edge) for obj in cad_obj.objects])
)
def _is_wirelist(cad_obj):
return (
hasattr(cad_obj, "objects")
and cad_obj.objects != []
and all([isinstance(obj, Wire) for obj in cad_obj.objects])
)
def to_assembly(*cad_objs, render_mates=None, mate_scale=1, default_color=None):
default_color = get_default("default_color") if default_color is None else default_color
assembly = PartGroup([], "Group")
obj_id = 0
for cad_obj in cad_objs:
if isinstance(cad_obj, (PartGroup, Part, Faces, Edges, Vertices)):
assembly.add(cad_obj)
elif HAS_MASSEMBLY and isinstance(cad_obj, MAssembly):
assembly.add(
from_assembly(
cad_obj, cad_obj, render_mates=render_mates, mate_scale=mate_scale, default_color=default_color
)
)
elif isinstance(cad_obj, CqAssembly):
assembly.add(from_assembly(cad_obj, cad_obj, default_color=default_color))
elif isinstance(cad_obj, Edge):
assembly.add_list(_from_edgelist(Workplane(cad_obj), obj_id))
elif isinstance(cad_obj, Face):
assembly.add_list(_from_facelist(Workplane(cad_obj), obj_id))
elif isinstance(cad_obj, Wire):
assembly.add(_from_wirelist(Workplane(cad_obj), obj_id))
elif isinstance(cad_obj, Vertex):
assembly.add_list(_from_vertexlist(Workplane(cad_obj), obj_id))
elif is_cqparts(cad_obj):
assembly = convert_cqparts(cad_obj)
elif _is_facelist(cad_obj):
assembly.add_list(_from_facelist(cad_obj, obj_id))
elif _is_edgelist(cad_obj):
assembly.add_list(_from_edgelist(cad_obj, obj_id))
elif _is_wirelist(cad_obj):
assembly.add(_from_wirelist(cad_obj, obj_id))
elif _is_vertexlist(cad_obj):
assembly.add_list(_from_vertexlist(cad_obj, obj_id))
elif isinstance(cad_obj, Vector):
assembly.add_list(_from_vector(cad_obj, obj_id))
elif isinstance(cad_obj, (Shape, Compound)):
assembly.add(_from_workplane(Workplane(cad_obj), obj_id, default_color=default_color))
elif isinstance(cad_obj, Sketch):
assembly.add(_from_sketch(cad_obj, obj_id, default_color=default_color))
elif isinstance(cad_obj.val(), Vector):
assembly.add_list(_from_vectorlist(cad_obj, obj_id))
elif isinstance(cad_obj, Workplane):
assembly.add(_from_workplane(cad_obj, obj_id, default_color=default_color))
else:
raise NotImplementedError("Type:", cad_obj)
obj_id += 1
return assembly
def show(*cad_objs, render_mates=None, mate_scale=None, **kwargs):
"""Show CAD objects in Jupyter
Valid keywords:
- height: Height of the CAD view (default=600)
- tree_width: Width of navigation tree part of the view (default=250)
- cad_width: Width of CAD view part of the view (default=800)
    - bb_factor: Scale bounding box to ensure complete rendering (default=1.5)
- default_color: Default mesh color (default=(232, 176, 36))
    - default_edgecolor: Default edge color (default=(128, 128, 128))
- render_edges: Render edges (default=True)
- render_normals: Render normals (default=False)
- render_mates: Render mates (for MAssemblies)
- mate_scale: Scale of rendered mates (for MAssemblies)
- quality: Linear deflection for tessellation (default=None)
        If None, uses bounding box as in (xlen + ylen + zlen) / 300 * deviation
    - deviation: Deviation from default for linear deflection value (default=0.1)
    - angular_tolerance: Angular deflection in radians for tessellation (default=0.2)
    - edge_accuracy: Precision of edge discretization (default=None)
If None, uses: quality / 100
- optimal_bb: Use optimal bounding box (default=False)
- axes: Show axes (default=False)
- axes0: Show axes at (0,0,0) (default=False)
- grid: Show grid (default=False)
- ticks: Hint for the number of ticks in both directions (default=10)
- ortho: Use orthographic projections (default=True)
- transparent: Show objects transparent (default=False)
    - ambient_intensity: Intensity of ambient light (default=1.0)
    - direct_intensity: Intensity of direct lights (default=0.12)
    - position: Relative camera position that will be scaled (default=(1, 1, 1))
    - rotation: z, y and x rotation angles to apply to position vector (default=(0, 0, 0))
- zoom: Zoom factor of view (default=2.5)
- reset_camera: Reset camera position, rotation and zoom to default (default=True)
- mac_scrollbar: Prettify scrollbars (default=True)
- display: Select display: "sidecar", "cell", "html"
- tools: Show the viewer tools like the object tree
- timeit: Show rendering times, levels = False, 0,1,2,3,4,5 (default=False)
For example isometric projection can be achieved in two ways:
- position = (1, 1, 1)
- position = (0, 0, 1) and rotation = (45, 35.264389682, 0)
"""
render_mates = render_mates or get_default("render_mates")
mate_scale = mate_scale or get_default("mate_scale")
default_color = kwargs.get("default_color") or get_default("default_color")
assembly = to_assembly(*cad_objs, render_mates=render_mates, mate_scale=mate_scale, default_color=default_color)
if assembly is None:
raise ValueError("%s cannot be viewed" % cad_objs)
if len(assembly.objects) == 1 and isinstance(assembly.objects[0], PartGroup):
# omit leading "PartGroup" group
return _show(assembly.objects[0], **kwargs)
else:
return _show(assembly, **kwargs)
def auto_show():
PartGroup._ipython_display_ = lambda self: self.show()
Part._ipython_display_ = lambda self: self.show()
Faces._ipython_display_ = lambda self: self.show(grid=False, axes=False)
Edges._ipython_display_ = lambda self: self.show(grid=False, axes=False)
Vertices._ipython_display_ = lambda self: self.show(grid=False, axes=False)
print("Overwriting auto display for cadquery Workplane and Shape")
import cadquery as cq
try:
del cq.Workplane._repr_html_
del cq.Shape._repr_html_
except:
pass
cq.Workplane._ipython_display_ = lambda cad_obj: show(cad_obj)
cq.Shape._ipython_display_ = lambda cad_obj: show(cad_obj)
cq.Assembly._ipython_display_ = lambda cad_obj: show(cad_obj)
# Some further cq.Assembly methods
def show_constraints(assy, qs):
colors = [
"#e41a1c",
"#377eb8",
"#4daf4a",
"#984ea3",
"#ff7f00",
"#ffff33",
"#a65628",
"#f781bf",
"#999999",
"#8dd3c7",
"#ffffb3",
"#bebada",
"#fb8072",
"#80b1d3",
"#fdb462",
"#b3de69",
"#fccde5",
"#d9d9d9",
]
constraints = []
objects = []
cache = {}
for i, q1q2 in enumerate(qs):
parts = []
kind = q1q2[-1]
if len(q1q2) == 3:
q1q2 = ((q1q2[0].split("@")[0], q1q2[0]), (q1q2[1].split("@")[0], q1q2[1]))
else:
q1q2 = (q1q2[0:2], q1q2[2:4])
for q in q1q2:
name, shape = q
if name in cache:
obj = cache[name]["obj"]
loc = cache[name]["loc"]
else:
obj = assy.objects[name].obj
loc = assy.objects[name].loc
parent = assy.objects[name].parent
while parent is not None:
loc = parent.loc * loc
parent = parent.parent
cache[name] = {"obj": obj, "loc": loc, "shape": shape}
objects.append(Part(Workplane(obj.val().located(loc)), name=name, show_faces=False))
label = str(shape)
if isinstance(shape, str):
shape = assy._query(shape)[1]
parts.append(
Faces(
Workplane(Workplane(shape).val().located(loc)),
name=html.escape(label),
color=colors[i % len(colors)],
)
)
constraints.append(PartGroup(parts, "%s_%d" % (kind, i)))
show(PartGroup([PartGroup(objects, "objects")] + constraints), axes=True, axes0=True)
def show_accuracy(assy, cs):
def relocate(name, shape):
a = assy.objects[name]
loc = a.loc
parent = a.parent
while parent is not None:
loc = parent.loc * loc
parent = parent.parent
if isinstance(shape, str):
shape = assy._query(shape)[1]
return Workplane(Workplane(shape).val().located(loc))
def center(face):
c = face.Center()
return np.array((c.x, c.y, c.z))
def normal(face):
n = face.normalAt()
return np.array((n.x, n.y, n.z))
def print_metric(results):
l = max([len(r[1]) for r in results])
h = ("Constraint", "Normal-Dist", "Normal-Angle", "Point-Dist")
print(f"{h[0]:{l+7}s} {h[1]:12s} {h[2]:12s} {h[3]:12s}")
print("-" * (l + 46))
for kind, label, nrm_dist, nrm_angle, pnt_dist in results:
metric = f"{kind:5s} {label:{l}s} "
metric += " " * 27 if nrm_dist is None else f"{nrm_dist:12.9f} {nrm_angle:12.8}°"
metric += " " * 13 if pnt_dist is None else f"{pnt_dist:12.9f}"
print(metric)
results = []
for q1q2 in cs:
kind = q1q2[-1]
if len(q1q2) == 3:
n_q1q2 = ((q1q2[0].split("@")[0], q1q2[0]), (q1q2[1].split("@")[0], q1q2[1]))
label = "%s - %s" % q1q2[:2]
else:
n_q1q2 = (q1q2[0:2], q1q2[2:4])
label = "%s<%s> - %s<%s>" % (q1q2[0], q1q2[1].__class__.__name__, q1q2[2], q1q2[3].__class__.__name__)
shape1 = relocate(*n_q1q2[0])
shape2 = relocate(*n_q1q2[1])
pnt_dist = None
nrm_dist = None
nrm_angle = None
if kind in ["Point", "Plane"]:
c1, c2 = center(shape1.val()), center(shape2.val())
pnt_dist = np.linalg.norm(c1 - c2)
if kind in ["Axis", "Plane"]:
n1, n2 = normal(shape1.val()), normal(shape2.val())
nrm_dist = np.linalg.norm(n1 + n2) # distance between n1 and -n2 since n1 and n2 point opposite
c = np.dot(n1, -n2) / np.linalg.norm(n1) / np.linalg.norm(n2)
nrm_angle = np.arccos(np.clip(c, -1, 1)) / np.pi * 180
results.append((kind, label, nrm_dist, nrm_angle, pnt_dist))
print_metric(results)
```
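A minimal usage sketch of the `show` helper defined above (the import path is inferred from the file location, and the keyword values simply exercise a few of the documented options):
```python
import cadquery as cq

from jupyter_cadquery.cadquery.cad_objects import Part, PartGroup, show

box = cq.Workplane().box(10, 20, 5)
cyl = cq.Workplane().cylinder(20, 3).translate((0, 0, 10))

# Show a single Workplane with a few of the documented keywords
show(box, grid=True, axes=True, transparent=True, zoom=2.5)

# Or group several objects explicitly before showing them
show(PartGroup([Part(box, "box"), Part(cyl, "cyl")], "demo"), ortho=True)
```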
#### File: jupyter-cadquery/jupyter_cadquery/style.py
```python
from IPython.display import display, HTML
LATEST = None
base_css = """
.scroll-area {
overflow: scroll !important;
border: unset !important;
}
.mac-scrollbar::-webkit-scrollbar {
width: 5px !important;
height: 5px !important;
}
.mac-scrollbar::-webkit-scrollbar-track {
background-color: transparent !important;
}
.mac-scrollbar .widget-html-content {
overflow-x: visible;
overflow-y: visible;
}
.tab-content-no-padding .widget-tab-contents {
overflow-x: visible !important;
overflow-y: visible !important;
padding-bottom: 0px !important;
}
.view_renderer {
border: 1px solid var(--jp-border-color1);
margin-top: 3px;
margin-left: 2px;
}
.view_tree {
padding: 0px !important;
}
.view_axes {
width: 60px !important;
margin-left: 5px !important;
}
.view_zero {
width: 55px !important;
}
.view_grid {
width: 56px !important;
}
.view_ortho {
width: 64px !important;
}
.view_transparent {
width: 125px !important;
}
.view_black_edges {
width: 105px !important;
}
.view_button {
padding: 0px !important;
}
.view_button>img {
height: 28px;
width: 36px;
}
.node_entry_wrap {
white-space: pre;
}
.node_entry {
white-space: nowrap;
padding-top: 4px;
}
.t-caret {
cursor: pointer;
-webkit-user-select: none;
/* Safari 3.1+ */
-moz-user-select: none;
/* Firefox 2+ */
-ms-user-select: none;
/* IE 10+ */
user-select: none;
}
.t-caret-down::before {
-ms-transform: rotate(90deg);
/* IE 9 */
-webkit-transform: rotate(90deg);
/* Safari */
transform: rotate(90deg);
}
.toplevel {
list-style-type: none;
padding-inline-start: 0px;
}
.nested {
display: none;
list-style-type: none;
padding-inline-start: 16px;
}
.active {
display: block;
}
.icon {
width: 28px !important;
height: 22px !important;
padding-right: 2px;
vertical-align: middle;
}
.indent {
margin-left: 12px;
}
.tree_label {
padding-left: 2px;
font-size: 14px;
}
.scroll_down {
display: flex;
flex-direction: column-reverse;
}
.small_table {
line-height: 14px;
}
.monospace select {
font-family: monospace;
}
"""
css = {
"light": base_css
+ """
.t-caret::before {
    content: "\u25B6";
    font-size: 12px;
    color: #080808;
display: inline-block;
margin-right: 2px;
}
.mac-scrollbar::-webkit-scrollbar-thumb {
background-color: rgba(0, 0, 0, 0.2) !important;
border-radius: 100px !important;
}
.mac-scrollbar::-webkit-scrollbar-thumb:hover {
background: rgba(0, 0, 0, 0.4) !important;
}
.mac-scrollbar::-webkit-scrollbar-thumb:active {
background: #181818 !important;
}
.mac-scrollbar::-webkit-scrollbar-corner {
background: white;
}
.view_output {
border: 1px solid var(--jp-border-color1);
margin: 2px 2px 2px 2px !important;
padding-right: 1px !important;
background-color: white;
}
""",
"dark": base_css
+ """
.t-caret::before {
    content: "\u25B6";
font-size: 12px;
color: #e0e0e0;
display: inline-block;
margin-right: 2px;
}
.mac-scrollbar::-webkit-scrollbar-thumb {
background-color: rgba(255, 255, 255, 0.3) !important;
border-radius: 100px !important;
}
.mac-scrollbar::-webkit-scrollbar-thumb:hover {
background: rgba(255, 255, 255, 0.5) !important;
}
.mac-scrollbar::-webkit-scrollbar-thumb:active {
background: #e0e0e0 !important;
}
.mac-scrollbar::-webkit-scrollbar-corner {
background: #212121;
}
.view_output {
border: 1px solid var(--jp-border-color1);
margin: 2px 2px 2px 2px !important;
padding-right: 1px !important;
background-color: #212121;
}
""",
}
def set_css(theme, force=False):
global LATEST
if force or theme != LATEST:
display(HTML(f"""<style>{css[theme]}</style>"""))
LATEST = theme
``` |
{
"source": "Jojain/Nales",
"score": 2
} |
#### File: Nales/nales/actions.py
```python
from PyQt5.QtWidgets import QAction
class FitViewAction(QAction):
def __init__(self, parent):
super().__init__(parent)
self.setShortcut("f")
```
#### File: nales/commands/base_commands.py
```python
from typing import TYPE_CHECKING, Any
from PyQt5.QtCore import QModelIndex
from PyQt5.QtWidgets import QUndoCommand
from nales.NDS.interfaces import NOperation, NPart, NShape
if TYPE_CHECKING:
from nales.NDS.model import NModel
class BaseCommand(QUndoCommand):
def __init__(self):
super().__init__()
self.setText(self.__class__.__name__)
class AddTreeItem(BaseCommand):
def __init__(self, model: "NModel", item_name: str, item_obj: Any = None):
super().__init__()
self.model = model
self.item_name = item_name
self.item_obj = item_obj
class DeleteTreeItem(BaseCommand):
def __init__(self, model: "NModel", index: QModelIndex):
super().__init__()
self.model = model
self.node = index.internalPointer()
if isinstance(self.node, NPart):
self.item_obj = self.node.part
elif isinstance(self.node, NOperation):
self.item_obj = self.node.parent.part
elif isinstance(self.node, NShape):
self.item_obj = self.node.shape
```
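The command classes above only capture the state they need; a concrete command also implements `redo()`/`undo()` and gets pushed onto a `QUndoStack`. A generic, self-contained sketch of that pattern (the names here are illustrative, not taken from Nales):
```python
from PyQt5.QtWidgets import QUndoCommand, QUndoStack


class AppendItem(QUndoCommand):
    def __init__(self, container: list, item):
        super().__init__(f"Append {item!r}")
        self._container = container
        self._item = item

    def redo(self):  # called by QUndoStack.push() and on every redo
        self._container.append(self._item)

    def undo(self):  # called on undo
        self._container.pop()


stack = QUndoStack()
data: list = []
stack.push(AppendItem(data, "x"))  # data == ["x"]
stack.undo()                       # data == []
```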
#### File: nales/NDS/interfaces.py
```python
import typing
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Literal,
Optional,
Set,
Tuple,
Union,
)
from ncadquery import Workplane
from OCP.Quantity import Quantity_NameOfColor
from OCP.TCollection import TCollection_ExtendedString
from OCP.TDataStd import TDataStd_Name
from OCP.TDF import TDF_Label, TDF_TagSource
from OCP.TNaming import TNaming_Builder, TNaming_NamedShape
from OCP.TopoDS import TopoDS_Shape
from OCP.TPrsStd import TPrsStd_AISPresentation
from PyQt5.QtCore import QPersistentModelIndex, Qt
from nales.nales_cq_impl import NALES_TYPES, CQMethodCall, Part
from nales.utils import TypeChecker
from nales.widgets.msg_boxs import StdErrorMsgBox
class NNode:
def __init__(self, name=None, parent=None):
self._parent = parent
self._columns_nb = 1
self._childs = []
if parent:
self._row = len(parent._childs)
parent._childs.append(self)
parent._columns_nb = max(self.column, parent.column)
self._label = TDF_TagSource.NewChild_s(parent._label)
self._name = name
TDataStd_Name.Set_s(self._label, TCollection_ExtendedString(name))
else:
self._label = TDF_Label()
self._name = "root"
self._row = 0
def _create_sublabel(self):
"""
        Create an additional OCCT label that is needed if you want to display several shapes
(It's one shape per label)
"""
sublabel = TDF_TagSource.NewChild_s(self._label)
TDataStd_Name.Set_s(
sublabel, TCollection_ExtendedString(f"{self.name} subshape")
)
return sublabel
def walk(self, node: "NNode" = None) -> "NNode":
"""
Walks all the node starting from 'node'
If 'node' is None, starts from the called node
"""
base_node = node if node else self
yield base_node
for child in base_node.childs:
yield from self.walk(child)
    def find(self, node_name: str, node_type=None) -> Optional["NNode"]:
for node in self.walk():
if node.name == node_name:
if node_type:
if isinstance(node, node_type):
return node
else:
return node
def data(self, column):
if column >= 0 and column < len(self._data):
return self._data[column]
@property
def column(self):
return self._columns_nb
def child_count(self):
return len(self._childs)
def child(self, row) -> "NNode":
if row >= 0 and row < self.child_count():
return self._childs[row]
def has_children(self):
if len(self._childs) != 0:
return True
else:
return False
@property
def parent(self):
return self._parent
@property
def childs(self):
return self._childs
@childs.setter
def childs(self, new_childs):
self._childs = new_childs
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def root_node(self):
root = self.parent
while True:
if root.parent:
root = root.parent
else:
return root
@property
def row(self):
return self._row
class NPart(NNode):
def __init__(self, name: str, parent):
super().__init__(name, parent=parent)
self.visible = True
self._solid = TopoDS_Shape()
self._active_shape = None
self.display()
@property
def part(self):
return self.childs[-1].part_obj
def _update_display_shapes(self):
try:
solid = self.part._findSolid().wrapped
except ValueError:
solid = TopoDS_Shape()
self._solid = solid
        if (active_shape := self.part._val().wrapped) is not solid and isinstance(
            active_shape, TopoDS_Shape
        ):
self._active_shape = active_shape
else:
self._active_shape = None
def hide(self):
self.visible = False
self.ais_solid.Erase(remove=True)
self.ais_active_shape.Erase(remove=True)
self.root_node._viewer.Update()
def display(self, update=False):
"""
Builds the display object and attach it to the OCAF tree
"""
if update:
self.ais_solid.Erase(remove=True)
if self._active_shape:
self.ais_active_shape.Erase(remove=True)
self._update_display_shapes()
# self.root_node._viewer.Update()
solid_bldr = TNaming_Builder(self._label) # _label is TDF_Label
solid_bldr.Generated(self._solid)
solid_shape_attr = solid_bldr.NamedShape()
self.ais_solid = TPrsStd_AISPresentation.Set_s(solid_shape_attr)
if self._active_shape:
active_shape_bldr = TNaming_Builder(self._create_sublabel())
active_shape_bldr.Generated(self._active_shape)
active_shape_attr = active_shape_bldr.NamedShape()
self.ais_active_shape = TPrsStd_AISPresentation.Set_s(active_shape_attr)
self.ais_active_shape.Display(update=True)
self.root_node._viewer.Update()
# There is color mixing due to overlapping, maybe this can help to solve the issue :
# https://dev.opencascade.org/doc/refman/html/class_a_i_s___interactive_context.html#a1e0f9550cc001adbb52329ac243bb3b2
# It's considered good enough for now
self.ais_solid.SetTransparency(0.9)
self.ais_solid.Display()
self.root_node._viewer.Update()
self.visible = True
def update(self):
"""
        When called, this method rebuilds the entire Part by calling each child Operation
"""
child_ops = self.childs
for pos, child_op in enumerate(child_ops):
child_op.update(pos)
def remove_operation(self, row: int):
"""
Remove an operation from the operation tree
"""
ops: List[NOperation] = self.childs
ops.pop(row)
ops[row - 1].update_from_node()
class NShape(NNode):
def __init__(self, name, cq_shape, parent: NNode):
self._occt_shape = shape = cq_shape.wrapped
self.shape = cq_shape
self.visible = True
super().__init__(name, parent=parent)
self.bldr = TNaming_Builder(self._label) # _label is TDF_Label
self.bldr.Generated(shape)
named_shape = self.bldr.NamedShape()
self._label.FindAttribute(TNaming_NamedShape.GetID_s(), named_shape)
self.ais_shape = TPrsStd_AISPresentation.Set_s(named_shape)
self.ais_shape.SetTransparency(0.5)
self.ais_shape.SetColor(Quantity_NameOfColor.Quantity_NOC_ALICEBLUE)
self.ais_shape.Display(update=True)
def hide(self):
self.visible = False
self.ais_shape.Erase()
self.root_node._viewer.Update()
def display(self, update=False):
"""
Builds the display object and attach it to the OCAF tree
"""
if update:
self.ais_shape.Erase(remove=True)
self.root_node._viewer.Update()
self.bldr = TNaming_Builder(self._label) # _label is TDF_Label
self.bldr.Generated(self._occt_shape)
named_shape = self.bldr.NamedShape()
self._label.FindAttribute(TNaming_NamedShape.GetID_s(), named_shape)
self.ais_shape = TPrsStd_AISPresentation.Set_s(named_shape)
self.ais_shape.SetTransparency(0.5)
self.ais_shape.SetColor(Quantity_NameOfColor.Quantity_NOC_ALICEBLUE)
self.ais_shape.Display(update=True)
self.root_node._viewer.Update()
self.visible = True
def update(self):
"""
Update the shape object
"""
self._occt_shape = self.shape.wrapped
self.display(True)
class NShapeOperation(NNode):
def __init__(self, maker_method: Callable, shape_class, parent=None):
super().__init__(maker_method.__name__, parent)
self.maker_method = maker_method
self.shape_class = shape_class
def update(self) -> None:
args = [child.value for child in self.childs]
self.parent.shape = self.maker_method(self.shape_class, *args)
self.parent.update()
class NOperation(NNode):
def __init__(
self, method_name: str, part_obj: Part, parent: NNode, operation: CQMethodCall
):
super().__init__(method_name, parent=parent)
self.part_obj = part_obj
self.operation = operation
self.method = getattr(part_obj, method_name).__func__
if method_name == "Workplane":
self._root_operation = True
else:
self._root_operation = False
def update_from_node(self):
"""
Update the Part from this node
It recomputes every operation from this node to the end
"""
ops: List[NOperation] = self.parent.childs[self.row :]
for op in ops:
op.update()
self.parent.display(update=True)
def _update_init_part(self):
"""
        This method is called when the user tries to update __init__ method arguments.
        There is special handling because it is a bit different from the regular methods.
"""
args = [
child.value if not child.is_linked("obj") else child.linked_obj
for child in self.childs
]
try:
self.method(self.part_obj, *args, internal_call=True)
except Exception as exc:
StdErrorMsgBox(repr(exc))
def update(self) -> bool:
"""
Update the CQ objects stack from param modification in the GUI view
"""
# Special handling of __init__ method
if self.row == 0:
self._update_init_part()
return True
previous_operations: List[NOperation] = self.parent.childs[: self.row]
old_part_obj = previous_operations[-1].part_obj
args = [
child.value if not child.is_linked("obj") else child.linked_obj
for child in self.childs
]
try:
self.part_obj = self.method(old_part_obj, *args, internal_call=True)
return True
except ValueError as exc: # we update parent operations until pending wires have reset
if exc.args[0] == "No pending wires present":
tried_updates = [self]
# recursively call parent ops and store all the failed updates to update them again afterwards
while (tried_update := previous_operations[-1].update()) is False:
tried_updates.append(tried_update)
for tried_update in tried_updates:
tried_update.update()
else:
StdErrorMsgBox(repr(exc))
return False
except Exception as exc:
StdErrorMsgBox(repr(exc))
return False
def _restore_pending_wires(self):
index = 2
previous_ops = self.parent.childs[: self._row]
while len(self.parent.part.ctx.pendingWires) == 0:
op = previous_ops[-index]
op.update(len(previous_ops) - op._row)
index += 1
class NShapeArgument(NNode):
def __init__(self, name=None, parent=None):
super().__init__(name, parent)
class NArgument(NNode):
"""
    The underlying data of an Argument is as follows:
    name : cq argument name
    value : value
    linked_param : the name of the parameter linked to this arg, None if not connected to any
    type : value type (to be decided whether this field is kept)
If the Argument is linked to a Parameter, the Parameter name is displayed
"""
def __init__(self, arg_name: str, value, arg_type, parent: NNode, kwarg=False):
super().__init__(arg_name, parent=parent)
self._name = arg_name
self._type = arg_type
self._value = value
self._typechecker = TypeChecker(arg_type)
self._kwarg = kwarg # Boolean indicating if the arg is a kwarg or not
self._linked_param = None
self._linked_nobj_idx: QPersistentModelIndex = None
self._param_name_pidx = None
self._param_value_pidx = None
def link(
self,
by: Literal["param", "obj"],
value: Union[Tuple, QPersistentModelIndex, Any],
):
"""
        Link this parameter to an object available in the data model
"""
if by == "param":
raw_val = value[1]
if not self.is_type_compatible(raw_val):
raise TypeError("Couldn't link the param")
self._linked_param = value[0]
self._value = value[1]
self._param_name_pidx = value[2]
self._param_value_pidx = value[3]
else:
self._linked_nobj_idx = value
def unlink_param(self):
self._linked_param = None
self._param_name_pidx = None
self._param_value_pidx = None
def is_kwarg(self):
return self._kwarg
def is_linked(self, by: str = None):
if by == "obj":
return True if self._linked_nobj_idx else False
elif by == "param":
return True if self._linked_param else False
elif by is None:
if self._linked_param or self._linked_nobj_idx:
return True
else:
return False
else:
raise ValueError("Argument 'by' must be either 'obj' or 'param'")
def is_optional_type(self) -> bool:
"""
        Indicates if the NArgument is optional, i.e. the function signature looks something like:
method(nargument:Union[float,None] = None) or method(nargument:Optional[float] = None)
"""
if self.is_kwarg():
origin = typing.get_origin(self._type)
if origin == Optional:
return True
if origin == Union:
for allowed_type in typing.get_args(self._type):
if allowed_type == type(None):
return True
return False
else:
return False
else:
return False
def is_literal_type(self) -> bool:
origin = typing.get_origin(self.type)
if self.type == str or origin == Literal:
return True
if origin == Union:
possible_types = typing.get_args(self.type)
for possible_type in possible_types:
if possible_type == str or possible_type == Literal:
return True
return False
def is_type_compatible(self, value: str) -> bool:
return self._typechecker.check(value)
def _cast(self, value: Any):
if type(value) == self._type:
return value
return self._typechecker.cast(value)
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = value
@property
def linked_param(self):
if self.is_linked():
return self._linked_param
else:
raise ValueError("This argument is not linked to a param")
@property
def linked_node(self):
if not self._linked_nobj_idx:
raise ValueError("This argument isn't linked to any node")
else:
return self._linked_nobj_idx.data(Qt.EditRole)
@property
def linked_obj(self):
if self.is_linked(by="obj"):
if hasattr(self.linked_node, "part"):
return self.linked_node.part
elif hasattr(self.linked_node, "shape"):
return self.linked_node.shape
else:
raise NotImplementedError(
"This argument is linked to a object that is not supported yet"
)
else:
raise ValueError("This argument is not linked to an object")
@property
def columns_nb(self):
return 1
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def value(self):
if self.is_optional_type() and self._value is None:
return None
if self.is_linked(by="param"):
return self._cast(self._param_value_pidx.data())
elif self.is_linked(by="obj"):
return self.linked_obj
elif not isinstance(self._value, str):
# Upon argument creation self._value is already of the right type
return self._value
else:
# If self._value is a string, means the users modified the argument in the GUI
return self._cast(self._value)
@value.setter
def value(self, value):
self._value = value
@property
def linked_param(self):
return self._linked_param
```
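For reference, this is how the standard-library `typing` introspection used by `NArgument.is_optional_type` and `is_literal_type` behaves (plain Python, nothing Nales-specific):
```python
import typing
from typing import Literal, Optional

print(typing.get_origin(Optional[float]))    # typing.Union (Optional[X] is Union[X, None])
print(typing.get_args(Optional[float]))      # (<class 'float'>, <class 'NoneType'>)
print(typing.get_origin(Literal["a", "b"]))  # typing.Literal
print(typing.get_origin(float))              # None (plain types have no origin)
```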
#### File: nales/NDS/NOCAF.py
```python
import OCP
from OCP.BinDrivers import BinDrivers
from OCP.TCollection import TCollection_ExtendedString
from OCP.TDocStd import TDocStd_Application, TDocStd_Document
from OCP.TPrsStd import TPrsStd_AISViewer
from OCP.XmlDrivers import XmlDrivers
class Application(TDocStd_Application):
def __init__(self, binary=False):
super().__init__()
if binary:
BinDrivers.DefineFormat_s(self)
self._file_extension = ".cbf"
self.doc_format = "BinOcaf"
else:
XmlDrivers.DefineFormat_s(self)
self._file_extension = ".xml"
self.doc_format = "XmlOcaf"
self.doc = TDocStd_Document(TCollection_ExtendedString(self.doc_format))
self.NewDocument(TCollection_ExtendedString(self.doc_format), self.doc)
def viewer_redraw(self):
"""
Redraw the viewer (refresh the view even if the user isn't moving the view)
"""
self._pres_viewer.Update()
def init_viewer_presentation(self, context: OCP.AIS.AIS_InteractiveContext):
self._pres_viewer = TPrsStd_AISViewer.New_s(self.doc.GetData().Root(), context)
def save_as(self, path: str):
"""
Saves the application document in the specified path.
The file extension is automatically added by the :Application:
"""
path += self._file_extension
status = self.SaveAs(self.doc, TCollection_ExtendedString(path))
if status != OCP.PCDM.PCDM_SS_OK:
self.Close(self.doc)
raise Exception("The document could not be saved !")
def close(self):
        self.Close(self.doc)
```
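A minimal usage sketch of the `Application` wrapper above (import path taken from the file header; the output path is illustrative):
```python
from nales.NDS.NOCAF import Application

app = Application(binary=True)  # BinOcaf document, saved with a ".cbf" extension
# ... attach shapes/presentations to app.doc via the node classes in interfaces.py ...
app.save_as("/tmp/my_model")    # writes "/tmp/my_model.cbf", or raises if saving fails
```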
#### File: nales/tests/test_import_export.py
```python
import os
from typing import List, Tuple
from nales.main_window import MainWindow
TESTS_FILES_FOLDER = os.path.join(os.path.dirname(__file__), "tests_files")
def _get_file_content(path: str) -> List[str]:
with open(path, "r") as f:
content = f.readlines()
return content
def _read_params(file_data: List[str]) -> Tuple:
names = []
values = []
types = []
for idx, line in enumerate(file_data):
if line.startswith("#Paramsdef>>"):
start_idx = idx
nb_of_params = int(line.split()[1])
break
params_lines = file_data[idx + 1 : idx + nb_of_params + 1]
for line in params_lines:
data = line.split("#")
paramdef = data[0].split("=")
names.append(paramdef[0].strip())
values.append(paramdef[1].strip())
types.append(data[1].strip())
return tuple(names), tuple(values), tuple(types)
def test_export_param(qtbot):
test_export_file = os.path.join(TESTS_FILES_FOLDER, "test_export_param.py")
mw = MainWindow()
mw.hide()
qtbot.addWidget(mw)
mw.param_model.add_parameter("p1", 15)
mw.param_model.add_parameter("p2", "bonjour")
mw.param_model.add_parameter("p3", None)
mw._console.execute_command(f"nales.save(r'{test_export_file}')")
assert os.path.isfile(test_export_file)
content = _get_file_content(test_export_file)
names, values, types = _read_params(content)
assert names == ("p1", "p2", "p3")
assert values == ("15", '"bonjour"', "None")
assert types == ("int", "str", "None")
def test_import_param(qtbot):
# Load the file created by the export test
test_import_file = os.path.join(TESTS_FILES_FOLDER, "test_export_param.py")
mw = MainWindow()
mw.hide()
qtbot.addWidget(mw)
# Read the file and update the GUI
mw._console.execute_command(f"nales.load(r'{test_import_file}')")
params = mw.param_model.parameters
assert len(params) == 3
assert (params[0].name, params[0].value, params[0].type) == ("p1", 15, "int")
assert (params[1].name, params[1].value, params[1].type) == (
"p2",
'"bonjour"',
"str",
)
assert (params[2].name, params[2].value, params[2].type) == ("p3", None, None)
def test_export_parts(qtbot):
test_import_file = os.path.join(TESTS_FILES_FOLDER, "test_import_parts.py")
mw = MainWindow()
mw.hide()
qtbot.addWidget(mw)
mw._console.execute_command(
"p = Part(name='test_part').box(10,10,10).faces('>Z').workplane().hole(1.5)"
)
mw._console.execute_command(f"nales.save(r'{test_import_file}')")
# Open the test file and store the part definition lines in a list
with open(test_import_file, "r") as test_file:
lines = test_file.readlines()
part_def_lines = []
for i, line in enumerate(lines):
if line.startswith("#Partdef>>"):
def_line_nb = int(line.split()[2])
for l in lines[i + 1 : i + def_line_nb + 1]:
part_def_lines.append(l.strip())
break
    # In the future it would be better to test that we have the correct AST node, so we don't care about formatting
assert part_def_lines[0] == "test_part = cq.Workplane()"
assert (
part_def_lines[1]
== "test_part = test_part.box(length = 10, width = 10, height = 10, centered = True, combine = True, clean = True)"
)
assert (
part_def_lines[2]
== 'test_part = test_part.faces(selector = ">Z", tag = None)'
)
assert (
part_def_lines[3]
== 'test_part = test_part.workplane(offset = 0.0, invert = False, centerOption = "ProjectedOrigin", origin = None)'
)
assert (
part_def_lines[4]
== "test_part = test_part.hole(diameter = 1.5, depth = None, clean = True)"
)
def test_import_parts(qtbot):
test_import_file = os.path.join(TESTS_FILES_FOLDER, "test_import_parts.py")
mw = MainWindow()
mw.hide()
qtbot.addWidget(mw)
mw._console.execute_command(f"nales.load(r'{test_import_file}')")
assert len(mw.model.parts) == 1
p = mw.model.parts[0]
assert (
len(p.childs) == 4
) # check that all operations have been loaded, atm doesn't consider __init__ operation
assert (
len(p.childs[0].childs) == 6
) # check that we have all args of the method `box` in this case
```
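The exact on-disk format is not part of this dump, but from `_read_params` and the assertions above, the parameter section of a saved file presumably looks like this (spacing may differ):
```python
#Paramsdef>> 3
p1 = 15 # int
p2 = "bonjour" # str
p3 = None # None
```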
#### File: nales/widgets/console.py
```python
import sys
from pprint import pprint
from typing import Any, List
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QApplication
from qtconsole.inprocess import QtInProcessKernelManager
from qtconsole.rich_jupyter_widget import RichJupyterWidget
from nales.nales_cq_impl import Part
# sys.stdout = sys.stderr = io.StringIO() # QtInProcessKernelManager related see https://github.com/ipython/ipython/issues/10658#issuecomment-307757082
class ConsoleWidget(RichJupyterWidget):
name = "Console"
def __init__(self, customBanner=None, namespace=dict(), *args, **kwargs):
super(ConsoleWidget, self).__init__(*args, **kwargs)
self.font_size = 6
self.kernel_manager = kernel_manager = QtInProcessKernelManager()
kernel_manager.start_kernel(show_banner=False)
kernel_manager.kernel.gui = "qt"
kernel_manager.kernel.shell.banner1 = ""
self.kernel_client = kernel_client = self._kernel_manager.client()
kernel_client.start_channels()
self.namespace = self.kernel_manager.kernel.shell.user_global_ns
def stop():
kernel_client.stop_channels()
kernel_manager.shutdown_kernel()
QApplication.instance().exit()
self.exit_requested.connect(stop)
self.clear()
self.push_vars(namespace)
def remove_obj(self, obj: Any) -> None:
"""
Remove the given `obj` (and all the vars associated) from the console namespace
"""
ns = self.namespace
for var, value in ns.copy().items():
if id(value) == id(obj):
ns.pop(var)
def get_obj_varnames(self, obj: Any) -> List[str]:
ns = self.namespace
vars = []
for var, value in ns.items():
if value is obj:
vars.append(var)
return vars
def _execute(self, source, hidden):
"""
        Execute code in the IKernel.
"""
super()._execute(source, hidden)
def _get_cq_obj(self, var_name):
"""
Retrieve a Workplane object from the IKernel namespace
"""
ns = self.namespace
try:
return ns[var_name]
except KeyError:
return None
@pyqtSlot(dict)
def push_vars(self, variableDict):
"""
Given a dictionary containing name / value pairs, push those variables
to the Jupyter console widget
"""
self.kernel_manager.kernel.shell.push(variableDict)
def clear(self):
"""
Clears the terminal
"""
self._control.clear()
def print_text(self, text):
"""
Prints some plain text to the console
"""
self._append_plain_text(text)
def execute_command(self, command):
"""
Execute a command in the frame of the console widget
"""
self._execute(command, False)
def _banner_default(self):
return ""
def update_part(self, name: str, updated_part: "Part"):
"""
Update all instances of a part in the console when it's modified in the GUI
"""
for var, part in [
(var, part)
for var, part in self.namespace.items()
if isinstance(part, Part)
]:
if part._name == name:
self.namespace[var] = updated_part
if __name__ == "__main__":
import sys
app = QApplication(sys.argv)
console = ConsoleWidget(customBanner="IPython console test")
console.show()
sys.exit(app.exec_())
```
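A short sketch of seeding and inspecting the embedded kernel namespace (it assumes a running `QApplication`, as in the `__main__` guard above; the variable names are illustrative):
```python
part = object()
console = ConsoleWidget(namespace={"p": part, "answer": 42})
print(console.get_obj_varnames(part))  # -> ["p"]
console.remove_obj(part)               # drops every variable bound to that object
```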
#### File: nales/widgets/occt_widget.py
```python
from sys import platform
from OCP.AIS import AIS_ColoredShape, AIS_DisplayMode, AIS_InteractiveContext
from OCP.Aspect import Aspect_DisplayConnection, Aspect_TypeOfTriedronPosition
from OCP.OpenGl import OpenGl_GraphicDriver
from OCP.Quantity import Quantity_Color
from OCP.V3d import V3d_Viewer
from PyQt5.QtCore import Qt, pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import QWidget
ZOOM_STEP = 0.9
class OCCTWidget(QWidget):
sigObjectSelected = pyqtSignal(list)
def __init__(self, parent=None):
super(OCCTWidget, self).__init__(parent)
self.setAttribute(Qt.WA_NativeWindow)
self.setAttribute(Qt.WA_PaintOnScreen)
self.setAttribute(Qt.WA_NoSystemBackground)
self._initialized = False
self._needs_update = False
        # OCCT specific things
self.display_connection = Aspect_DisplayConnection()
self.graphics_driver = OpenGl_GraphicDriver(self.display_connection)
self.viewer = V3d_Viewer(self.graphics_driver)
self.view = self.viewer.CreateView()
self.context = AIS_InteractiveContext(self.viewer)
        # Trihedron, lights, etc.
self.prepare_display()
def fit(self):
self.view.FitAll()
def prepare_display(self):
view = self.view
params = view.ChangeRenderingParams()
params.NbMsaaSamples = 8
params.IsAntialiasingEnabled = True
view.TriedronDisplay(
Aspect_TypeOfTriedronPosition.Aspect_TOTP_RIGHT_LOWER, Quantity_Color(), 0.1
)
viewer = self.viewer
viewer.SetDefaultLights()
viewer.SetLightOn()
ctx = self.context
ctx.SetDisplayMode(AIS_DisplayMode.AIS_Shaded, True)
ctx.DefaultDrawer().SetFaceBoundaryDraw(True)
def display(self, shape: AIS_ColoredShape):
self.context.Display(shape, True)
def wheelEvent(self, event):
delta = event.angleDelta().y()
factor = ZOOM_STEP if delta < 0 else 1 / ZOOM_STEP
self.view.SetZoom(factor)
def mousePressEvent(self, event):
pos = event.pos()
if event.button() == Qt.LeftButton:
self.view.StartRotation(pos.x(), pos.y())
elif event.button() == Qt.RightButton:
self.view.StartZoomAtPoint(pos.x(), pos.y())
self.old_pos = pos
def mouseMoveEvent(self, event):
pos = event.pos()
x, y = pos.x(), pos.y()
if event.buttons() == Qt.LeftButton:
self.view.Rotation(x, y)
elif event.buttons() == Qt.MiddleButton:
self.view.Pan(x - self.old_pos.x(), self.old_pos.y() - y, theToStart=True)
elif event.buttons() == Qt.RightButton:
self.view.ZoomAtPoint(self.old_pos.x(), y, x, self.old_pos.y())
self.old_pos = pos
def mouseReleaseEvent(self, event):
if event.button() == Qt.LeftButton:
pos = event.pos()
x, y = pos.x(), pos.y()
self.context.MoveTo(x, y, self.view, True)
self._handle_selection()
def _handle_selection(self):
self.context.Select(True)
self.context.InitSelected()
selected = []
if self.context.HasSelectedShape():
selected.append(self.context.SelectedShape())
self.sigObjectSelected.emit(selected)
def paintEngine(self):
return None
def paintEvent(self, event):
if not self._initialized:
self._initialize()
else:
self.view.Redraw()
def showEvent(self, event):
super(OCCTWidget, self).showEvent(event)
def resizeEvent(self, event):
super(OCCTWidget, self).resizeEvent(event)
self.view.MustBeResized()
def _initialize(self):
wins = {
"darwin": self._get_window_osx,
"linux": self._get_window_linux,
"win32": self._get_window_win,
}
self.view.SetWindow(wins.get(platform, self._get_window_linux)(self.winId()))
self._initialized = True
def _get_window_win(self, wid):
from OCP.WNT import WNT_Window
return WNT_Window(wid.ascapsule())
def _get_window_linux(self, wid):
from OCP.Xw import Xw_Window
return Xw_Window(self.display_connection, int(wid))
def _get_window_osx(self, wid):
from OCP.Cocoa import Cocoa_Window
return Cocoa_Window(wid.ascapsule())
``` |
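A minimal usage sketch for the widget above, assuming the module path `nales.widgets.occt_widget` implied by the file header and a working OCP/PyQt5 environment; the box shape and window size are purely illustrative.
```python
# Hypothetical usage of OCCTWidget; module path and shape are assumptions.
import sys
from PyQt5.QtWidgets import QApplication
from OCP.AIS import AIS_ColoredShape
from OCP.BRepPrimAPI import BRepPrimAPI_MakeBox
from nales.widgets.occt_widget import OCCTWidget

app = QApplication(sys.argv)
widget = OCCTWidget()
widget.resize(800, 600)
widget.show()                     # first paintEvent triggers _initialize()

# Build a simple box and hand it to the AIS context via display().
box = BRepPrimAPI_MakeBox(10.0, 20.0, 30.0).Shape()
widget.display(AIS_ColoredShape(box))
widget.fit()

sys.exit(app.exec_())
```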
{
"source": "Jojain/ncadquery",
"score": 3
} |
#### File: ncadquery/doc/gen_colors.py
```python
from OCP import Quantity
import ncadquery as cq
from typing import Dict
from itertools import chain
OCP_COLOR_LEADER, SEP = "Quantity_NOC", "_"
TEMPLATE = """\
<div style="background-color:rgba({background_color});padding:10px;border-radius:5px;color:rgba({text_color});">{color_name}</div>\
"""
def color_to_rgba_str(c: cq.Color) -> str:
""" Convert a Color object to a string for the HTML/CSS template.
"""
t = c.toTuple()
vals = [int(v * 255) for v in t[:3]]
return ",".join([str(v) for v in chain(vals, [t[3]])])
def calc_text_color(c: cq.Color) -> str:
""" Calculate required overlay text color from background color.
"""
val = sum(c.toTuple()[:3]) / 3
if val < 0.5:
rv = "255,255,255"
else:
rv = "0,0,0"
return rv
def get_colors() -> Dict[str, cq.Color]:
""" Scan OCP for colors and output to a dict.
"""
colors = {}
for name in dir(Quantity):
splitted = name.rsplit(SEP, 1)
if splitted[0] == OCP_COLOR_LEADER:
colors.update({splitted[1].lower(): cq.Color(splitted[1])})
return colors
def rst():
""" Produce the text for a Sphinx directive.
"""
lines = [
".. raw:: html",
"",
' <div class="color-grid" style="display:grid;grid-gap:10px;grid-template-columns:repeat(auto-fill, minmax(200px,1fr));">',
]
colors = get_colors()
for name, c in colors.items():
lines += [
TEMPLATE.format(
background_color=color_to_rgba_str(c),
text_color=calc_text_color(c),
color_name=name,
)
]
lines.append(" </div>")
return "\n".join(lines)
if __name__ == "__main__":
print(rst())
```
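A quick, hypothetical sanity check of the two helpers above; the import path `gen_colors` simply mirrors the file name, and the printed values are indicative only.
```python
# Assumes ncadquery is installed and gen_colors.py is importable as gen_colors.
import ncadquery as cq
from gen_colors import color_to_rgba_str, calc_text_color

c = cq.Color("red")
print(color_to_rgba_str(c))   # e.g. "255,0,0,1.0"
print(calc_text_color(c))     # red averages below 0.5, so white text: "255,255,255"
```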
#### File: ncadquery/ncadquery/cq_directive.py
```python
import traceback
from pathlib import Path
from uuid import uuid1 as uuid
from textwrap import indent
from ncadquery import exporters, Assembly, Compound, Color, Sketch
from ncadquery import cqgi
from ncadquery.occ_impl.jupyter_tools import (
toJSON,
dumps,
TEMPLATE_RENDER,
DEFAULT_COLOR,
)
from docutils.parsers.rst import directives, Directive
template = """
.. raw:: html
<div class="cq" style="text-align:%(txt_align)s;float:left;">
%(out_svg)s
</div>
<div style="clear:both;">
</div>
"""
template_content_indent = " "
rendering_code = """
const RENDERERS = {};
var ID = 0;
const renderWindow = vtk.Rendering.Core.vtkRenderWindow.newInstance();
const openglRenderWindow = vtk.Rendering.OpenGL.vtkRenderWindow.newInstance();
renderWindow.addView(openglRenderWindow);
const rootContainer = document.createElement('div');
rootContainer.style.position = 'fixed';
//rootContainer.style.zIndex = -1;
rootContainer.style.left = 0;
rootContainer.style.top = 0;
rootContainer.style.pointerEvents = 'none';
rootContainer.style.width = '100%';
rootContainer.style.height = '100%';
openglRenderWindow.setContainer(rootContainer);
const interact_style = vtk.Interaction.Style.vtkInteractorStyleManipulator.newInstance();
const manips = {
rot: vtk.Interaction.Manipulators.vtkMouseCameraTrackballRotateManipulator.newInstance(),
pan: vtk.Interaction.Manipulators.vtkMouseCameraTrackballPanManipulator.newInstance(),
zoom1: vtk.Interaction.Manipulators.vtkMouseCameraTrackballZoomManipulator.newInstance(),
zoom2: vtk.Interaction.Manipulators.vtkMouseCameraTrackballZoomManipulator.newInstance(),
roll: vtk.Interaction.Manipulators.vtkMouseCameraTrackballRollManipulator.newInstance(),
};
manips.zoom1.setControl(true);
manips.zoom2.setButton(3);
manips.roll.setShift(true);
manips.pan.setButton(2);
for (var k in manips){{
interact_style.addMouseManipulator(manips[k]);
}};
const interactor = vtk.Rendering.Core.vtkRenderWindowInteractor.newInstance();
interactor.setView(openglRenderWindow);
interactor.initialize();
interactor.setInteractorStyle(interact_style);
document.addEventListener('DOMContentLoaded', function () {
document.body.appendChild(rootContainer);
});
function updateViewPort(element, renderer) {
const { innerHeight, innerWidth } = window;
const { x, y, width, height } = element.getBoundingClientRect();
const viewport = [
x / innerWidth,
1 - (y + height) / innerHeight,
(x + width) / innerWidth,
1 - y / innerHeight,
];
renderer.setViewport(...viewport);
}
function recomputeViewports() {
const rendererElems = document.querySelectorAll('.renderer');
for (let i = 0; i < rendererElems.length; i++) {
const elem = rendererElems[i];
const { id } = elem;
const renderer = RENDERERS[id];
updateViewPort(elem, renderer);
}
renderWindow.render();
}
function resize() {
rootContainer.style.width = `${window.innerWidth}px`;
openglRenderWindow.setSize(window.innerWidth, window.innerHeight);
recomputeViewports();
}
window.addEventListener('resize', resize);
document.addEventListener('scroll', recomputeViewports);
function enterCurrentRenderer(e) {
interactor.bindEvents(document.body);
interact_style.setEnabled(true);
interactor.setCurrentRenderer(RENDERERS[e.target.id]);
}
function exitCurrentRenderer(e) {
interactor.setCurrentRenderer(null);
interact_style.setEnabled(false);
interactor.unbindEvents();
}
function applyStyle(element) {
element.classList.add('renderer');
element.style.width = '100%';
element.style.height = '100%';
element.style.display = 'inline-block';
element.style.boxSizing = 'border';
return element;
}
window.addEventListener('load', resize);
function render(data, parent_element, ratio){
// Initial setup
const renderer = vtk.Rendering.Core.vtkRenderer.newInstance({ background: [1, 1, 1 ] });
    // iterate over all children
for (var el of data){
var trans = el.position;
var rot = el.orientation;
var rgba = el.color;
var shape = el.shape;
// load the inline data
var reader = vtk.IO.XML.vtkXMLPolyDataReader.newInstance();
const textEncoder = new TextEncoder();
reader.parseAsArrayBuffer(textEncoder.encode(shape));
// setup actor,mapper and add
const mapper = vtk.Rendering.Core.vtkMapper.newInstance();
mapper.setInputConnection(reader.getOutputPort());
mapper.setResolveCoincidentTopologyToPolygonOffset();
mapper.setResolveCoincidentTopologyPolygonOffsetParameters(0.5,100);
const actor = vtk.Rendering.Core.vtkActor.newInstance();
actor.setMapper(mapper);
// set color and position
actor.getProperty().setColor(rgba.slice(0,3));
actor.getProperty().setOpacity(rgba[3]);
actor.rotateZ(rot[2]*180/Math.PI);
actor.rotateY(rot[1]*180/Math.PI);
actor.rotateX(rot[0]*180/Math.PI);
actor.setPosition(trans);
renderer.addActor(actor);
};
//add the container
const container = applyStyle(document.createElement("div"));
parent_element.appendChild(container);
container.addEventListener('mouseenter', enterCurrentRenderer);
container.addEventListener('mouseleave', exitCurrentRenderer);
container.id = ID;
renderWindow.addRenderer(renderer);
updateViewPort(container, renderer);
renderer.getActiveCamera().set({ position: [1, -1, 1], viewUp: [0, 0, 1] });
renderer.resetCamera();
RENDERERS[ID] = renderer;
ID++;
};
"""
template_vtk = """
.. raw:: html
<div class="cq-vtk"
style="text-align:{txt_align}s;float:left;border: 1px solid #ddd; width:{width}; height:{height}"">
<script>
var parent_element = {element};
var data = {data};
render(data, parent_element);
</script>
</div>
<div style="clear:both;">
</div>
"""
class cq_directive(Directive):
has_content = True
required_arguments = 0
optional_arguments = 2
option_spec = {
"height": directives.length_or_unitless,
"width": directives.length_or_percentage_or_unitless,
"align": directives.unchanged,
}
def run(self):
options = self.options
content = self.content
state_machine = self.state_machine
# only consider inline snippets
plot_code = "\n".join(content)
# Since we don't have a filename, use a hash based on the content
# the script must define a variable called 'out', which is expected to
# be a CQ object
out_svg = "Your Script Did not assign call build_output() function!"
try:
result = cqgi.parse(plot_code).build()
if result.success:
out_svg = exporters.getSVG(
exporters.toCompound(result.first_result.shape)
)
else:
raise result.exception
except Exception:
traceback.print_exc()
out_svg = traceback.format_exc()
# now out
# Now start generating the lines of output
lines = []
# get rid of new lines
out_svg = out_svg.replace("\n", "")
txt_align = "left"
if "align" in options:
txt_align = options["align"]
lines.extend((template % locals()).split("\n"))
lines.extend(["::", ""])
lines.extend([" %s" % row.rstrip() for row in plot_code.split("\n")])
lines.append("")
if len(lines):
state_machine.insert_input(lines, state_machine.input_lines.source(0))
return []
class cq_directive_vtk(Directive):
has_content = True
required_arguments = 0
optional_arguments = 2
option_spec = {
"height": directives.length_or_unitless,
"width": directives.length_or_percentage_or_unitless,
"align": directives.unchanged,
"select": directives.unchanged,
}
def run(self):
options = self.options
content = self.content
state_machine = self.state_machine
env = self.state.document.settings.env
build_path = Path(env.app.builder.outdir)
out_path = build_path / "_static"
# only consider inline snippets
plot_code = "\n".join(content)
# collect the result
try:
result = cqgi.parse(plot_code).build()
if result.success:
if result.first_result:
shape = result.first_result.shape
else:
shape = result.env[options.get("select", "result")]
if isinstance(shape, Assembly):
assy = shape
elif isinstance(shape, Sketch):
assy = Assembly(shape._faces, color=Color(*DEFAULT_COLOR))
else:
assy = Assembly(shape, color=Color(*DEFAULT_COLOR))
else:
raise result.exception
except Exception:
traceback.print_exc()
assy = Assembly(Compound.makeText("CQGI error", 10, 5))
# save vtkjs to static
fname = Path(str(uuid()))
exporters.assembly.exportVTKJS(assy, out_path / fname)
fname = str(fname) + ".zip"
# add the output
lines = []
data = dumps(toJSON(assy))
lines.extend(
template_vtk.format(
code=indent(TEMPLATE_RENDER.format(), " "),
data=data,
ratio="null",
element="document.currentScript.parentNode",
txt_align=options.get("align", "left"),
width=options.get("width", "100%"),
height=options.get("height", "500px"),
).splitlines()
)
lines.extend(["::", ""])
lines.extend([" %s" % row.rstrip() for row in plot_code.split("\n")])
lines.append("")
if len(lines):
state_machine.insert_input(lines, state_machine.input_lines.source(0))
return []
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
app.add_directive("cq_plot", cq_directive)
app.add_directive("cadquery", cq_directive_vtk)
# add vtk.js
app.add_js_file("vtk.js")
app.add_js_file(None, body=rendering_code)
``` |
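To use these directives the module has to be registered as a Sphinx extension; a hypothetical `conf.py` fragment (the module path follows the file header above, the static directory is an assumption) might look like this:
```python
# Hypothetical Sphinx conf.py fragment; "ncadquery.cq_directive" is the module
# path implied by the file header above.
extensions = [
    "ncadquery.cq_directive",   # setup() adds the `cq_plot` and `cadquery` directives
]
# setup() calls app.add_js_file("vtk.js"), so vtk.js must be reachable as a static file.
html_static_path = ["_static"]
```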
{
"source": "JoJaJones/StarTrekDBProject",
"score": 3
} |
#### File: JoJaJones/StarTrekDBProject/STForms.py
```python
from flask import Flask, render_template
from flask_wtf import FlaskForm
from constants import CHARACTERS, SPECIES, SERIES, LOCATIONS, AFFILIATIONS, ACTORS
from wtforms import (widgets, validators, StringField, SubmitField, RadioField, SelectMultipleField, FormField,
IntegerField, SelectField, DateField, TextAreaField, Form)
app = Flask(__name__)
app.config["SECRET_KEY"] = "tempsecret"
class DateSubForm(Form):
month = IntegerField("MM", validators=[validators.Optional(), validators.NumberRange(1, 12)])
day = IntegerField("DD", validators=[validators.Optional(), validators.number_range(1, 31)])
year = IntegerField("YYYY", validators=[validators.Optional(), validators.number_range(1966, message="Year must be 1966 or later")])
def clear(self):
self.month.data = None
self.day.data = None
self.year.data = None
class Display:
pass
class SingleFieldForm(FlaskForm):
first_field = StringField("", validators=[validators.DataRequired()])
submit = SubmitField("Submit")
class Row:
def __init__(self, id, values, data_type):
self.id = id
self.data_type = data_type
self.table_values = values
self.name = None
self.set_name()
def set_name(self):
if self.data_type == CHARACTERS and self.table_values[2] and len(self.table_values[2]) > 0:
self.name = self.table_values[2]
else:
self.name = self.table_values[0]
def reformat_date(self, idx: int):
temp_date = str(self.table_values[idx]).split("-")
temp_date = temp_date[1:] + [temp_date[0]]
self.table_values[idx] = "-".join(temp_date)
def temp_char_buffer(self):
self.table_values += [""]
class DeleteForm(FlaskForm):
submit = SubmitField("Delete")
class TestForm(FlaskForm):
select_field = SelectField("", coerce=int)
class LocationForm(FlaskForm):
first_field = StringField("")
second_field = SelectField("", choices=[("station", "Space Station"),
("planet", "Planet"),
("ship", "Space Ship")])
submit = SubmitField("Submit")
class SeriesForm(FlaskForm):
first_field = StringField("Series Name", validators=[validators.DataRequired()])
second_field = FormField(DateSubForm)
third_field = FormField(DateSubForm)
submit = SubmitField("Submit")
class CharacterForm(FlaskForm):
first_field = StringField("First Name")
second_field = StringField("Last Name", validators=[validators.Optional()])
third_field = StringField("Alias", validators=[validators.Optional()])
fourth_field = StringField("Title", validators=[validators.Optional()])
fifth_field = TextAreaField("Description", validators=[validators.Optional()])
sixth_field = TextAreaField("Biography", validators=[validators.Optional()])
seventh_field = SelectMultipleField("Species", coerce=int, validators=[validators.Optional()])
eighth_field = SelectMultipleField("Affiliations", coerce=int, validators=[validators.Optional()])
ninth_field = SelectMultipleField("Series", coerce=int, validators=[validators.Optional()])
# add_location = SubmitField("Add Location to Character")
submit = SubmitField("Submit")
class AddLocationToCharacter(FlaskForm):
first_field = SelectField(coerce=int)
second_field = SelectField(coerce=int)
submit = SubmitField("Submit")
class AddActorForm(FlaskForm):
fname_field = StringField("First name", validators=[validators.Required()])
lname_field = StringField("Last name", validators=[validators.Optional()])
birthday_field = FormField(DateSubForm)
imdb_field = StringField("IMDB link", validators=[validators.Optional()])
submit = SubmitField("Submit")
class CharacterSearchForm(FlaskForm):
fname = StringField("First Name", validators=[validators.Optional()])
lname = StringField("Last Name", validators=[validators.Optional()])
actors = SelectMultipleField("Actors", coerce=int, validators=[validators.Optional()])
species = SelectMultipleField("Species", coerce=int, validators=[validators.Optional()])
series = SelectMultipleField("Series", coerce=int, validators=[validators.Optional()])
affiliations = SelectMultipleField("Affiliations", coerce=int, validators=[validators.Optional()])
submit = SubmitField("Search")
class LinkForm(FlaskForm):
entity1 = SelectField("", coerce=int, validators=[validators.Required()])
entity2 = SelectField("", coerce=int, validators=[validators.Required()])
submit = SubmitField("Add Link")
``` |
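The forms above leave their `SelectMultipleField` choices empty, so a view has to populate them before validation. A hypothetical Flask view (route, template name and choice data are illustrative assumptions, not part of STForms.py) could look like this:
```python
# Hypothetical view built on the forms above; template and choices are assumptions.
from flask import render_template
from STForms import app, CharacterSearchForm

@app.route("/characters/search", methods=["GET", "POST"])
def character_search():
    form = CharacterSearchForm()
    # Choices must be set before validate_on_submit(), otherwise validation fails.
    form.actors.choices = [(1, "Example Actor")]
    form.species.choices = [(1, "Human"), (2, "Vulcan")]
    form.series.choices = [(1, "TOS"), (2, "TNG")]
    form.affiliations.choices = [(1, "Starfleet")]
    if form.validate_on_submit():
        return "Searching for {}".format(form.fname.data or "any character")
    return render_template("character_search.html", form=form)
```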
{
"source": "jojanper/draalcore",
"score": 2
} |
#### File: draalcore/draalcore/app_config.py
```python
from django.apps import AppConfig
from draalcore.exceptions import ActionError
class BaseAppConfig(AppConfig):
"""Base application configuration"""
    # Whether Django should select this configuration class automatically;
    # concrete app configs deriving from this base should enable it.
default = False
# Application is public, that is, exposed to API to some extent
public_app = True
    # Display name for the app's API calls; this name is used to identify the app in the URL.
# Model related actions use the app_label from Meta as display name.
# The API calls here are not attached to any model.
display_name = None
# List of public actions (authentication required) available for this application
actions = []
# List of public actions (no authentication required) available for this application
noauth_actions = []
def serialize_all_actions(self, serializer_fn):
"""
Serialize application level actions with and without authentication requirement.
Parameters
----------
serializer_fn
Serializer implementation for action object.
Returns
-------
dict
Key describes name of action and value action details.
"""
data = self.serialize_actions(serializer_fn, False)
data.update(self.serialize_actions(serializer_fn, True))
return data
def serialize_actions(self, serializer_fn, noauth=False):
"""
Serialize application level actions.
Parameters
----------
serializer_fn
Serializer implementation for action object.
noauth
            True if actions that do not require authentication are to be serialized, False otherwise.
Returns
-------
dict
Key describes name of action and value action details.
"""
if noauth:
actions = self.noauth_actions
name = 'rest-api-app-public-action'
else:
actions = self.actions
name = 'rest-api-app-action'
data = {}
for action in actions:
resolve_kwargs = {
'app': self.display_name,
'action': action.ACTION
}
data[action.ACTION] = serializer_fn(action, name, resolve_kwargs)
data[action.ACTION].update({'authenticate': not noauth})
return data
def get_action_obj(self, request_obj, method):
action = request_obj.kwargs['action']
noauth = request_obj.kwargs.get('noauth', False)
actions = self.noauth_actions if noauth else self.actions
for action_cls in actions:
if action_cls.match_action(action) and method in action_cls.ALLOWED_METHODS:
return action_cls(request_obj, action_cls.MODEL)
msg = "{} action for '{}' application using HTTP {} is not supported via the API"\
.format(action, self.display_name, method.upper())
raise ActionError(msg)
```
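A concrete application config based on `BaseAppConfig` typically just fills in the class attributes; the sketch below is hypothetical (app name, display name and action lists are assumptions, not part of draalcore).
```python
# Hypothetical app config deriving from BaseAppConfig.
from draalcore.app_config import BaseAppConfig

class ReportsConfig(BaseAppConfig):
    default = True                 # let Django pick this config automatically
    name = 'myproject.reports'     # Django app path (assumption)
    display_name = 'reports'       # identifies the app in the API URLs
    actions = []                   # action classes requiring authentication
    noauth_actions = []            # action classes exposed without authentication
```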
#### File: draalcore/auth/backend.py
```python
import logging
from django.contrib.auth.models import User
from django.contrib.auth.backends import BaseBackend
__author__ = "<NAME>"
__copyright__ = "Copyright 2013-2016, 2021"
__email__ = "<EMAIL>"
__status__ = "Development"
logger = logging.getLogger(__name__)
class BaseAuthBackend(BaseBackend):
"""
Base class for custom authentication.
"""
def authenticate(self, request, username=None, password=<PASSWORD>):
# As Django goes through all authentication backends, limit backend usage
# only to social authentication
customBackend = request.META.get('IS_SOCIAL', False) if request else False
if not customBackend:
return None
return User.objects.get(username=username)
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
class GoogleOAuth2Backend(BaseAuthBackend):
"""
Authenticate user based on Google OAuth2 authentication information.
"""
class TwitterOAuthBackend(BaseAuthBackend):
"""
Authenticate user based on Twitter authentication information.
"""
class FacebookOAuthBackend(BaseAuthBackend):
"""
Authenticate user based on Facebook authentication information.
"""
class OneDriveOAuth2Backend(BaseAuthBackend):
"""
Authenticate user based on OneDrive authentication information.
"""
```
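These backends only take effect when listed in the project settings; a hypothetical settings fragment, using the module path implied by the file location, is sketched below.
```python
# Hypothetical Django settings fragment (draalcore/auth/backend.py -> draalcore.auth.backend).
AUTHENTICATION_BACKENDS = [
    'draalcore.auth.backend.GoogleOAuth2Backend',
    'draalcore.auth.backend.TwitterOAuthBackend',
    'draalcore.auth.backend.FacebookOAuthBackend',
    'draalcore.auth.backend.OneDriveOAuth2Backend',
    'django.contrib.auth.backends.ModelBackend',  # keep regular username/password login
]
```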
#### File: auth/registration/actions.py
```python
from django.conf import settings
from django.db import transaction
from collections import OrderedDict
from django.contrib.auth.models import User
from django.template.loader import render_to_string
# Project imports
from draalcore.mailer import Mailer
from draalcore.exceptions import ModelManagerError
from draalcore.auth.models import UserAccountProfile
from draalcore.rest.actions import CreateActionWithParameters
from draalcore.models.fields import StringFieldType, NotNullable
class RegisterUserAction(CreateActionWithParameters):
ACTION = 'register'
MODEL = UserAccountProfile
DISPLAY_NAME = 'Register new user'
PARAMETERS = OrderedDict([
('username', (StringFieldType, NotNullable)),
('email', (StringFieldType, NotNullable)),
('password', (StringFieldType, NotNullable)),
('first_name', (StringFieldType, NotNullable)),
('last_name', (StringFieldType, NotNullable))
])
@transaction.atomic
def _execute(self):
username = self.request_obj.data_params['username']
if User.objects.filter(username=username).exists():
err_text = 'Username {} is already reserved, please select another name'.format(username)
raise ModelManagerError(err_text)
obj = self.MODEL.objects.register_user(**self.request_obj.data_params)
message = self._get_email_message(obj)
Mailer(settings.ACCOUNT_ACTIVATION_SUBJECT).send_message(message, [obj.user.email])
return 'Check your email! An activation link has been sent to the email ' \
'address you supplied, along with instructions for activating your account.'
def _get_email_message(self, model_obj):
context = {
'activation_key': model_obj.activation_key,
'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,
'activation_url': settings.ACTIVATION_URL
}
return render_to_string('registration/activation_email.txt', context)
class ActivateUserAction(CreateActionWithParameters):
ACTION = 'activate'
MODEL = UserAccountProfile
DISPLAY_NAME = 'Activate user account'
PARAMETERS = OrderedDict([
('activation_key', (StringFieldType, NotNullable))
])
@transaction.atomic
def _execute(self):
return self.MODEL.objects.activate_user(**self.request_obj.data_params)
```
#### File: auth/sites/base_auth.py
```python
import logging
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login
# Project imports
from draalcore.exceptions import ExtAuthError
logger = logging.getLogger(__name__)
class Base3rdPartyAuth(object):
PROVIDER = None
BACKEND = None
def get_callback_url(self):
action = 'callback-{}'.format(self.PROVIDER)
return '{}{}'.format(settings.EXT_AUTH_CALLBACK_URL, action)
def get_redirect_url(self, request):
return request.GET.get('next', '')
def get_user(self, user_details):
try:
user = User.objects.get(username=user_details['username'])
user.first_name = user_details['first_name']
user.last_name = user_details['last_name']
user.password = settings.SOCIAL_AUTH_USER_PASSWORD
user.save()
except User.DoesNotExist:
user = User.objects.create_user(username=user_details['username'],
email=user_details['email'],
password=settings.SOCIAL_AUTH_USER_PASSWORD,
first_name=user_details['first_name'],
last_name=user_details['last_name'])
return user
def authenticate(self, request, username, password=None):
request.META['IS_SOCIAL'] = True
user = authenticate(request, username=username, password=settings.SOCIAL_AUTH_USER_PASSWORD)
login(request, user, backend=self.BACKEND)
return user
def login_failure(self):
raise ExtAuthError('Login failed, please try again')
```
#### File: auth/tests/test_auth_factory.py
```python
from draalcore.test_utils.basetest import BaseTest
from draalcore.auth.sites import AuthFactory
class AuthFactoryTestCase(BaseTest):
def test_create(self):
"""Authentication instance is created"""
obj = AuthFactory.create('google')
self.assertTrue(obj is not None)
```
#### File: draalcore/factory/base_factory.py
```python
from abc import ABCMeta
from django.db.models.query import QuerySet
# Project imports
from draalcore.cache.cache import CacheBase
__author__ = "<NAME>"
__copyright__ = "Copyright 2013"
__email__ = "<EMAIL>"
__status__ = "Development"
class QueryResult(object):
"""Encapsulates queryset or query object"""
def __init__(self, query, cached=False, is_object=False):
self._query = query
self._cached = cached
self._is_object = is_object
@classmethod
def create(cls, response):
if isinstance(response, cls):
return response
is_object = not isinstance(response, QuerySet)
return cls(response, is_object=is_object)
@property
def query(self):
"""Return queryset or object"""
return self._query
@property
def cached(self):
"""Return True if result is cached version, False otherwise"""
return self._cached is True
@property
def is_object(self):
return self._is_object
class FactoryBase(CacheBase):
"""
Base class for a factory. Factory is a collection of one or more
model managers that comprise the required interface implementation.
"""
__metaclass__ = ABCMeta
manager = None
def __init__(self, manager):
super(FactoryBase, self).__init__()
self.manager = manager
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, self.manager)
def model_history(self, model_id):
"""Return the model events"""
return QueryResult(self.manager.history(model_id=model_id))
def do_query(self, req_obj):
"""
Call either factory or manager method and return result
as QueryResult object.
"""
ref_obj = self.manager if not req_obj.is_factory else self
response = getattr(ref_obj, req_obj.method)(*req_obj.args, **req_obj.kwargs)
return QueryResult.create(response)
class Factory(FactoryBase):
"""Generic factory class"""
pass
```
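A small sketch of how `QueryResult.create` normalises manager output; it assumes a configured Django project and uses the stock `User` model purely for illustration.
```python
# Illustrative only; requires Django settings to be configured.
from django.contrib.auth.models import User
from draalcore.factory.base_factory import QueryResult

qs_result = QueryResult.create(User.objects.all())     # QuerySet -> is_object False
obj_result = QueryResult.create(User.objects.first())  # single object -> is_object True

assert not qs_result.is_object
assert obj_result.is_object
```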
#### File: draalcore/middleware/current_user.py
```python
from threading import local
# Project imports
from draalcore.middleware.base import BaseMiddleware
__author__ = "<NAME>"
__copyright__ = "Copyright 2014,2021"
__email__ = "<EMAIL>"
__status__ = "Development"
_thread_locals = local()
def get_current_user():
"""Returns the current user, if exist, otherwise None"""
return getattr(_thread_locals, "user", None)
def set_current_user(user):
_thread_locals.user = user
def get_current_request():
"""Returns the HTTP request, if exist, otherwise None"""
return getattr(_thread_locals, "request", None)
class CurrentUserMiddleware(BaseMiddleware):
def process_request(self, request):
_thread_locals.user = request.user
_thread_locals.request = request
```
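With `CurrentUserMiddleware` installed, model code can pick up the requesting user without passing it around explicitly; the model below is a hypothetical illustration, not part of draalcore.
```python
# Hypothetical model using the thread-local helper above; requires
# 'draalcore.middleware.current_user.CurrentUserMiddleware' in MIDDLEWARE.
from django.db import models
from draalcore.middleware.current_user import get_current_user

class Note(models.Model):
    text = models.TextField()
    modified_by = models.ForeignKey('auth.User', null=True, on_delete=models.SET_NULL)

    def save(self, *args, **kwargs):
        user = get_current_user()
        if user is not None and user.is_authenticated:
            self.modified_by = user
        super().save(*args, **kwargs)
```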
#### File: draalcore/models/admin_log.py
```python
from django.contrib import admin
from django.contrib.admin.models import DELETION
from django.utils.html import escape
from django.urls import reverse, NoReverseMatch
class LogEntryAdmin(admin.ModelAdmin):
date_hierarchy = 'action_time'
readonly_fields = ['user', 'content_type', 'object_id', 'change_message', 'object_repr', 'action_flag']
list_filter = [
'content_type'
]
search_fields = [
'user__username',
'change_message'
]
list_display = [
'action_time',
'user',
'content_type',
'object_link',
'change_message',
]
def has_add_permission(self, request):
return False
def has_change_permission(self, request, obj=None):
return request.user.is_superuser and request.method != 'POST'
def has_delete_permission(self, request, obj=None):
return False
def object_link(self, obj):
if obj.action_flag == DELETION:
link = escape(obj.object_repr)
else:
ct = obj.content_type
try:
link = u'<a href="%s">%s</a>' % (
reverse('admin:%s_%s_change' % (ct.app_label, ct.model), args=[obj.object_id]),
escape(obj.object_repr),
)
except NoReverseMatch:
link = u'Unknown'
return link
object_link.allow_tags = True
object_link.admin_order_field = 'object_repr'
object_link.short_description = u'object'
def get_queryset(self, request):
return super(LogEntryAdmin, self).get_queryset(request).prefetch_related('content_type')
```
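The file defines the `ModelAdmin` but does not register it; a typical registration (how the project actually wires it up is an assumption) would be:
```python
# Hypothetical registration of the admin class above.
from django.contrib import admin
from django.contrib.admin.models import LogEntry
from draalcore.models.admin_log import LogEntryAdmin

admin.site.register(LogEntry, LogEntryAdmin)
```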
#### File: draalcore/rest/apps.py
```python
from django.apps import AppConfig
class RestConfig(AppConfig):
default = True
name = 'draalcore.rest'
label = 'draalcore_rest'
def ready(self):
# Import signal handlers
from draalcore.rest.handlers import create_auth_token # noqa
```
#### File: draalcore/rest/file_upload.py
```python
import os
import logging
from django.core.files.uploadedfile import UploadedFile
# Project imports
from draalcore.exceptions import AppException
from draalcore.rest.mixins import PostMixin
from draalcore.rest.response_data import ResponseData
from draalcore.rest.views import RestAPIBasicAuthView
logger = logging.getLogger(__name__)
class NginxUploadedFile(UploadedFile):
"""
Construct File instances that point at the uploaded (handled by Nginx) files
and have the metadata parsed out of the passed parameters.
"""
def __init__(self, path, name, content_type, size, charset):
file = open(path, 'rb')
super(NginxUploadedFile, self).__init__(file, name, content_type, size, charset)
def temporary_file_path(self):
return self.file.name
def close(self):
try:
return self.file.close()
except OSError as e:
if e.errno != 2:
# Means the file was moved or deleted before the tempfile
# could unlink it. Still sets self.file.close_called and
# calls self.file.file.close() before the exception
raise
class FileLoader(object):
def __init__(self, request, file_identifier):
self._request = request
self.file_identifier = file_identifier
@property
def request(self):
return self._request
def get_upload_file(self):
"""Application handles the file upload"""
if self.request.FILES is None or self.request.FILES.get(self.file_identifier, None) is None:
raise AppException('No files attached')
return UploadedFile(self.request.FILES.get(self.file_identifier, None))
def get_nginx_file(self):
"""Nginx server handled the upload, determine the file details from POST parameters"""
path = self.request.POST.get(self.file_identifier + '.path', None)
if path:
logger.debug('Nginx upload file {} found'.format(path))
file_size = os.path.getsize(path)
filename = self.request.POST.get(self.file_identifier + '.name', 'noname')
content_type = self.request.POST.get(self.file_identifier + '.content_type', '')
return NginxUploadedFile(path, filename, content_type, file_size, 'utf8')
# This ensures backward compatibility
return self.get_upload_file()
def get_file(self):
"""
        Interface for receiving uploaded files from clients. This targets an Nginx-specific configuration
        where the web server first handles the file upload, stores it in a directory and then
        lets the Django app handle the file from there on. Also includes backwards compatibility
        with app-based upload handling.
"""
obj = self.get_nginx_file()
obj.name = self.request.POST.get('filename', obj.name)
obj.name = obj.name.replace('(', '').replace(')', '')
return obj
class FileUploadHandler(PostMixin, RestAPIBasicAuthView):
"""
ReST base class for file uploading.
Implementing class must have method defined by UPLOAD_METHOD.
"""
UPLOAD_METHOD = '_upload'
def _post(self, request_obj):
request = request_obj.request
try:
fn = getattr(self, self.UPLOAD_METHOD)
except AttributeError:
raise AppException('Upload implementation missing, please contact application support')
obj = FileLoader(request, 'file').get_file()
return ResponseData(fn(obj.name, obj, request_obj))
```
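A concrete handler only needs to provide the `_upload` method named by `UPLOAD_METHOD`; the sketch below is hypothetical (storage location and return payload are assumptions).
```python
# Hypothetical upload handler built on FileUploadHandler.
import os
from django.conf import settings
from draalcore.rest.file_upload import FileUploadHandler

class AvatarUploadHandler(FileUploadHandler):
    """Stores the uploaded file under MEDIA_ROOT/avatars and reports its size."""

    def _upload(self, filename, file_obj, request_obj):
        target = os.path.join(settings.MEDIA_ROOT, 'avatars', filename)
        os.makedirs(os.path.dirname(target), exist_ok=True)
        with open(target, 'wb') as out:
            for chunk in file_obj.chunks():
                out.write(chunk)
        return {'name': filename, 'size': file_obj.size}
```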
#### File: draalcore/rest/model_serializers.py
```python
import json
import logging
from rest_framework import serializers
from django.contrib.admin.models import LogEntry
# Project imports
from .actions import ActionsSerializer
from .base_serializers import DynamicFieldsModelSerializer
logger = logging.getLogger(__name__)
class ActionsUrlSerializer(DynamicFieldsModelSerializer):
"""Base serializer class for model actions"""
actions = serializers.SerializerMethodField('field_actions')
def field_actions(self, obj):
return ActionsSerializer.serialize_model_id_actions(self.Meta.model, obj.id)
def field_impl(field):
"""
Provide serialization implementation for specified model field data.
Parameters
----------
field
Name of field of the serialization data.
"""
def set_field(obj):
return getattr(obj, 'serialize_{}'.format(field))()
return set_field
class ModelSerializer(ActionsUrlSerializer):
"""
Base serializer class for application models. Support for BaseDetails model details and actions serialization.
"""
# Dynamically create custom serializer fields from model configuration
DYNAMIC_FIELDS_SETUP = True
def __init__(self, *args, **kwargs):
if self.DYNAMIC_FIELDS_SETUP:
# Fields for which serialization method is needed
fields = list(set(self.Meta.model.ADDITIONAL_SERIALIZE_FIELDS) - set(['actions']))
for field in fields:
# Create field serialization method only if not already specified
if not hasattr(self, field):
# Name of serialization method
method_name = 'field_{}'.format(field)
# Add to serializer fields
self._declared_fields[field] = serializers.SerializerMethodField(method_name)
# Provide implementation
setattr(self, method_name, field_impl(field))
# Instantiate the superclass normally
super(ModelSerializer, self).__init__(*args, **kwargs)
modified_by = serializers.SerializerMethodField('field_modified_by')
last_modified = serializers.SerializerMethodField('field_last_modified')
def field_modified_by(self, obj):
user = obj.modified_by
return user.first_name + ' ' + user.last_name if user.first_name else user.username
def field_last_modified(self, obj):
return str(obj.last_modified)
class HistorySerializer(ModelSerializer):
"""
Serialize model change history. The data consists of 'events', 'last_modified',
    and 'modified_by' fields. The first one is a list of strings or dicts describing
    the fields that have changed, the second one is the timestamp when the change
    occurred and the last one describes the user responsible for the change or
event message.
"""
DYNAMIC_FIELDS_SETUP = False
events = serializers.SerializerMethodField("field_events")
last_modified = serializers.SerializerMethodField("field_last_modified")
class Meta:
depth = 1
model = LogEntry
fields = ['modified_by', 'last_modified', 'events']
def field_modified_by(self, obj):
return obj.user.first_name + ' ' + obj.user.last_name if obj.user.first_name else obj.user.username
def field_last_modified(self, obj):
return obj.action_time
def field_events(self, obj):
try:
return json.loads(obj.change_message)
except ValueError:
return obj.change_message
```
#### File: draalcore/rest/serializer_history_object.py
```python
import logging
# Project imports
from .req_query import QueryRequest
from draalcore.factory import Factory
from draalcore.rest.model_serializers import HistorySerializer
from draalcore.rest.serializer_object import SerializerDataItemObject, SerializerPaginatorMixin
logger = logging.getLogger(__name__)
class SerializerDataItemHistoryObject(SerializerPaginatorMixin, SerializerDataItemObject):
"""
Base class for serializing model data item history. The class fetches history for model
based on its ID.
"""
has_history = True
@classmethod
def create(cls, request_obj, model_cls):
obj = cls(request_obj)
obj.factory = Factory(model_cls.objects)
obj.serializer = HistorySerializer
return obj
def get_request_object(self):
"""Request queryset that returns the model changes/events"""
return QueryRequest(method='model_history', is_factory=True,
query_kwargs={'model_id': self.data_id})
```
#### File: admin/tests/test_admin.py
```python
import logging
# Project imports
from draalcore.test_utils.basetest import BaseTestUser
from draalcore.test_apps.test_models.tests.utils.mixins import TestModelMixin
logger = logging.getLogger(__name__)
class AdminAppTestCase(TestModelMixin, BaseTestUser):
"""Admin app tests"""
APP = 'admin'
def test_unsupported_action(self):
"""Actions for unsupported applications are queried"""
# GIVEN app that does not have application level actions
app = 'dummy'
# WHEN quering the aplication level actions
response = self.api.app_actions(app)
# THEN it should fail
self.assertTrue(response.error)
def test_admin_app_actions(self):
"""Admin app actions requiring user authentication are queried"""
# GIVEN admin app
# WHEN quering the application level actions
response = self.api.app_actions(self.APP)
# THEN it should succeed
self.assertTrue(response.success)
# -----
for action, data in response.data.items():
# WHEN calling available actions
response = self.api.app_action(self.APP, action, data['method'], data=None)
# THEN it should succeed
self.assertTrue(response.success)
# AND response data is available
self.assertEqual(len(response.data), 1)
# -----
# WHEN calling action using HTTP method that is not supported
response = self.api.app_action(self.APP, action, 'GET')
# THEN it should fail
self.assertTrue(response.error)
def test_admin_app_public_actions(self):
"""Public admin actions are queried"""
# GIVEN unauthenticated user
self.logout()
# WHEN quering the application level actions
response = self.api.app_public_actions(self.APP)
# THEN it should succeed
self.assertTrue(response.success)
# AND expected action data is received
self.assertTrue('admin-public-action' in response.data)
self.assertFalse(response.data['admin-public-action']['authenticate'], False)
def test_admin_app_public_action(self):
"""Public admin action is executed"""
# GIVEN unauthenticated user
self.logout()
# WHEN executing action
kwargs = {'data': None}
response = self.api.app_public_action(self.APP, 'admin-public-action', 'post', **kwargs)
# THEN it should succeed
self.assertTrue(response.success)
# AND expected action data is received
self.assertTrue('Ok' in response.data)
```
#### File: test_models/tests/test_serializerobject.py
```python
import logging
import importlib
from mock import patch, MagicMock
# Project imports
from ..models import TestModel2, TestModel5, TestModel6
from .utils.mixins import TestModelMixin
from draalcore.test_utils.basetest import BaseTestUser
from draalcore.rest.model import SerializerFinder
from draalcore.rest.serializer_object import SerializerDataObject
logger = logging.getLogger(__name__)
class ModelSerializerObjectTestCase(TestModelMixin, BaseTestUser):
"""Model has SERIALIZER_OBJECT attribute defined."""
def initialize(self):
super(ModelSerializerObjectTestCase, self).initialize()
TestModel6.objects.create(name='test6')
def test_object_loading(self):
"""Serializer object is loaded."""
# GIVEN model class that has no serializer object attribute defined
model_cls = TestModel2
# WHEN retrieving model serializer object
cls = SerializerFinder(model_cls).object
# THEN no object is returned
self.assertTrue(cls is None)
# ----------
# GIVEN model class that has serializer object attribute defined but not implemented
model_cls = TestModel5
# WHEN retrieving model serializer object
cls = SerializerFinder(model_cls).object
# THEN no object is returned
self.assertTrue(cls is None)
# ----------
# GIVEN model class that has valid serializer object attribute defined
model_cls = TestModel6
# WHEN retrieving model serializer object
cls = SerializerFinder(model_cls).object
# THEN serializer object is returned
self.assertTrue(cls is not None)
@patch.object(importlib, 'import_module')
def test_object_loading_import_error(self, import_module):
"""Import error occurs when serializer object is loaded."""
# GIVEN import error occurs when importing serializer object for model class
model_cls = TestModel6
import_module.side_effect = ImportError()
# WHEN retrieving model serializer object
cls = SerializerFinder(model_cls).object
# THEN no object is returned
self.assertTrue(cls is None)
def test_object_serializer(self):
"""Data is serialized through serializer object."""
# GIVEN model class that has valid serializer object attribute
params = {}
model_name = TestModel6._meta.db_table
# WHEN fetching listing data
response = self.api.GET(self.app_label, model_name, params)
# THEN it should succeed
self.assertTrue(response.success)
# AND correct data is returned
self.assertEqual(len(response.data[0].keys()), 6)
# ----------
# GIVEN fields restriction for data listing
params['fields'] = 'name,request_user'
# WHEN fetching listing data
response = self.api.GET(self.app_label, model_name, params)
# THEN it should succeed
self.assertTrue(response.success)
# AND correct data is returned
keys = response.data[0].keys()
self.assertEqual(len(keys), 2)
self.assertEqual(set(keys), set(['request_user', 'name']))
class ModelSerializerDataObjectTestCase(TestModelMixin, BaseTestUser):
"""SerializerDataObject tests."""
def initialize(self):
super(ModelSerializerDataObjectTestCase, self).initialize()
self.test_model_name = 'test6'
TestModel6.objects.create(name=self.test_model_name)
self.test_model_name2 = 'test62'
TestModel6.objects.create(name=self.test_model_name2)
def test_set_query(self):
"""Custom query is specified for serialization object"""
request_obj = MagicMock(kwargs={})
# GIVEN query is set for serializer
obj = SerializerDataObject.create(request_obj, TestModel6)
obj.set_query('test_model6', dict())
# WHEN query data is serialized
data = obj.serialize().data
# THEN it should return all items
self.assertEqual(len(data), 2)
self.assertEqual(data[0]['name'], self.test_model_name)
self.assertEqual(data[1]['name'], self.test_model_name2)
```
#### File: test_apps/test_models/urls.py
```python
from django.conf.urls import url
from draalcore.rest.mixins import GetMixin, PutMixin, PostMixin, PatchMixin, DeleteMixin
from draalcore.rest.handlers import FileUploadHandler, RestAPIBasicAuthView, AppActionsPermission
__author__ = "<NAME>"
__copyright__ = "Copyright 2015"
__email__ = "<EMAIL>"
__status__ = "Development"
class TestUploadHandler(FileUploadHandler):
"""File upload handler that does not have upload method defined"""
UPLOAD_METHOD = 'none'
class TestUploadHandler2(FileUploadHandler):
"""File upload handler that includes also upload method"""
def _upload(self, filename, file_obj, request_obj):
pass
class FileUploadPermission(AppActionsPermission):
"""Custom permission for file upload"""
perms_map = {
'POST': ['upload-permission']
}
class TestUploadHandler3(FileUploadHandler):
"""File upload handler with custom permission"""
permission_classes = (FileUploadPermission, )
class InvalidAPIHandler(GetMixin, PutMixin, PatchMixin, PostMixin, DeleteMixin, RestAPIBasicAuthView):
"""API handler that does not implement any HTTP methods"""
pass
urlpatterns = [
url(r'^file-upload-invalid$', TestUploadHandler.as_view(), name='test-file-upload'),
url(r'^file-upload-valid$', TestUploadHandler2.as_view(), name='test-file-upload2'),
url(r'^file-upload-permission$', TestUploadHandler3.as_view(), name='test-file-upload3'),
url(r'^invalid-http-api$', InvalidAPIHandler.as_view(), name='invalid-http-api')
]
```
#### File: draalcore/test_utils/upload.py
```python
import os
TEST_FILE_IMAGE = os.path.join(os.path.dirname(__file__), 'pic.jpg')
TEST_FILE_CONTENT_HEADER = 'attachment; filename="pic.jpg"'
TEST_FILE_INVALID = os.path.join(os.path.dirname(__file__), 'test.invalid')
TEST_FILE_GIF = os.path.join(os.path.dirname(__file__), 'pic.gif')
TEST_FILE_MP3 = os.path.join(os.path.dirname(__file__), 'audio.mp3')
TEST_FILE_MP4 = os.path.join(os.path.dirname(__file__), 'video.mp4')
def upload_file(api, method='test_upload1', with_file=True, test_file='test1', **kwargs):
if test_file == 'test1':
upload_file = TEST_FILE_IMAGE
elif test_file == 'test3':
upload_file = TEST_FILE_GIF
elif test_file == 'audio':
upload_file = TEST_FILE_MP3
elif test_file == 'video':
upload_file = TEST_FILE_MP4
else:
upload_file = TEST_FILE_INVALID
with open(upload_file, 'rb') as fp:
attachment = {"name": "test upload"}
if with_file:
attachment['file'] = fp
return getattr(api, method)(attachment, **kwargs)
```
#### File: draalcore/project/urls.py
```python
from django.contrib import admin
from django.conf import settings
from django.shortcuts import render
from django.conf.urls import include, url
from django.contrib.auth import views as auth_views
from draalcore.views.baseviews import BaseView
__author__ = "<NAME>"
__copyright__ = "Copyright 2013-2016,2021"
__email__ = "<EMAIL>"
__status__ = "Development"
admin.autodiscover()
class DummyView(BaseView):
def get(self, request, *args, **kwargs):
return render(request, '')
urlpatterns = [
url(r'^$', DummyView.as_view(), name='main-view'),
url(r'^admin/', admin.site.urls),
url(r'^api/', include('draalcore.rest.urls')),
url(r'^login/$', auth_views.LoginView.as_view(template_name=''), name='auth-login'),
url(r'^settings$', DummyView.as_view(), name='settings-view'),
]
# ReST APIs for testing
if settings.TEST_URLS:
urlpatterns += [url(r'^test-api/', include('draalcore.test_apps.test_models.urls'))]
if settings.ENABLE_DEBUG_TOOLBAR:
import debug_toolbar
urlpatterns += [url(r'^djdt/', include(debug_toolbar.urls))]
``` |
{
"source": "Jojas/GribApi.NET",
"score": 2
} |
#### File: examples/python/multi_write.py
```python
import traceback
import sys
from gribapi import *
INPUT='../../data/sample.grib2'
OUTPUT='out.mw.grib'
VERBOSE=1 # verbose error reporting
def example():
fin = open(INPUT)
fout = open(OUTPUT,'w')
gid = grib_new_from_file(fin)
mgid = grib_multi_new()
for step in range(12,132,12):
grib_set(gid,"step",step)
grib_multi_append(gid,4,mgid)
grib_multi_write(mgid,fout)
grib_multi_release(mgid)
grib_release(gid)
fin.close()
fout.close()
def main():
try:
example()
except GribInternalError,err:
if VERBOSE:
traceback.print_exc(file=sys.stderr)
else:
print >>sys.stderr,err.msg
return 1
if __name__ == "__main__":
sys.exit(main())
```
#### File: examples/python/set.py
```python
import traceback
import sys
from gribapi import *
from datetime import date
INPUT='../../data/regular_latlon_surface_constant.grib1'
OUTPUT='out.set.grib'
VERBOSE=1 # verbose error reporting
def example():
fin = open(INPUT)
fout = open(OUTPUT,'w')
gid = grib_new_from_file(fin)
dt = date.today()
today = "%d%02d%02d" % (dt.year,dt.month,dt.day)
grib_set(gid,'dataDate',int(today))
grib_set(gid,'centre',80)
centreIntVal = grib_get(gid,'centre',int)
centreStrVal = grib_get(gid,'centre',str)
dateStrVal = grib_get(gid,'dataDate',str)
assert(centreIntVal == 80)
assert(centreStrVal == 'cnmc')
assert(dateStrVal == today)
print 'get centre as an integer - centre = %d' % centreIntVal
print 'get centre as a string - centre = %s' % centreStrVal
print 'get date as a string - date = %s' % dateStrVal
# Now do the same but using set_key_vals, setting keys all at once
grib_set_key_vals(gid, 'level=1,centre=98') # with a String
assert(grib_get(gid,'centre',str) == 'ecmf')
assert(grib_get(gid,'level',int) == 1)
grib_set_key_vals(gid, ['level=2', 'centre=kwbc']) # with a Tuple
assert(grib_get(gid,'centre',int) == 7)
assert(grib_get(gid,'level',int) == 2)
grib_set_key_vals(gid, {'level': 3, 'centre': 84}) # with a Dictionary
assert(grib_get(gid,'centre',str) == 'lfpw')
assert(grib_get(gid,'level',int) == 3)
grib_gts_header(True)
grib_gts_header(False)
grib_write(gid,fout)
grib_release(gid)
fin.close()
fout.close()
def main():
try:
example()
except GribInternalError,err:
if VERBOSE:
traceback.print_exc(file=sys.stderr)
else:
print >>sys.stderr,err.msg
return 1
if __name__ == "__main__":
sys.exit(main())
```
#### File: grib_api/python/test_general.py
```python
import sys
from gribapi import *
import random
import traceback
VERBOSE=1
WRITE=0
class Usage(Exception):
def __init__(self):
pass
def test():
# test new from sample
#grib_release(grib_new_from_samples("GRIB2"))
if len(sys.argv) < 2:
raise Usage
infile = sys.argv[1]
outfile = infile + ".out"
multifile = infile + ".multi"
clonefile = infile + ".clone"
binstrfile = infile + ".binstr"
fid = open(infile,"r")
while 1:
gid = grib_new_from_file(fid)
if not gid: break
grib_release(gid)
fid.close()
fid = open(infile,"r")
out = open(outfile,"w")
    multi = open(multifile,"w")
clone_fid = open(clonefile,"w")
binstr_fid = open(binstrfile,"w")
#print "Writing from binary string to ",binstrfile
#x = grib_read_file(fid)
#print len(x)
#grib_write_file(binstr_fid,x)
#grib_close_file(binstr_fid)
print "Operating on file '%s'" % infile
n = grib_count_in_file(fid)
print "Message count ",n
# multi support test
grib_multi_support_on()
ismulti = "no" if (n == grib_count_in_file(fid)) else "yes"
print "Is multi field - %s" % ismulti
grib_multi_support_off()
# end multi support test
# gribex support test on/off
print "Gribex support on/off"
grib_gribex_mode_on()
    grib_gribex_mode_off()
# end gribex support test
print "Browsing through messages "
for i in range(n):
gid = grib_new_from_file(fid)
#grib_dump(gid)
#grib_print(gid,"centre")
if i == 0:
print "Message size: ",grib_get_message_size(gid)
nval = grib_get_size(gid,"values")
print "Number of values in message %d is %d" % (i,nval)
print "== %s %s %s %d ==" % \
( \
grib_get_string(gid,"shortName"), \
grib_get_string(gid,"name"), \
grib_get_string(gid,"typeOfLevel"), \
grib_get_long(gid,"level"), \
)
print "Nearest point to 10,10: "
print grib_find_nearest(gid,10,10)
print grib_find_nearest(gid,10,10,npoints=4)
rand_list = []
for i in range(0,5):
rand_index = random.randint(1,nval)
rand_list.append(rand_index)
myval = grib_get_double_element(gid,"values",rand_index)
print "Random index value[%d] = %.8f" % (rand_index,myval)
all4rand = grib_get_double_elements(gid,"values",rand_list)
print "All at once index values: ",all4rand
centre = grib_get_string(gid,"centre")
grib_set_string(gid,"centre","ecmf")
new_centre = grib_get_string(gid,"centre")
print "Before/after string centre: %s/%s" % (centre,new_centre)
centre = grib_get_long(gid,"centre")
grib_set_long(gid,"centre",99)
new_centre = grib_get_long(gid,"centre")
print "Before/after numeric centre: %d/%d" % (centre,new_centre)
centre = grib_get_double(gid,"centre")
grib_set_double(gid,"centre",9)
new_centre = grib_get_double(gid,"centre")
print "Before/after numeric floating point centre: %f/%f" % (centre,new_centre)
vals = grib_get_double_array(gid,"values")
print "Values before: ",vals[:10]
grib_set_double_array(gid,"values",(1.0, 2.0, 3.14))
vals = grib_get_double_array(gid,"values")
print "Values after: ",vals[:10]
print "Saving modified message to %s" % outfile
if WRITE: grib_write(gid,out)
print "Creating and saving a clone to %s" % clonefile
clone_gid = grib_clone(gid)
if WRITE: grib_write(clone_gid,clone_fid)
grib_release(clone_gid)
Ni = grib_get(gid,"Ni")
print "Setting Ni to missing from --> ",Ni
grib_set_missing(gid,"Ni")
assert grib_is_missing(gid,"Ni")
miss_Ni = grib_get(gid,"Ni")
print "Ni is now --> ",miss_Ni
grib_set(gid,"Ni",Ni)
new_Ni = grib_get(gid,"Ni")
print "Set Ni back to its original value --> ",new_Ni
assert Ni == new_Ni
print "Check some keys to see if they are defined"
assert grib_is_defined(gid,"Ni")
assert grib_is_defined(gid,"edition")
assert not grib_is_defined(gid,"DarkThrone")
#grib_multi_write(gid,multi)
grib_release(gid)
fid.close()
out.close()
clone_fid.close()
print "Closed file"
def main():
try:
test()
except GribInternalError,err:
if VERBOSE:
traceback.print_exc(file=sys.stderr)
else:
print >>sys.stderr,err.msg
return 1
except Usage:
print "Usage: %s infile" % sys.argv[0]
sys.exit(2)
if __name__ == "__main__":
main()
print "------------------------------------"
``` |
{
"source": "jojees/housekeeper",
"score": 2
} |
#### File: mgmt/api/api.py
```python
from flask import Flask, request
from flasgger import Swagger
from nameko.standalone.rpc import ClusterRpcProxy
app = Flask(__name__)
app.config['SWAGGER'] = {
'title': 'SkyHigh Network Operations',
'description': 'House Keeper Backend API\'s'
}
Swagger(app)
CONFIG = {'AMQP_URI': "amqp://shnops:[email protected]"}
@app.route('/compute', methods=['POST'])
def compute():
"""
Micro Service Based Compute and Mail API
This API is made with Flask, Flasgger and Nameko
---
parameters:
- name: body
in: body
required: true
schema:
id: data
properties:
operation:
type: string
enum:
- sum
- mul
- sub
- div
email:
type: string
value:
type: integer
other:
type: integer
responses:
200:
description: Please wait the calculation, you'll receive an email with results
"""
operation = request.json.get('operation')
value = request.json.get('value')
other = request.json.get('other')
email = request.json.get('email')
msg = "Please wait the calculation, you'll receive an email with results"
subject = "API Notification"
with ClusterRpcProxy(CONFIG) as rpc:
        # asynchronously spawning an email notification
rpc.mail.send.async(email, subject, msg)
# asynchronously spawning the compute task
result = rpc.compute.compute.async(operation, value, other, email)
return msg, 200
@app.route('/confluence', methods=['POST'])
def confluencepublish():
"""
Micro Service Based Confluence API
This API is made with Flask, Flasgger and Nameko
---
parameters:
- name: body
in: body
required: true
schema:
id: data
properties:
operation:
type: string
enum:
- sum
- mul
- sub
- div
email:
type: string
value:
type: integer
other:
type: integer
responses:
200:
description: Please wait the calculation, you'll receive an email with results
"""
operation = request.json.get('operation')
value = request.json.get('value')
other = request.json.get('other')
email = request.json.get('email')
msg = "Please wait the calculation, you'll receive an email with results"
subject = "API Notification"
with ClusterRpcProxy(CONFIG) as rpc:
        # asynchronously spawning an email notification
rpc.mail.send.async(email, subject, msg)
# asynchronously spawning the compute task
result = rpc.compute.compute.async(operation, value, other, email)
return msg, 200
@app.route('/exporttopdf', methods=['POST'])
def exporttopdf():
"""
Micro Service Based Confluence API
This API is made with Flask, Flasgger and Nameko
---
parameters:
- name: body
in: body
required: true
schema:
id: data
properties:
operation:
type: string
enum:
- sum
- mul
- sub
- div
email:
type: string
value:
type: integer
other:
type: integer
responses:
200:
description: Please wait the calculation, you'll receive an email with results
"""
operation = request.json.get('operation')
value = request.json.get('value')
other = request.json.get('other')
email = request.json.get('email')
msg = "Please wait the calculation, you'll receive an email with results"
subject = "API Notification"
with ClusterRpcProxy(CONFIG) as rpc:
        # asynchronously spawning an email notification
rpc.mail.send.async(email, subject, msg)
# asynchronously spawning the compute task
result = rpc.compute.compute.async(operation, value, other, email)
return msg, 200
@app.route('/publishtoflowdock', methods=['POST'])
def publishtoflowdock():
"""
Micro Service Based Confluence API
This API is made with Flask, Flasgger and Nameko
---
parameters:
- name: body
in: body
required: true
schema:
id: data
properties:
operation:
type: string
enum:
- sum
- mul
- sub
- div
email:
type: string
value:
type: integer
other:
type: integer
responses:
200:
description: Please wait the calculation, you'll receive an email with results
"""
operation = request.json.get('operation')
value = request.json.get('value')
other = request.json.get('other')
email = request.json.get('email')
msg = "Please wait the calculation, you'll receive an email with results"
subject = "API Notification"
with ClusterRpcProxy(CONFIG) as rpc:
        # asynchronously spawning an email notification
rpc.mail.send.async(email, subject, msg)
# asynchronously spawning the compute task
result = rpc.compute.compute.async(operation, value, other, email)
return msg, 200
@app.route('/pagerduty', methods=['POST'])
def pagerduty():
"""
file: specifications/pagerduty_post.yml
"""
operation = request.json.get('operation')
value = request.json.get('value')
other = request.json.get('other')
email = request.json.get('email')
msg = "Please wait the calculation, you'll receive an email with results"
subject = "API Notification"
with ClusterRpcProxy(CONFIG) as rpc:
        # asynchronously spawning an email notification
rpc.mail.send.async(email, subject, msg)
# asynchronously spawning the compute task
result = rpc.compute.compute.async(operation, value, other, email)
return msg, 200
app.run(host='0.0.0.0', debug=True)
```
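A hypothetical client call against the `/compute` endpoint above; host and port assume the Flask defaults used by `app.run()` (port 5000), and the email address is a placeholder.
```python
# Illustrative client for the /compute endpoint.
import requests

payload = {
    "operation": "sum",
    "value": 40,
    "other": 2,
    "email": "user@example.com",
}
resp = requests.post("http://localhost:5000/compute", json=payload)
print(resp.status_code, resp.text)  # expect 200 and the "please wait" message
```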
#### File: web/housekeeper_web/list_schedules.py
```python
import requests
import json
# Update to match your API key
API_KEY = '<KEY>'
# Update to match your chosen parameters
QUERY = ''
def list_schedules():
url = 'https://api.pagerduty.com/schedules'
headers = {
'Accept': 'application/vnd.pagerduty+json;version=2',
'Authorization': 'Token token={token}'.format(token=API_KEY)
}
payload = {
'query': QUERY
}
r = requests.get(url, headers=headers, params=payload)
print 'Status Code: {code}'.format(code=r.status_code)
schedules = r.json()
print json.dumps(schedules['schedules'])
if __name__ == '__main__':
list_schedules()
```
#### File: web/housekeeper_web/web.py
```python
from flask import Flask, render_template
from icalendar import Calendar, Event
from datetime import date, datetime, timedelta
import os
app = Flask(__name__)
@app.route('/')
def index():
return render_template('home.html',
title = 'SHN OPS')
@app.route('/hello')
def hello():
return render_template('hello.html')
@app.route('/sre')
def sre():
return render_template('sre.html',
title = 'Site Reliability Analysis')
@app.route('/inventory')
def inventory():
return render_template('inventory.html',
title = 'Inventory')
@app.route('/tools')
def tools():
return render_template('tools.html',
title = 'Tools')
@app.route('/tools/oc-logs')
def oc_schedule():
today = date.today()
first_day = today - timedelta(days=2)
last_day = today + timedelta(days=1)
SITE_ROOT = os.path.realpath(os.path.dirname(__file__))
ical_url = os.path.join(SITE_ROOT, "data", "OpsSchedule.ics")
g = open(ical_url,'rb')
gcal = Calendar.from_ical(g.read())
onc_events = []
for component in gcal.walk():
if component.name == "VEVENT":
if ((component.get('DTSTART').dt).date() >= first_day) and ((component.get('DTEND').dt).date() <= last_day):
event = {}
event['attendee'] = component.get('attendee')
event['uid'] = component.get('uid')
event['url'] = component.get('url')
event['start'] = component.get('DTSTART').dt
event['end'] = component.get('DTEND').dt
# event['start'] = (component.get('DTSTART').dt).strftime("%Y-%m-%d %H:%M:%S")
# event['end'] = (component.get('DTEND').dt).strftime("%Y-%m-%d %H:%M:%S")
event['summary'] = component.get('summary')
onc_events.append(event)
g.close()
return render_template('sre_onc.html',
title = 'On-Call Logs',
entries=onc_events)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5001, debug=True)
``` |
{
"source": "jojees/operations",
"score": 2
} |
#### File: webapp/indicators/indicators.py
```python
from flask import Blueprint
from flask import render_template
from flask import current_app as app
from ..api import get_indicators, get_patterns
# Blueprint Configuration
indicators_bp = Blueprint(
'indicators_bp', __name__,
template_folder='templates',
static_folder='static'
)
@indicators_bp.route('/', defaults={'ind': 'default'})
@indicators_bp.route('/<ind>')
def indicators(ind):
"""Indicator Page."""
indicators = get_indicators(app)
app.logger.info("Indicator is : {}".format(ind))
return render_template(
'indicators.jinja2',
title='Indicators',
subtitle='Indicators.',
template='indicators-template',
indicators=indicators,
patterns=get_patterns(app),
INDI=ind
)
```
#### File: stock_analysis/webapp/__init__.py
```python
import os
import yaml
from flask import Flask
from flask_assets import Environment
import logging.config
def setup_logging(default_path='webapp/logging.yaml', default_level=logging.INFO, env_key='LOG_CFG'):
path = default_path
value = os.getenv(env_key, None)
if value:
path = value
if os.path.exists(path):
with open(path, 'rt') as f:
try:
config = yaml.safe_load(f.read())
logging.config.dictConfig(config)
except Exception as e:
print(e)
print('Error in Logging Configuration. Using default configs')
logging.basicConfig(level=default_level)
else:
logging.basicConfig(level=default_level)
print('Failed to load configuration file. Using default configs:'+ path)
def init_app():
"""Create Flask application."""
app = Flask(__name__, instance_relative_config=True)
app.config.from_object("config.Config")
assets = Environment()
assets.init_app(app)
setup_logging()
with app.app_context():
# Import parts of our application
from .assets import compile_static_assets
from .dashboard import dashboard
from .indicators import indicators
from .admin import admin
# from .strategies import strategies
# from .prediction import prediction
# from .symbols import symbols
# from .profile import profile
# Register Blueprints
app.register_blueprint(admin.admin_bp, url_prefix='/admin')
app.register_blueprint(dashboard.dashboard_bp, url_prefix='/')
app.register_blueprint(indicators.indicators_bp, url_prefix='/indicators')
# app.register_blueprint(candlestickpattern.candlestickpattern_bp)
# Compile static assets
compile_static_assets(assets)
return app
``` |
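setup_logging() above loads a dictConfig-style mapping from webapp/logging.yaml. As an illustration only (the project's actual file is not shown here), a minimal configuration of that shape looks like this, expressed as a Python dict:
```python
import logging.config

# Illustrative dictConfig mapping of the kind setup_logging() expects to find
# in webapp/logging.yaml; handler and level choices here are assumptions.
LOGGING_CONFIG = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "default": {"format": "%(asctime)s %(name)s %(levelname)s %(message)s"},
    },
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
            "formatter": "default",
            "level": "INFO",
        },
    },
    "root": {"level": "INFO", "handlers": ["console"]},
}

logging.config.dictConfig(LOGGING_CONFIG)
```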
{
"source": "Jojendersie/Epsilon-Intersection",
"score": 3
} |
#### File: Epsilon-Intersection/doc/epsilon_pretty_printing.py
```python
import gdb.printing
import gdb
class EiMatrixPrinter:
"Prints ei::Matrix<> type"
def __init__(self, val):
self.val = val
# Does not work: https://stackoverflow.com/questions/26472066/gdb-pretty-printing-returning-string-from-a-childrens-iterator-but-displaye/29752860
def children(self):
rows = self.val.type.template_argument(1)
cols = self.val.type.template_argument(2)
data = self.val["m_data"]
for y in range(rows):
line = ''
for x in range(cols):
idx = x+y*cols
yield (str(idx), data[idx])
def to_string(self):
return 'Matrix'
#rows = self.val.type.template_argument(1)
#cols = self.val.type.template_argument(2)
#ps = '['
#data = self.val["m_data"]
#if rows == 1 or cols == 1:
# for i in range(rows * cols):
# ps += str(data[i])
# if (i+1) != rows*cols: ps += ', '
# if rows > 1: ps += "]'"
# else: ps += ']'
#else:
# for y in range(rows):
# for x in range(cols):
# ps += "%12.6g" % self.val["m_data"][x+y*cols] # Multiversion line, looks good on hover, but fails in oneline views
# if x != cols-1: ps += ' '
# if y != rows-1: ps +=']\n['
# else: ps += ']'
#return ps
def build_pretty_printer():
pp = gdb.printing.RegexpCollectionPrettyPrinter(
"Epsilon Intersection")
pp.add_printer('ei::Matrix', '^ei::Matrix.*$', EiMatrixPrinter)
return pp
``` |
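The module above only builds the printer collection; GDB still has to be told about it. A minimal registration sketch using the standard GDB Python API, typically placed in an objfile auto-load script or .gdbinit:
```python
import gdb.printing

# Registers the collection built above with the current objfile so GDB applies
# it when printing ei::Matrix values; build_pretty_printer() is defined above.
gdb.printing.register_pretty_printer(
    gdb.current_objfile(),
    build_pretty_printer())
```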
{
"source": "jojenreed/Python-CLI-Dictionary",
"score": 4
} |
#### File: jojenreed/Python-CLI-Dictionary/dictionary.py
```python
import json
from difflib import get_close_matches
def definition(word):
'''This function returns the available definition(s) of the input word'''
if word in data:
return data[word]
elif word.lower() in data:
return data[word.lower()]
# Generate suggestions for user
elif len(get_close_matches(word, data.keys(), cutoff=0.8)) > 0:
choice = input("Did you mean to type %s ?(y/n):"
% get_close_matches(word, data.keys(), cutoff=0.8)[0])
choice = choice.lower()
if choice == 'y':
ip = get_close_matches(word, data.keys(), cutoff=0.8)[0]
return(data[ip])
elif choice == 'n':
return("Please try again with the correct spelling")
else:
return("Invalid input!")
else:
return("Could not find a similar word!!")
# Load dictionary data from data.json to python dictionary
data = json.load(open('data.json', 'r'))
# Infinite loop for processing
while True:
ip = input("Enter word:(!q to quit) ")
# Exit from program - user choice
if ip == '!q' or ip == '!Q':
break
else:
# Check dictionary for definition
output = definition(ip)
if type(output) == list:
for i in output:
print(i)
else:
print(output)
``` |
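The suggestion logic relies on difflib.get_close_matches, which ranks candidates by similarity ratio and drops anything below the cutoff. A small standalone demonstration with the same 0.8 cutoff used above:
```python
from difflib import get_close_matches

# get_close_matches(word, possibilities, n=3, cutoff=0.6) returns up to n
# candidates whose similarity ratio to `word` is at least `cutoff`.
print(get_close_matches("recieve", ["receive", "recipe", "river"], cutoff=0.8))
# ['receive']  -- "recipe" and "river" fall below the 0.8 cutoff
```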
{
"source": "jojeya/python-sdk",
"score": 3
} |
#### File: testproject/enums/environmentvariable.py
```python
import os
from enum import Enum, unique
@unique
class EnvironmentVariable(Enum):
"""Enumeration of environment variable names used in the SDK"""
TP_TEST_NAME = "TP_TEST_NAME"
TP_PROJECT_NAME = "TP_PROJECT_NAME"
TP_JOB_NAME = "TP_JOB_NAME"
def remove(self):
"""Try and remove the environment variable, proceed if the variable doesn't exist"""
try:
os.environ.pop(self.value)
except KeyError:
pass
```
#### File: testproject/helpers/confighelper.py
```python
import os
import logging
from src.testproject import definitions
from src.testproject.sdk.exceptions import SdkException
class ConfigHelper:
"""Contains helper methods for SDK configuration"""
@staticmethod
def get_agent_service_address() -> str:
"""Returns the Agent service address as defined in the TP_AGENT_URL environment variable.
Defaults to http://127.0.0.1:8585 (localhost)
Returns:
str: the Agent service address
"""
address = os.getenv("TP_AGENT_URL")
if address is None:
logging.info(
"No Agent service address found in TP_AGENT_URL environment variable, "
"defaulting to http://127.0.0.1:8585 (localhost)"
)
return "http://127.0.0.1:8585"
# Replace 'localhost' with '127.0.0.1' to prevent delays as a result of DNS lookups
address = address.replace("localhost", "127.0.0.1")
logging.info(f"Using {address} as the Agent URL")
return address
@staticmethod
def get_developer_token() -> str:
"""Returns the TestProject developer token as defined in the TP_DEV_TOKEN environment variable
Returns:
str: the developer token
"""
token = os.getenv("TP_DEV_TOKEN")
if token is None:
logging.error("No developer token was found, did you set it in the TP_DEV_TOKEN environment variable?")
logging.error("You can get a developer token from https://app.testproject.io/#/integrations/sdk?lang=Python")
raise SdkException("No development token defined in TP_DEV_TOKEN environment variable")
return token
@staticmethod
def get_sdk_version() -> str:
"""Returns the SDK version as defined in the definitions module
Returns:
str: the current SDK version
"""
return definitions.get_sdk_version()
```
#### File: rest/messages/stepreport.py
```python
import uuid
class StepReport:
"""Payload object sent to the Agent when reporting a test step.
Args:
description (str): The step description
message (str): A message that goes with the step
passed (bool): True if the step should be marked as passed, False otherwise
screenshot (str): A base64 encoded screenshot that is associated with the step
Attributes:
_description (str): The step description
_message (str): A message that goes with the step
_passed (bool): True if the step should be marked as passed, False otherwise
_screenshot (str): A base64 encoded screenshot that is associated with the step
"""
def __init__(self, description: str, message: str, passed: bool, screenshot: str = None):
self._description = description
self._message = message
self._passed = passed
self._screenshot = screenshot
def to_json(self) -> dict:
"""Generates a dict containing the JSON representation of the step payload"""
json = {
"guid": str(uuid.uuid4()),
"description": self._description,
"message": self._message,
"passed": self._passed,
}
if self._screenshot is not None:
json["screenshot"] = self._screenshot
return json
```
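A short usage sketch for the StepReport payload above; the import path is assumed from the file layout and may need adjusting:
```python
# from src.testproject.rest.messages.stepreport import StepReport  # path assumed

report = StepReport(
    description="Click the login button",
    message="Button was clicked successfully",
    passed=True,
)
payload = report.to_json()
# payload is a dict with a fresh "guid" plus the fields above;
# "screenshot" is only present when one was supplied.
print(payload["description"], payload["passed"])
```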
#### File: drivers/android/android_driver_test.py
```python
import os
import pytest
from src.testproject.sdk.drivers import webdriver
from tests.pageobjects.android import LoginPage, ProfilePage
@pytest.fixture
def driver():
app_activity = os.environ.get("TP_ANDROID_AUT_ACTIVITY", None)
app_package = os.environ.get("TP_ANDROID_AUT_PACKAGE", None)
emulator_id = os.environ.get("TP_ANDROID_DUT_UDID", None)
if not all([app_activity, app_package, emulator_id]):
raise KeyError("Not all environment variables were set correctly.")
desired_capabilities = {
"appActivity": app_activity,
"appPackage": app_package,
"udid": emulator_id,
"browserName": "",
"platformName": "Android",
"unicodeKeyboard": "true",
"resetKeyboard": "true"
}
driver = webdriver.Remote(desired_capabilities=desired_capabilities)
driver.start_activity(app_package=app_package, app_activity=app_activity)
yield driver
driver.close_app()
driver.quit()
def test_example_on_native_android_app(driver):
LoginPage(driver).login_as("<NAME>", "12345")
profile_page = ProfilePage(driver)
profile_page.update_profile(
"United States",
"Street name and number",
"<EMAIL>",
"+1 555 555 55",
)
assert profile_page.saved_message_is_displayed() is True
``` |
{
"source": "JojiJohnson/Python-Basics",
"score": 4
} |
#### File: Python-Basics/Codes/basic translator.py
```python
def translate(phrase):
translation = ""
for letter in phrase:
if letter in "AEIOUaeiou": #also if letter.lower() in "aeiou":
if letter.isupper():
translation = translation + "G"
else:
translation = translation + "g"
else:
translation = translation + letter
return translation
print (translate(input("Enter the phrase: ")))
```
#### File: Python-Basics/Codes/Chef.py
```python
class Chef:
def make_chicken(self):
print ("Chef makes Chicken")
def make_salad(self):
print ("Chef makes Salad")
def make_special_dish(self):
print ("Chef makes bbq ribs")
```
#### File: Python-Basics/Codes/if statements and comparisons.py
```python
print("Comparing nos")
def max_num(num1, num2, num3):
if num1 >= num2 and num1 >= num3:
return num1
elif num2 >= num1 and num2 >= num3:
return num2
else:
return num3
print(max_num(3, 401 , 7))
print("Comparing Strings")
def equal_str(str1, str2):
if str1 == str2:
return "Equal Strings"
else:
return "Unequal Strings"
print (equal_str("dog", "dog"))
``` |
{
"source": "JojiJoseph/Deep-RL",
"score": 3
} |
#### File: Deep-RL/DDPG/buffer.py
```python
import numpy as np
class ReplayBuffer:
def __init__(self, action_dim, state_dim, size=10_000):
self.idx = 0
self.action_dim = action_dim
self.state_dim = state_dim
self.size = size
self.states = np.zeros([size, state_dim])
self.actions = np.zeros([size, action_dim])
self.rewards = np.zeros((size,))
self.next_states = np.zeros([size, state_dim])
self.dones = np.zeros((size,))
self.choice_from = [x for x in range(size)]
def add(self, state, action, reward, next_state, done):
idx = self.idx
self.states[idx] = state
self.actions[idx] = action
self.rewards[idx] = reward
self.next_states[idx] = next_state
self.dones[idx] = done
self.idx += 1
if self.idx >= self.size:
self.idx = 0
def get_batch(self, batch_size=128, rg=None):
if rg is None:
indices = np.random.choice(self.choice_from, batch_size)
else:
indices = np.random.choice(self.choice_from[:rg], batch_size)
state_batch = self.states[indices]
action_batch = self.actions[indices]
reward_batch = self.rewards[indices]
next_batch = self.next_states[indices]
done_batch = self.dones[indices]
return state_batch, action_batch, reward_batch, next_batch, done_batch
```
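A minimal usage sketch for the ReplayBuffer above, filled with random transitions; the dimensions are arbitrary and the class is assumed to be in scope:
```python
import numpy as np

# Dimensions are arbitrary; ReplayBuffer from the module above must be in scope.
buffer = ReplayBuffer(action_dim=1, state_dim=3, size=1000)

# Fill part of the buffer with fake transitions.
for _ in range(200):
    buffer.add(np.random.randn(3),   # state
               np.random.randn(1),   # action
               np.random.randn(),    # reward
               np.random.randn(3),   # next state
               0.0)                  # done flag

# Restrict sampling to the filled portion via rg.
states, actions, rewards, next_states, dones = buffer.get_batch(batch_size=64, rg=200)
print(states.shape, actions.shape)  # (64, 3) (64, 1)
```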
#### File: Deep-RL/SAC/net.py
```python
import torch
import torch.nn as nn
import numpy as np
class Actor(nn.Module):
def __init__(self, state_dim, action_dim, size=256):
super().__init__()
self.state_dim = state_dim
self.action_dim = action_dim
self.l1 = nn.Linear(state_dim, size)
self.l2 = nn.Linear(size, size)
self.mu = nn.Linear(size, action_dim)
self.log_std = nn.Linear(size, action_dim)
self.normal = torch.distributions.Normal(0, 1)
def forward(self, x):
y = torch.relu(self.l1(x))
y = torch.relu(self.l2(y))
mu = self.mu(y)
log_std = self.log_std(y)
return mu, log_std
def get_action(self, x, eval=False):
mu, log_std = self.forward(x)
if eval:
return torch.tanh(mu), None
log_std = torch.clamp(log_std, -20, 2)
dist = torch.distributions.Normal(mu, torch.exp(log_std))
action = dist.rsample()
log_prob = dist.log_prob(action).sum(axis=-1)
log_prob -= torch.log(1 - torch.tanh(action)**2 + 1e-9).sum(axis=-1)
action = torch.tanh(action)
return action, log_prob
class Critic(nn.Module):
def __init__(self, state_dim, action_dim, size=256):
super().__init__()
self.l1 = nn.Linear(state_dim + action_dim, size)
self.l2 = nn.Linear(size, size)
self.l3 = nn.Linear(size, 1)
def forward(self, s, a):
x = torch.cat((s, a), dim=-1)
y = torch.relu(self.l1(x))
y = torch.relu(self.l2(y))
y = self.l3(y)
return y
```
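A quick shape check for the SAC actor and critic above; state and action sizes are arbitrary and the classes are assumed to be in scope:
```python
import torch

# Sizes are arbitrary; Actor and Critic from the module above must be in scope.
state_dim, action_dim = 8, 2
actor = Actor(state_dim, action_dim)
critic = Critic(state_dim, action_dim)

state = torch.randn(4, state_dim)            # batch of 4 states
action, log_prob = actor.get_action(state)   # stochastic, squashed to (-1, 1)
q_value = critic(state, action)

print(action.shape, log_prob.shape, q_value.shape)
# torch.Size([4, 2]) torch.Size([4]) torch.Size([4, 1])
```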
#### File: Deep-RL/VPG/buffer.py
```python
import numpy as np
class RolloutBuffer:
def __init__(self, action_dim, state_dim, size=10_000, batch_size=100):
self.idx = 0
self.action_dim = action_dim
self.state_dim = state_dim
self.size = size
self.batch_size = batch_size
assert size % batch_size == 0, "Buffer size should be divisible by batch size"
self.states = np.zeros([size, state_dim])
self.actions = np.zeros([size, action_dim])
self.rewards = np.zeros((size,))
self.next_states = np.zeros([size, state_dim])
self.dones = np.zeros((size,))
self.advantages = np.zeros((size,))
self.returns = np.zeros((size,))
self.values = np.zeros((size,))
def add(self, state, action, reward, next_state, done, val):
idx = self.idx
self.states[idx] = state
self.actions[idx] = action
self.rewards[idx] = reward
self.next_states[idx] = next_state
self.dones[idx] = done
self.values[idx] = val
self.idx += 1
if self.idx > self.size:
self.idx = 0
def calc_advatages(self, last_value=0, gamma=0.99, lda=0.95):
n = self.idx
prev_adv = 0 # Hardcoded
for i in range(n - 1, -1, -1):
delta = self.rewards[i] + gamma * last_value * \
(1 - self.dones[i]) - self.values[i]
adv = delta + lda * gamma * (1 - self.dones[i]) * prev_adv
prev_adv = adv
last_value = self.values[i]
self.advantages[i] = adv
self.returns[i] = adv + self.values[i]
def clear(self):
self.idx = 0
def __iter__(self):
self.idx = 0
return self
def __next__(self):
idx, batch_size = self.idx, self.batch_size
if self.idx + self.batch_size <= len(self.states):
state_batch = self.states[idx:idx + batch_size]
action_batch = self.actions[idx:idx + batch_size]
adv_batch = self.advantages[idx:idx + batch_size]
ret_batch = self.returns[idx:idx + batch_size]
done_batch = self.dones[idx:idx + batch_size]
next_batch = self.next_states[idx:idx + batch_size]
state_batch = state_batch.reshape((-1, self.state_dim))
action_batch = action_batch.reshape((-1, self.action_dim))
adv_batch = adv_batch.reshape((-1,))
ret_batch = ret_batch.reshape((-1,))
self.idx += batch_size
return state_batch, action_batch, next_batch, done_batch, adv_batch, ret_batch
else:
raise StopIteration
``` |
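A minimal rollout sketch for the RolloutBuffer above, with the environment interaction replaced by random data; the dimensions and hyperparameters are illustrative and the class is assumed to be in scope:
```python
import numpy as np

# Dimensions are illustrative; RolloutBuffer from the module above must be in scope.
buf = RolloutBuffer(action_dim=1, state_dim=3, size=1000, batch_size=100)

# Collect exactly `size` fake transitions (state, action, reward, next state,
# done flag and the critic's value estimate for the state).
for _ in range(1000):
    buf.add(np.random.randn(3), np.random.randn(1), np.random.randn(),
            np.random.randn(3), 0.0, 0.0)

# Bootstrap from the value of the final state, then compute GAE targets.
buf.calc_advatages(last_value=0.0, gamma=0.99, lda=0.95)

for states, actions, next_states, dones, advantages, returns in buf:
    print(states.shape, advantages.shape)  # (100, 3) (100,)
```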
{
"source": "JojiKoike/OMWebAppEngine",
"score": 3
} |
#### File: response/body/body.py
```python
from bpmappers import Mapper, DelegateField
from .default_values import DefaultValues, DefaultValuesMapper
from .select_options import SelectOptions, SelectOptionsMapper
from .solution_options import SolutionOptions, SolutionOptionsMapper
from .results import SimulationResults, SimulationResultsMapper
class SimulationResponseBody(object):
"""
Simulation result response body object
"""
def __init__(self, results: SimulationResults) -> None:
"""
Constructor
:param results: the simulation results
"""
self.results = results
class SimulationResponseBodyMapper(Mapper):
"""
Mapper object that binds the simulation result response to JSON
"""
results = DelegateField(SimulationResultsMapper, attach_parent=True)
class UISetValueResponseBody(object):
"""
UI initial value response body object
"""
def __init__(self,
default_values: DefaultValues,
select_options: SelectOptions,
solution_options: SolutionOptions) -> None:
"""
Constructor
:param default_values: default values
:param select_options: simulation option choices
:param solution_options: available result items
"""
self.default_values = default_values
self.select_options = select_options
self.solution_options = solution_options
class UISetValueResponseBodyMapper(Mapper):
"""
UI initial value response body mapper object
"""
default_values = DelegateField(DefaultValuesMapper)
select_options = DelegateField(SelectOptionsMapper)
solution_options = DelegateField(SolutionOptionsMapper)
```
#### File: response/body/default_values.py
```python
from typing import Dict, Union
from bpmappers import Mapper, RawField
class DefaultValues(object):
"""
Object that stores the default values
"""
def __init__(self,
simulation_options: Dict[str, Union[float, str]],
parameters: Dict[str, float]) -> None:
self.simulation_options = simulation_options
self.parameters = parameters
class DefaultValuesMapper(Mapper):
"""
Default values mapper object
"""
simulation_options: Dict[str, Union[float, str]] = RawField('simulation_options')
parameters: Dict[str, float] = RawField('parameters')
```
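Following the Mapper(obj).as_dict() pattern used elsewhere in this project, serializing the entity above is a one-liner; the option and parameter values below are illustrative:
```python
# Values are illustrative; DefaultValues and DefaultValuesMapper are defined above.
defaults = DefaultValues(
    simulation_options={"startTime": 0.0, "stopTime": 10.0, "solver": "dassl"},
    parameters={"m": 1.0, "k": 2.0},
)
print(DefaultValuesMapper(defaults).as_dict())
# {'simulation_options': {...}, 'parameters': {'m': 1.0, 'k': 2.0}}
```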
#### File: app/omwebapp/__init__.py
```python
import os
import sys
import traceback
from typing import Dict, List, Optional, Union
from flask import Flask
from flask_cors import CORS
from flask_restful import Api
from OMPython import ModelicaSystem
from .config.routing import configure_routing
from .config.model import get_model_map, get_default_input_values_map, get_available_solutions_map
def create_app(test_config=None) -> Flask:
"""
Startup processing for the Flask application under the built-in test server environment
:param test_config: test configuration
:return: Flask application instance
"""
# Create and Configure Application
app: Flask = Flask(__name__, instance_relative_config=True)
app.config.from_mapping(
SECRET_KEY='dev',
)
# The built-in dev servers of frameworks such as Angular and React are
# unavoidably treated as a different machine than the Flask built-in server,
# so CORS is allowed only for the Flask built-in server configuration.
CORS(app)
api: Api = Api(app)
if test_config is None:
# Load the instance config, if it exists, when not testing
app.config.from_pyfile('config.py', silent=True)
else:
# Load the test config if passed in
app.config.from_mapping(test_config)
# ensure the instance folder exists
try:
os.makedirs(app.instance_path)
except OSError:
pass
# Modelica Model Activation
model_map: Optional[Dict[str, ModelicaSystem]] = None
try:
model_map = get_model_map()
except Exception as e:
print(e)
sys.stderr.write(traceback.format_exc())
# Model Default Option and Param Values Map
default_input_values_map: Optional[Dict[str, Dict[str, Union[str, int, float]]]] = None
try:
default_input_values_map = get_default_input_values_map(model_map)
except Exception as e:
print(e)
sys.stderr.write(traceback.format_exc())
# Model Available Solutions Map
available_solutions_map: Optional[Dict[str, List[str]]] = None
try:
available_solutions_map = get_available_solutions_map(model_map)
except Exception as e:
print(e)
sys.stderr.write(traceback.format_exc())
# Configure Routing
configure_routing(api, model_map, default_input_values_map, available_solutions_map)
return app
```
#### File: omwebapp/tests/conftest.py
```python
import pytest
from flask import Flask
from app.omwebapp import create_app
@pytest.fixture
def app() -> Flask:
return create_app()
@pytest.fixture
def client(app: Flask):
return app.test_client()
```
#### File: entity/request/test_request.py
```python
import unittest
from app.omwebapp.entity.request.request import RunSimulationRequestMapper
class SimpleMSDRequestTestCase(unittest.TestCase):
def test_request_simple_msd(self):
# Prepare Request JSON Data
request: dict = {"simulation_input": {
"head": {
"index": 0,
"simulation_model_name": "SimpleMSD",
"version": "1.0.0"
},
"body": {
"simulation_options": {
"startTime": 0.0,
"stopTime": 10.0,
"stepSize": 0.02,
"tolerance": 0.000001,
"solver": "dassl"
},
"parameters": {
"m": 1.0,
"k": 2.0,
"c": 1.0,
"v0": 5.0
},
"results_options": {
"target_results": ["time", "v", "x"]
}
}
}}
# Parse JSON
result = RunSimulationRequestMapper(request).as_dict()
# Assertion
head = result['simulation_input']['head']
self.assertEqual(head['index'], 0)
self.assertEqual(type(head['index']), int)
self.assertEqual(head['simulation_model_name'], 'SimpleMSD')
self.assertEqual(type(head['simulation_model_name']), str)
self.assertEqual(head['version'], '1.0.0')
self.assertEqual(type(head['version']), str)
body = result['simulation_input']['body']
simulation_options = body['simulation_options']
self.assertEqual(simulation_options['startTime'], 0.0)
self.assertEqual(type(simulation_options['startTime']), float)
self.assertEqual(simulation_options['stopTime'], 10.0)
self.assertEqual(type(simulation_options['stopTime']), float)
self.assertEqual(simulation_options['stepSize'], 0.02)
self.assertEqual(type(simulation_options['stepSize']), float)
self.assertEqual(simulation_options['tolerance'], 0.000001)
self.assertEqual(type(simulation_options['tolerance']), float)
self.assertEqual(simulation_options['solver'], 'dassl')
self.assertEqual(type(simulation_options['solver']), str)
parameters = body['parameters']
self.assertEqual(parameters['m'], 1.0)
self.assertEqual(type(parameters['m']), float)
self.assertEqual(parameters['k'], 2.0)
self.assertEqual(type(parameters['k']), float)
self.assertEqual(parameters['c'], 1.0)
self.assertEqual(type(parameters['c']), float)
self.assertEqual(parameters['v0'], 5.0)
self.assertEqual(type(parameters['v0']), float)
target_results = body["results_options"]["target_results"]
self.assertEqual(len(target_results),
len(request['simulation_input']["body"]["results_options"]["target_results"]))
self.assertEqual(type(target_results), list)
for i in range(len(target_results)):
self.assertEqual(target_results[i],
request['simulation_input']['body']["results_options"]['target_results'][i])
if __name__ == '__main__':
unittest.main()
```
#### File: tests/view/test_hello.py
```python
from flask.testing import FlaskClient
class TestViewHelloGroup(object):
"""
Test group class for the connectivity check API
"""
def test_hello(self, client: FlaskClient):
res = client.get('/hello')
assert res.status_code == 200
assert b'{"hello": "world"}\n' == res.data
```
#### File: omwebapp/util/timer.py
```python
import time
class Timer(object):
"""
Time measurement utility class
"""
def __init__(self, verbose: bool = False) -> None:
"""
Constructor
:param verbose: verbose output (default is False)
"""
self.verbose: bool = verbose
def __enter__(self):
"""
Start timing
:return: the Timer object
"""
self.start: float = time.time()
return self
def __exit__(self, *args) -> None:
"""
Stop timing and compute the elapsed time
:param args
:return: None
"""
self.end: float = time.time()
self.secs: float = self.end - self.start
self.msecs: float = self.secs * 1000
if self.verbose:
print('elapsed time: %f ms' % self.msecs)
```
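A small usage sketch for the Timer context manager above (the class is assumed to be in scope):
```python
import time

# Timer from the module above must be in scope.
with Timer(verbose=True) as t:
    time.sleep(0.25)          # stand-in for the code being measured
# prints something like: elapsed time: 250.xxxxxx ms

print("seconds:", t.secs)     # the timings stay available after the block
```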
#### File: omwebapp/view/view_base.py
```python
from typing import List, Dict, Union, Optional
import sys
import traceback
from http import HTTPStatus
from flask import request
from flask_restful import Resource
from OMPython import ModelicaSystem
from ..entity.response.body.results import SimulationResults
from ..entity.request.request import RunSimulationRequestMapper
from ..entity.response.response import UISetValue, UISetValueResponse, \
UISetValueResponseMapper, SimulationOutput, SimulationResponse, SimulationResponseMapper
from ..entity.response.body.body import UISetValueResponseBody, SimulationResponseBody
from ..entity.response.body.select_options import SelectOptions
from ..entity.response.body.solution_options import SolutionOptions
from ..entity.response.body.default_values import DefaultValues
from ..entity.response.header.header import ResponseHeader
from ..util.logger import get_info_logger
from ..config.constants import TOLERANCE, SOLVER
class ViewBase(Resource):
"""
Base class for views
"""
def __init__(self, **kwargs):
# Initialize the logger
self.logger = get_info_logger(kwargs['name'])
class ViewBase4Modelica(ViewBase):
"""
Base class for the Modelica computation APIs
"""
def __init__(self, **kwargs):
"""
Constructor of the Modelica computation API base class
:param kwargs:
"""
# Initialize the base class
ViewBase.__init__(self, **kwargs)
# Get the ModelicaSystem object of the associated model
self.model: ModelicaSystem = kwargs['model']
# Get the model name of the associated model
self.model_name = kwargs['model_name']
# Initialize the error flag
self.flg_error: bool = False
# Get the default values
self.default_sim_options: Optional[Dict[str, Union[str, int, float]]] = kwargs["default_options"]
self.default_sim_params: Optional[Dict[str, Union[str, int, float]]] = kwargs["default_params"]
# Get the list of available result items
self.available_solutions: Optional[List[str]] = kwargs["available_solutions"]
def get(self):
"""
Returns the setting values used to initialize the UI
:return: JSON string, HTTP response code
"""
# Reset Error Flag
self.flg_error = False
# Header Section
head: Dict[str, Union[str, int]] = {
"index": 0,
"simulation_model_name": self.model_name,
"status": "Success" if not self.flg_error else "Failed",
"version": "1.0.0",
"data_length": 0
}
response_header: ResponseHeader = ResponseHeader(head)
# Default Values
default_values: DefaultValues \
= DefaultValues(self.default_sim_options, self.default_sim_params)
# Select Options
select_options: SelectOptions = SelectOptions(TOLERANCE, SOLVER)
# Solution Options
solution_options: SolutionOptions = SolutionOptions(self.available_solutions)
# Response Body
response_body: UISetValueResponseBody \
= UISetValueResponseBody(default_values, select_options, solution_options)
# Unite Header And Body
ui_set_value: UISetValue = UISetValue(response_header, response_body)
response: UISetValueResponse = UISetValueResponse(ui_set_value)
# Serialize
mapper: UISetValueResponseMapper = UISetValueResponseMapper(response)
return mapper.as_dict(), \
HTTPStatus.INTERNAL_SERVER_ERROR if self.flg_error else HTTPStatus.OK
def post(self):
"""
API definition for running the simulation
:return: JSON string, HTTP response code
"""
# Reset Error Flag
self.flg_error = False
# Get the request JSON
json_data = request.get_json()
request_json_dict = RunSimulationRequestMapper(json_data).as_dict()
# Head
# Commented out for now because it is unused
# head = request_json_dict["simulation_input"]["head"]
# Body
body = request_json_dict["simulation_input"]["body"]
# Simulation Options
simulation_options: Dict[str, Union[str, float]] = body["simulation_options"]
# Parameters
parameters: Dict[str, float] = body["parameters"]
# Target Results
target_results: List[str] = body["results_options"]["target_results"]
try:
# Set Simulation Options
self.model.setSimulationOptions(**simulation_options)
# Set Simulation Parameters
self.model.setParameters(**parameters)
# Execute Calculation
self.model.simulate()
except Exception as e:
self.flg_error = True
self.logger.error("Error: Failed to run simulation. {0}".format(e))
sys.stderr.write(traceback.format_exc())
# Generate Result Object
# Body
results: Optional[Dict[str, List[float]]] = \
{key.replace(".", "_").replace("[", "").replace("]", ""):
self.model.getSolutions(key).tolist() for key in target_results}
simulation_results: SimulationResults = SimulationResults(results)
response_body: SimulationResponseBody = SimulationResponseBody(simulation_results)
# Head
header_values: Dict[str, Union[str, int]] = {
"index": 0,
"simulation_model_name": self.model_name,
"status": "Success" if not self.flg_error else "Failed",
"version": "1.0.0",
"data_length": 0 if results is None else len(results[target_results[0]])
}
response_header: ResponseHeader = ResponseHeader(header_values)
# Unite Head and Body
output: SimulationOutput = SimulationOutput(response_header, response_body)
response: SimulationResponse = SimulationResponse(output)
# Serialize
mapper: SimulationResponseMapper = SimulationResponseMapper(response)
return mapper.as_dict(), \
HTTPStatus.INTERNAL_SERVER_ERROR if self.flg_error else HTTPStatus.CREATED
``` |
{
"source": "jojker/D-VAE",
"score": 2
} |
#### File: src/cifar10/evaluation.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
#import cPickle as pickle
import pickle
import shutil
import sys
import time
import pdb
import numpy as np
import tensorflow as tf
from src import utils
from src.utils import Logger
from src.utils import DEFINE_boolean
from src.utils import DEFINE_float
from src.utils import DEFINE_integer
from src.utils import DEFINE_string
from src.utils import print_user_flags
from src.cifar10.data_utils import read_data
from src.cifar10.general_controller import GeneralController
from src.cifar10.eval_child import GeneralChild
from src.cifar10.micro_controller import MicroController
from src.cifar10.micro_child import MicroChild
sys.argv = sys.argv[:1] # suppress cmd arguments, use default ones defined below
flags = tf.app.flags
FLAGS = flags.FLAGS
DEFINE_boolean("reset_output_dir", False, "Delete output_dir if exists.")
DEFINE_string("data_path", '%s/../../data/cifar10' % os.path.dirname(os.path.realpath(__file__)), "")
DEFINE_string("output_dir", '%s/../../outputs_6' % os.path.dirname(os.path.realpath(__file__)), "")
DEFINE_string("data_format", "NCHW", "'NHWC' or 'NCWH'")
DEFINE_string("search_for", "macro", "Must be [macro|micro]")
DEFINE_integer("batch_size", 128, "")
DEFINE_integer("num_epochs", 10, "")
DEFINE_integer("child_lr_dec_every", 100, "")
DEFINE_integer("child_num_layers", 6, "")
DEFINE_integer("child_num_cells", 5, "")
DEFINE_integer("child_filter_size", 5, "")
DEFINE_integer("child_out_filters", 36, "")
DEFINE_integer("child_out_filters_scale", 1, "")
DEFINE_integer("child_num_branches", 6, "")
DEFINE_integer("child_num_aggregate", None, "")
DEFINE_integer("child_num_replicas", 1, "")
DEFINE_integer("child_block_size", 3, "")
DEFINE_integer("child_lr_T_0", 10, "for lr schedule")
DEFINE_integer("child_lr_T_mul", 2, "for lr schedule")
DEFINE_integer("child_cutout_size", None, "CutOut size")
DEFINE_float("child_grad_bound", 5.0, "Gradient clipping")
DEFINE_float("child_lr", 0.1, "")
DEFINE_float("child_lr_dec_rate", 0.1, "")
DEFINE_float("child_keep_prob", 0.9, "")
DEFINE_float("child_drop_path_keep_prob", 0.6, "minimum drop_path_keep_prob")
DEFINE_float("child_l2_reg", 0.00025, "")
DEFINE_float("child_lr_max", 0.05, "for lr schedule")
DEFINE_float("child_lr_min", 0.0005, "for lr schedule")
DEFINE_string("child_skip_pattern", None, "Must be ['dense', None]")
DEFINE_string("child_fixed_arc", None, "")
DEFINE_string("structure_path", "sample_structures6.txt", "")
DEFINE_boolean("child_use_aux_heads", True, "Should we use an aux head")
DEFINE_boolean("child_sync_replicas", False, "To sync or not to sync.")
DEFINE_boolean("child_lr_cosine", True, "Use cosine lr schedule")
DEFINE_float("controller_lr", 1e-3, "")
DEFINE_float("controller_lr_dec_rate", 1.0, "")
DEFINE_float("controller_keep_prob", 0.5, "")
DEFINE_float("controller_l2_reg", 0.0, "")
DEFINE_float("controller_bl_dec", 0.99, "")
DEFINE_float("controller_tanh_constant", None, "")
DEFINE_float("controller_op_tanh_reduce", 1.0, "")
DEFINE_float("controller_temperature", None, "")
DEFINE_float("controller_entropy_weight", 0.0001, "")
DEFINE_float("controller_skip_target", 0.8, "")
DEFINE_float("controller_skip_weight", 0.0, "")
DEFINE_integer("controller_num_aggregate", 1, "")
DEFINE_integer("controller_num_replicas", 1, "")
DEFINE_integer("controller_train_steps", 50, "")
DEFINE_integer("controller_forwards_limit", 2, "")
DEFINE_integer("controller_train_every", 2,
"train the controller after this number of epochs")
DEFINE_boolean("controller_search_whole_channels", True, "")
DEFINE_boolean("controller_sync_replicas", False, "To sync or not to sync.")
DEFINE_boolean("controller_training", False, "")
DEFINE_boolean("controller_use_critic", False, "")
DEFINE_integer("log_every", 50, "How many steps to log")
DEFINE_integer("eval_every_epochs", 1, "How many epochs to eval")
class Eval(object):
def get_ops(self, images, labels):
"""
Args:
images: dict with keys {"train", "valid", "test"}.
labels: dict with keys {"train", "valid", "test"}.
"""
assert FLAGS.search_for is not None, "Please specify --search_for"
if FLAGS.search_for == "micro":
ControllerClass = MicroController
ChildClass = MicroChild
else:
ControllerClass = GeneralController
ChildClass = GeneralChild
child_model = ChildClass(
images,
labels,
use_aux_heads=FLAGS.child_use_aux_heads,
cutout_size=FLAGS.child_cutout_size,
whole_channels=FLAGS.controller_search_whole_channels,
num_layers=FLAGS.child_num_layers,
num_cells=FLAGS.child_num_cells,
num_branches=FLAGS.child_num_branches,
fixed_arc=FLAGS.child_fixed_arc,
out_filters_scale=FLAGS.child_out_filters_scale,
out_filters=FLAGS.child_out_filters,
keep_prob=FLAGS.child_keep_prob,
drop_path_keep_prob=FLAGS.child_drop_path_keep_prob,
num_epochs=FLAGS.num_epochs,
l2_reg=FLAGS.child_l2_reg,
data_format=FLAGS.data_format,
batch_size=FLAGS.batch_size,
clip_mode="norm",
grad_bound=FLAGS.child_grad_bound,
lr_init=FLAGS.child_lr,
lr_dec_every=FLAGS.child_lr_dec_every,
lr_dec_rate=FLAGS.child_lr_dec_rate,
lr_cosine=FLAGS.child_lr_cosine,
lr_max=FLAGS.child_lr_max,
lr_min=FLAGS.child_lr_min,
lr_T_0=FLAGS.child_lr_T_0,
lr_T_mul=FLAGS.child_lr_T_mul,
optim_algo="momentum",
sync_replicas=FLAGS.child_sync_replicas,
num_aggregate=FLAGS.child_num_aggregate,
num_replicas=FLAGS.child_num_replicas,
)
if FLAGS.child_fixed_arc is None:
controller_model = ControllerClass(
search_for=FLAGS.search_for,
search_whole_channels=FLAGS.controller_search_whole_channels,
skip_target=FLAGS.controller_skip_target,
skip_weight=FLAGS.controller_skip_weight,
num_cells=FLAGS.child_num_cells,
num_layers=FLAGS.child_num_layers,
num_branches=FLAGS.child_num_branches,
out_filters=FLAGS.child_out_filters,
lstm_size=64,
lstm_num_layers=1,
lstm_keep_prob=1.0,
tanh_constant=FLAGS.controller_tanh_constant,
op_tanh_reduce=FLAGS.controller_op_tanh_reduce,
temperature=FLAGS.controller_temperature,
lr_init=FLAGS.controller_lr,
lr_dec_start=0,
lr_dec_every=1000000, # never decrease learning rate
l2_reg=FLAGS.controller_l2_reg,
entropy_weight=FLAGS.controller_entropy_weight,
bl_dec=FLAGS.controller_bl_dec,
use_critic=FLAGS.controller_use_critic,
optim_algo="adam",
sync_replicas=FLAGS.controller_sync_replicas,
num_aggregate=FLAGS.controller_num_aggregate,
num_replicas=FLAGS.controller_num_replicas,
structure_path=FLAGS.structure_path)
child_model.connect_controller(controller_model)
controller_model.build_trainer(child_model)
controller_ops = {
"train_step": controller_model.train_step,
"loss": controller_model.loss,
"train_op": controller_model.train_op,
"lr": controller_model.lr,
"grad_norm": controller_model.grad_norm,
"valid_acc": controller_model.valid_acc,
"optimizer": controller_model.optimizer,
"baseline": controller_model.baseline,
"entropy": controller_model.sample_entropy,
"sample_arc": controller_model.sample_arc,
"sample_arc2": controller_model.sample_arc2,
"sample_arc3": controller_model.sample_arc3,
"skip_rate": controller_model.skip_rate,
"structures": controller_model.structures,
}
else:
assert not FLAGS.controller_training, (
"--child_fixed_arc is given, cannot train controller")
child_model.connect_controller(None)
controller_ops = None
child_ops = {
"child": child_model,
"global_step": child_model.global_step,
"loss": child_model.loss,
"train_op": child_model.train_op,
"lr": child_model.lr,
"grad_norm": child_model.grad_norm,
"train_acc": child_model.train_acc,
"optimizer": child_model.optimizer,
"num_train_batches": child_model.num_train_batches,
}
ops = {
"child": child_ops,
"controller": controller_ops,
"eval_every": child_model.num_train_batches * FLAGS.eval_every_epochs,
"eval_func": child_model.customized_eval_once,
"reset_idx": child_model.reset_idx,
"num_train_batches": child_model.num_train_batches,
}
return ops
def __init__(self):
if FLAGS.child_fixed_arc is None:
images, labels = read_data(FLAGS.data_path)
else:
images, labels = read_data(FLAGS.data_path, num_valids=0)
g = tf.Graph()
with g.as_default():
self.ops = self.get_ops(images, labels)
child_ops = self.ops["child"]
controller_ops = self.ops["controller"]
saver = tf.train.Saver(max_to_keep=2)
checkpoint_saver_hook = tf.train.CheckpointSaverHook(
FLAGS.output_dir, save_steps=child_ops["num_train_batches"]*10000, saver=saver)
hooks = [checkpoint_saver_hook]
if FLAGS.child_sync_replicas:
sync_replicas_hook = child_ops["optimizer"].make_session_run_hook(True)
hooks.append(sync_replicas_hook)
if FLAGS.controller_training and FLAGS.controller_sync_replicas:
sync_replicas_hook = controller_ops["optimizer"].make_session_run_hook(True)
hooks.append(sync_replicas_hook)
print("-" * 80)
print("Starting session")
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
self.sess = tf.train.SingularMonitoredSession(
config=config, hooks=hooks, checkpoint_dir=FLAGS.output_dir)
def eval(self, arch_str):
if isinstance(arch_str, str):
arch_str = [int(x) for x in arch_str.split()]
return self.ops["eval_func"](self.sess, "valid", feed_dict={self.ops["controller"]["sample_arc3"]: np.asarray(arch_str)})
def Eval_NN():
print("-" * 80)
if not os.path.isdir(FLAGS.output_dir):
print("Path {} does not exist. Creating.".format(FLAGS.output_dir))
os.makedirs(FLAGS.output_dir)
elif FLAGS.reset_output_dir:
print("Path {} exists. Remove and remake.".format(FLAGS.output_dir))
shutil.rmtree(FLAGS.output_dir)
os.makedirs(FLAGS.output_dir)
print("-" * 80)
log_file = os.path.join(FLAGS.output_dir, "stdout")
print("Logging to {}".format(log_file))
sys.stdout = Logger(log_file)
utils.print_user_flags()
'''
# below are for batch evaluation of all arcs defined in the structure_path
if not FLAGS.structure_path:
exit()
with open(FLAGS.structure_path, 'r') as fp:
lines = fp.readlines()
lines = [eval(line.strip()) for line in lines]
structures = []
for line in lines:
row = []
for ele in line:
row += ele
structures.append(row)
n = len(lines)
# eval the first structure
Acc = []
eva = Eval()
eva.eval(structures[0])
eva.eval(structures[1])
acc = eva.eval(structures[0])
print(acc)
pdb.set_trace()
'''
eva = Eval()
return eva
if __name__ == "__main__":
tf.app.run()
``` |
{
"source": "jojkos/neural-machine-translation",
"score": 2
} |
#### File: neural-machine-translation/nmt/translator.py
```python
import logging
import os
import math
import random
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
import numpy as np
import nmt.utils as utils
from nmt import SpecialSymbols, Dataset, Vocabulary, Candidate
from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping
from keras.layers import LSTM, Dense, Embedding, Input, Bidirectional, Concatenate, Average, Dropout
from keras.layers.merge import add
from keras.models import Model
from keras.preprocessing.text import text_to_word_sequence
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class Translator(object):
"""
Main class of the module, takes care of the datasets, fitting, evaluation and translating
"""
def __init__(self, source_lang, model_file, model_folder,
target_lang, test_dataset, training_dataset,
reverse_input=True, max_source_vocab_size=10000, max_target_vocab_size=10000,
source_embedding_path=None, target_embedding_path=None,
clear=False, tokenize=True, log_folder="logs/", num_units=256, num_threads=1, dropout=0.2,
optimizer="rmsprop",
source_embedding_dim=300, target_embedding_dim=300,
max_source_embedding_num=None, max_target_embedding_num=None,
num_training_samples=-1, num_test_samples=-1,
num_encoder_layers=1, num_decoder_layers=1):
"""
Args:
source_embedding_dim (int): Dimension of embeddings
target_embedding_dim (int): Dimension of embeddings
source_embedding_path (str): Path to pretrained fastText embeddings file
target_embedding_path (str): Path to pretrained fastText embeddings file
max_source_embedding_num (int): how many first lines from embedding file should be loaded, None means all of them
max_target_embedding_num (int): how many first lines from embedding file should be loaded, None means all of them
source_lang (str): Source language (dataset file extension)
num_units (int): Size of each network layer
dropout (float): Size of dropout
optimizer (str): Keras optimizer name
log_folder (str): Path where the result logs will be stored
max_source_vocab_size (int): Maximum size of source vocabulary
max_target_vocab_size (int): Maximum size of target vocabulary
model_file (str): Model file name. Either will be created or loaded.
model_folder (str): Path where the result model will be stored
num_training_samples (int, optional): How many samples to take from the training dataset, -1 for all of them (default)
num_test_samples (int, optional): How many samples to take from the test dataset, -1 for all of them (default)
reverse_input (bool): Whether to reverse source sequences (optimization for better learning)
target_lang (str): Target language (dataset file extension)
test_dataset (str): Path to the test set. Dataset are two files (one source one target language)
training_dataset (str): Path to the training set
clear (bool): Whether to delete old weights and logs before running
tokenize (bool): Whether to tokenize the sequences or not (False if they are already tokenized, e.g. with the Moses tokenizer)
num_encoder_layers (int): Number of layers in encoder
num_decoder_layers (int): Number of layers in decoder
"""
self.source_embedding_dim = source_embedding_dim
self.target_embedding_dim = target_embedding_dim
self.source_embedding_path = source_embedding_path
self.target_embedding_path = target_embedding_path
self.max_source_embedding_num = max_source_embedding_num
self.max_target_embedding_num = max_target_embedding_num
self.source_lang = source_lang
self.num_units = num_units
self.num_threads = num_threads
self.dropout = dropout
self.optimizer = optimizer
self.log_folder = log_folder
self.max_source_vocab_size = max_source_vocab_size
self.max_target_vocab_size = max_target_vocab_size
self.model_folder = model_folder
self.model_weights_path = "{}".format(os.path.join(model_folder, model_file))
self.num_training_samples = num_training_samples
self.num_test_samples = num_test_samples
self.reverse_input = reverse_input
self.target_lang = target_lang
self.test_dataset_path = test_dataset
self.training_dataset_path = training_dataset
self.clear = clear
self.tokenize = tokenize
self.num_encoder_layers = num_encoder_layers
self.num_decoder_layers = num_decoder_layers
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
# configure number of threads
# intra_op_parallelism_threads = self.num_threads,
# inter_op_parallelism_threads = self.num_threads,
# allow_soft_placement = True,
# device_count = {'CPU': self.num_threads}
# FAILS ON FIT CLUSTER when started manually and not through qsub
config = tf.ConfigProto(intra_op_parallelism_threads=self.num_threads,
inter_op_parallelism_threads=self.num_threads,
allow_soft_placement=True,
device_count={'CPU': self.num_threads})
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
utils.prepare_folders([self.log_folder, self.model_folder], clear)
self.training_dataset = Dataset(self.training_dataset_path, self.source_lang, self.target_lang,
self.num_training_samples,
self.tokenize)
self.test_dataset = Dataset(self.test_dataset_path, self.source_lang, self.target_lang,
self.num_test_samples,
self.tokenize)
logger.info("There are {} samples in training dataset".format(self.training_dataset.num_samples))
logger.info("There are {} samples in test dataset".format(self.test_dataset.num_samples))
self.source_vocab = Vocabulary(self.training_dataset.x_word_seq, self.max_source_vocab_size)
self.target_vocab = Vocabulary(self.training_dataset.y_word_seq, self.max_target_vocab_size)
logger.info("Source vocabulary has {} symbols".format(self.source_vocab.vocab_len))
logger.info("Target vocabulary has {} symbols".format(self.target_vocab.vocab_len))
self.source_embedding_weights = None
if not os.path.isfile(self.model_weights_path) and self.source_embedding_path:
# load pretrained embeddings
self.source_embedding_weights = utils.load_embedding_weights(self.source_embedding_path,
self.source_vocab.ix_to_word,
limit=self.max_source_embedding_num)
self.target_embedding_weights = None
if not os.path.isfile(self.model_weights_path) and self.target_embedding_path:
# load pretrained embeddings
self.target_embedding_weights = utils.load_embedding_weights(self.target_embedding_path,
self.target_vocab.ix_to_word,
limit=self.max_target_embedding_num)
self.model, self.encoder_model, self.decoder_model = self._define_models()
logger.info("Global model")
self.model.summary()
logger.info("Encoder model")
self.encoder_model.summary()
logger.info("Decoder model")
self.decoder_model.summary()
# model_to_dot(self.model).write_pdf("model.pdf")
# model_to_dot(self.encoder_model).write_pdf("encoder_model.pdf")
# model_to_dot(self.decoder_model).write_pdf("decoder_model.pdf")
logger.info("compiling model...")
# Run training
self.model.compile(optimizer=self.optimizer, loss='categorical_crossentropy',
metrics=['acc'])
if os.path.isfile(self.model_weights_path):
logger.info("Loading model weights from file..")
self.model.load_weights(self.model_weights_path)
def _get_encoded_data(self, dataset, from_index=0, to_index=None):
encoder_input_data, decoder_input_data, decoder_target_data = Translator.encode_sequences(
dataset.x_word_seq[from_index: to_index],
dataset.y_word_seq[from_index: to_index],
dataset.x_max_seq_len, dataset.y_max_seq_len,
self.source_vocab, self.target_vocab, self.reverse_input
)
return {
"encoder_input_data": encoder_input_data,
"decoder_input_data": decoder_input_data,
"decoder_target_data": decoder_target_data
}
def _get_training_data(self, from_index=0, to_index=None):
"""
Returns: dict with encoder_input_data, decoder_input_data and decoder_target_data of whole dataset size
"""
return self._get_encoded_data(self.training_dataset, from_index, to_index)
def _training_data_gen_WRONG_SHUFFLE(self, batch_size, infinite=True, shuffle=True, bucketing=False,
bucket_range=3):
"""
Creates generator for keras fit_generator. First yielded value is number of steps needed for whole epoch.
Args:
infinite: whether to yield data infinitely or stop after one walkthrough the dataset
shuffle: whether to shuffle the training data and return them in random order every epoch
bucketing: whetether to use bucketing
bucket_range: range of each bucket
Returns: First yielded value is number of steps needed for whole epoch.
Then yields ([encoder_input_data, decoder_input_data], decoder_target_data)
"""
# shuffling
# https://stackoverflow.com/questions/46570172/how-to-fit-generator-in-keras
# https://github.com/keras-team/keras/issues/2389
# first value returned from generator is the number of steps for the whole epoch
first = True
if bucketing:
buckets = utils.split_to_buckets(self.training_dataset.x_word_seq,
self.training_dataset.y_word_seq,
bucket_range,
self.training_dataset.x_max_seq_len,
self.training_dataset.y_max_seq_len,
batch_size)
while True:
if bucketing:
indices = []
for bucket in sorted(buckets.keys()):
bucket_indices = list(range(0, len(buckets[bucket]["x_word_seq"]), batch_size))
for index in bucket_indices:
indices.append([bucket, index])
if first:
yield len(indices)
first = False
if shuffle:
random.shuffle(indices)
for bucket_ix, i in indices:
training_data = Translator.encode_sequences(
buckets[bucket_ix]["x_word_seq"][i: i + batch_size],
buckets[bucket_ix]["y_word_seq"][i: i + batch_size],
buckets[bucket_ix]["x_max_seq_len"],
buckets[bucket_ix]["y_max_seq_len"],
self.source_vocab, self.target_vocab, self.reverse_input
)
yield [training_data[0], training_data[1]], training_data[2]
else:
indices = list(range(0, self.training_dataset.num_samples, batch_size))
if first:
yield len(indices)
first = False
if shuffle:
random.shuffle(indices)
for i in indices:
training_data = self._get_training_data(i, i + batch_size)
yield [training_data["encoder_input_data"], training_data["decoder_input_data"]], training_data[
"decoder_target_data"]
if not infinite:
break
def _training_data_gen(self, batch_size, infinite=True, shuffle=True):
"""
Creates generator for keras fit_generator. First yielded value is number of steps needed for whole epoch.
Args:
infinite: whether to yield data infinitely or stop after one walkthrough the dataset
shuffle: whether to shuffle the training data and return them in random order every epoch
Returns: First yielded value is number of steps needed for whole epoch.
Then yields ([encoder_input_data, decoder_input_data], decoder_target_data)
"""
# shuffling
# https://stackoverflow.com/questions/46570172/how-to-fit-generator-in-keras
# https://github.com/keras-team/keras/issues/2389
# first value returned from generator is the number of steps for the whole epoch
first = True
while True:
x = []
y = []
indices = list(range(0, self.training_dataset.num_samples))
if first:
yield math.ceil(len(indices) / batch_size)
first = False
if shuffle:
random.shuffle(indices)
for ix in indices:
x.append(self.training_dataset.x_word_seq[ix])
y.append(self.training_dataset.y_word_seq[ix])
i = 0
while i < len(indices):
encoder_input_data, decoder_input_data, decoder_target_data = Translator.encode_sequences(
x[i: i + batch_size],
y[i: i + batch_size],
self.training_dataset.x_max_seq_len, self.training_dataset.y_max_seq_len,
self.source_vocab, self.target_vocab, self.reverse_input
)
yield [encoder_input_data, decoder_input_data], decoder_target_data
i += batch_size
if not infinite:
break
def _training_data_bucketing(self, batch_size, infinite=True, shuffle=True, bucket_range=3):
"""
Creates generator for keras fit_generator. First yielded value is number of steps needed for whole epoch.
Args:
infinite: whether to yield data infinitely or stop after one walkthrough the dataset
shuffle: whether to shuffle the training data and return them in random order every epoch
bucket_range: range of each bucket
Returns: First yielded value is number of steps needed for whole epoch.
Then yields ([encoder_input_data, decoder_input_data], decoder_target_data)
"""
# shuffling
# https://stackoverflow.com/questions/46570172/how-to-fit-generator-in-keras
# https://github.com/keras-team/keras/issues/2389
# first value returned from generator is the number of steps for the whole epoch
first = True
buckets = utils.split_to_buckets(self.training_dataset.x_word_seq,
self.training_dataset.y_word_seq,
bucket_range,
self.training_dataset.x_max_seq_len,
self.training_dataset.y_max_seq_len,
batch_size)
indices = []
# create indices to access each bucket and then each batch inside that bucket
for bucket in sorted(buckets.keys()):
bucket_indices = list(range(0, len(buckets[bucket]["x_word_seq"]), batch_size))
for index in bucket_indices:
indices.append([bucket, index])
while True:
if first:
yield len(indices)
first = False
if shuffle:
# we need as much random shuffling as possible
# so we shuffle both data inside buckets and then the order in which they are accessed
# shuffle all data inside the buckets
for bucket in sorted(buckets.keys()):
zipped = list(zip(buckets[bucket]["x_word_seq"], buckets[bucket]["y_word_seq"]))
random.shuffle(zipped)
buckets[bucket]["x_word_seq"], buckets[bucket]["y_word_seq"] = zip(*zipped)
# shuffle the global bucket->batch indices
random.shuffle(indices)
for bucket_ix, i in indices:
training_data = Translator.encode_sequences(
buckets[bucket_ix]["x_word_seq"][i: i + batch_size],
buckets[bucket_ix]["y_word_seq"][i: i + batch_size],
buckets[bucket_ix]["x_max_seq_len"],
buckets[bucket_ix]["y_max_seq_len"],
self.source_vocab, self.target_vocab, self.reverse_input
)
yield [training_data[0], training_data[1]], training_data[2]
if not infinite:
break
def _get_test_data(self, from_index=0, to_index=None):
return self._get_encoded_data(self.test_dataset, from_index, to_index)
def _test_data_gen(self, batch_size, infinite=True, return_steps=False):
"""
# vocabularies of the test dataset have to be the same as those of the training set,
# otherwise the embeddings would not correspond or OOV would be used,
# and the y one hot encodings wouldn't correspond either
Args:
batch_size (int): size of the batch
infinite (bool): whether to run infinitely or just do one loop over the dataset
return_steps (bool): whether to return number of steps for the whole epoch as a first yield
Yields: x inputs, y inputs
"""
i = 0
once_through = False
if return_steps:
steps = self.test_dataset.num_samples / batch_size
yield math.ceil(steps)
while infinite or not once_through:
test_data = self._get_test_data(i, i + batch_size)
yield (
[test_data["encoder_input_data"], test_data["decoder_input_data"]],
test_data["decoder_target_data"]
)
i += batch_size
if i >= self.test_dataset.num_samples:
once_through = True
i = 0
@staticmethod
def encode_sequences(x_word_seq, y_word_seq, x_max_seq_len, y_max_seq_len,
source_vocab, target_vocab, reverse_input=True):
"""
Take word sequences and convert them so that the model can be fit with them.
Input words are just converted to integer index
Target words are encoded to one hot vectors of target vocabulary length
Args:
x_word_seq: input word sequences
y_word_seq: target word sequences
x_max_seq_len (int): max length of input word sequences
y_max_seq_len (int): max length of target word sequences
source_vocab (Vocabulary): source vocabulary object
target_vocab (Vocabulary): target vocabulary object
reverse_input (bool): whether to reverse input sequences
Returns: encoder_input_data, decoder_input_data, decoder_target_data
"""
# if we try to allocate memory for the whole dataset (even a not very big one), a MemoryError is raised,
# so always encode only a part of the dataset
encoder_input_data = np.zeros(
(len(x_word_seq), x_max_seq_len), dtype='float32')
decoder_input_data = np.zeros(
(len(x_word_seq), y_max_seq_len - 1), dtype='float32')
# - 1 because decoder_input doesn't take the last EOS and decoder_target doesn't take the first GO symbol
decoder_target_data = np.zeros(
(len(x_word_seq), y_max_seq_len - 1, target_vocab.vocab_len),
dtype='float32')
# prepare source sentences for embedding layer (encode to indexes)
for i, seq in enumerate(x_word_seq):
if reverse_input: # for better results according to paper Sequence to seq...
seq = seq[::-1]
for t, word in enumerate(seq):
if word in source_vocab.word_to_ix:
encoder_input_data[i, t] = source_vocab.word_to_ix[word]
else:
encoder_input_data[i, t] = SpecialSymbols.UNK_IX
# encode target sentences to one hot encoding
for i, seq in enumerate(y_word_seq):
for t, word in enumerate(seq):
if word in target_vocab.word_to_ix:
index = target_vocab.word_to_ix[word]
else:
index = SpecialSymbols.UNK_IX
# decoder_target_data is ahead of decoder_input_data by one timestep
# ignore EOS symbol at the end
if t < len(seq) - 1:
decoder_input_data[i, t] = index
if t > 0:
# decoder_target_data will be ahead by one timestep
# and will not include the start character.
decoder_target_data[i, t - 1, index] = 1
return encoder_input_data, decoder_input_data, decoder_target_data
def _define_models(self):
"""
Defines main model for learning, encoder_model for prediction of encoder state in inference time
and decoder_model for predicting of results in inference time
Returns: model, encoder_model, decoder_model
"""
# model based on https://blog.keras.io/a-ten-minute-introduction-to-sequence-to-sequence-learning-in-keras.html
logger.info("Creating models...")
# Define an input sequence and process it.
encoder_inputs = Input(shape=(None,), name="encoder_input")
if self.source_embedding_weights is not None:
self.source_embedding_weights = [self.source_embedding_weights]  # Embedding layer wants a list as parameter
# according to https://keras.io/layers/embeddings/
# input_dim should be +1 when used with mask_zero... is it correctly set here?
# I think that input_dim is already +1 because the padding symbol is part of the vocabulary
source_embeddings = Embedding(self.source_vocab.vocab_len, self.source_embedding_dim,
weights=self.source_embedding_weights, mask_zero=True, trainable=True,
name="input_embeddings")
source_embedding_outputs = source_embeddings(encoder_inputs)
# use bi-directional encoder with concatenation as in Google neural machine translation paper
# https://stackoverflow.com/questions/47923370/keras-bidirectional-lstm-seq2seq
# only the first layer is bidirectional (too many params if all of them were)
# it's OK to have return_sequences here as the encoder outputs are not used in the decoder anyway, and it is needed for a multi-layer encoder
bidirectional_encoder = Bidirectional(LSTM(self.num_units, return_state=True, return_sequences=True,
dropout=self.dropout, recurrent_dropout=self.dropout),
name="bidirectional_encoder_layer")
# h is inner(output) state, c i memory cell
inputs = source_embedding_outputs
encoder_outputs, forward_h, forward_c, backward_h, backward_c = bidirectional_encoder(inputs)
state_h = Average()([forward_h, backward_h])
state_c = Average()([forward_c, backward_c])
# multiple encoder layers
for i in range(1, self.num_encoder_layers):
# residual connections as in google's paper https://arxiv.org/abs/1609.08144
# if the inputs (embeddings) size differs from the LSTM's, the sum can be performed only from layer 2 on
# the first layer is bidirectional (twice as big output) so it has to be skipped as well (i > 2)
if i > 2 or inputs.shape[-1] == self.num_units:
inputs = add([inputs, encoder_outputs])
else:
inputs = encoder_outputs
encoder_outputs, state_h, state_c = LSTM(self.num_units, return_state=True, return_sequences=True,
dropout=self.dropout, recurrent_dropout=self.dropout,
name="encoder_layer_{}".format(i + 1))(inputs)
# We discard `encoder_outputs` and only keep the states.
encoder_states = [state_h, state_c]
# Set up the decoder, using `encoder_states` as initial state.
decoder_inputs = Input(shape=(None,), name="decoder_input")
if self.target_embedding_weights is not None:
self.target_embedding_weights = [self.target_embedding_weights]  # Embedding layer wants a list as parameter
target_embeddings = Embedding(self.target_vocab.vocab_len, self.target_embedding_dim,
weights=self.target_embedding_weights, mask_zero=True, trainable=True,
name="target_embeddings")
target_embedding_outputs = target_embeddings(decoder_inputs)
inputs = target_embedding_outputs
# We set up our decoder to return full output sequences,
# and to return internal states as well. We don't use the
# return states in the training model, but we will use them in inference.
decoder_lstm = LSTM(self.num_units, return_sequences=True, return_state=True,
dropout=self.dropout, recurrent_dropout=self.dropout,
name="decoder_layer_1")
decoder_outputs, _, _ = decoder_lstm(inputs,
initial_state=encoder_states)
# multiple decoder layers
decoder_layers = []
for i in range(1, self.num_decoder_layers):
# residual connections
# if the inputs (embeddings) size differs from the LSTM's, the sum can be performed only from layer 2 on
if i > 1 or inputs.shape[-1] == self.num_units:
inputs = add([inputs, decoder_outputs])
else:
inputs = decoder_outputs
decoder_layers.append(LSTM(self.num_units, return_state=True, return_sequences=True,
name="decoder_layer_{}".format(i + 1)))
# in the learning model, initial state of all decoder layers is encoder_states
decoder_outputs, _, _ = decoder_layers[-1](inputs, initial_state=encoder_states)
decoder_dense = Dense(self.target_vocab.vocab_len, activation='softmax',
name="output_layer")
decoder_outputs = decoder_dense(decoder_outputs)
# Define the model that will turn
# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
# Next: inference mode (sampling).
# Here's the drill:
# 1) encode input and retrieve initial decoder state
# 2) run one step of decoder with this initial state
# and a "start of sequence" token as target.
# Output will be the next target token
# 3) Repeat with the current target token and current states
# Define sampling models
encoder_model = Model(encoder_inputs, encoder_states)
inputs = target_embedding_outputs
decoder_state_input_h = Input(shape=(self.num_units,), name="decoder_state_h_input")
decoder_state_input_c = Input(shape=(self.num_units,), name="decoder_state_c_input")
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, _, _ = decoder_lstm(
inputs, initial_state=decoder_states_inputs)
for i, decoder_layer in enumerate(decoder_layers):
# residual connections
if i > 0 or inputs.shape[-1] == self.num_units:
inputs = add([inputs, decoder_outputs])
else:
inputs = decoder_outputs
# every layer has to have its own inputs and outputs, because each one outputs a different state after the first token
# at the start, all of the layers are initialized with the encoder states
# in inference, the whole sequence has to be used as input (not one word after another)
# to get proper inner states in the hidden layers
decoder_outputs, _, _ = decoder_layer(inputs,
initial_state=decoder_states_inputs)
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model(
[decoder_inputs] + decoder_states_inputs,
decoder_outputs)
return model, encoder_model, decoder_model
@staticmethod
def decode_encoded_seq(seq, vocab, one_hot=False):
decoded = []
for ix in seq:
if one_hot:
ix = np.argmax(ix)
decoded.append(vocab.ix_to_word[ix])
return decoded
def translate_sequence(self, input_seq):
# Encode the input as state vectors.
states_value = self.encoder_model.predict(input_seq)
# Generate empty target sequence of length 1.
target_seq = np.zeros((1, 1))
# Populate the first character of target sequence with the start character.
target_seq[0, 0] = SpecialSymbols.GO_IX
# Sampling loop for a batch of sequences
decoded_sentence = ""
while True:
outputs = self.decoder_model.predict(
[target_seq] + states_value)
# outputs result for each token in sequence, we want to sample the last one
output_tokens = outputs[0][-1]
# Sample a token
sampled_token_index = np.argmax(output_tokens)
sampled_word = self.target_vocab.ix_to_word[sampled_token_index]
# Exit condition: either hit max length
# or find stop character.
if sampled_word == SpecialSymbols.EOS:
break
decoded_sentence += sampled_word + " "
decoded_len = len(decoded_sentence.strip().split(" "))
if decoded_len > self.training_dataset.y_max_seq_len \
and decoded_len > self.test_dataset.y_max_seq_len:
break
# Update the target sequence: append the last sampled word.
new_target_token = np.zeros((1, 1))
new_target_token[0, 0] = sampled_token_index
target_seq = np.hstack((target_seq, new_target_token))
# for BPE encoded
# decoded_sentence = re.sub(r"(@@ )|(@@ ?$)", "", decoded_sentence)
return decoded_sentence.strip()
def translate_sequence_beam(self, input_seq, beam_size=1):
# https://machinelearningmastery.com/beam-search-decoder-natural-language-processing/
# Encode the input as state vectors.
states_value = self.encoder_model.predict(input_seq)
# Generate empty target sequence of length 1.
target_seq = np.zeros((1, 1))
# only one candidate at the beginning
candidates = [
Candidate(target_seq=target_seq, last_prediction=SpecialSymbols.GO_IX, states_value=states_value, score=0,
decoded_sentence="")
]
while True:
should_stop = True
new_candidates = []
for candidate in candidates:
if not candidate.finalised:
outputs = self.decoder_model.predict(
[candidate.target_seq] + candidate.states_value)
should_stop = False
output_tokens = outputs[0][-1]
# find n (beam_size) best predictions
indices = np.argpartition(output_tokens, -beam_size)[-beam_size:]
for sampled_token_index in indices:
score = -math.log(output_tokens[sampled_token_index])
# how long is the sentence, to compute average score
step = candidate.get_sentence_length() + 1
# I believe the scores should be summed because log probabilities are used https://stats.stackexchange.com/questions/121257/log-probability-vs-product-of-probabilities
# the score is the average over all token scores (a normalization so that longer sequences are not penalized)
# incremental average https://math.stackexchange.com/questions/106700/incremental-averageing
avg_score = utils.incremental_average(candidate.score, score, step)
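# A small worked example of the incremental average above (illustrative numbers only):
# with per-token scores 0.5, 0.7, 0.9 at steps 1, 2, 3 the running average becomes
# step 1: 0.5; step 2: 0.5 + (0.7 - 0.5) / 2 = 0.6; step 3: 0.6 + (0.9 - 0.6) / 3 = 0.7,
# which equals (0.5 + 0.7 + 0.9) / 3, so no per-candidate history of scores has to be kept.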
sampled_word = self.target_vocab.ix_to_word[sampled_token_index]
new_candidate = Candidate(target_seq=candidate.target_seq,
states_value=states_value,
decoded_sentence=candidate.decoded_sentence,
score=avg_score,
sampled_word=sampled_word, last_prediction=sampled_token_index)
new_candidates.append(new_candidate)
# Exit condition: either hit max length
# or find stop character.
if sampled_word == SpecialSymbols.EOS:
continue
decoded_len = new_candidate.get_sentence_length()
if decoded_len > self.training_dataset.y_max_seq_len \
and decoded_len > self.test_dataset.y_max_seq_len:
new_candidate.finalise()
continue
# finished candidates are transferred to new_candidates automatically
else:
new_candidates.append(candidate)
# take n (beam_size) best candidates
candidates = sorted(new_candidates, key=lambda can: can.score)[:beam_size]
if should_stop:
break
return candidates[0].decoded_sentence
@staticmethod
def encode_text_seq_to_encoder_seq(text, vocab):
"""
Encodes given text sequence to numpy array ready to be used as encoder_input for prediction
Args:
text (str): sequence to translate
vocab (Vocabulary): vocabulary object
Returns: encoded sequence ready to be used as encoder_input for prediction
"""
sequences = text_to_word_sequence(text)
x = np.zeros((1, len(sequences)), dtype='float32')
for i, seq in enumerate(sequences):
if seq in vocab.word_to_ix:
ix = vocab.word_to_ix[seq]
else:
ix = SpecialSymbols.UNK_IX
x[0][i] = ix
return x
@staticmethod
def get_gen_steps(dataset, batch_size):
"""
Returns how many steps are needed for the generator to go through the whole dataset with the batch_size
Args:
dataset: dataset that is being processed
batch_size: size of the batch
Returns: number of steps for the generator to go through the whole dataset
"""
assert batch_size > 0
return math.ceil(dataset.num_samples / batch_size)
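# For example (illustrative numbers only): a dataset with 1000 samples and batch_size 64 gives
# math.ceil(1000 / 64) = 16 generator steps per epoch, the last batch holding the remaining 40 samples.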
def fit(self, epochs=1, initial_epoch=0, batch_size=64, use_fit_generator=False,
bucketing=False, bucket_range=3, early_stopping_patience=-1):
"""
Fits the model according to the parameters passed in the constructor
Args:
epochs: Number of epochs
initial_epoch: Epoch number from which to start
batch_size: Size of one batch
use_fit_generator: Prevent memory crashes by loading only part of the dataset at a time when fitting
bucketing (bool): Whether to bucket sequences according to their size to optimize padding
automatically switches use_fit_generator to True
bucket_range (int): Range of different sequence lengths in one bucket
early_stopping_patience (int): How many epochs the model keeps training after loss/val_loss stops improving. -1 means that early stopping won't be used.
"""
if bucketing:
use_fit_generator = True
# logging for tensorboard
tensorboard_callback = TensorBoard(log_dir="{}".format(self.log_folder),
write_graph=False) # quite SLOW LINE
monitor_value = "val_loss"
# model saving after each epoch
checkpoint_callback = ModelCheckpoint(self.model_weights_path, monitor=monitor_value,
save_weights_only=True, save_best_only=True)
callbacks = [tensorboard_callback, checkpoint_callback]
if early_stopping_patience >= 0:
early_stopping = EarlyStopping(monitor=monitor_value, patience=early_stopping_patience, verbose=True)
callbacks.append(early_stopping)
logger.info("fitting the model...")
if use_fit_generator:
# to prevent memory error, only loads parts of dataset at once
# or when using bucketing
if bucketing:
generator = self._training_data_bucketing(batch_size, infinite=True,
shuffle=True, bucket_range=bucket_range)
else:
generator = self._training_data_gen(batch_size, infinite=True,
shuffle=True)
test_data_generator = self._test_data_gen(batch_size, infinite=True, return_steps=True)
# first returned value from the generator is number of steps for one epoch
steps = next(generator)
test_steps = next(test_data_generator)
logger.info("traning generator will make {} steps".format(steps))
self.model.fit_generator(generator,
steps_per_epoch=steps,
epochs=epochs,
initial_epoch=initial_epoch,
callbacks=callbacks,
validation_data=test_data_generator,
validation_steps=test_steps
)
else:
training_data = self._get_training_data()
test_data = self._get_test_data()
test_data = ([test_data["encoder_input_data"], test_data["decoder_input_data"]],
test_data["decoder_target_data"])
self.model.fit(
[
training_data["encoder_input_data"],
training_data["decoder_input_data"]
],
training_data["decoder_target_data"],
batch_size=batch_size,
epochs=epochs,
initial_epoch=initial_epoch,
validation_data=test_data,
callbacks=callbacks
)
def translate_test_data(self, batch_size=64, beam_size=1):
"""
Generates translations for the test dataset, stores them in '.translated' file
"""
logger.info("translating the test dataset...")
steps = self.get_gen_steps(self.test_dataset, batch_size)
logger.info("test generator will make {} steps".format(steps))
path_original = self.test_dataset_path + "." + self.target_lang
path = path_original + ".translated"
step = 1
with open(path, "w", encoding="utf-8") as out_file:
for inputs, targets in self._test_data_gen(1, infinite=False):
print("\rtranslating {} seq out of {}".format(step, self.test_dataset.num_samples), end="", flush=True)
step += 1
encoder_input_data = inputs[0]
for i in range(len(encoder_input_data)):
# we need to keep the item in array ([i: i + 1])
decoded_sentence = self.translate_sequence_beam(encoder_input_data[i: i + 1], beam_size)
out_file.write(decoded_sentence + "\n")
print("\n", end="\n")
def get_bleu_for_test_data_translation(self):
"""
Return BLEU score for previously generated translation (with translate_test_data)
Returns (float): BLEU score
"""
path_original = self.test_dataset_path + "." + self.target_lang
path = path_original + ".translated"
bleu = utils.get_bleu(path_original, path)
return bleu
def translate(self, seq=None, expected_seq=None, beam_size=1):
"""
Translates given sequence
Args:
seq: sequence that will be translated from source to target language.
expected_seq: optional, expected result of translation
beam_size: how many candidate results should be kept during inference of the translated sequence for the beam search algorithm
"""
encoded_seq = Translator.encode_text_seq_to_encoder_seq(seq, self.source_vocab)
decoded_sentence = self.translate_sequence_beam(encoded_seq, beam_size)
logger.info("Input sequence: {}".format(seq))
logger.info("Expcected sentence: {}".format(expected_seq))
print("Translated sentence: {}".format(decoded_sentence))
```
#### File: nmt/utils/math_utils.py
```python
def incremental_average(old_value, added_value, n):
"""
Args:
old_value: computed average so far up to step n
added_value: new value to be added to the average
n: time step of the average (first is 1)
Returns:
"""
return old_value + ((added_value - old_value) / n)
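# Example (illustrative numbers only): averaging 2, 4 and 6 one value at a time gives
# incremental_average(0, 2, 1) -> 2.0, incremental_average(2.0, 4, 2) -> 3.0,
# incremental_average(3.0, 6, 3) -> 4.0, matching the plain mean (2 + 4 + 6) / 3 = 4.0.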
``` |
{
"source": "Jojo-1000/micro-pin",
"score": 3
} |
#### File: micro-pin/parsing/HeaderParser.py
```python
import sys
import os
import io
import argparse
import pcpp
from pcpp import OutputDirective, Action
class Register:
width: int
name: str
addr: int
isIO: bool
def __repr__(self):
return "%s(0x%02x)" % (self.name, self.addr)
pcpp.CmdPreprocessor
# Processes register definition file and only leaves defines for registers
class SFRPreprocessor(pcpp.Preprocessor):
def __init__(self):
super().__init__()
self.bypass_ifpassthru = False
self.potential_include_guard = None
self.registers = []
self.define("_AVR_IO_H_ 1")
self.io_macro_start = '_SFR_IO'
self.mem_macro_start = '_SFR_MEM'
self.line_directive = None
def on_comment(self, tok):
# Pass through comments
return True
def on_directive_handle(self, directive, toks, ifpassthru, precedingtoks):
if ifpassthru:
if directive.value == 'if' or directive.value == 'elif' or directive.value == 'else' or directive.value == 'endif':
self.bypass_ifpassthru = len([tok for tok in toks if tok.value == '__PCPP_ALWAYS_FALSE__' or tok.value == '__PCPP_ALWAYS_TRUE__']) > 0
if not self.bypass_ifpassthru and (directive.value == 'define' or directive.value == 'undef'):
if toks[0].value != self.potential_include_guard:
raise OutputDirective(Action.IgnoreAndPassThrough) # Don't execute anything with effects when inside an #if expr with undefined macro
super().on_directive_handle(directive,toks,ifpassthru,precedingtoks)
if directive.value == 'define':
if self.is_register_define(toks):
self.add_register(toks)
return None
# only leave register definitions for now, bits are too inconsistent
#if self.could_be_port_define(toks) and self.current_register is not None:
# if toks[0].lineno == self.next_line:
# self.next_line += 1
# return None
return None # Pass through where possible
def on_potential_include_guard(self,macro):
self.potential_include_guard = macro
return super().on_potential_include_guard(macro)
def on_include_not_found(self,is_system_include,curdir,includepath):
raise OutputDirective(Action.IgnoreAndPassThrough)
def is_register_define(self, toks):
if len(toks) < 3:
return False
return toks[2].value.startswith(self.io_macro_start) or toks[2].value.startswith(self.mem_macro_start)
def add_register(self, toks):
r = Register()
r.name = toks[0].value
try:
if toks[2].value.startswith(self.io_macro_start):
r.isIO = True
r.width = int(toks[2].value[len(self.io_macro_start):])
else:
r.isIO = False
r.width = int(toks[2].value[len(self.mem_macro_start):])
r.addr = int([tok for tok in toks if tok.type == self.t_INTEGER][0].value, base=0)
self.registers.append(r)
except:
pass
def could_be_port_define(self, toks):
return len(toks) >= 3 and toks[2].type == self.t_INTEGER
parser = argparse.ArgumentParser(description="Parses avr io headers for register definitions.")
parser.add_argument('inputs', metavar='input', nargs='*', type=argparse.FileType(), help='File(s) to process')
parser.add_argument('--output-dir', dest='output_dir', default='output', metavar='path', help='Output directory for generated files')
parser.add_argument('--output-preprocessed', dest='output_preprocessed',action='store_true',
help='Also output preprocessed header files containing only defines.\nCan be used to extract additional information.')
parser.add_argument('--input-dir', dest='input_dir', help='Process all header files in directory.')
args = parser.parse_args(sys.argv[1:])
input_files = args.inputs
output_dir = args.output_dir
extension = '.hpp'
include_guard_prefix = 'MICROPIN_DETAIL_'
include_guard_postfix = '_INCLUDED'
namespace = 'MicroPin'
required_includes = []
output_files = []
def output_registers(source_filename: str,filename: str, registers: [Register]):
include_guard = include_guard_prefix + filename.rpartition('.')[0].upper() + include_guard_postfix
output = open(output_dir + os.path.sep + filename, "wt")
output.write("// Generated from " + source_filename + '\n')
output.write('#ifndef ' + include_guard + '\n')
output.write('#define ' + include_guard + '\n')
for include in required_includes:
output.write('#include "')
output.write(include)
output.write('"\n')
output.write('namespace ' + namespace + '\n{\n')
output.write('\tconstexpr uint8_t sfrOffset = __SFR_OFFSET;\n')
for r in registers:
output.write('\tconstexpr Register')
output.write(str(r.width))
output.write(' r')
output.write(r.name)
output.write('{0x%02x%s};\n' % (r.addr, ' + sfrOffset' if r.isIO else ''))
output.write('}\n\n#endif\n')
output.close()
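# A generated header would roughly look like the following (register name and address are hypothetical):
#   // Generated from iom328p.h
#   #ifndef MICROPIN_DETAIL_REGM328P_INCLUDED
#   #define MICROPIN_DETAIL_REGM328P_INCLUDED
#   namespace MicroPin
#   {
#       constexpr uint8_t sfrOffset = __SFR_OFFSET;
#       constexpr Register8 rPORTB{0x05 + sfrOffset};
#   }
#   #endif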
if args.input_dir is not None:
for file in os.listdir(args.input_dir):
if file.endswith('.h'):
input_files.append(open(args.input_dir + os.path.sep + file))
if len(input_files) > 0:
if not os.path.exists(output_dir):
os.mkdir(output_dir)
for input in input_files:
preprocessor = SFRPreprocessor()
filename = os.path.basename(input.name)
preprocessor.parse(input)
output_file = 'Reg' + filename.rpartition('.')[0].replace('io', '').capitalize() + extension
if not args.output_preprocessed:
# Discard preprocessed output
tok = preprocessor.token()
while tok is not None:
tok = preprocessor.token()
input.close()
else:
preprocessed_output = open(output_dir + os.path.sep + filename, 'wt')
preprocessor.write(preprocessed_output)
preprocessed_output.close()
input.close()
if len(preprocessor.registers) > 0:
output_registers(filename, output_file, preprocessor.registers)
output_files.append(output_file)
print('Parsed %s -> %s' % (filename, output_file))
else:
print('Skipped %s because it contained no register definitions' % (filename))
else:
print('No inputs specified')
``` |
{
"source": "jojo13572001/MPC-Net",
"score": 2
} |
#### File: jojo13572001/MPC-Net/arm_evaluation.py
```python
import torch
import numpy as np
import matplotlib.pyplot as plt
import os
import jmpc
import time
import sys
import settings
import shutil
if settings.enablePybulletTraining == True:
print("enablePybulletTraining = True, close evaluation app")
sys.exit(0)
mpc = jmpc.Jmpc(port=1234)
pybulletClient = jmpc.Jmpc(port=1235)
STATE_DIM = 12
learning_iterations = settings.learning_iterations
mpc.resetTrajectory()
getTrajectoryResponse = mpc.getTrajectory()
mpcTrajectoryTimes = getTrajectoryResponse.get("result").get("times")
trajectoryLen = len(mpcTrajectoryTimes)
mpcTrajectoryStates = getTrajectoryResponse.get("result").get("trajectory")
initState = mpcTrajectoryStates[0].copy()
initState.extend(np.zeros(int(STATE_DIM/2)))
dt = 7./240.
setInitStateResponse = pybulletClient.setInitState(1./240., initState, trajectoryLen, learning_iterations)
if setInitStateResponse == False:
print("set Initial State Response Error!")
sys.exit(0)
def mpcRollout(initState, mpc_trajectory_history, mpc_control_history, policy):
############## MPC Rollout #####################
# Set the pybullet robot arm initial position, control period and total number of control steps
MSE = 0
currentStateList = initState.copy()
mpc_trajectory_history[0, 1:] = np.array(mpcTrajectoryStates[0] + [0]*int(STATE_DIM/2))
for timeIndex in range(trajectoryLen-1):
currentTime = dt * timeIndex
mpc_trajectory_history[timeIndex, 0] = currentTime
mpc_trajectory_history[timeIndex, 1:] = currentStateList
computePolicyResponse = mpc.computePolicy(currentStateList, currentTime)
if computePolicyResponse == False :
print("Compute Policy Error!")
sys.exit(0)
jsonControl = mpc.getControl(dt, currentStateList, currentTime)
mpc_control_history[timeIndex, 0] = currentTime
mpc_control_history[timeIndex, 1:] = jsonControl
nextStateList = pybulletClient.getNextState(jsonControl, dt, currentStateList) #for pyBullet get next state
currentStateList = nextStateList
def mpcNetRollout(initState, tx_history, mpcnet_control_history, pybullet_mpcnet_position_history, pybullet_mpcnet_velocity_history, policy):
MSE = 0.0
currentStateList = initState.copy()
mpc.resetTrajectory()
#only trajectoryLen-1 controls have to be calculated
for timeIndex in range(trajectoryLen-1):
currentTime = dt*timeIndex
tx_history[timeIndex, 0] = currentTime
tx_history[timeIndex, 1:] = currentStateList
tx_torch = torch.tensor(np.concatenate((currentTime, currentStateList.copy()), axis=None), dtype=torch.float, requires_grad=False)
#tx_torch[0][0] = 0.0 #optionally run it in MPC style
p, u_pred = policy(tx_torch)
if len(p) > 1:
u = torch.matmul(p, u_pred)
else:
u = u_pred[0]
u_np = u.detach().numpy().astype('float64')
mpcnet_control_history[timeIndex, 0] = currentTime
mpcnet_control_history[timeIndex, 1:] = u_np.tolist()
computePolicyResponse = mpc.computePolicy(currentStateList.copy(), currentTime)
if computePolicyResponse == False :
print("Compute Policy Error!")
sys.exit(0)
jsonControl = mpc.getControl(dt, currentStateList.copy(), currentTime)
MSE += np.square(np.subtract(currentStateList[:6], mpcTrajectoryStates[timeIndex])).sum()
#print("MPC-Net index ", timeIndex,", Time ",currentTime," ,MSE loss",MSE, "\n")
pybullet_mpcnet_position_history[timeIndex, 0] = currentTime
pybullet_mpcnet_velocity_history[timeIndex, 0] = currentTime
if settings.currentRendering == "enablePybulletRendering":
nextStateList = pybulletClient.getNextState(u_np.tolist(), dt, currentStateList.copy()) #for pyBullet get next state
pybullet_mpcnet_position_history[timeIndex, int(STATE_DIM/2)+1:] = nextStateList[:int(STATE_DIM/2)]
pybullet_mpcnet_velocity_history[timeIndex, int(STATE_DIM/2)+1:] = nextStateList[int(STATE_DIM/2):]
elif settings.currentRendering == "enableMpcRendering":
nextStateList = mpc.getNextState(u_np.tolist(), dt, currentStateList.copy()) #for mpc-net get next state
pybullet_mpcnet_position_history[timeIndex, 1:int(STATE_DIM/2)+1] = nextStateList[:int(STATE_DIM/2)]
pybullet_mpcnet_velocity_history[timeIndex, 1:int(STATE_DIM/2)+1] = nextStateList[int(STATE_DIM/2):]
pybulletClient.setState(currentStateList.copy(), timeIndex)
elif settings.currentRendering == "enableResetStateRendering":
pybulletClient.setState(currentStateList.copy(), timeIndex)
nextStateList = pybulletClient.getNextState(u_np.tolist(), dt, currentStateList.copy()) #for pyBullet get next state
#print("pybullet nextStateList ", nextStateList[5])
pybullet_mpcnet_position_history[timeIndex, int(STATE_DIM/2)+1:] = nextStateList[:int(STATE_DIM/2)]
pybullet_mpcnet_velocity_history[timeIndex, int(STATE_DIM/2)+1:] = nextStateList[int(STATE_DIM/2):]
nextStateList = mpc.getNextState(u_np.tolist(), dt, currentStateList.copy())
print("index ", timeIndex, " ,mpc nextStateList ", currentStateList)
pybullet_mpcnet_position_history[timeIndex, 1:int(STATE_DIM/2)+1] = nextStateList[:int(STATE_DIM/2)]
pybullet_mpcnet_velocity_history[timeIndex, 1:int(STATE_DIM/2)+1] = nextStateList[int(STATE_DIM/2):]
currentStateList = nextStateList.copy()
print("MPC-Net Final State Diff ", np.subtract(currentStateList[:int(STATE_DIM/2)], mpcTrajectoryStates[-1]), 'MSE Loss ', MSE)
def plot(save_path):
policy = torch.load(save_path)
pybullet_mpcnet_position_history = np.zeros((trajectoryLen, STATE_DIM + 1))
pybullet_mpcnet_velocity_history = np.zeros((trajectoryLen, STATE_DIM + 1))
mpc_control_history = np.zeros((trajectoryLen, int(STATE_DIM/2)+1))
mpcnet_control_history = np.zeros((trajectoryLen, int(STATE_DIM/2)+1))
tx_history = np.zeros((trajectoryLen, STATE_DIM + 1))
mpc_trajectory_history = np.zeros((trajectoryLen, STATE_DIM + 1))
############## MPC Rollout #####################
mpcRollout(initState, mpc_trajectory_history, mpc_control_history, policy)
#print("MPC Final State Diff ", np.subtract(currentStateList[:int(STATE_DIM/2)], mpcTrajectoryStates[-1]))
time.sleep(1)
############## MPC-Net Rollout #####################
mpcNetRollout(initState, tx_history, mpcnet_control_history, pybullet_mpcnet_position_history, pybullet_mpcnet_velocity_history, policy)
f, axarr = plt.subplots(4,2)
lineObjects = axarr[0][0].plot(mpc_trajectory_history[:trajectoryLen-1, 0], mpc_trajectory_history[:trajectoryLen-1, 1:int(STATE_DIM/2)+1]) #plot velocity
axarr[0][0].legend(iter(lineObjects), ('q1', 'q2','q3','q4','q5','q6'))
axarr[0][0].set_ylim(-2, 2)
axarr[0][0].grid(True)
axarr[0][0].set_title("MPC State")
lineObjects = axarr[0][1].plot(tx_history[:trajectoryLen-1, 0], tx_history[:trajectoryLen-1, 1:int(STATE_DIM/2)+1]) #plot velocity
axarr[0][1].set_ylim(-2, 2)
axarr[0][1].grid(True)
axarr[0][1].set_title("MPC-Net State")
lineObjects = axarr[1][0].plot(mpc_trajectory_history[:trajectoryLen-1, 0], mpc_trajectory_history[:trajectoryLen-1, int(STATE_DIM/2)+1:]) #plot velocity
axarr[1][0].legend(iter(lineObjects), ('q7', 'q8','q9','q10','q11','q12'))
axarr[1][0].set_ylim(-2, 2)
axarr[1][0].grid(True)
lineObjects = axarr[1][1].plot(tx_history[:trajectoryLen-1, 0], tx_history[:trajectoryLen-1, int(STATE_DIM/2)+1:]) #plot velocity
axarr[1][1].set_ylim(-2, 2)
axarr[1][1].grid(True)
lineObjects = axarr[2][0].plot(mpc_trajectory_history[:trajectoryLen-1, 0], mpc_trajectory_history[:trajectoryLen-1, int(STATE_DIM/2)+1:-1]) #plot velocity
axarr[2][0].legend(iter(lineObjects), ('q7', 'q8','q9','q10','q11','q12'))
axarr[2][0].set_ylim(-2, 2)
axarr[2][0].grid(True)
lineObjects = axarr[2][1].plot(tx_history[:trajectoryLen-1, 0], tx_history[:trajectoryLen-1, int(STATE_DIM/2)+1:-1]) #plot velocity
axarr[2][1].set_ylim(-2, 2)
axarr[2][1].grid(True)
lineObjects = axarr[3][0].plot(mpc_control_history[:trajectoryLen-1, 0], mpc_control_history[:trajectoryLen-1, 1:int(STATE_DIM/2)+1]) #plot velocity
axarr[3][0].legend(iter(lineObjects), ('c1', 'c2','c3','c4','c5','c6'))
axarr[3][0].set_ylim(-50, 50)
axarr[3][0].grid(True)
lineObjects = axarr[3][1].plot(mpcnet_control_history[:trajectoryLen-1, 0], mpcnet_control_history[:trajectoryLen-1, 1:int(STATE_DIM/2)+1]) #plot velocity
axarr[3][1].set_ylim(-50, 50)
axarr[3][1].grid(True)
if settings.currentRendering == "enablePybulletRendering":
shutil.copy("PolicyNet_3Layer.py","PolicyNet.py")
#We currently use the three-layer policy, trained in the pybullet dynamics environment. The path is hard-coded for now.
plot(save_path=settings.loadPolicyPath)
else:
shutil.copy("PolicyNet_2Layer.py","PolicyNet.py")
#The two-layer policy, trained in the MPC dynamics environment. The path is hard-coded for now.
plot(save_path="armPolicy/pyBullet/1014/mpcPolicy_2020-10-28_025339.pt")
#plot(save_path="armPolicy/pyBullet/1115/161926/233420/004124/mpcPolicy_2020-11-16_015155.pt", t_end=trajectoryLastTime)
#plot(save_path="armPolicy/pyBullet/1115/161926/233420/004124/mpcPolicy_2020-11-16_011155.pt", t_end=trajectoryLastTime)
#plot(save_path="armPolicy/pyBullet/1115/161926/233420/004124/mpcPolicy_2020-11-16_010655.pt", t_end=trajectoryLastTime)
#plot(save_path="armPolicy/pyBullet/1115/161926/233420/mpcPolicy_2020-11-16_004124.pt", t_end=trajectoryLastTime)
#plot(save_path="armPolicy/pyBullet/1105/094457/112213/mpcPolicy_2020-11-06_143418.pt", t_end=trajectoryLastTime)
#plot(save_path="armPolicy/alphaMix_1014/single_state_2_layers/keepTrainingWithoutSampling/175636/020101/mpcPolicy_2020-10-28_025049.pt", t_end=trajectoryLastTime)
#plot(save_path="armPolicy/alphaMix_1014/single_state_2_layers/keepTrainingWithoutSampling/mpcPolicy_2020-10-20_175406.pt", t_end=trajectoryMaxTime) #very good
#plot(save_path="armPolicy/alphaMix_1014/single_state_2_layers/keepTrainingWithoutSampling/mpcPolicy_2020-10-20_175906.pt", t_end=trajectoryMaxTime)
#plot(save_path="armPolicy/alphaMix_1014/single_state_2_layers/keepTrainingWithoutSampling/mpcPolicy_2020-10-20_174636.pt", t_end=trajectoryMaxTime)
#plot(save_path="armPolicy/alphaMix_1014/single_state_2_layers/keepTrainingWithoutSampling/mpcPolicy_2020-10-20_173406.pt", t_end=trajectoryMaxTime)
#plot(save_path="armPolicy/alphaMix_1014/single_state_2_layers/keepTrainingWithoutSampling/mpcPolicy_2020-10-20_172136.pt", t_end=trajectoryMaxTime)
#plot(save_path="armPolicy/alphaMix_1014/single_state/mpcPolicy_2020-10-20_144840.pt", t_end=trajectoryMaxTime)
#plot(save_path="armPolicy/next_sate_without_alpha_mixing/1016/mpcPolicy_2020-10-19_192314.pt", t_end=trajectoryMaxTime)
#plot(save_path="armPolicy/alphaMix_1014/1020/mpcPolicy_2020-10-20_023059.pt", t_end=trajectoryMaxTime)
#plot(save_path="armPolicy/next_sate_without_alpha_mixing/mpcPolicy_2020-10-12_201140.pt", t_end=trajectoryMaxTime)
#plot(save_path="armPolicy/mpcPolicy_2020-10-07_020028.pt", t_end=trajectoryMaxTime)
#plot(save_path="armPolicy/mpcPolicy_2020-10-07_020028.pt", t_end=trajectoryMaxTime)
plt.show()
```
#### File: jojo13572001/MPC-Net/ballbot_learner.py
```python
import numpy as np
import torch
from tensorboardX import SummaryWriter
import datetime
import time
import pickle
from replay_memory import ReplayMemory
import os
# ugly workaround until shared library can be discovered properly with python3
import sys
sys.path.append(os.environ["HOME"]+"/catkin_ws/devel/lib/python3.6/dist-packages/ocs2_ballbot_example")
from BallbotPyBindings import mpc_interface, scalar_array, state_vector_array, input_vector_array, dynamic_vector_array, cost_desired_trajectories
from PolicyNet import ExpertMixturePolicy as PolicyNet
mpc = mpc_interface("mpc", False)
systemHasConstraints = False
def getTargetTrajectories():
desiredTimeTraj = scalar_array()
desiredTimeTraj.resize(1)
desiredTimeTraj[0] = 2.0
desiredInputTraj = dynamic_vector_array()
desiredInputTraj.resize(1)
desiredInputTraj[0] = np.zeros((mpc.INPUT_DIM, 1))
desiredStateTraj = dynamic_vector_array()
desiredStateTraj.resize(1)
desiredStateTraj[0] = np.zeros((mpc.STATE_DIM, 1))
return cost_desired_trajectories(desiredTimeTraj, desiredStateTraj, desiredInputTraj)
targetTrajectories = getTargetTrajectories()
mpc.reset(targetTrajectories)
dtype = torch.float
device = torch.device("cpu")
#device = torch.device("cuda:0") # Uncomment this to run on GPU
class FlowMap(torch.autograd.Function):
@staticmethod
def forward(ctx, t, x, u):
"""
In the forward pass we receive a Tensor containing the input and return
a Tensor containing the output. ctx is a context object that can be used
to stash information for backward computation. You can cache arbitrary
objects for use in the backward pass using the ctx.save_for_backward method.
"""
x_cpu = x.cpu()
u_cpu = u.cpu()
ctx.save_for_backward(t, x_cpu, u_cpu)
x_np = x_cpu.t().detach().numpy().astype('float64')
u_np = u_cpu.t().detach().numpy().astype('float64')
xDot = torch.tensor(mpc.computeFlowMap(t, x_np, u_np), device=device, dtype=dtype)
return xDot
@staticmethod
def backward(ctx, grad_output):
"""
In the backward pass we receive a Tensor containing the gradient of the loss
with respect to the output, and we need to compute the gradient of the loss
with respect to the input.
"""
grad_t = grad_x = grad_u = None
t, x, u = ctx.saved_tensors
x_np = x.t().detach().numpy().astype('float64')
u_np = u.t().detach().numpy().astype('float64')
if ctx.needs_input_grad[0]:
raise NotImplementedError("Derivative of dynamics w.r.t. time not available")
if ctx.needs_input_grad[1]:
mpc.setFlowMapDerivativeStateAndControl(t, x_np, u_np)
dfdx = torch.tensor(mpc.computeFlowMapDerivativeState(), device=device, dtype=dtype)
grad_x = torch.matmul(grad_output, dfdx).reshape((-1, x_np.size))
if ctx.needs_input_grad[2]:
mpc.setFlowMapDerivativeStateAndControl(t, x_np, u_np)
dfdu = torch.tensor(mpc.computeFlowMapDerivativeInput(), device=device, dtype=dtype)
grad_u = torch.matmul(grad_output, dfdu).reshape((-1, u_np.size))
return grad_t, grad_x, grad_u
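# Sketch of the vector-Jacobian products above (shapes are assumptions based on the reshape calls):
# grad_output holds dLoss/dxDot, dfdx is the (STATE_DIM x STATE_DIM) Jacobian df/dx and
# dfdu the (STATE_DIM x INPUT_DIM) Jacobian df/du, so grad_x = grad_output @ dfdx and
# grad_u = grad_output @ dfdu propagate the gradient back to x and u respectively.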
class IntermediateCost(torch.autograd.Function):
@staticmethod
def forward(ctx, t, x, u):
"""
In the forward pass we receive a Tensor containing the input and return
a Tensor containing the output. ctx is a context object that can be used
to stash information for backward computation. You can cache arbitrary
objects for use in the backward pass using the ctx.save_for_backward method.
"""
x_cpu = x.cpu()
u_cpu = u.cpu()
ctx.save_for_backward(t, x_cpu, u_cpu)
x_np = x_cpu.t().detach().numpy().astype('float64')
u_np = u_cpu.t().detach().numpy().astype('float64')
L = torch.tensor(mpc.getIntermediateCost(t, x_np, u_np), device=device, dtype=dtype)
return L
@staticmethod
def backward(ctx, grad_output):
"""
In the backward pass we receive a Tensor containing the gradient of the loss
with respect to the output, and we need to compute the gradient of the loss
with respect to the input.
"""
grad_t = grad_x = grad_u = None
t, x, u = ctx.saved_tensors
x_np = x.t().detach().numpy().astype('float64')
u_np = u.t().detach().numpy().astype('float64')
if ctx.needs_input_grad[0]:
raise NotImplementedError("Derivative of RunningCost w.r.t. time not available")
if ctx.needs_input_grad[1]:
dLdx = torch.tensor([[mpc.getIntermediateCostDerivativeState(t, x_np, u_np)]], device=device, dtype=dtype)
grad_x = grad_output * dLdx
if ctx.needs_input_grad[2]:
dLdu = torch.tensor([[mpc.getIntermediateCostDerivativeInput(t, x_np, u_np)]], device=device, dtype=dtype)
grad_u = grad_output * dLdu
return grad_t, grad_x, grad_u
class StateInputConstraint(torch.autograd.Function):
@staticmethod
def forward(ctx, t, x, u):
"""
In the forward pass we receive a Tensor containing the input and return
a Tensor containing the output. ctx is a context object that can be used
to stash information for backward computation. You can cache arbitrary
objects for use in the backward pass using the ctx.save_for_backward method.
"""
x_cpu = x.cpu()
u_cpu = u.cpu()
ctx.save_for_backward(t, x_cpu, u_cpu)
x_np = x_cpu.t().detach().numpy().astype('float64')
u_np = u_cpu.t().detach().numpy().astype('float64')
g1 = torch.tensor(mpc.getStateInputConstraint(t, x_np, u_np), device=device, dtype=dtype)
return g1
@staticmethod
def backward(ctx, grad_output):
"""
In the backward pass we receive a Tensor containing the gradient of the loss
with respect to the output, and we need to compute the gradient of the loss
with respect to the input.
"""
grad_t = grad_x = grad_u = None
t, x, u = ctx.saved_tensors
x_np = x.t().detach().numpy().astype('float64')
u_np = u.t().detach().numpy().astype('float64')
if ctx.needs_input_grad[0]:
raise NotImplementedError("Derivative of StateInputConstraint w.r.t. time not available")
if ctx.needs_input_grad[1]:
raise NotImplementedError("Derivative of StateInputConstraint w.r.t. state not available")
if ctx.needs_input_grad[2]:
dg1du = torch.tensor(mpc.getStateInputConstraintDerivativeControl(t, x_np, u_np), device=device, dtype=dtype)
grad_u = torch.matmul(grad_output, dg1du).reshape((-1, u_np.size))
return grad_t, grad_x, grad_u
def control_Hamiltonian(tx, u_pred, dVdx, nu):
f = FlowMap.apply(tx[0], tx[1:], u_pred)
L = IntermediateCost.apply(tx[0], tx[1:], u_pred)
hamiltonian = L + dVdx.dot(f)
if systemHasConstraints:
g1 = StateInputConstraint.apply(tx[0], tx[1:], u_pred)
hamiltonian += g1.dot(nu)
return hamiltonian
def MSE_Loss(u_pred, u0):
mseloss = torch.nn.MSELoss(reduction='sum')
return mseloss(u_pred, u0)
def num_samples_per_trajectory_point(t, max_num_points, half_value_decay_t):
"""
Calculates number of samples drawn for each nominal state point in trajectory
:param t: Query time along trajectory
:param max_num_points: maximum number of samples, drawn at the start of the trajectory
:param half_value_decay_t: time into trajectory after which the number of sampled points is halved
:return: Number of samples to be drawn
"""
return max_num_points * np.exp(-np.log(2) * t / half_value_decay_t)
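# Example (illustrative numbers only): with max_num_points=4 and half_value_decay_t=1.0 this gives
# 4 samples at t=0.0, 2 samples at t=1.0 and 1 sample at t=2.0, i.e. the sample count halves
# every half_value_decay_t seconds along the trajectory.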
def trajectoryCost(policy, duration, dt_control):
cost = 0.0 # running sum
numStartingPoints = 1
for _ in range(numStartingPoints):
startPos = np.zeros([mpc.STATE_DIM, 1])
tx = np.concatenate(([[0.0]], startPos))
for it in range(int(duration / dt_control)):
ttx_torch = torch.tensor(np.concatenate((tx[0, 0], tx[1:]), axis=None), dtype=dtype,
device=device, requires_grad=False)
p, u_pred = policy(ttx_torch)
if len(p) > 1:
u = torch.matmul(p, u_pred)
else:
u = u_pred[0]
u_np = u.t().detach().numpy().astype('float64')
cost += torch.tensor(mpc.getIntermediateCost(tx[0], tx[1:], u_np), device=device, dtype=dtype)
if np.isnan(cost):
return np.nan, tx[0]
dx = mpc.computeFlowMap(tx[0], tx[1:], u_np)
tx[1:] += dx.reshape(mpc.STATE_DIM, 1) * dt_control
tx[0, 0] += dt_control
return cost, duration
writer = SummaryWriter()
load_policy = False
if load_policy:
save_path = "data/policy.pt"
policy = torch.load(save_path)
policy.eval()
else:
policy = PolicyNet(mpc.STATE_DIM+1, mpc.INPUT_DIM)
policy.to(device)
print("Initial policy parameters:")
print(list(policy.named_parameters()))
learning_rate = 1e-2
optimizer = torch.optim.Adam(policy.parameters(), lr=learning_rate)
load_memory = False
if load_memory:
with open("data/memory.pkl", 'rb') as memFile:
mem = pickle.load(memFile)
else:
mem_capacity = 1000000
mem = ReplayMemory(mem_capacity)
# prepare saving of MPC solution trajectory (always add first point of a slq run)
mpc_traj_len_sec = 3.0 # length of trajectories to generate with MPC
dt_control = 1.0/400. # 400 Hz control frequency
mpc_traj_t = np.linspace(0.0, mpc_traj_len_sec, int(mpc_traj_len_sec/dt_control))
last_policy_save_time = time.time()
learning_iterations = 100000
print("==============\nStarting training\n==============")
try:
for it in range(learning_iterations):
alpha_mix = np.clip(1.0 - 1.0 * it / learning_iterations, 0.2, 1.0)
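# Illustrative decay (numbers assume learning_iterations stays at 100000): alpha_mix starts at 1.0
# (rollout driven purely by the MPC control), decreases linearly to 0.2 at it = 80000 and is then
# clipped at 0.2, so the learned policy contributes at most 80% of the mixed control late in training.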
# run data collection (=MPC) less frequently than the policy updates
mpc_decimation = 1 if len(mem) < 15000 else 500
if it % mpc_decimation == 0:
mpc.reset(targetTrajectories)
x0 = np.zeros((mpc.STATE_DIM, 1))
x0[0] = np.random.uniform(-0.5, 0.5) # base x
x0[1] = np.random.uniform(-0.5, 0.5) # base y
print("resetting MPC")
print("proportion of MPC policy is", alpha_mix)
print("starting from", x0.transpose())
for mpc_time in mpc_traj_t: # mpc dummy loop
mpc.setObservation(mpc_time, x0)
try:
mpc.advanceMpc()
except RuntimeError:
print("Caught error in MPC advance!!")
break
t_result = scalar_array()
x_result = state_vector_array()
u_result = input_vector_array()
mpc.getMpcSolution(t_result, x_result, u_result)
K = mpc.getLinearFeedbackGain(t_result[0])
# sample around the initial point and push it to the replay buffer
#for i in range(1):
for i in range(int(round(num_samples_per_trajectory_point(t_result[0], max_num_points=4, half_value_decay_t=1e10)))):
if i == 0:
x = x_result[0] # definitely push back the nominal point
else:
x = np.random.multivariate_normal(x_result[0], cov=np.diag(0.1 * np.ones(mpc.STATE_DIM)))
dVdx = mpc.getValueFunctionStateDerivative(t_result[0], x)
if systemHasConstraints:
nu = mpc.getStateInputConstraintLagrangian(t_result[0], x)
else:
nu = None
mem.push(mpc_time, x, dVdx, None, nu, None, u_result[0] + K.dot(x - x_result[0]), 0)
# increment state for next time step
ttx_torch = torch.tensor(np.concatenate((t_result[0], x_result[0]), axis=None),
dtype=torch.float, requires_grad=False)
p, u_net = policy(ttx_torch)
if len(p) > 1:
u_net = torch.matmul(p, u_net)
else:
u_net = u_net[0]
u_mixed = alpha_mix * u_result[0] + (1.0 - alpha_mix) * u_net.detach().numpy().astype('float64')
dx = mpc.computeFlowMap(t_result[0], x_result[0], u_mixed)
x0 += dt_control * dx.reshape(mpc.STATE_DIM,1)
print("mpc ended up at", x_result[0])
# extract batch of samples from replay memory
batch_size = 2**5
samples = mem.sample(batch_size)
writeLogThisIteration = True
def solver_step_closure():
loss = torch.zeros([1], dtype=dtype, device=device) # running sum over samples
#mpc_H = torch.zeros([1], dtype=dtype, device=device) # running sum over samples
g1_norm = 0.0 # running sum over samples
for sample in samples:
sum_u = 0.0
tx = torch.tensor(np.concatenate((sample.t, sample.x), axis=None), dtype=dtype, device=device, requires_grad=False)
ttx_net = torch.tensor(np.concatenate((sample.t, sample.x), axis=None), dtype=dtype, device=device, requires_grad=False)
p, u_pred = policy(ttx_net)
dVdx = torch.tensor(sample.dVdx, dtype=dtype, device=device, requires_grad=False)
if systemHasConstraints:
nu = torch.tensor(sample.nu, dtype=dtype, device=device, requires_grad=False)
else:
nu = None
for pi, u_pred_i in zip(p, u_pred): # loop through experts
sum_u += pi * u_pred_i
#mpc_H += control_Hamiltonian(tx, torch.tensor(sample.u0), dVdx, nu)
if len(p) > 1:
u_net = torch.matmul(p, u_pred)
else:
u_net = u_pred[0]
loss += MSE_Loss(u_net, torch.FloatTensor(sample.u0).to(device))
if systemHasConstraints:
g1_norm += np.linalg.norm(mpc.getStateInputConstraint(sample.t, sample.x, u_net.detach().numpy().astype('float64')))
optimizer.zero_grad()
loss.backward()
global writeLogThisIteration
if writeLogThisIteration:
writer.add_scalar('loss/perSample', loss.item() / batch_size, it)
writer.add_scalar('loss/constraintViolation', g1_norm / batch_size, it)
writeLogThisIteration = False
return loss
if it % 200 == 0:
oc_cost, survival_time = trajectoryCost(policy=policy, duration=mpc_traj_len_sec, dt_control=dt_control)
writer.add_scalar('metric/oc_cost', oc_cost, it)
writer.add_scalar('metric/survival_time', survival_time, it)
print("iteration", it, "oc_cost", oc_cost)
if time.time() - last_policy_save_time > 5.0 * 60.0:
last_policy_save_time = time.time()
now = datetime.datetime.now()
save_path = "ballbot/1020/mpcPolicy_" + now.strftime("%Y-%m-%d_%H%M%S")
print("Iteration", it, "saving policy to", save_path + ".pt")
torch.save(policy, save_path + ".pt")
optimizer.step(solver_step_closure)
for param in policy.parameters():
if(torch.isnan(param).any()):
print("nan in policy!")
print("==============\nTraining completed.\n==============")
except KeyboardInterrupt:
print("==============\nTraining interrupted after iteration", it, ".\n==============")
pass
print("optimized policy parameters:")
print(list(policy.named_parameters()))
now = datetime.datetime.now()
save_path = "ballbot/1020/mpcPolicy_" + now.strftime("%Y-%m-%d_%H%M%S")
print("saving policy to", save_path + ".pt")
torch.save(policy, save_path + ".pt")
# print("Saving data to", save_path+"_memory.pkl")
# with open(save_path+"_memory.pkl", 'wb') as outputFile:
# pickle.dump(mem, outputFile)
writer.close()
print("Done. Exiting now.")
``` |
{
"source": "jojo2234/drought-bulletin",
"score": 3
} |
#### File: drought-bulletin/Data/EstraiStazioniDaCartella.py
```python
import os
import sys
import xlrd
import string
from openpyxl import load_workbook
def do_extractionfrom(directoryentry):
if directoryentry.name.endswith('xlsx'):
workbook = load_workbook(filename=directoryentry)
nomiFogli = workbook.sheetnames
numSh = len(nomiFogli)
print("\tActive sheet: " + workbook.active.title)
print("\tSheets number: " + str(numSh))
for i in range(0,numSh):
trovato=False
workbook.active = i
print("\tOperating on sheet: " + workbook.active.title)
foglio = workbook.active
for x in range(1, foglio.max_row):
if(trovato==True):
break
for y in range(1, foglio.max_column):
celVal=""
if(type(foglio.cell(x,y).value) == str):
celVal = foglio.cell(x,y).value.lower()
if(celVal =="stazione" or celVal=="station" or celVal=="stazioni" or celVal=="stations"):
trovato=True
nomiStat = []
xcoord = []
ycoord = []
distr = []
quota = []
if("X_" not in foglio.cell(x,y+1).value and "cod_" not in foglio.cell(x,y+1).value and foglio.cell(x,y+1).value.isnumeric() == False):
#The cell after celVal most likely contains the station names (they are probably laid out in a row)
origY = y
for i in range(y+1,foglio.max_column):
if(i is not None):
nomiStat.append(foglio.cell(x,i).value)
y+=1
x+=1
y = origY
if("x_" in str(foglio.cell(x,y).value.lower())):
for i in range(y+1,foglio.max_column):
if(i is not None):
xcoord.append(foglio.cell(x,i).value)
y+=1
x+=1
y = origY
if("y_" in str(foglio.cell(x,y).value.lower())):
for i in range(y+1,foglio.max_column):
if(i is not None):
ycoord.append(foglio.cell(x,i).value)
y+=1
x+=1
y = origY
if("distr" in str(foglio.cell(x,y).value.lower())):
for i in range(y+1,foglio.max_column):
if(i is not None):
distr.append(foglio.cell(x,i).value)
y+=1
x+=2
y = origY
if("dtm" in str(foglio.cell(x,y).value.lower())):
for i in range(y+1,foglio.max_column):
if(i is not None):
quota.append(foglio.cell(x,i).value)
y+=1
y = origY
csv_file = open("stazioni_"+foglio.title+".csv", "w")
stringa = ""
for a in range(0,len(nomiStat)):
stringa += str(a+1)+";"+str(xcoord[a])+";"+str(ycoord[a])+";"+str(nomiStat[a])+";"+str(quota[a])+";"+str(distr[a])+"\n"
csv_file.write(stringa)
csv_file.close()
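# Each written row would look roughly like (values are hypothetical): 1;612345.0;4812345.0;StationName;350.0;DistrictA
# i.e. index;x coordinate;y coordinate;station name;elevation (dtm);district, separated by semicolons.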
break
#It is assumed that, if the names are laid out in a row, the rest of the data is on the column
else:
#The cell after celVal along y does not contain the station names, so proceed along x
origX = x
for i in range(x+1,foglio.max_row):
if(i is not None):
nomiStat.append(foglio.cell(i,y).value)
x+=1
y+=1
x = origX
if("x_" in str(foglio.cell(x,y).value.lower())):
for i in range(x+1,foglio.max_row):
if(i is not None):
xcoord.append(foglio.cell(i,y).value)
x+=1
y+=1
x = origX
if("y_" in str(foglio.cell(x,y).value.lower())):
for i in range(x+1,foglio.max_row):
if(i is not None):
ycoord.append(foglio.cell(i,y).value)
x+=1
y+=1
x = origX
if("distr" in str(foglio.cell(x,y).value.lower())):
for i in range(x+1,foglio.max_row):
if(i is not None):
distr.append(foglio.cell(i,y).value)
x+=1
y+=2
x = origX
if("dtm" in str(foglio.cell(x,y).value.lower())):
for i in range(x+1,foglio.max_row):
if(i is not None):
quota.append(foglio.cell(i,y).value)
x+=1
x = origX
csv_file = open("stazioni_"+foglio.title+".csv", "w")
stringa = ""
for a in range(0,len(nomiStat)):
stringa += str(a+1)+";"+str(xcoord[a])+";"+str(ycoord[a])+";"+str(nomiStat[a])+";"+str(quota[a])+";"+str(distr[a])+"\n"
csv_file.write(stringa)
csv_file.close()
break
#It is assumed that, if the names are laid out in a column, the rest of the data is on the row
#Note: the lists contain duplicates; remove them by inserting the list into a dict
#print(foglio.cell(x,y).value)
#for row in foglio.iter_rows(min_row=1,max_row=8,min_col=1,max_col=20):
# print(row.value) #Wrong?
#print("\n\t " + str(foglio["B3"].value))
print("\n")
elif directoryentry.name.endswith('xls'):
book = xlrd.open_workbook(directoryentry)
numsht = book.nsheets
print("\tSheets number: " + str(numsht))
for i in range(0,numsht):
trovato = False
sht = book.sheet_by_index(i)
print("\tOperating on sheet: " + sht.name)
for x in range(1,sht.nrows):
if(trovato==True):
break
for y in range(1,sht.ncols):
celVal=""
if(type(sht.cell(x,y).value) == str):
celVal = sht.cell(x,y).value.lower()
if(celVal =="stazione" or celVal=="station" or celVal=="stazioni" or celVal=="stations"):
trovato = True
nomiStat = []
xcoord = []
ycoord = []
distr = []
quota = []
if("X_" not in sht.cell(x,y+1).value and "cod_" not in sht.cell(x,y+1).value and sht.cell(x,y+1).value.isnumeric() == False):
#print(sht.cell_value(x,y))
origY = y
for i in range(y+1,sht.ncols):
if(i is not None):
nomiStat.append(sht.cell(x,i).value)
y+=1
x+=1
y = origY
if("x_" in str(sht.cell(x,y).value.lower())):
for i in range(y+1,sht.ncols):
if(i is not None):
xcoord.append(sht.cell(x,i).value)
y+=1
x+=1
y = origY
if("y_" in str(sht.cell(x,y).value.lower())):
for i in range(y+1,sht.ncols):
if(i is not None):
ycoord.append(sht.cell(x,i).value)
y+=1
x+=1
y = origY
if("distr" in str(sht.cell(x,y).value.lower())):
for i in range(y+1,sht.ncols):
if(i is not None):
distr.append(sht.cell(x,i).value)
y+=1
x+=2
y = origY
if("dtm" in str(sht.cell(x,y).value.lower())):
for i in range(y+1,sht.ncols):
if(i is not None):
quota.append(sht.cell(x,i).value)
y+=1
y = origY
#It is assumed that, if the names are laid out in a row, the rest of the data is on the column
csv_file = open("stazioni_"+sht.name+".csv", "w")
stringa = ""
for a in range(0,len(nomiStat)):
stringa += str(a+1)+";"+str(xcoord[a])+";"+str(ycoord[a])+";"+str(nomiStat[a])+";"+str(quota[a])+";"+str(distr[a])+"\n"
csv_file.write(stringa)
csv_file.close()
break
else:
#The cell after celVal along y does not contain the station names, so proceed along x
origX = x
for i in range(x+1,sht.nrows):
if(i is not None):
nomiStat.append(sht.cell(i,y).value)
x+=1
y+=1
x = origX
if("x_" in str(sht.cell(x,y).value.lower())):
for i in range(x+1,sht.nrows):
if(i is not None):
xcoord.append(sht.cell(i,y).value)
x+=1
y+=1
x = origX
if("y_" in str(sht.cell(x,y).value.lower())):
for i in range(x+1,sht.nrows):
if(i is not None):
ycoord.append(sht.cell(i,y).value)
x+=1
y+=1
x = origX
if("distr" in str(sht.cell(x,y).value.lower())):
for i in range(x+1,sht.nrows):
if(i is not None):
distr.append(sht.cell(i,y).value)
x+=1
y+=2
x = origX
if("dtm" in str(sht.cell(x,y).value.lower())):
for i in range(x+1,sht.nrows):
if(i is not None):
quota.append(sht.cell(i,y).value)
x+=1
x = origX
csv_file = open("stazioni_"+sht.name+".csv", "w")
stringa = ""
for a in range(0,len(nomiStat)):
stringa += str(a+1)+";"+str(xcoord[a])+";"+str(ycoord[a])+";"+str(nomiStat[a])+";"+str(quota[a])+";"+str(distr[a])+"\n"
csv_file.write(stringa)
csv_file.close()
break
else:
print("\tFile not supported")
if(len(sys.argv) > 1):
dire = sys.argv[1]
else:
dire = input("\nDirectory where xlsx files are located: ")
assert os.path.exists(dire), "Directory doesn't exist!"
print("\nSelected directory is: " + dire)
print("\nProcessing files: ")
with os.scandir(dire) as entries:
for entry in entries:
print("\n --- " + entry.name)
do_extractionfrom(entry)
``` |
{
"source": "jojo2234/LinComp",
"score": 3
} |
#### File: jojo2234/LinComp/programma1.py
```python
import sys
import os.path
import codecs
import re
import nltk
import xml.etree.ElementTree as ET
#Program1: Compares the 2 corpora on the basis of the following statistical information:
# Note: The review corpora are in 2 xml files; the reviews include:
# the review text, the title and the rating that the person assigns to the product or service.
# This program uses nltk and the averaged_perceptron_tagger and punkt modules
# python programma1.py nomeFile1.xml nomeFile2.xml
#---Opens the file
def rawTextFromFile(fileName):
fileInput = codecs.open(fileName, "r", "utf-8") #Open the file passed on the command line
raw = fileInput.read() #Load the whole file into memory
return raw #Return the text to the calling function
#---Part of Speech tagging
def POSTaggingAnalyze(nodes):
bigList = list()
for child in nodes: #For each node take the review
for sentences in nltk.sent_tokenize(child.text): #For each review take the sentences
words = nltk.word_tokenize(sentences) #Split each sentence into tokens
tagged = nltk.pos_tag(words) #Part of speech tagging for each word
bigList += tagged #Append each iteration result to a list, e.g. ('buy',VB)
nomi = 0
verbi = 0
aggettivi = 0
#Scan the tagged list to get a rough idea of how many of certain elements appear in the sentences
for elem in bigList:
if(elem[1] == "NN" or elem[1] == "NNP" or elem[1] == "NNS" or elem[1] == "NNPS"):
nomi += 1
if(elem[1] == "VB" or elem[1] == "VBD" or elem[1] == "VBG" or elem[1] == "VBN" or elem[1] == "VBP" or elem[1] == "VBZ"):
verbi += 1
if(elem[1] == "JJ" or elem[1] == "JJR" or elem[1] == "JJS"):
aggettivi += 1
print("Nomi: " + str(nomi) + " - Aggettivi: " + str(aggettivi) + " - Verbi: " + str(verbi))
#---Total number of sentences in the text
# ---Parameter nodes: since what gets passed in is a list-like structure and the items iterated over are called nodes
def totFrasi(nodes):
recens = 0
for child in nodes: #For each element take the text (the review)
recens += len(nltk.sent_tokenize(child.text)) #Add up the number of sentences
return recens
#---Total number of tokens in the text
def totToken(nodes):
recens = 0
for child in nodes: #For each element take the text (the review)
recens += len(nltk.word_tokenize(child.text)) #Add up the number of tokens
return recens
#---Average sentence length in terms of tokens
def avgFrasi(nodes):
totWord = 0
numFrasi = 0
for child in nodes: #Take one review at a time
txtSentenced = nltk.sent_tokenize(child.text) #Split the review into sentences and store them in a variable
numFrasi += len(txtSentenced) #Take the number of sentences for that review and add it to the previous count
for token in txtSentenced: #Tokenize each sentence
totWord += len(nltk.word_tokenize(token)) #So split the sentence into tokens and add the length of the resulting list to the previous value
return ("%.3f" %(totWord/numFrasi)) #Return the total number of tokens divided by the total number of sentences in the corpus
#---Average token length in terms of characters
def avgToken(nodes):
totToken = 0
totChar = 0
for child in nodes: #For each review in the corpus take the text
txtTokenized = nltk.word_tokenize(child.text)
totToken += len(txtTokenized)
for chars in txtTokenized: #For each token obtained from that text..
totChar += len(chars) #Add the token length in characters to the previous value
return (totChar/totToken)
#---Average word length in terms of characters
def avgParole(nodes):
totWord = 0
totChar = 0
for child in nodes: #For each review in the corpus take the text
txtTokenized = re.findall(r'\w+',child.text) #Use regular expressions to take only words without punctuation, even though this way acronyms like U.S.A etc. are not captured
totWord += len(txtTokenized) #...but those are acronyms, not words, and a price is not a word either; it does pick up numbers and does not treat abbreviations such as 's, n't etc. as words
for chars in txtTokenized: #For each token obtained from that text..
totChar += len(chars) #Add the token length in characters to the previous value
return ("%.3f" % (totChar/totWord))
#---Vocabulary size
def Vt(nodes):
corpus = "" #Questa funzione ritorna la dimensione del vocabolario sul corpus
for child in nodes:
corpus += child.text #Carico tutte le recensioni pulite senza xml in memoria
return len(dict.fromkeys(nltk.word_tokenize(corpus))) #Il dizionario non ripete le parole quindi dala lista di parole ottengo un dizionario e poi prendo la dimensione
#---Type Token Ratio complessiva del testo
def ttr(nodes):
corpus = ""
for child in nodes:
corpus += child.text
listaWord = nltk.word_tokenize(corpus)
vocaLen = len(dict.fromkeys(listaWord))#Same as Vt, except the corpus token list is stored in a variable so its length can be taken
return ("%.6f" % (vocaLen/len(listaWord)))#Return the TTR of the text rounded to 6 decimal places
#---Type Token Ratio computed in increments of 1000 tokens
def ttrIncr(nodes):
corpus = ""
i=1000
vocaLen = 0
for child in nodes:
corpus += child.text
listaWord = nltk.word_tokenize(corpus)
lista = list()
for i in range(i,len(listaWord),1000):
vocaLen = len(dict.fromkeys(listaWord[0:i]))
lista.append("%.6f" % (vocaLen/len(listaWord[0:i])))
return lista
#Function for the size of the frequency classes
def freqToken(nodes,numWord):
corpus = ""
for child in nodes:
corpus += child.text
listaWord = nltk.word_tokenize(corpus)
count=0
freq = 0
for i in range(0,5000): #For each token in the corpus
for j in range((i+1),5000): #Compare it with the others up to 5000
if(listaWord[i] == listaWord[j]): #If a match is found
count+=1 #Increase the count
if(count>numWord): #If the count is higher than the one being searched for, break the loop
break #E.g. the word occurs 4 times but only words occurring 3 times are wanted
if(count==numWord):
freq+=1 #If instead a word occurring exactly 3 times in the whole text is found, increment freq
count=0
return freq
#Funzione per il numero medio di sostantivi, aggettivi e verbi per frase
def avgNVA(nodes):
bigList = list()
count=0
    for child in nodes:  # take the review held by each node
        for sentences in nltk.sent_tokenize(child.text):  # split every review into sentences
            words = nltk.word_tokenize(sentences)  # split each sentence into tokens
            tagged = nltk.pos_tag(words)  # part-of-speech tag every token
            bigList += tagged  # collect the tagged tokens of all sentences
nomi = 0
verbi = 0
aggettivi = 0
    #Walk the tagged list to get a rough count of each category across the sentences
for elem in bigList:
if(elem[1] == "NN" or elem[1] == "NNP" or elem[1] == "NNS" or elem[1] == "NNPS"):
nomi += 1
if(elem[1] == "VB" or elem[1] == "VBD" or elem[1] == "VBG" or elem[1] == "VBN" or elem[1] == "VBP" or elem[1] == "VBZ"):
verbi += 1
if(elem[1] == "JJ" or elem[1] == "JJR" or elem[1] == "JJS"):
aggettivi += 1
count+=1
return [nomi/count,aggettivi/count,verbi/count]
def densLess(nodes):
bigList = list()
count=0
    for child in nodes:  # take the review held by each node
        for sentences in nltk.sent_tokenize(child.text):  # split every review into sentences
            words = nltk.word_tokenize(sentences)  # split each sentence into tokens
            tagged = nltk.pos_tag(words)  # part-of-speech tag every token
            bigList += tagged  # collect the tagged tokens of all sentences
nomi = 0
verbi = 0
aggettivi = 0
avverbi = 0
punt = 0
    #Walk the tagged list and decide which categories to count
for elem in bigList:
if(elem[1] == "NN" or elem[1] == "NNP" or elem[1] == "NNS" or elem[1] == "NNPS"):
nomi += 1
if(elem[1] == "VB" or elem[1] == "VBD" or elem[1] == "VBG" or elem[1] == "VBN" or elem[1] == "VBP" or elem[1] == "VBZ" or elem[1] == "TO"):
verbi += 1
if(elem[1] == "JJ" or elem[1] == "JJR" or elem[1] == "JJS"):
aggettivi += 1
if(elem[1] == "RB" or elem[1] == "RBR" or elem[1] == "RBS"):
avverbi += 1
if(elem[0] == "." or elem[0] == "," or elem[0] == ";" or elem[0] == ":" or elem[0] == "!" or elem[0] == "?" or elem[0] == " "):
punt+=1
count+=len(tagged)
return (nomi+verbi+aggettivi+avverbi)/(count-punt)
def coolPrint(stringa,dato1,dato2):
    totSpazi = 56-len(stringa)  # spaces to pad after the "operation type" label
for i in range(0,totSpazi):
stringa += " "
stringa +="|"
totSpazi = (30 - len(dato1))
    for i in range(0,int(totSpazi/2)):  # pad with spaces before dato1
stringa += " "
stringa += dato1
totSpazi = (27 - len(dato1))
    for i in range(0,int(totSpazi/2)):  # pad with spaces after dato1, then append dato2
stringa += " "
stringa += ("| "+dato2)
print(stringa)
def main(file1,file2):
if(os.path.isfile(file1) and os.path.isfile(file2)):
        root = ET.fromstring(rawTextFromFile(file1))  # feed the raw XML text to the XML parser so it can be walked as a tree
        x = root.findall(".//div/p/.")  # XPath: find every <p> inside a <div>, which is where the reviews are stored
        #x now holds the reviews of file1 and can be iterated with a for loop
        root = ET.fromstring(rawTextFromFile(file2))
        y = root.findall(".//div/p/.")  # same as for file1: y holds the reviews of file2
print("Informazioni sul file: " + file1)
POSTaggingAnalyze(x)
print("Informazioni sul file: " + file2)
POSTaggingAnalyze(y)
print("\n\n-------------------Tipo di operazione-------------------|--------"+file1+"---------|--------"+file2+"--------")
coolPrint("Numero di recensioni in entrambi i file: ",str(len(x)),str(len(y)))
coolPrint("Numero totale di frasi: ",str(totFrasi(x)),str(totFrasi(y)))
coolPrint("Numero totale di parole: ",str(totToken(x)),str(totToken(y)))
coolPrint("Lunghezza media delle frasi in termini di token: ",str(avgFrasi(x)),str(avgFrasi(y)))
coolPrint("Lunghezza media dei token in termini di caratteri: ",str(avgParole(x)),str(avgParole(y)))
coolPrint("Grandezza del vocabolario: ",str(Vt(x)),str(Vt(y)))
coolPrint("Type Token Ratio complessiva: ",str(ttr(x)),str(ttr(y)))
print("¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯")
print("\n\nType Token Ratio di 1000 in 1000 token: ")
print("Numero di parole-----|-----"+file1+"------|-----"+file2+"-----")
ttrFile1 = ttrIncr(x)
ttrFile2 = ttrIncr(y)
lenttrmax = len(ttrFile1) if(len(ttrFile1)>len(ttrFile2)) else len(ttrFile2)
for i in range(0,lenttrmax):
print(((str(1000*(i+1))+" ")if((1000*(i+1))<10000) else str(1000*(i+1)))+" | "+(str(ttrFile1[i]) if(len(ttrFile1)>i) else " ")+" |"+" "+(str(ttrFile2[i]) if(len(ttrFile2)>i) else " "))
print("\n\nGrandezza delle classi di frequenza V3, V6, V9 su 5000 token: ")
print("-----"+file1+"-----|-----"+file2+"-----")
print("V3: " + str(freqToken(x,3))+" | "+ str(freqToken(y,3)))
print("V6: " + str(freqToken(x,6))+" | "+ str(freqToken(y,6)))
print("V9: " + str(freqToken(x,9))+" | "+ str(freqToken(y,9)))
lisNVAX = avgNVA(x)
lisNVAY = avgNVA(y)
print("\n\n-------------------Tipo di operazione-------------------|--------"+file1+"---------|--------"+file2+"--------")
coolPrint("Numero medio di sostantivi per frase: ",str("%.3f"%lisNVAX[0]),str("%.3f"%lisNVAY[0]))
coolPrint("Numero medio di aggettivi per frase: ",str("%.3f"%lisNVAX[1]),str("%.3f"%lisNVAY[1]))
coolPrint("Numero medio di verbi per frase: ",str("%.3f"%lisNVAX[2]),str("%.3f"%lisNVAY[2]))
coolPrint("Densità lessicale: ",str("%.3f"%densLess(x)),str("%.3f"%densLess(y)))
else:
print("Inserire percorsi validi per i due file corpus!")
main(sys.argv[1],sys.argv[2])
#<NAME>
``` |
{
"source": "jojo2234/StruFun",
"score": 3
} |
#### File: StruFun/Python/disegno.py
```python
import turtle
def drawSquare(t,sz):
for i in range(98):
t.forward(sz)
        t.left(146)  # with this turn angle the shape is a star
        #t.left(146) # circle
me=turtle.Turtle()
drawSquare(me,390)
```
#### File: StruFun/Python/Esercizi.py
```python
def ordina(A):
v0=0
for i in range(len(A)):
if A[i]==0:
v0=(v0+1)
print("v0: " + str(v0))
for j in range(0,v0):
A[j]=0
start = (v0)
stop = (len(A))
for x in range(start,stop):
A[x]=1
return A
vet = [0,1,0,1,0,1,1,1,0,0,0,1,0,1]
print(ordina(vet))
#Ex.2 Write a program that, given an array A containing only the values -1, 35 and 27685, sorts it in linear time without using auxiliary arrays or lists
def ordina2(A):
v1=0
v2=0
for i in range(len(A)):
if A[i]==-1:
v1=v1+1
if A[i]==35:
v2=v2+1
for j in range(v1):
A[j]=-1
for j in range(v1,(v1+v2)):
A[j]=35
for j in range((v2+v1),len(A)):
A[j]=27685
return A
vet = [35,35,-1,27685,35,27685,-1,-1,-1,27685,35,-1,35,35]
print(ordina2(vet))
#Ex.3 Give a program that sorts, in linear time, an array A of integers in the range 0..m-1 with m < len(A)
def scambia(v,i,j):
if i!=j:
v[i],v[j]=v[j],v[i]
def quicksort(v,l,r):  # average O(n*log(n)), worst case O(n^2)
if l>=r:
return
i=l
j=r
    x=v[(l+r)//2]  # pivot element, always chosen as the middle element here
while i<j:
while x>v[i]:
i=i+1
while v[j]>x:
j=j-1
if i<=j:
scambia(v,i,j)
i=i+1
j=j-1
if l<j:
quicksort(v,l,j)
if i<r:
quicksort(v,i,r)
    #When the loop ends, the function terminates
def ordina3(A,m):
    quicksort(A,0,(len(A)-1))  # quicksort is O(n*log(n)), not linear, so this version does not actually meet the linear-time requirement
    return A
#Auxiliary arrays/lists are allowed here, so a counting approach (ordina3_2 below) is what gives true linear time; quicksort is only a fallback
def ordina3_2(A,m):
    v=[0]*m  # counting array with one slot per possible value 0..m-1
    for a in A:  # scan A: each value a is used as an index into v
        v[a]=v[a]+1  # so v[a] ends up holding how many times a occurs in A
    #using the value itself as the address means every repeated value increments the same slot, which works because the values stay in 0..m-1
    k=0  # now two nested loops rewrite A in sorted order from the counts
    for i in range(m):  # for every possible value i
        for j in range(v[i]):  # write i once for each counted occurrence
            A[k]=i
            k=k+1  # k is the write index into A
    return A
vet=[0,3,6,2,7,5,9,4,1,8,7]  # vet is redefined
print(ordina3(vet,10))
print(ordina3_2(vet,10))
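#A quick illustrative check of ordina3_2 (hypothetical input, m=4): the counting pass
#builds v=[1,2,1,1] for [1,3,0,1,2], and the two rewrite loops then emit the sorted array.
print(ordina3_2([1, 3, 0, 1, 2], 4))  # expected: [0, 1, 1, 2, 3]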
#Ex.4 Give a program that, given an array A of numbers, returns the sum of the elements at even positions
def sommaPostoPari(A):
s=0
for i in range(0,len(A)):
if i%2==0:
s=s+A[i]
return s
def sommaPostoPari2(A):
s=0
for i in range(0,len(A),2):
s=s+A[i]
return s
print("sommaPari1: " + str(sommaPostoPari(vet)))
print("sommaPari2: " + str(sommaPostoPari2(vet)))
#Ex.5 Give a program that, given an array A of integers, returns the sum of the even-valued elements
def sommaNumPari(A):
s=0
for i in range(0,len(A)):
if A[i]%2==0:
s=s+A[i]
return s
print(sommaNumPari(vet))
#Ex.6 Give a program that, given a sorted array A of integers, determines in linear time O(n) whether there are two elements whose sum is k
def sommaK(A,k):
    #Two pointers: check the sum of the first and last elements; if it is smaller than k we need bigger numbers, so advance i, otherwise move j back
i=0
j=len(A)-1
while i<j:
if A[i]+A[j] == k:
return True
if k>(A[i]+A[j]):
i+=1
else:#k<(A[i]+A[j])
j-=1
return False
vet=[3,5,8,12,14,17,16,22,24]
print(sommaK(vet,15))
#Ex.7 Write a program that takes two sorted arrays of equal length n containing distinct integers and returns the number of elements that occur in both arrays. The complexity must be linear.
def ordinati(A,B):
i=0
j=0
n=0
while i<len(A) and j<len(B):
if A[i]==B[j]:
n+=1
i+=1
j+=1
elif A[i]<B[j]:
i+=1
else:
j+=1
return n
v1=[3,6,7,8,9,12]
v2=[0,1,2,3,4,9]
print(ordinati(v1,v2))
#Ex.8 Write two programs: encode(A), which takes an array A (elements starting at position 0) and returns an array B (elements starting at position 0),
#and decode(B), which takes the array B and reconstructs an array A (elements starting at position 0)
def encodeMIO(A):
B = A[:]
return B
def encode(A):
    B=[0]*A[-1]  # B has one counter per value from 1 up to the largest element of A
    for x in A:
        #x is the current element of A
        B[x-1]=B[x-1]+1  # as in Ex.3, the value is used as an index and its occurrences are counted
    return B
#Decoding B therefore rebuilds a sorted copy of A in linear time, without needing A itself.
def decode(B):
m=0
    for x in B:  # summing the counters in B gives back the original length of A
        m=m+x
    A=[0]*m  # e.g. if B = [1,1,3,2,1] then m = 8, so A gets length 8
    k=0
    for i in range(len(B)):  # walk B and write the value i+1 once for each counted occurrence
        for j in range(B[i]):
            A[k]=i+1
            k=k+1
    return A
#A=[5,2,3,5,2,2,7,8]  # with encode, position x-1 acts like a table slot where the occurrences of value x are accumulated
#Untouched positions stay at zero, which is why decode can rebuild an array of the original length
A=[1,1,1,2,3,3,5,6,7]  # note: the round trip reproduces A exactly only when A is sorted; with an unsorted A the decoded array is its sorted version
#A=[2,6,3,7,9,9,9,9,7,3]  # as in Ex.3, the input effectively needs to be sorted even though the exercise does not say so explicitly
print(A)
B=encode(A)
print(B)
C=decode(B)
print(C)
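#Round-trip sketch for encode/decode (assumes the input is already sorted and positive,
#as the notes above point out):
sample = [1, 1, 2, 4, 4, 4]
assert decode(encode(sample)) == sample  # decode(encode(A)) rebuilds the sorted A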
#Ex.9 Write a program solomon(m) that returns an array A of length m+1 containing the first m elements of the Golomb (Solomon Golomb) sequence, starting at position 1
#The generation rule was not obvious to me, so this is the reference solution:
def solomon(m):
A=[0]*(m+1)
A[0]=''
A[1]=1
A[2]=2
h=2
for i in range(2,m):
for j in range(A[i]):
if h>m:
return A
A[h]=i
h=h+1
    #From position 2 onwards the value i is written A[i] times, advancing one slot at a time with the index h
    #So the larger the numbers get, the more slowly the sequence grows, because each value is repeated; e.g. with 1560 iterations it reaches 113, with 5560 it reaches 248
print("Solomon" + str(solomon(10)))
def nodo(x,s,d):
return [x,s,d]
def foglia(x):
return [x,None,None]
def vuoto():
return None
def radice(A):
return A[0]
def sinistro(A):
return A[1]
def destro(A):
return A[2]
def isVuoto(A):
return A is None
def isFoglia(A):
return isVuoto(sinistro(A)) and isVuoto(destro(A))
def ricerca(x,A):
if isVuoto(A):
return False
if x==radice(A):
return True
if x<radice(A):
return ricerca(x,sinistro(A))
return ricerca(x,destro(A))
def inserzione(x,A):
if isVuoto(A):
        return foglia(x)  # fixed typo (was "forglia")
    if x<radice(A):
        return nodo(radice(A), inserzione(x,sinistro(A)), destro(A))  # argument order fixed to match nodo(value, left, right)
    return nodo(radice(A), sinistro(A), inserzione(x,destro(A)))
#Only for a binary search tree
def massimo(A):
    #where right children are larger than left children,
    #so the maximum is the first node without a right child found by
    #always descending to the right from the root
if isVuoto(destro(A)):
return radice(A)
return massimo(destro(A))
#Everything from here on applies only to HEAP trees (the root is larger than both of its children and the last level is packed to the left)
def padre(i):
return (i-1)//2
def primofiglio(i):
return 2*(i+1)-1
def heaptest(h,i):
if i==0:
return True
return h[i] <= h[padre(i)]
def heapinser(h,x):  # append the value in the first free position; if it is smaller than its parent the tree is still a heap, otherwise swap it with its parent repeatedly, moving up towards the root
h.append(x)
i=len(h)-1
while not heaptest(h,i):
scambia(h,i,padre(i))
i=padre(i)
def heapricostr(h,i,j):  # restore the heap property for the subtree rooted at i (indices below j)
    #note: the recursion always moves down to a child index, so it terminates
    f1=primofiglio(i)  # fixed the name: the helper above is defined as primofiglio
f2=f1+1
if f1<j:
k=f1
if f2<j:
if h[f2]>h[f1]:
k=f2
if not heaptest(h,k):
scambia(h,k,padre(k))
heapricostr(h,k,j)
def heapcostr(h):  # reorganize an already allocated array of n elements into a heap
for i in range(len(h)-1, -1, -1):
heapricostr(h,i,len(h))
def heapmassimo(h):  # extract and return the maximum of a heap
    massimo, ultimo = h[0], h.pop()
    if h:  # guard for the single-element case
        h[0] = ultimo
        heapricostr(h,0,len(h))
    return massimo
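#Minimal usage sketch of the heap helpers above (hypothetical values):
demo_heap = []
for value in [5, 1, 9, 3]:
    heapinser(demo_heap, value)
print(heapmassimo(demo_heap))  # extracts the maximum, here 9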
def stampaAlbero(A):
print(" ")
stampaAlbero1(A," ")
print(" ")
def stampaAlbero1(A,s):
if not isVuoto(A):
stampaAlbero1(destro(A),s+" ")
print(s,radice(A))
stampaAlbero1(sinistro(A),s+" ")
#Ex.10 Write a recursive program that, given a binary tree with numeric values at the nodes, computes the sum of the non-leaf (internal) elements
def sommaNodiC(A):
if isFoglia(A) or isVuoto(A):
return 0
return radice(A)+sommaNodiC(sinistro(A))+sommaNodiC(destro(A))
D=nodo(8,foglia(9),foglia(2))
B=nodo(7,foglia(5),D)
C=nodo(12,foglia(6),foglia(4))
A=nodo(32,B,C)
#Tree:
#      32            level 0 (even)
#    7    12         level 1 (odd)
#  5  8  6  4        level 2 (even)
#    9 2             children of 8
stampaAlbero(A)
print("Somma nodi non foglia: " + str(sommaNodiC(A)))
#Ex.11 Write a recursive program that, given a binary tree with numeric values at the nodes, computes the sum of the elements at level k:
def sommaLivello(A,k):
    #when k reaches zero we are at level k, so the values found there get summed
    if isVuoto(A):
        return 0
    if k==0:  # the root of the current subtree is returned only when the caller has counted k down to zero
        #print("ROOT: " + str(radice(A)))
        return radice(A)  # overall this sums the numbers that sit on the same level of the tree
return sommaLivello(sinistro(A),k-1)+sommaLivello(destro(A),k-1)
print("sommaLivello: " + str(sommaLivello(A,2)))
#Not an exercise:
def visitaAnticipata(A):
#radice,sinistro,destro
if isVuoto(A):
return 0
print(radice(A))
visitaAnticipata(sinistro(A))
visitaAnticipata(destro(A))
#print(visitaAnticipata(A))
def visitaSimmetrica(A):
if isVuoto(A):
return None
visitaSimmetrica(sinistro(A))
print("-"+str(radice(A))+"-")
visitaSimmetrica(destro(A))
#print(visitaSimmetrica(A))
#Ex.12 Write a recursive program that, given a binary tree with numeric values at the nodes, computes the sum of the elements at even distance from the root:
def sommaP(A):
if isVuoto(A):
return 0
#print("Radice(A): " +str(radice(A)))
return radice(A)+sommaD(sinistro(A))+sommaD(destro(A))
def sommaD(A):
if isVuoto(A):
return 0
return sommaP(sinistro(A))+sommaP(destro(A))
print("sommaDistanzaPari: " + str(sommaP(A)))
print("sommaDistanzaDispari: " + str(sommaD(A)))
#Ex.14 Write a recursive program that, given a binary tree with positive numeric values at the nodes, computes the maximum of the elements at even distance from the root:
def maxP(A):
if isVuoto(A):
return 0
return max(radice(A),maxD(sinistro(A)), maxD(destro(A)))
def maxD(A):
if isVuoto(A):
return 0
return max(maxP(sinistro(A)),maxP(destro(A)))
print("Massimo numero a distanza pari dalla radice: " + str(maxP(A)))
#Ex.13 Write a recursive program that, given a binary tree with positive numeric values at the nodes, computes the maximum of the elements at odd distance from the root.
print("Massimo numero a distanza dispari dalla radice: " + str(maxD(A)))
```
#### File: StruFun/Python/ordinamento.py
```python
def scambia(v,i,j):  # swap helper used by the sorts below (it was not defined anywhere in this file)
    v[i],v[j]=v[j],v[i]
def bolle(v):  # bubble sort, O(n^2)
    n=len(v)
    for i in range(n):
        for j in range(1,n-i):
            if v[j-1]>v[j]:
                scambia(v,j,j-1)
def selezione(v):  # selection sort, O(n^2)
    n=len(v)
    for i in range(n-1):
min=v[i]
k=i
for j in range(i+1,n):
if v[j]<min:
min=v[j]
k=j
        if k!=i:  # v[k] is smaller than v[i], so move it into place
scambia(v,i,k)
def merge(v,l,c,r):
    temp=[0]*len(v)  # scratch buffer for the merge
    i=k=l  # fixed: the indices must start at l, not 1
    j=c+1
while i<=c and j<=r:
if v[i]<=v[j]:
temp[k]=v[i]
i=i+1
else:
temp[k]=v[j]
j=j+1
k=k+1
while i<=c:
temp[k]=v[i]
i=i+1
k=k+1
while j<=r:
temp[k]=v[j]
j=j+1
k=k+1
for k in range(l,r+1):
v[k]=temp[k]
def mergesort(v,l,r):  # average complexity O(n*log(n))
if l>=r:
return
mergesort(v,l,(l+r)//2)
mergesort(v,(l+r)//2+1,r)
merge(v,l,(l+r)//2,r)
def quicksort(v,l,r):  # average O(n*log(n)), worst case O(n^2)
if l>=r:
return
i=l
j=r
    x=v[(l+r)//2]  # pivot element, always chosen as the middle element here
while i<j:
while x>v[i]:
i=i+1
while v[j]>x:
j=j-1
if i<=j:
scambia(v,i,j)
i=i+1
j=j-1
if l<j:
quicksort(v,l,j)
if i<r:
quicksort(v,i,r)
    #When the loop ends, the function terminates
def heapsort(v):  # relies on heapcostr/heapricostr as defined with the heap exercises
    n=len(v)
    heapcostr(v)
    for i in range(1,n):
        scambia(v,0,n-i)
        heapricostr(v,0,n-i)  # restore the heap over the unsorted prefix only
```
#### File: StruFun/Python/sommaArray.py
```python
def somma1(v,i,j):  # i = first index, j = last index of the slice to sum
print("somma1")
if i==j:
return v[i]
else:
return somma1(v,i,(i+j)//2)+somma1(v,(i+j)//2+1,j)
#Divide and conquer: each element is reached exactly once, so the total work is linear, O(n)
def somma2(v,i,j):
print("somma2")
if i==j:
        return v[i]  # this base case runs n times in total, so this version is linear like the one below
return v[i]+somma2(v,i+1,j)
def somma3(v):
print("somma3: " + str(len(v)))
s=0
for i in range(len(v)):
s=s+v[i]
return s
A=[0,1,2,3,4,5,6,7,8,9]
print(somma1(A,0,len(A)-1), somma2(A,0,len(A)-1), somma3(A))
```
#### File: StruFun/Python/tartaglia.py
```python
def generaTriangolo(stop):
    #stop is the last row to generate
    v = [0]*(stop+1)  # working row of the triangle
    v[0] = 1
    for i in range(0,stop):  # for each row: if i is 0 just print 1, otherwise copy the row, rebuild it from position 1 to i, and print the old values as they are consumed
if(i==0):
v[i+1] = 1
print(v[i])
else:
#print("Sono temp: ")
            tempVet = v[:]  # v[:] makes a real copy; tempVet = v would only create an alias, so modifying v would also modify tempVet
#print(tempVet)
c=0
            for j in range(1,(i+1)):  # v[j] becomes tempVet[j-1] + tempVet[j], i.e. the sum of the two values above it in the previous row,
                #and v[i+1] is then set to 1 as in Pascal's triangle; tempVet[c] is printed along the way, which is messy but works
if(tempVet[c]!=0):
print(tempVet[c], sep=' ', end=" ", flush=True)
c = c+1
#print("TEMPdiJ_PRIMA: " + str(tempVet[j]))
                v[j] = tempVet[j-1] + tempVet[j]  # works only because tempVet is a real copy; with an alias the values would change under us
#print("TEMPdiJ_DOPO: " + str(tempVet[j]))
#print("v[j]["+str(j)+"] prende " + "tempVet[j-1]["+str(j-1)+"]=" + str(tempVet[j-1]) + " + tempVet[j]["+str(j)+"]="+str(tempVet[j]) + " => v[j]: " + str(v[j]))
v[i+1] = 1
print(tempVet[c], sep=' ', end=" ", flush=True)
c=0
#print("Sono V: ")
#print(v)
print()
print(generaTriangolo(15))
#def tartaglia(n,k):
# if n<1 or k<1:
# return 1;
# return 7
#print (tartaglia(1,3))
#In Pascal's triangle the odd-numbered rows (1st, 3rd, 5th, ...) have a single central value.
#That central value can be obtained by a rule that does not require computing the whole triangle.
#The central values are 1, 2, 6, 20, 70, ...
#Since n is the row and k the position, the values near a given entry could be derived starting from the closest central value,
#without building the whole triangle: e.g. for n=4, move to the 5th (odd) row, whose central value is 6 (row 1 has 1, row 3 has 2, row 5 has 6).
#A formula for the index of the central value inside the row vector is (n+1)//2 if (n+1) is odd, otherwise n//2,
#although this alone does not give every central value. The triangle also contains the prime numbers.
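#A short sketch of the closed form the notes above are reaching for: entry k of row n of
#Pascal's triangle is the binomial coefficient C(n, k), so the central values
#1, 2, 6, 20, 70, ... are simply C(2m, m) and need no full triangle (math.comb needs Python 3.8+).
import math
print([math.comb(2*m, m) for m in range(5)])  # [1, 2, 6, 20, 70]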
``` |
{
"source": "jojo23333/mcan-vqa",
"score": 2
} |
#### File: core/data/data_utils.py
```python
from core.data.ans_punct import prep_ans
import numpy as np
import en_vectors_web_lg, random, re, json
def shuffle_list(ans_list):
random.shuffle(ans_list)
# ------------------------------
# ---- Initialization Utils ----
# ------------------------------
def img_feat_path_load(path_list):
iid_to_path = {}
for ix, path in enumerate(path_list):
iid = str(int(path.split('/')[-1].split('_')[-1].split('.')[0]))
iid_to_path[iid] = path
return iid_to_path
def img_feat_load(path_list):
iid_to_feat = {}
for ix, path in enumerate(path_list):
iid = str(int(path.split('/')[-1].split('_')[-1].split('.')[0]))
img_feat = np.load(path)
img_feat_x = img_feat['x'].transpose((1, 0))
iid_to_feat[iid] = img_feat_x
print('\rPre-Loading: [{} | {}] '.format(ix, path_list.__len__()), end=' ')
return iid_to_feat
def ques_load(ques_list):
qid_to_ques = {}
for ques in ques_list:
qid = str(ques['question_id'])
qid_to_ques[qid] = ques
return qid_to_ques
def tokenize(stat_ques_list, use_glove):
token_to_ix = {
'PAD': 0,
'UNK': 1,
}
spacy_tool = None
pretrained_emb = []
if use_glove:
spacy_tool = en_vectors_web_lg.load()
pretrained_emb.append(spacy_tool('PAD').vector)
pretrained_emb.append(spacy_tool('UNK').vector)
for ques in stat_ques_list:
if isinstance(ques, str):
words = ques
else:
words = ques['question']
words = re.sub(
r"([.,'!?\"()*#:;])",
'',
words.lower()
).replace('-', ' ').replace('/', ' ').split()
for word in words:
if word not in token_to_ix:
token_to_ix[word] = len(token_to_ix)
if use_glove:
pretrained_emb.append(spacy_tool(word).vector)
pretrained_emb = np.array(pretrained_emb)
return token_to_ix, pretrained_emb
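# Hypothetical usage sketch for tokenize(); with use_glove=False only the vocabulary is
# built, so no spaCy vectors are needed. Question dicts follow the VQA layout:
# token_to_ix, _ = tokenize([{'question': 'What color is the dog?'}], use_glove=False)
# -> {'PAD': 0, 'UNK': 1, 'what': 2, 'color': 3, 'is': 4, 'the': 5, 'dog': 6}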
# def ans_stat(stat_ans_list, ans_freq):
# ans_to_ix = {}
# ix_to_ans = {}
# ans_freq_dict = {}
#
# for ans in stat_ans_list:
# ans_proc = prep_ans(ans['multiple_choice_answer'])
# if ans_proc not in ans_freq_dict:
# ans_freq_dict[ans_proc] = 1
# else:
# ans_freq_dict[ans_proc] += 1
#
# ans_freq_filter = ans_freq_dict.copy()
# for ans in ans_freq_dict:
# if ans_freq_dict[ans] <= ans_freq:
# ans_freq_filter.pop(ans)
#
# for ans in ans_freq_filter:
# ix_to_ans[ans_to_ix.__len__()] = ans
# ans_to_ix[ans] = ans_to_ix.__len__()
#
# return ans_to_ix, ix_to_ans
def ans_stat(json_file):
ans_to_ix, ix_to_ans = json.load(open(json_file, 'r'))
return ans_to_ix, ix_to_ans
# ------------------------------------
# ---- Real-Time Processing Utils ----
# ------------------------------------
def proc_img_feat(img_feat, img_feat_pad_size):
if img_feat.shape[0] > img_feat_pad_size:
img_feat = img_feat[:img_feat_pad_size]
img_feat = np.pad(
img_feat,
((0, img_feat_pad_size - img_feat.shape[0]), (0, 0)),
mode='constant',
constant_values=0
)
return img_feat
def proc_ques(ques, token_to_ix, max_token):
ques_ix = np.zeros(max_token, np.int64)
if isinstance(ques, str):
words = ques
else:
words = ques['question']
words = re.sub(
r"([.,'!?\"()*#:;])",
'',
words.lower()
).replace('-', ' ').replace('/', ' ').split()
for ix, word in enumerate(words):
if word in token_to_ix:
ques_ix[ix] = token_to_ix[word]
else:
ques_ix[ix] = token_to_ix['UNK']
if ix + 1 == max_token:
break
return ques_ix
def get_score(occur):
if occur == 0:
return .0
elif occur == 1:
return .3
elif occur == 2:
return .6
elif occur == 3:
return .9
else:
return 1.
def proc_ans(ans, ans_to_ix):
ans_score = np.zeros(ans_to_ix.__len__(), np.float32)
ans_prob_dict = {}
for ans_ in ans['answers']:
ans_proc = prep_ans(ans_['answer'])
if ans_proc not in ans_prob_dict:
ans_prob_dict[ans_proc] = 1
else:
ans_prob_dict[ans_proc] += 1
for ans_ in ans_prob_dict:
if ans_ in ans_to_ix:
ans_score[ans_to_ix[ans_]] = get_score(ans_prob_dict[ans_])
return ans_score
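# Illustrative note on the soft scores above (hypothetical annotations): if ten annotators
# answered 'yes' 7 times, 'no' twice and 'maybe' once, proc_ans assigns 1.0 to 'yes',
# 0.6 to 'no' and 0.3 to 'maybe' (get_score saturates at 3 or more occurrences),
# provided those answers exist in ans_to_ix.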
```
#### File: mcan-vqa/core/utils.py
```python
import copy
import logging
import re
import torch
import json
import numpy as np
from fvcore.common.checkpoint import (
get_missing_parameters_message,
get_unexpected_parameters_message,
)
from core.data.data_utils import ans_stat
class HierarchicClassification(object):
def __init__(self, __C):
self.__C = __C
self.loss_type = __C.LOSS_TYPE
self.ans_to_ix, self.ix_to_ans = ans_stat('core/data/answer_dict.json')
self.init_abs_tree()
self.init_tree_matrix()
self.layers = [x.cuda() for x in self.layers]
self.tree_matrix = self.tree_matrix.cuda()
def get_loss(self, pred, pred_abs, gt_ans, gt_abs, mask_ans, mask_abs, loss_fn):
'''
abs_group batch_size * N list
loss_fn should use mean reduction
'''
if self.__C.USE_ABS_MASKED_PRED:
pred, _ = self.get_abs_masked_pred(pred, pred_abs)
if self.loss_type == "mcan":
loss_ans = loss_fn(pred, gt_ans)
return loss_ans, torch.tensor(0.)
elif self.loss_type == "abs_bce":
s_pred_ans = torch.masked_select(pred, mask_ans)
s_gt_ans = torch.masked_select(gt_ans, mask_ans)
loss_ans = loss_fn(s_pred_ans, s_gt_ans)
s_pred_abs = torch.masked_select(pred_abs, mask_abs)
s_gt_abs = torch.masked_select(gt_abs, mask_abs)
loss_abs = loss_fn(s_pred_abs, s_gt_abs)
return loss_ans, loss_abs
elif self.loss_type == "all_bce":
loss_ans = loss_fn(pred, gt_ans)
loss_abs = loss_fn(pred_abs, gt_abs)
return loss_ans, loss_abs
def inference_abs(self, pred_abs, gt_abs):
prediction = pred_abs > 0.5
p_all = prediction.sum()
gt_all = gt_abs.sum()
tp = torch.masked_select(prediction, gt_abs).sum()
precision = tp / p_all
recall = tp / gt_all
return precision, recall
def get_abs_masked_pred(self, pred, pred_abs):
'''
tree: num_abs, num_pred
layers: list of list like [[1,2],[3,4,5,6]]
'''
# abs_masks: (batch, num_abs, num_pred)
abs_masks = pred_abs.unsqueeze(-1) * self.tree_matrix.unsqueeze(0)
#print(abs_masks.shape)
abs_masks_by_layer = []
for layer in self.layers:
layer_cnt = self.tree_matrix[layer, :].sum(dim=0, keepdim=True)
assert (layer_cnt > 0).all(), "layer not covering all leafs"
abs_masks_by_layer.append(
abs_masks[:, layer, :].sum(dim=1) / layer_cnt
)
# for multi-layer tree structure
# do production along the depth direction
abs_masks_by_layer = torch.stack(abs_masks_by_layer, dim=1)
assert (abs_masks_by_layer <= 1.0).all(), "mask exceed 1.0!"
# abs_maks: (batch, num_pred)
abs_mask = torch.prod(abs_masks_by_layer, dim=1)
masked_pred = pred * abs_mask
return masked_pred, abs_mask
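    # Rough numeric sketch (assumed toy shapes): with one abstraction layer of two nodes
    # over three leaf answers, tree_matrix = [[1, 1, 0], [0, 0, 1]] and
    # pred_abs = [0.9, 0.1] give abs_mask = [0.9, 0.9, 0.1], i.e. every leaf answer
    # inherits the probability of its abstract parent before scaling the leaf predictions.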
def init_tree_matrix(self):
'''
return (number_of_abs_node, number_of_leaf)
'''
tree_matrix = np.zeros((self.abs_to_ix.__len__(), self.ans_to_ix.__len__()), dtype=np.float32)
for ans_ in self.ans_to_ix.keys():
ans_id = self.ans_to_ix[ans_]
abspath = self.ans_to_abspath[ans_]
for abs_ in abspath[1:]:
abs_id = self.abs_to_ix[abs_]
tree_matrix[abs_id, ans_id] = 1.0
self.tree_matrix = torch.from_numpy(tree_matrix)
return tree_matrix
def init_abs_tree(self):
with open('core/data/answer_dict_hierarchical.json', 'r') as f:
data = json.load(f)
# edge link of the abs tree
self.abs_tree = data['tree_dict']
# list of id from abs to ix
self.abs_to_ix = data['abs_dict']
        # given an answer, list all abstraction nodes on the path to it; the first component is always the root ('_rt')
self.ans_to_abspath = {x:[] for x in self.ans_to_ix.keys()}
layers = []
def dfs_search(current_node, path, tree, d):
            # if not a leaf node yet
if current_node in tree:
print(f"Processing node: {current_node}:{path}")
if d > 0:
if len(layers) < d:
layers.append([current_node])
else:
layers[d-1].append(current_node)
for child in tree[current_node]:
dfs_search(child, path+[current_node], tree, d+1)
else:
for x in path:
if x not in self.ans_to_abspath[current_node]:
self.ans_to_abspath[current_node].append(x)
dfs_search('_rt', [], self.abs_tree, 0)
self.layers = [
torch.tensor([self.abs_to_ix[abs_] for abs_ in abs_nodes])
for abs_nodes in layers
]
print("Processing of tree finished")
# losses_ans = []
# losses_abs = []
# batch_size, num_class = pred.shape
# print(pred.shape)
# print(loss_groups)
# assert batch_size == len(loss_groups)
# for i in range(batch_size):
# loss_groups = []
# # loss for abstraction nodes
# for g in loss_groups[i][:-1]:
# loss_groups.append(loss_fn(pred_abs[i, g], gt_abs[i, g]))
# loss_abs = torch.mean(torch.stack(loss_groups))
# losses_abs.append(loss_abs)
# # loss for leaf nodes
# ans_group = loss_groups[i][-1]
# loss_ans = loss_fn(pred[i, ans_group], gt_ans[i, ans_group])
# losses_ans.append(loss_ans)
# loss_ans = torch.mean(torch.stack(losses_ans))
# loss_abs = torch.mean(torch.stack(losses_abs))
# return loss_ans, loss_abs
# Note: the current matching is not symmetric;
# it assumes model_state_dict will have the longer names.
def align_and_update_state_dicts(model_state_dict, ckpt_state_dict):
"""
Match names between the two state-dict, and update the values of model_state_dict in-place with
copies of the matched tensor in ckpt_state_dict.
Strategy: suppose that the models that we will create will have prefixes appended
to each of its keys, for example due to an extra level of nesting that the original
pre-trained weights from ImageNet won't contain. For example, model.state_dict()
might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
res2.conv1.weight. We thus want to match both parameters together.
For that, we look for each model weight, look among all loaded keys if there is one
that is a suffix of the current weight name, and use it if that's the case.
If multiple matches exist, take the one with longest size
of the corresponding name. For example, for the same model as before, the pretrained
weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
we want to match backbone[0].body.conv1.weight to conv1.weight, and
backbone[0].body.res2.conv1.weight to res2.conv1.weight.
"""
model_keys = sorted(model_state_dict.keys())
original_keys = {x: x for x in ckpt_state_dict.keys()}
ckpt_keys = sorted(ckpt_state_dict.keys())
def match(a, b):
# Matched ckpt_key should be a complete (starts with '.') suffix.
# For example, roi_heads.mesh_head.whatever_conv1 does not match conv1,
# but matches whatever_conv1 or mesh_head.whatever_conv1.
return a == b or a.endswith("." + b)
# get a matrix of string matches, where each (i, j) entry correspond to the size of the
# ckpt_key string, if it matches
match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys]
match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys))
# use the matched one with longest size in case of multiple matches
max_match_size, idxs = match_matrix.max(1)
# remove indices that correspond to no-match
idxs[max_match_size == 0] = -1
# used for logging
max_len_model = max(len(key) for key in model_keys) if model_keys else 1
max_len_ckpt = max(len(key) for key in ckpt_keys) if ckpt_keys else 1
log_str_template = "{: <{}} loaded from {: <{}} of shape {}"
# logger = logging.getLogger(__name__)
# matched_pairs (matched checkpoint key --> matched model key)
matched_keys = {}
for idx_model, idx_ckpt in enumerate(idxs.tolist()):
if idx_ckpt == -1:
continue
key_model = model_keys[idx_model]
key_ckpt = ckpt_keys[idx_ckpt]
value_ckpt = ckpt_state_dict[key_ckpt]
shape_in_model = model_state_dict[key_model].shape
if shape_in_model != value_ckpt.shape:
print(
"Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format(
key_ckpt, value_ckpt.shape, key_model, shape_in_model
)
)
print(
"{} will not be loaded. Please double check and see if this is desired.".format(
key_ckpt
)
)
continue
model_state_dict[key_model] = value_ckpt.clone()
if key_ckpt in matched_keys: # already added to matched_keys
print(
"Ambiguity found for {} in checkpoint!"
"It matches at least two keys in the model ({} and {}).".format(
key_ckpt, key_model, matched_keys[key_ckpt]
)
)
raise ValueError("Cannot match one checkpoint key to multiple keys in the model.")
matched_keys[key_ckpt] = key_model
print(
log_str_template.format(
key_model,
max_len_model,
original_keys[key_ckpt],
max_len_ckpt,
tuple(shape_in_model),
)
)
matched_model_keys = matched_keys.values()
matched_ckpt_keys = matched_keys.keys()
# print warnings about unmatched keys on both side
unmatched_model_keys = [k for k in model_keys if k not in matched_model_keys]
if len(unmatched_model_keys):
print(get_missing_parameters_message(unmatched_model_keys))
unmatched_ckpt_keys = [k for k in ckpt_keys if k not in matched_ckpt_keys]
if len(unmatched_ckpt_keys):
print(
get_unexpected_parameters_message(original_keys[x] for x in unmatched_ckpt_keys)
)
class TrainLossMeter(object):
def __init__(self):
self.total_steps = 0
self.iter_steps = 0
def init_meter(self, loss_keys):
self.loss_iters = {x:0 for x in loss_keys}
self.loss_sum = {x:0 for x in loss_keys}
def update_iter(self, d):
losses = d#d["losses"]
if self.total_steps == 0:
self.init_meter(losses.keys())
for x in losses:
self.loss_iters[x] += losses[x]
self.loss_sum[x] += losses[x]
self.total_steps += 1
self.iter_steps += 1
def log_iter(self):
loss_str = ""
for x in self.loss_iters:
loss_str = loss_str + f"{x}: {self.loss_iters[x]/self.iter_steps} "
self.loss_iters = {x:0 for x in self.loss_iters}
self.iter_steps = 0
return loss_str
def log_epoch(self):
loss_str = ""
for x in self.loss_sum:
loss_str = loss_str + f"{x}: {self.loss_sum[x]/self.total_steps} "
self.total_steps = 0
self.iter_steps = 0
return loss_str
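# Hypothetical usage sketch: one meter per training run.
# meter = TrainLossMeter()
# meter.update_iter({'loss_ans': 0.7, 'loss_abs': 0.2})
# print(meter.log_iter())   # averages since the last log_iter call, then resets the interval
# print(meter.log_epoch())  # averages over the whole epoch, then resets the step counters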
def get_param_group_finetune(model, base_lr=1e-4):
parameters_classifier = []
parameters_backbone = []
for module_param_name, value in model.named_parameters():
if not value.requires_grad:
continue
if 'classifier' not in module_param_name:
parameters_backbone.append(value)
else:
parameters_classifier.append(value)
return [{"params": parameters_backbone, "lr": base_lr*0.1},
{"params": parameters_classifier, "lr": base_lr}], [0.1, 1.]
``` |
{
"source": "JoJo2nd/hart",
"score": 2
} |
#### File: data/builder/update_prerequisites.py
```python
import argparse
import sys
import json
import os
from os.path import splitext, join, realpath
import base64
import uuid
from subprocess import Popen, PIPE
parser = argparse.ArgumentParser(prog='generate asset prerequisites for material & material setup assets',description='')
parser.add_argument('-d','--directory', help='*.fbs source directory. Recursive.')
def fbs_uuid_to_hex_str(u):
#u['highword3']: ((b[15]<<24) | (b[14]<<16) | (b[13]<<8) | (b[12])),
#u['highword2']: ((b[11]<<24) | (b[10]<<16) | (b[ 9]<<8) | (b[ 8])),
#u['highword1']: ((b[ 7]<<24) | (b[ 6]<<16) | (b[ 5]<<8) | (b[ 4])),
#u['lowword' ]: ((b[ 3]<<24) | (b[ 2]<<16) | (b[ 1]<<8) | (b[ 0]))
b = '{'
b += '%02x'%((u['lowword' ] & 0x000000FF));
b += '%02x'%((u['lowword' ] & 0x0000FF00) >> 8);
b += '%02x'%((u['lowword' ] & 0x00FF0000) >> 16);
b += '%02x'%((u['lowword' ] & 0xFF000000) >> 24);
b += '-'
b += '%02x'%((u['highword1'] & 0x000000FF));
b += '%02x'%((u['highword1'] & 0x0000FF00) >> 8);
b += '-'
b += '%02x'%((u['highword1'] & 0x00FF0000) >> 16);
b += '%02x'%((u['highword1'] & 0xFF000000) >> 24);
b += '-'
b += '%02x'%((u['highword2'] & 0x000000FF));
b += '%02x'%((u['highword2'] & 0x0000FF00) >> 8);
b += '-'
b += '%02x'%((u['highword2'] & 0x00FF0000) >> 16);
b += '%02x'%((u['highword2'] & 0xFF000000) >> 24);
b += '%02x'%((u['highword3'] & 0x000000FF));
b += '%02x'%((u['highword3'] & 0x0000FF00) >> 8);
b += '%02x'%((u['highword3'] & 0x00FF0000) >> 16);
b += '%02x'%((u['highword3'] & 0xFF000000) >> 24);
b += '}'
return b
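# Hypothetical example of the packing above, assuming the .fbs JSON stores the uuid
# as four little-endian 32-bit words:
# fbs_uuid_to_hex_str({'lowword': 0x04030201, 'highword1': 0x08070605,
#                      'highword2': 0x0C0B0A09, 'highword3': 0x100F0E0D})
# -> '{01020304-0506-0708-090a-0b0c0d0e0f10}'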
uuid_properties = [
['player', 'test_uuid'],
['player', 'inner', 'test_uuid'],
['player', 'inner', 'another_test_uuid'],
]
def parseEntityPrerequisites(entity_props):
prerequisites = []
for links in uuid_properties:
found = True
d = entity_props
for p in links:
if p in d:
d = d[p]
else:
found = False
if found:
prerequisites += [fbs_uuid_to_hex_str(d)]
return list(set(prerequisites))#list(set(x)) to make x unique
def main():
args = parser.parse_args()
for root, dirs, files in os.walk(args.directory):
for file in [f for f in files if (splitext(f)[1] == '.asset')]:
update=False
with open(realpath(join(root, file)), 'rb') as fin:
asset = json.load(fin)
if asset['type'] == 'material':
with open(realpath(join(root, asset['processoptions']['input']))) as f:
obj_json = json.load(f)
prerequisites = []
for t in obj_json['techniques']:
for p in t['passes']:
prerequisites += [fbs_uuid_to_hex_str(p['vertex'])]
prerequisites += [fbs_uuid_to_hex_str(p['pixel'])]
asset['prerequisites'] = prerequisites
update = True
elif asset['type'] == 'materialsetup':
with open(realpath(join(root, asset['processoptions']['input']))) as f:
obj_json = json.load(f)
prerequisites = [fbs_uuid_to_hex_str(obj_json['material'])]
asset['prerequisites'] = prerequisites
update = True
elif asset['type'] == 'entity':
with open(realpath(join(root, asset['processoptions']['input']))) as f:
obj_json = json.load(f)
prerequisites = [fbs_uuid_to_hex_str(obj_json['entityTemplate'])]
prerequisites += parseEntityPrerequisites(obj_json['properties'])
asset['prerequisites'] = prerequisites
update = True
elif asset['type'] == 'entitytemplate':
with open(realpath(join(root, asset['processoptions']['input']))) as f:
obj_json = json.load(f)
prerequisites = parseEntityPrerequisites(obj_json)
asset['prerequisites'] = prerequisites
update = True
# Update the asset meta data
if update:
with open(realpath(join(root, file)), 'wb') as fin:
fin.write(json.dumps(asset, indent=2, sort_keys=True))
if __name__ == '__main__':
main()
```
#### File: hart/scripts/binary_asset_package.py
```python
import argparse
import zipfile
import os
import hashlib
import json
from os.path import join, realpath, splitext, relpath
import dropbox
import time
# BUF_SIZE is totally arbitrary, change for your app!
BUF_SIZE = 65536 # lets read stuff in 64kb chunks!
parser = argparse.ArgumentParser(prog='binary asset packager',description='Script to package binary data from a git repro into a deploy-able zip')
parser.add_argument('-d','--directory', action='append', help='Source directory to search. Recursive.')
parser.add_argument('-r','--root', help='Directory to make paths relative too.')
parser.add_argument('--preview', action='store_true')
asset_types = [
'.exe',
'.lib',
'.dll',
'.png',
'.tga',
'.jpg',
'.bmp',
'.bin',
'.qm',
'.lpf',
'.zip',
'.piskel',
'.7z',
]
def getFileSHA1(filepath):
sha1 = hashlib.sha1()
with open(filepath, 'rb') as f:
while True:
data = f.read(BUF_SIZE)
if not data:
break
sha1.update(data)
return sha1.hexdigest()
def main():
args = parser.parse_args()
base_path = args.root
directories = [realpath(x) for x in args.directory]
print "Checking files in %s"%(realpath(base_path))
manifest = {}
with open('dropbox.oauth2') as f:
oauth2_token = f.read();
dbx = dropbox.Dropbox(oauth2_token)
assets = []
for d in directories:
for root, dirs, files in os.walk(d):
assets += [ relpath(realpath(join(root, x)), base_path) for x in files if splitext(x)[1] in asset_types]
for asset in assets:
manifest[asset] = { 'sha1': getFileSHA1(join(base_path, asset)) }
with open('./../binary.manifest', 'wb') as f:
f.write(json.dumps(manifest, indent=2, sort_keys=True))
remote_files = []
#use empty string to get root folder
for f in read_db_directory(dbx, ''):
remote_files += [f]
to_upload = [local for local in assets if ('/'+manifest[local]['sha1']+'.zip').lower() not in remote_files]
if args.preview:
for u in to_upload:
print u, "needs uploading."
else:
for u in to_upload:
zipped_name = manifest[u]['sha1']+'.zip'
with zipfile.ZipFile(zipped_name, 'w') as zip_pkg:
print u, "Compressing...",
zip_pkg.write(realpath(join(base_path, u)), 'file')
with open(zipped_name, 'rb') as zf:
data = zf.read()
print 'Uploading %d bytes' % (os.path.getsize(zipped_name)),
try:
res = dbx.files_upload(
data,
'/'+zipped_name,
mode=dropbox.files.WriteMode.overwrite,
mute=True)
except dropbox.exceptions.ApiError as err:
print('*** API error', err)
return None
print ' as ', res.name.encode('utf8'),
print '...deleting temp data.'
try:
os.remove(zipped_name)
except:
pass
def read_db_directory(dbx, path):
try:
res = dbx.files_list_folder(path)
while True:
for i in res.entries:
yield i.path_lower
if res.has_more:
res = dbx.files_list_folder_continue(res.cursor)
else:
break
except dropbox.exceptions.ApiError as err:
pass
if __name__ == '__main__':
main()
```
#### File: hart/scripts/binary_asset_unpackage.py
```python
import argparse
import zipfile
import os
import hashlib
import json
from os.path import join, realpath, splitext, relpath, isfile, split
import dropbox
parser = argparse.ArgumentParser(prog='binary asset unpackager',description='Script to download and unpackage binary data for a git repro')
parser.add_argument('-r','--root', help='Directory to install too.')
def main():
args = parser.parse_args()
root = args.root
print "Grabbing updated files for %s"%(root)
manifest = {}
local = {}
with open('dropbox.oauth2') as f:
oauth2_token = f.read();
dbx = dropbox.Dropbox(oauth2_token)
with open(join(root, 'binary.manifest'), 'rb') as f:
manifest = json.loads(f.read())
if isfile(join(root, 'local.manifest')):
with open(join(root, 'local.manifest'), 'rb') as f:
local = json.loads(f.read())
files_to_dl = []
files_to_del = []
for k, v in manifest.iteritems():
if not k in local or local[k]['sha1'] != v['sha1']:
files_to_dl += [{'path':'/'+v['sha1']+'.zip', 'dest':k}]
for k, v in local.iteritems():
if not k in manifest:
            files_to_del += [{'dest':k}]  # wrap in a list; += on a bare dict would only add its keys
for f in files_to_del:
try:
os.remove(join(root, f['dest']))
except:
pass
db_file_list = []
for f in read_db_directory(dbx, ''):
db_file_list += [f]
# There is an easy improvement here. We know all the files to download already
# begin a dropbox session and download in parallel. Would speed up a pull greatly.
for f in files_to_dl:
full_path = join(root, f['dest'])
if isfile(full_path):
os.remove(full_path)
if not f['path'].lower() in db_file_list:
print 'Can\'t locate remote data package!'
return
src_path = f['path'].lower()
print "Downloading package %s"%(src_path)
dbx.files_download_to_file('remote.zip', src_path)
print "Extracting package from %s to %s"%(src_path, f['dest'])
with zipfile.ZipFile('remote.zip', 'r') as zip_pkg:
zip_pkg.extractall()
# The extract creats a local 'file', move to the dest
if not os.path.exists(split(full_path)[0]):
os.makedirs(split(full_path)[0])
os.rename('file', full_path)
if isfile('remote.zip'):
os.remove('remote.zip')
# update the local manifest
with open(join(root, 'local.manifest'), 'wb') as f:
f.write(json.dumps(manifest, indent=2, sort_keys=True))
# try:
# with open('packaged.sha1.remote', 'rb') as f:
# remote_txt = f.read()
# except:
# remote_txt = 'blah!'
#
# try:
# with open('packaged.sha1.local', 'rb') as f:
# local_txt = f.read()
# except:
# local_txt = 'blah?'
#
# if remote_txt == local_txt:
# print "Nothing to do."
# return
#
# print "local doesn't match remote. Grabbing latest package."
#
# with open('scripts/dropbox.oauth2', 'rb') as f:
# oauth2_token = f.read();
#
# dbx = dropbox.Dropbox(oauth2_token)
#
# src_path = '/packaged_'+remote_txt+'.zip'
#
# print "listing packages..."
# folder_list = []
# for f in read_db_directory(dbx, ''):
# print f
# folder_list += [f]
#
# if not src_path.lower() in folder_list:
# print "Can't locate remote data package!"
# return
#
# print "Downloading package %s"%(src_path)
# dbx.files_download_to_file('packaged.remote.zip', src_path)
#
# print "Extracting package from %s"%(src_path)
# with zipfile.ZipFile('packaged.remote.zip', 'r') as zip_pkg:
# zip_pkg.extractall()
#
# os.remove('packaged.remote.zip')
def read_db_directory(dbx, path):
try:
res = dbx.files_list_folder(path)
while True:
for i in res.entries:
yield i.path_lower
if res.has_more:
res = dbx.files_list_folder_continue(res.cursor)
else:
break
except dropbox.exceptions.ApiError as err:
pass
if __name__ == '__main__':
main()
```
#### File: hart/scripts/clang_format_files.py
```python
import argparse
import zipfile
import os
import hashlib
import json
from os.path import join, realpath, splitext, relpath
from subprocess import Popen, PIPE
import time
parser = argparse.ArgumentParser(prog='clang format files',description='Script to run clang-format over the source directories of the repo')
parser.add_argument('-d','--directory', action='append', help='Source directory to search. NOT Recursive.')
parser.add_argument('-r','--rdirectory', action='append', help='Source directory to search. Recursive.')
#parser.add_argument('-s','--style', help='filepath of clang format style.')
file_types = [
'.c',
'.cpp',
'.h',
'.hpp',
'.cxx',
]
def main():
args = parser.parse_args()
directories = [realpath(x) for x in args.directory]
rdirectories = [realpath(x) for x in args.rdirectory]
print "Checking files in %s"%(str([realpath(x) for x in directories]+[realpath(x) for x in rdirectories]))
source_files = []
for d in directories:
for root, dirs, files in os.walk(d):
dirs = []
source_files += [realpath(join(root, x)) for x in files if splitext(x)[1] in file_types]
for d in rdirectories:
for root, dirs, files in os.walk(d):
source_files += [realpath(join(root, x)) for x in files if splitext(x)[1] in file_types]
cmd = ['../external/LLVM/clang-format', '-i', '-style=file']
cmd += source_files
#print "Running command line", cmd
p = Popen(cmd, stdin=PIPE, stdout=PIPE)
returncode = p.wait()
if __name__ == '__main__':
main()
``` |
{
"source": "JoJo2nd/lua-protobuf",
"score": 2
} |
#### File: lua-protobuf/lua_protobuf/generator.py
```python
from google.protobuf.descriptor import FieldDescriptor
import re
RE_BARE_BEGIN_BRACKET = re.compile(r'^\s*{\s*$')
RE_BEGIN_BRACKET = re.compile(r'{\s*$')
RE_END_BRACKET = re.compile(r'^\s*};?\s*$')
FIELD_LABEL_MAP = {
FieldDescriptor.LABEL_OPTIONAL: 'optional',
FieldDescriptor.LABEL_REQUIRED: 'required',
FieldDescriptor.LABEL_REPEATED: 'repeated'
}
FIELD_TYPE_MAP = {
FieldDescriptor.TYPE_DOUBLE: 'double',
FieldDescriptor.TYPE_FLOAT: 'float',
FieldDescriptor.TYPE_INT64: 'int64',
FieldDescriptor.TYPE_UINT64: 'uint64',
FieldDescriptor.TYPE_INT32: 'int32',
FieldDescriptor.TYPE_FIXED64: 'fixed64',
FieldDescriptor.TYPE_FIXED32: 'fixed32',
FieldDescriptor.TYPE_BOOL: 'bool',
FieldDescriptor.TYPE_STRING: 'string',
FieldDescriptor.TYPE_GROUP: 'group',
FieldDescriptor.TYPE_MESSAGE: 'message',
FieldDescriptor.TYPE_BYTES: 'bytes',
FieldDescriptor.TYPE_UINT32: 'uint32',
FieldDescriptor.TYPE_ENUM: 'enum',
FieldDescriptor.TYPE_SFIXED32: 'sfixed32',
FieldDescriptor.TYPE_SFIXED64: 'sfixed64',
FieldDescriptor.TYPE_SINT32: 'sint32',
FieldDescriptor.TYPE_SINT64: 'sint64',
}
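# Illustrative note (hypothetical field): for a descriptor declared as
# "repeated int32 ids = 1;", FIELD_LABEL_MAP[field.label] yields 'repeated' and
# FIELD_TYPE_MAP[field.type] yields 'int32'; these strings are presumably what the
# generator below uses when it emits the Lua binding source.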
def lua_protobuf_header():
'''Returns common header included by all produced files'''
return '''
#ifndef LUA_PROTOBUF_H
#define LUA_PROTOBUF_H
#include <google/protobuf/message.h>
#ifdef __cplusplus
extern "C" {
#endif
#include <lua.h>
#ifdef WINDOWS
#define LUA_PROTOBUF_EXPORT __declspec(dllexport)
#else
#define LUA_PROTOBUF_EXPORT
#endif
// type for callback function that is executed before Lua performs garbage
// collection on a message instance.
// if called function returns 1, Lua will free the memory backing the object
// if returns 0, Lua will not free the memory
typedef int (*lua_protobuf_gc_callback)(::google::protobuf::MessageLite *msg, void *userdata);
// __index and __newindex functions for enum tables
LUA_PROTOBUF_EXPORT int lua_protobuf_enum_index(lua_State *L);
LUA_PROTOBUF_EXPORT int lua_protobuf_enum_newindex(lua_State *L);
// GC callback function that always returns true
LUA_PROTOBUF_EXPORT int lua_protobuf_gc_always_free(::google::protobuf::MessageLite *msg, void *userdata);
// A minimal Lua interface for coded input/output protobuf streams
int lua_protobuf_coded_streams_open(lua_State* L);
#ifdef __cplusplus
}
#endif
#endif
'''
def lua_protobuf_source():
'''Returns source for common code'''
return '''
#include "lua-protobuf.h"
#ifdef __cplusplus
extern "C" {
#endif
#include <lauxlib.h>
#ifdef __cplusplus
}
#endif
int lua_protobuf_enum_index(lua_State *L)
{
return luaL_error(L, "attempting to access undefined enumeration value: %s", lua_tostring(L, 2));
}
int lua_protobuf_enum_newindex(lua_State *L)
{
return luaL_error(L, "cannot modify enumeration tables");
}
int lua_protobuf_gc_always_free(::google::protobuf::MessageLite *msg, void *ud)
{
return 1;
}
#include "google/protobuf/io/coded_stream.h"
#include "google/protobuf/io/zero_copy_stream_impl.h"
#include "google/protobuf/io/zero_copy_stream_impl_lite.h"
#include <fcntl.h>
#include <sys/stat.h>
#if defined (_MSC_VER)
# include <io.h> // for open
#else
# include <sys/types.h>
# define O_BINARY (0)
#endif
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
int lua_protobuf_coded_input_stream_new(lua_State* L) {
const char* filepath = luaL_checkstring(L, 1);
int fd = open(filepath, O_RDONLY | O_BINARY, S_IREAD);
if (fd == -1) {
return luaL_error(L, "Failed to open file %s", filepath);
}
char* udataptr = (char*)lua_newuserdata(L, sizeof(::google::protobuf::io::CodedInputStream)+sizeof(::google::protobuf::io::FileInputStream));
auto instream = new (udataptr+sizeof(::google::protobuf::io::FileInputStream)) ::google::protobuf::io::FileInputStream(fd);
instream->SetCloseOnDelete(true);
auto codestream = new (udataptr) ::google::protobuf::io::CodedInputStream(instream);
luaL_setmetatable(L, "protobuf_.CodedInputStream");
return 1;
}
int lua_protobuf_coded_input_stream_gc(lua_State* L) {
::google::protobuf::io::CodedInputStream* codestream = (::google::protobuf::io::CodedInputStream*)luaL_checkudata(L, 1, "protobuf_.CodedInputStream");
::google::protobuf::io::FileInputStream* filestream = (::google::protobuf::io::FileInputStream*)(codestream+1);
codestream->~CodedInputStream();
filestream->~FileInputStream();
return 0;
}
int lua_protobuf_coded_input_stream_skip(lua_State* L) {
::google::protobuf::io::CodedInputStream* codestream = (::google::protobuf::io::CodedInputStream*)luaL_checkudata(L, 1, "protobuf_.CodedInputStream");
int count = luaL_checkint(L, 2);
codestream->Skip(count);
return 0;
}
int lua_protobuf_coded_input_stream_push_limit(lua_State* L) {
::google::protobuf::io::CodedInputStream* codestream = (::google::protobuf::io::CodedInputStream*)luaL_checkudata(L, 1, "protobuf_.CodedInputStream");
int limit = luaL_checkint(L, 2);
limit = codestream->PushLimit(limit);
lua_pushinteger(L, limit);
return 1;
}
int lua_protobuf_coded_input_stream_pop_limit(lua_State* L) {
::google::protobuf::io::CodedInputStream* codestream = (::google::protobuf::io::CodedInputStream*)luaL_checkudata(L, 1, "protobuf_.CodedInputStream");
int limit = luaL_checkint(L, 2);
codestream->PopLimit(limit);
return 0;
}
int lua_protobuf_coded_input_stream_current_position(lua_State* L) {
::google::protobuf::io::CodedInputStream* codestream = (::google::protobuf::io::CodedInputStream*)luaL_checkudata(L, 1, "protobuf_.CodedInputStream");
lua_pushinteger(L, codestream->CurrentPosition());
return 1;
}
int lua_protobuf_coded_input_stream_read_raw(lua_State* L) {
::google::protobuf::io::CodedInputStream* codestream = (::google::protobuf::io::CodedInputStream*)luaL_checkudata(L, 1, "protobuf_.CodedInputStream");
int count = luaL_checkint(L, 2);
char* buf = new char[count];
bool success = codestream->ReadRaw(buf, count);
if (success) {
lua_pushlstring(L, buf, count);
} else {
lua_pushnil(L);
}
    delete [] buf;  // buf was allocated with new[], so it must be released with delete[]
return 1;
}
int lua_protobuf_coded_input_stream_read_varint_32(lua_State* L) {
::google::protobuf::io::CodedInputStream* codestream = (::google::protobuf::io::CodedInputStream*)luaL_checkudata(L, 1, "protobuf_.CodedInputStream");
::google::protobuf::uint32 val;
bool success = codestream->ReadVarint32(&val);
lua_pushboolean(L, success);
if (success) {
lua_pushinteger(L, val);
} else {
lua_pushnil(L);
}
    return 2;  // return both the success flag and the value (or nil)
}
int lua_protobuf_coded_input_stream_read_varint_64(lua_State* L) {
::google::protobuf::io::CodedInputStream* codestream = (::google::protobuf::io::CodedInputStream*)luaL_checkudata(L, 1, "protobuf_.CodedInputStream");
::google::protobuf::uint64 val;
bool success = codestream->ReadVarint64(&val);
lua_pushboolean(L, success);
if (success) {
lua_pushinteger(L, val);
} else {
lua_pushnil(L);
}
    return 2;  // return both the success flag and the value (or nil)
}
int lua_protobuf_coded_input_stream_read_little_endian_32(lua_State* L) {
::google::protobuf::io::CodedInputStream* codestream = (::google::protobuf::io::CodedInputStream*)luaL_checkudata(L, 1, "protobuf_.CodedInputStream");
::google::protobuf::uint32 val;
bool success = codestream->ReadLittleEndian32(&val);
lua_pushboolean(L, success);
if (success) {
lua_pushinteger(L, val);
} else {
lua_pushnil(L);
}
    return 2;  // return both the success flag and the value (or nil)
}
int lua_protobuf_coded_input_stream_read_little_endian_64(lua_State* L) {
::google::protobuf::io::CodedInputStream* codestream = (::google::protobuf::io::CodedInputStream*)luaL_checkudata(L, 1, "protobuf_.CodedInputStream");
::google::protobuf::uint64 val;
bool success = codestream->ReadLittleEndian64(&val);
lua_pushboolean(L, success);
if (success) {
lua_pushinteger(L, val);
} else {
lua_pushnil(L);
}
    return 2;  // return both the success flag and the value (or nil)
}
static const struct luaL_Reg CodedInputStream_functions [] = {
{"new", lua_protobuf_coded_input_stream_new},
{NULL, NULL}
};
static const struct luaL_Reg CodedInputStream_methods [] = {
{"__gc", lua_protobuf_coded_input_stream_gc},
{"Skip", lua_protobuf_coded_input_stream_skip},
{"PushLimit", lua_protobuf_coded_input_stream_push_limit},
{"PopLimit", lua_protobuf_coded_input_stream_pop_limit},
{"CurrentPosition", lua_protobuf_coded_input_stream_current_position},
{"ReadRaw", lua_protobuf_coded_input_stream_read_raw},
{"ReadVarint32", lua_protobuf_coded_input_stream_read_varint_32},
{"ReadVarint64", lua_protobuf_coded_input_stream_read_varint_64},
{"ReadLittleEndian32", lua_protobuf_coded_input_stream_read_little_endian_32},
{"ReadLittleEndian64", lua_protobuf_coded_input_stream_read_little_endian_64},
{NULL, NULL},
};
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
int lua_protobuf_coded_output_stream_new(lua_State* L) {
const char* filepath = luaL_checkstring(L, 1);
int fd = open(filepath, O_WRONLY | O_TRUNC | O_CREAT | O_BINARY, S_IREAD | S_IWRITE);
if (fd == -1) {
return luaL_error(L, "Failed to open file %s", filepath);
}
char* udataptr = (char*)lua_newuserdata(L, sizeof(::google::protobuf::io::CodedOutputStream)+sizeof(::google::protobuf::io::FileOutputStream));
auto outstream = new(udataptr+sizeof(::google::protobuf::io::CodedOutputStream)) ::google::protobuf::io::FileOutputStream(fd);
outstream->SetCloseOnDelete(true);
auto codestream = new (udataptr) ::google::protobuf::io::CodedOutputStream(outstream);
luaL_setmetatable(L, "protobuf_.CodedOutputStream");
return 1;
}
int lua_protobuf_coded_output_stream_gc(lua_State* L) {
::google::protobuf::io::CodedOutputStream* codestream = (::google::protobuf::io::CodedOutputStream*)luaL_checkudata(L, 1, "protobuf_.CodedOutputStream");
::google::protobuf::io::FileOutputStream* filestream = (::google::protobuf::io::FileOutputStream*)(codestream+1);
codestream->~CodedOutputStream();
filestream->~FileOutputStream();
return 0;
}
int lua_protobuf_coded_output_stream_skip(lua_State* L) {
::google::protobuf::io::CodedOutputStream* codestream = (::google::protobuf::io::CodedOutputStream*)luaL_checkudata(L, 1, "protobuf_.CodedOutputStream");
int count = luaL_checkint(L, 2);
codestream->Skip(count);
return 0;
}
int lua_protobuf_coded_output_stream_byte_count(lua_State* L) {
::google::protobuf::io::CodedOutputStream* codestream = (::google::protobuf::io::CodedOutputStream*)luaL_checkudata(L, 1, "protobuf_.CodedOutputStream");
lua_pushinteger(L, codestream->ByteCount());
return 1;
}
int lua_protobuf_coded_output_stream_write_raw(lua_State* L) {
::google::protobuf::io::CodedOutputStream* codestream = (::google::protobuf::io::CodedOutputStream*)luaL_checkudata(L, 1, "protobuf_.CodedOutputStream");
size_t count;
const char* buf = luaL_checklstring(L, 2, &count);
codestream->WriteRaw(buf, (int)count);
return 0;
}
int lua_protobuf_coded_output_stream_write_varint_32(lua_State* L) {
::google::protobuf::io::CodedOutputStream* codestream = (::google::protobuf::io::CodedOutputStream*)luaL_checkudata(L, 1, "protobuf_.CodedOutputStream");
::google::protobuf::uint32 val = luaL_checkunsigned(L, 2);
codestream->WriteVarint32(val);
return 0;
}
int lua_protobuf_coded_output_stream_write_varint_64(lua_State* L) {
::google::protobuf::io::CodedOutputStream* codestream = (::google::protobuf::io::CodedOutputStream*)luaL_checkudata(L, 1, "protobuf_.CodedOutputStream");
::google::protobuf::uint64 val = luaL_checkunsigned(L, 2);
codestream->WriteVarint64(val);
return 0;
}
int lua_protobuf_coded_output_stream_write_little_endian_32(lua_State* L) {
::google::protobuf::io::CodedOutputStream* codestream = (::google::protobuf::io::CodedOutputStream*)luaL_checkudata(L, 1, "protobuf_.CodedOutputStream");
::google::protobuf::uint32 val = luaL_checkunsigned(L, 2);
codestream->WriteLittleEndian32(val);
return 0;
}
int lua_protobuf_coded_output_stream_write_little_endian_64(lua_State* L) {
::google::protobuf::io::CodedOutputStream* codestream = (::google::protobuf::io::CodedOutputStream*)luaL_checkudata(L, 1, "protobuf_.CodedOutputStream");
::google::protobuf::uint64 val = luaL_checkunsigned(L, 2);
codestream->WriteLittleEndian64(val);
return 0;
}
static const struct luaL_Reg CodedOutputStream_functions [] = {
{"new", lua_protobuf_coded_output_stream_new},
{NULL, NULL}
};
static const struct luaL_Reg CodedOutputStream_methods [] = {
{"__gc", lua_protobuf_coded_output_stream_gc},
{"Skip", lua_protobuf_coded_output_stream_skip},
{"ByteCount", lua_protobuf_coded_output_stream_byte_count},
{"WriteRaw", lua_protobuf_coded_output_stream_write_raw},
{"WriteVarint32", lua_protobuf_coded_output_stream_write_varint_32},
{"WriteVarint64", lua_protobuf_coded_output_stream_write_varint_64},
{"WriteLittleEndian32", lua_protobuf_coded_output_stream_write_little_endian_32},
{"WriteLittleEndian64", lua_protobuf_coded_output_stream_write_little_endian_64},
{NULL, NULL},
};
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
static const struct luaL_Reg CodedInputStream_lib_functions [] = {
{NULL, NULL}
};
int lua_protobuf_coded_streams_open(lua_State* L) {
luaL_checktype(L, -1, LUA_TTABLE);
luaL_newmetatable(L, "protobuf_.CodedInputStream");
lua_pushvalue(L, -1);
lua_setfield(L, -2, "__index");
luaL_setfuncs(L, CodedInputStream_methods, 0);
lua_pop(L, 1);//pop the metatable
luaL_newmetatable(L, "protobuf_.CodedOutputStream");
lua_pushvalue(L, -1);
lua_setfield(L, -2, "__index");
luaL_setfuncs(L, CodedOutputStream_methods, 0);
lua_pop(L, 1);//pop the metatable
// add create funcs and tables
luaL_newlib(L, CodedInputStream_functions);
lua_setfield(L, -2, "CodedInputStream");
luaL_newlib(L, CodedOutputStream_functions);
lua_setfield(L, -2, "CodedOutputStream");
return 0;
}
#ifdef __cplusplus
extern "C" {
#endif
const char *luaEXT_findtable (lua_State *L, const char *fname, int idx, int szhint) {
const char *e;
if (idx) lua_pushvalue(L, idx);
do {
e = strchr(fname, '.');
if (e == NULL) e = fname + strlen(fname);
lua_pushlstring(L, fname, e - fname);
lua_rawget(L, -2);
if (lua_isnil(L, -1)) { /* no such field? */
lua_pop(L, 1); /* remove this nil */
lua_createtable(L, 0, (*e == '.' ? 1 : szhint)); /* new table for field */
lua_pushlstring(L, fname, e - fname);
lua_pushvalue(L, -2);
lua_settable(L, -4); /* set new table into field */
}
else if (!lua_istable(L, -1)) { /* field has a non-table value? */
lua_pop(L, 2); /* remove table and value */
return fname; /* return problematic part of the name */
}
lua_remove(L, -2); /* remove previous table */
fname = e + 1;
} while (*e == '.');
return NULL;
}
#ifdef __cplusplus
}
#endif
'''
def c_header_header(filename, package):
return [
'// Generated by the lua-protobuf compiler.',
'// You shouldn\'t be editing this file manually',
'//',
'// source proto file: %s' % filename,
'',
'#ifndef LUA_PROTOBUF_%s_%s_H' % (package.replace('.', '_'), filename.replace('.proto', '')),
'#define LUA_PROTOBUF_%s_%s_H' % (package.replace('.', '_'), filename.replace('.proto', '')),
'',
'#include "lua-protobuf.h"',
'#include <%s.pb.h>' % filename.replace('.proto', ''),#package.replace('.', '_'),
'',
'#ifdef __cplusplus',
'extern "C" {',
'#endif',
'',
'#include <lua.h>',
'',
'const char* luaEXT_findtable (lua_State*, const char*, int, int);',
'',
## We do this function based on file name to avoid name collisions
'// register all messages in this package to a Lua state',
'LUA_PROTOBUF_EXPORT int %sopen(lua_State *L);' % proto_function_open_name(filename),
'',
]
def source_header(filename, package, file_descriptor):
'''Returns lines that begin a source file'''
lines = []
lines.extend( [
'// Generated by the lua-protobuf compiler',
'// You shouldn\'t edit this file manually',
'//',
'// source proto file: %s' % filename,
'',
])
lines.append('#include "%s.pb.lua.h"' % filename.replace('.proto', ''))
for type in file_descriptor.dependency:
lines.append('#include "%s.pb.lua.h"' % type.replace('.proto', ''))
lines.extend( ['',
'#ifdef __cplusplus',
'extern "C" { // make sure functions treated with C naming',
'#endif',
'',
'#include <lauxlib.h>',
'',
'#ifdef __cplusplus',
'}',
'#endif',
'',
'#include <string>',
'',
'// this represents Lua udata for a protocol buffer message',
'// we record where a message came from so we can GC it properly',
'typedef struct msg_udata { // confuse over-simplified pretty-printer',
' ::google::protobuf::MessageLite * msg;',
' bool lua_owns;',
' lua_protobuf_gc_callback gc_callback;',
' void * callback_data;',
'} msg_udata;',
'',])
return lines
def proto_function_open_name(filename):
return 'lua_protobuf_%s_' % filename.replace('.proto', '')
def package_function_prefix(package):
return 'lua_protobuf_%s_' % package.replace('.', '_')
def message_function_prefix(package, message):
return '%s%s_' % (package_function_prefix(package), message)
def message_open_function_name(package, message):
'''Returns function name that registers the Lua library for a message type'''
return '%sopen' % message_function_prefix(package, message)
def cpp_class(package, message = None):
'''Returns the fully qualified class name for a message type'''
if not message:
return package.replace('.', '::')
return '::%s::%s' % ( package.replace('.', '::'), message )
def field_function_name(package, message, prefix, field):
'''Obtain the function name of a field accessor/mutator function'''
return '%s%s_%s' % ( message_function_prefix(package, message), prefix, field )
def field_function_start(package, message, prefix, field):
'''Obtain the start of function for a field accessor function'''
return [
'int %s(lua_State *L)' % field_function_name(package, message, prefix, field.lower()),
'{',
]
def lua_libname(package, message):
'''Returns the Lua library name for a specific message'''
return 'protobuf.%s.%s' % (package, message)
def metatable(package, message):
'''Returns Lua metatable for protocol buffer message type'''
return 'protobuf_.%s.%s' % (package, message)
def obtain_message_from_udata(package, message=None, index=1, varname='m'):
'''Statement that obtains a message from userdata'''
c = cpp_class(package, message)
return [
'msg_udata * %sud = (msg_udata *)%s;' % ( varname, check_udata(package, message, index) ),
'%s *%s = (%s *)%sud->msg;' % ( c, varname, c, varname ),
]
def check_udata(package, message, index=1):
'''Validates a udata is instance of protocol buffer message
By default, it validates udata at top of the stack
'''
return 'luaL_checkudata(L, %d, "%s")' % ( index, metatable(package, message) )
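# Illustrative sketch of the naming conventions produced by the helpers above, for a
# hypothetical package 'foo.bar' and message 'Person' (not taken from any real .proto):
#   message_function_prefix('foo.bar', 'Person')    -> 'lua_protobuf_foo_bar_Person_'
#   message_open_function_name('foo.bar', 'Person') -> 'lua_protobuf_foo_bar_Person_open'
#   cpp_class('foo.bar', 'Person')                   -> '::foo::bar::Person'
#   lua_libname('foo.bar', 'Person')                 -> 'protobuf.foo.bar.Person'
#   metatable('foo.bar', 'Person')                   -> 'protobuf_.foo.bar.Person'
#   check_udata('foo.bar', 'Person')                 -> 'luaL_checkudata(L, 1, "protobuf_.foo.bar.Person")'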
def has_body(package, message, field):
'''Returns the function body for a has_<field> function'''
lines = []
lines.extend(obtain_message_from_udata(package, message))
lines.append('lua_pushboolean(L, m->has_%s());' % field.lower())
lines.append('return 1;')
return lines
def clear_body(package, message, field):
'''Returns the function body for a clear_<field> function'''
lines = []
lines.extend(obtain_message_from_udata(package, message))
lines.append('m->clear_%s();' % field.lower())
lines.append('return 0;')
return lines
def size_body(package, message, field):
'''Returns the function body for a size_<field> function'''
lines = []
lines.extend(obtain_message_from_udata(package, message))
lines.append('int size = m->%s_size();' % field.lower())
lines.append('lua_pushinteger(L, size);')
lines.append('return 1;')
return lines
def add_body(package, message, field, type_name):
'''Returns the function body for the add_<field> function for repeated embedded messages'''
lines = []
lines.extend(obtain_message_from_udata(package, message))
lines.extend([
'%s *msg_new = m->add_%s();' % ( cpp_class(type_name), field.lower() ),
# since the message is allocated out of the containing message, Lua
# does not need to do GC
'lua_protobuf%s_pushreference(L, msg_new, NULL, NULL);' % type_name.replace('.', '_'),
'return 1;',
])
return lines
def field_get(package, message, field_descriptor):
'''Returns function definition for a get_<field> function'''
name = field_descriptor.name
type = field_descriptor.type
type_name = field_descriptor.type_name
label = field_descriptor.label
repeated = label == FieldDescriptor.LABEL_REPEATED
lines = []
lines.extend(field_function_start(package, message, 'get', name))
lines.extend(obtain_message_from_udata(package, message))
# the logic is significantly different depending on if the field is
# singular or repeated.
# for repeated, we have an argument which points to the numeric index to
# retrieve. in true Lua convention, we index starting from 1, which is
# different from protocol buffers, which indexes from 0
if repeated:
lines.extend([
'if (lua_gettop(L) != 2) {',
'return luaL_error(L, "missing required numeric argument");',
'}',
'lua_Integer index = luaL_checkinteger(L, 2);',
'if (index < 1 || index > m->%s_size()) {' % name.lower(),
# TODO is returning nil the more Lua way?
'return luaL_error(L, "index must be between 1 and current size: %%d", m->%s_size());' % name.lower(),
'}',
])
# TODO float and double types are not equivalent. don't treat them as such
# TODO figure out how to support 64 bit integers properly
if repeated:
if type in [ FieldDescriptor.TYPE_STRING, FieldDescriptor.TYPE_BYTES ]:
lines.extend([
'string s = m->%s(index - 1);' % name.lower(),
'lua_pushlstring(L, s.c_str(), s.size());',
])
elif type == FieldDescriptor.TYPE_BOOL:
lines.append('lua_pushboolean(L, m->%s(index-1));' % name.lower())
elif type in [FieldDescriptor.TYPE_INT32, FieldDescriptor.TYPE_UINT32,
FieldDescriptor.TYPE_FIXED32, FieldDescriptor.TYPE_SFIXED32, FieldDescriptor.TYPE_SINT32]:
lines.append('lua_pushinteger(L, m->%s(index-1));' % name.lower())
elif type in [ FieldDescriptor.TYPE_INT64, FieldDescriptor.TYPE_UINT64,
FieldDescriptor.TYPE_FIXED64, FieldDescriptor.TYPE_SFIXED64, FieldDescriptor.TYPE_SINT64]:
lines.append('lua_pushinteger(L, m->%s(index-1));' % name.lower())
elif type == FieldDescriptor.TYPE_FLOAT or type == FieldDescriptor.TYPE_DOUBLE:
lines.append('lua_pushnumber(L, m->%s(index-1));' % name.lower())
elif type == FieldDescriptor.TYPE_ENUM:
lines.append('lua_pushnumber(L, m->%s(index-1));' % name.lower())
elif type == FieldDescriptor.TYPE_MESSAGE:
lines.extend([
'%s * got_msg = m->mutable_%s(index-1);' % ( type_name.replace('.', '::'), name.lower() ),
'lua_protobuf%s_pushreference(L, got_msg, NULL, NULL);' % type_name.replace('.', '_'),
])
else:
lines.append('return luaL_error(L, "lua-protobuf does not support this field type");')
else:
# for scalar fields, we push nil if the value is not defined
# this is the Lua way
if type == FieldDescriptor.TYPE_STRING or type == FieldDescriptor.TYPE_BYTES:
lines.append('string s = m->%s();' % name.lower())
lines.append('if (m->has_%s()) lua_pushlstring(L, s.c_str(), s.size()); else lua_pushnil(L);' % name.lower())
elif type == FieldDescriptor.TYPE_BOOL:
lines.append('if (m->has_%s()) lua_pushboolean(L, m->%s()); else lua_pushnil(L);' % ( name.lower(), name.lower() ))
elif type in [FieldDescriptor.TYPE_INT32, FieldDescriptor.TYPE_UINT32,
FieldDescriptor.TYPE_FIXED32, FieldDescriptor.TYPE_SFIXED32, FieldDescriptor.TYPE_SINT32]:
lines.append('if (m->has_%s()) lua_pushinteger(L, m->%s()); else lua_pushnil(L);' % ( name.lower(), name.lower() ))
elif type in [ FieldDescriptor.TYPE_INT64, FieldDescriptor.TYPE_UINT64,
FieldDescriptor.TYPE_FIXED64, FieldDescriptor.TYPE_SFIXED64, FieldDescriptor.TYPE_SINT64]:
lines.append('if (m->has_%s()) lua_pushinteger(L, m->%s()); else lua_pushnil(L);' % ( name.lower(), name.lower() ))
elif type == FieldDescriptor.TYPE_FLOAT or type == FieldDescriptor.TYPE_DOUBLE:
lines.append('if (m->has_%s()) lua_pushnumber(L, m->%s()); else lua_pushnil(L);' % ( name.lower(), name.lower() ))
elif type == FieldDescriptor.TYPE_ENUM:
lines.append('if (m->has_%s()) lua_pushinteger(L, m->%s()); else lua_pushnil(L);' % ( name.lower(), name.lower() ))
elif type == FieldDescriptor.TYPE_MESSAGE:
lines.extend([
'if (!m->has_%s()) {' % name.lower(),
'lua_pushnil(L);',
'return 1;',
'}',
# we push the message as userdata
# since the message is allocated out of the parent message, we
# don't need to do garbage collection
'%s * got_msg = m->mutable_%s();' % ( type_name.replace('.', '::'), name.lower() ),
'lua_protobuf%s_pushreference(L, got_msg, NULL, NULL);' % type_name.replace('.', '_'),
])
else:
# not supported yet :(
lines.append('return luaL_error(L, "lua-protobuf does not support this field type");')
lines.append('return 1;')
lines.append('}\n')
return lines
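# Illustrative Lua-side calls implemented by the getters generated above, assuming a
# hypothetical message with an optional string field 'name' and a repeated string
# field 'email':
#   msg:get_name()    -- pushes the value, or nil when the field is unset
#   msg:get_email(1)  -- Lua index 1 maps to C++ email(0); an out-of-range index raises an error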
def field_set_assignment(field, args):
return [
'if (index == current_size + 1) {',
'm->add_%s(%s);' % ( field.lower(), args ),
'}',
'else {',
'm->set_%s(index-1, %s);' % ( field.lower(), args ),
'}',
]
def field_set(package, message, field_descriptor):
'''Returns function definition for a set_<field> function'''
name = field_descriptor.name
type = field_descriptor.type
type_name = field_descriptor.type_name
label = field_descriptor.label
repeated = label == FieldDescriptor.LABEL_REPEATED
lines = []
lines.extend(field_function_start(package, message, 'set', name.lower()))
lines.extend(obtain_message_from_udata(package, message, 1))
# we do things differently depending on if this is a singular or repeated field
# for singular fields, the new value is the first argument
# for repeated fields, the index is arg1 and the value is arg2
if repeated:
lines.extend([
'if (lua_gettop(L) != 3) {',
' return luaL_error(L, "required 2 arguments not passed to function");',
'}',
'lua_Integer index = luaL_checkinteger(L, 2);',
'int current_size = m->%s_size();' % name.lower(),
'if (index < 1 || index > current_size + 1) {',
'return luaL_error(L, "index must be between 1 and %d", current_size + 1);',
'}',
# we don't support the automagic nil clears value... yet
'if (lua_isnil(L, 3)) {',
'return luaL_error(L, "cannot assign nil to repeated fields (yet)");',
'}',
])
# TODO proper 64 bit handling
# now move on to the assignment
if repeated:
if type in [ FieldDescriptor.TYPE_STRING, FieldDescriptor.TYPE_BYTES ]:
lines.extend([
'size_t length = 0;',
'const char *s = luaL_checklstring(L, 3, &length);',
])
lines.extend(field_set_assignment(name, 's, length'))
elif type == FieldDescriptor.TYPE_BOOL:
lines.append('bool b = !!lua_toboolean(L, 3);')
lines.extend(field_set_assignment(name, 'b'))
elif type in [ FieldDescriptor.TYPE_DOUBLE, FieldDescriptor.TYPE_FLOAT ]:
lines.append('double d = lua_tonumber(L, 3);')
lines.extend(field_set_assignment(name, 'd'))
elif type in [ FieldDescriptor.TYPE_INT32, FieldDescriptor.TYPE_FIXED32,
FieldDescriptor.TYPE_UINT32, FieldDescriptor.TYPE_SFIXED32, FieldDescriptor.TYPE_SINT32 ]:
lines.append('lua_Integer i = lua_tointeger(L, 3);')
lines.extend(field_set_assignment(name, 'i'))
elif type in [ FieldDescriptor.TYPE_INT64, FieldDescriptor.TYPE_UINT64,
FieldDescriptor.TYPE_FIXED64, FieldDescriptor.TYPE_SFIXED64, FieldDescriptor.TYPE_SINT64]:
lines.append('lua_Integer i = lua_tointeger(L, 3);')
lines.extend(field_set_assignment(name, 'i'))
elif type == FieldDescriptor.TYPE_ENUM:
lines.append('lua_Integer i = lua_tointeger(L, 3);')
lines.extend(field_set_assignment(name, '(%s)i' % type_name.replace('.', '::')))
elif type == FieldDescriptor.TYPE_MESSAGE:
lines.append('return luaL_error(L, "to manipulate embedded messages, fetch the embedded message and modify it");')
else:
lines.append('return luaL_error(L, "field type not yet supported");')
lines.append('return 0;')
else:
# if they call set() with nil, we interpret as a clear
# this is the Lua way, after all
lines.extend([
'if (lua_isnil(L, 2)) {',
'm->clear_%s();' % name.lower(),
'return 0;',
'}',
'',
])
if type in [ FieldDescriptor.TYPE_STRING, FieldDescriptor.TYPE_BYTES ]:
lines.extend([
'if (!lua_isstring(L, 2)) return luaL_error(L, "passed value is not a string");',
'size_t len;',
'const char *s = lua_tolstring(L, 2, &len);',
'if (!s) {',
'luaL_error(L, "could not obtain string on stack. weird");',
'}',
'm->set_%s(s, len);' % name.lower(),
'return 0;',
])
elif type in [ FieldDescriptor.TYPE_DOUBLE, FieldDescriptor.TYPE_FLOAT ]:
lines.extend([
'if (!lua_isnumber(L, 2)) return luaL_error(L, "passed value cannot be converted to a number");',
'lua_Number n = lua_tonumber(L, 2);',
'm->set_%s(n);' % name.lower(),
'return 0;',
])
elif type in [ FieldDescriptor.TYPE_INT32, FieldDescriptor.TYPE_FIXED32,
FieldDescriptor.TYPE_UINT32, FieldDescriptor.TYPE_SFIXED32, FieldDescriptor.TYPE_SINT32 ]:
lines.extend([
'lua_Integer v = luaL_checkinteger(L, 2);',
'm->set_%s(v);' % name.lower(),
'return 0;',
])
elif type in [ FieldDescriptor.TYPE_INT64, FieldDescriptor.TYPE_UINT64,
FieldDescriptor.TYPE_FIXED64, FieldDescriptor.TYPE_SFIXED64, FieldDescriptor.TYPE_SINT64]:
lines.extend([
'lua_Integer i = luaL_checkinteger(L, 2);',
'm->set_%s(i);' % name.lower(),
'return 0;',
])
elif type == FieldDescriptor.TYPE_BOOL:
lines.extend([
'bool b = !!lua_toboolean(L, 2);',
'm->set_%s(b);' % name.lower(),
'return 0;',
])
elif type == FieldDescriptor.TYPE_ENUM:
lines.extend([
'lua_Integer i = luaL_checkinteger(L, 2);',
'm->set_%s((%s)i);' % ( name.lower(), type_name.replace('.', '::') ),
'return 0;',
])
elif type == FieldDescriptor.TYPE_MESSAGE:
lines.append('return luaL_error(L, "to manipulate embedded messages, obtain the embedded message and manipulate it");')
else:
lines.append('return luaL_error(L, "field type is not yet supported");')
lines.append('}\n')
return lines
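# Illustrative Lua-side calls for the setters generated above (same hypothetical
# 'name'/'email' fields as in the getter sketch):
#   msg:set_name("Alice")   -- assigns a singular field
#   msg:set_name(nil)       -- nil on a singular field behaves like clear_name()
#   msg:set_email(3, "x")   -- 1-based index; index == size + 1 appends a new element
# Embedded-message fields cannot be assigned directly; fetch them with the getter and
# mutate the returned reference instead.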
def new_message(package, message):
'''Returns function definition for creating a new protocol buffer message'''
lines = []
lines.append('int %snew(lua_State *L)' % message_function_prefix(package, message))
lines.append('{')
c = cpp_class(package, message)
lines.append('msg_udata * ud = (msg_udata *)lua_newuserdata(L, sizeof(msg_udata));')
lines.append('ud->lua_owns = true;')
lines.append('ud->msg = new %s();' % c)
lines.append('ud->gc_callback = NULL;')
lines.append('ud->callback_data = NULL;')
lines.append('luaL_getmetatable(L, "%s");' % metatable(package, message))
lines.append('lua_setmetatable(L, -2);')
lines.append('return 1;')
lines.append('}\n')
return lines
def message_pushcopy_function(package, message):
'''Returns function definition for pushing a copy of a message to the stack'''
return [
'bool %spushcopy(lua_State *L, const %s &from)' % ( message_function_prefix(package, message), cpp_class(package, message) ),
'{',
'msg_udata * ud = (msg_udata *)lua_newuserdata(L, sizeof(msg_udata));',
'ud->lua_owns = true;',
'ud->msg = new %s(from);' % cpp_class(package, message),
'ud->gc_callback = NULL;',
'ud->callback_data = NULL;',
'luaL_getmetatable(L, "%s");' % metatable(package, message),
'lua_setmetatable(L, -2);',
'return true;',
'}',
]
def message_getcopy_function(package, message):
'''Returns function definition for getting a copy of a message from the stack'''
return [
'bool %sgetcopy(lua_State *L, int index, %s &to)' % ( message_function_prefix(package, message), cpp_class(package, message) ),
'{',
'msg_udata * ud = (msg_udata *)luaL_checkudata(L, index, "%s");' % ( metatable(package, message) ),
'to.CopyFrom(*ud->msg);',
'return true;',
'}',
]
def message_pushreference_function(package, message):
'''Returns function definition for pushing a reference of a message on the stack'''
return [
'bool %spushreference(lua_State *L, %s *msg, lua_protobuf_gc_callback f, void *data)' % ( message_function_prefix(package, message), cpp_class(package, message) ),
'{',
'msg_udata * ud = (msg_udata *)lua_newuserdata(L, sizeof(msg_udata));',
'ud->lua_owns = false;',
'ud->msg = msg;',
'ud->gc_callback = f;',
'ud->callback_data = data;',
'luaL_getmetatable(L, "%s");' % metatable(package, message),
'lua_setmetatable(L, -2);',
'return true;',
'}',
]
def parsefromstring_message_function(package, message):
'''Returns function definition for parsing a message from a serialized string'''
lines = []
lines.append('int %sparsefromstring(lua_State *L)' % message_function_prefix(package, message))
c = cpp_class(package, message)
lines.extend([
'{',
'if (lua_gettop(L) != 1) {',
'return luaL_error(L, "parsefromstring() requires a string argument. none given");',
'}',
'size_t len;',
'const char *s = luaL_checklstring(L, -1, &len);',
'%s * msg = new %s();' % ( c, c ),
'if (!msg->ParseFromArray((const void *)s, len)) {',
'return luaL_error(L, "error deserializing message");',
'}',
'msg_udata * ud = (msg_udata *)lua_newuserdata(L, sizeof(msg_udata));',
'ud->lua_owns = true;',
'ud->msg = msg;',
'ud->gc_callback = NULL;',
'ud->callback_data = NULL;',
'luaL_getmetatable(L, "%s");' % metatable(package, message),
'lua_setmetatable(L, -2);',
'return 1;',
'}',
])
return lines
def label_to_string(label_value):
if label_value == FieldDescriptor.LABEL_OPTIONAL:
return "optional"
if label_value == FieldDescriptor.LABEL_REPEATED:
return "repeated"
if label_value == FieldDescriptor.LABEL_REQUIRED:
return "required"
def type_to_string(type_value):
if type_value == FieldDescriptor.TYPE_BOOL:# = 8
return "bool"
if type_value == FieldDescriptor.TYPE_BYTES:# = 12
return "bytes"
if type_value == FieldDescriptor.TYPE_DOUBLE:# = 1
return "double"
if type_value == FieldDescriptor.TYPE_ENUM:# = 14
return "enum"
if type_value == FieldDescriptor.TYPE_FIXED32:# = 7
return "fixed32"
if type_value == FieldDescriptor.TYPE_FIXED64:# = 6
return "fixed64"
if type_value == FieldDescriptor.TYPE_FLOAT:# = 2
return "float"
if type_value == FieldDescriptor.TYPE_GROUP:# = 10
return "group"
if type_value == FieldDescriptor.TYPE_INT32:# = 5
return "int32"
if type_value == FieldDescriptor.TYPE_INT64:# = 3
return "int64"
if type_value == FieldDescriptor.TYPE_MESSAGE:# = 11
return "message"
if type_value == FieldDescriptor.TYPE_SFIXED32:# = 15
return "sfixed32"
if type_value == FieldDescriptor.TYPE_SFIXED64:# = 16
return "sfixed64"
if type_value == FieldDescriptor.TYPE_SINT32:# = 17
return "sint32"
if type_value == FieldDescriptor.TYPE_SINT64:# = 18
return "sint64"
if type_value == FieldDescriptor.TYPE_STRING:# = 9
return "string"
if type_value == FieldDescriptor.TYPE_UINT32:# = 13
return "uint32"
if type_value == FieldDescriptor.TYPE_UINT64:# = 4
return "uint64"
def descriptor_message_function(package, message, descriptor):
''' Return a function that builds a table that describes message. Returns table to Lua for inspection'''
lines = []
lines.extend([
'int %sdescriptor(lua_State* L)' % message_function_prefix(package, message),
'{',
' lua_newtable(L);',
' ',
]);
for fields_descriptor in descriptor.field:
lines.extend([
' // Field: default_value = %s' % fields_descriptor.default_value,
' lua_newtable(L);',
' lua_pushstring(L, "%s");' % fields_descriptor.name,
' lua_setfield(L, -2, "name");',
' lua_pushstring(L, "%s");' % label_to_string(fields_descriptor.label),
' lua_setfield(L, -2, "label");',
' lua_pushnumber(L, %s);' % fields_descriptor.number,
' lua_setfield(L, -2, "number");',
' lua_pushstring(L, "%s");' % type_to_string(fields_descriptor.type),
' lua_setfield(L, -2, "type");',
' lua_pushstring(L, "%s");' % (fields_descriptor.type_name) if fields_descriptor.type_name else '',
' lua_setfield(L, -2, "type_name");' if fields_descriptor.type_name else '',
' lua_setfield(L, -2, "%s");' % fields_descriptor.name,
]);
lines.extend([
'',
' return 1;',
'}',
])
return lines
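# Illustrative shape of the table returned by the generated descriptor() function, for
# a hypothetical message 'Person' declaring 'optional string name = 1':
#   local d = Person.descriptor()
#   -- d.name == { name = "name", label = "optional", number = 1, type = "string" }
# (a 'type_name' entry is only added for enum and message fields)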
def gc_message_function(package, message):
'''Returns function definition for garbage collecting a message'''
lines = [
'int %sgc(lua_State *L)' % message_function_prefix(package, message),
'{',
]
lines.extend(obtain_message_from_udata(package, message, 1))
# if Lua "owns" the message, we delete it
# else, we delete only if a callback exists and it says it is OK
lines.extend([
'if (mud->lua_owns) {',
'delete mud->msg;',
'mud->msg = NULL;',
'return 0;',
'}',
'if (mud->gc_callback && mud->gc_callback(m, mud->callback_data)) {',
'delete mud->msg;',
'mud->msg = NULL;',
'return 0;',
'}',
'return 0;',
'}',
])
return lines
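# Ownership summary for the generated __gc handler above (descriptive note):
#   lua_owns == true                             -> Lua created the message, so delete it
#   lua_owns == false and callback returns true  -> the host allows deletion, so delete it
#   otherwise                                    -> leave the message alone; the host owns it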
def clear_message_function(package, message):
'''Returns the function definition for clearing a message'''
lines = [
'int %sclear(lua_State *L)' % message_function_prefix(package, message),
'{'
]
lines.extend(obtain_message_from_udata(package, message, 1))
lines.extend([
'm->Clear();',
'return 0;',
'}',
])
return lines
def serialized_message_function(package, message):
'''Returns the function definition for serializing a message and its length'''
lines = [
'int %sserialized(lua_State *L)' % message_function_prefix(package, message),
'{'
]
lines.extend(obtain_message_from_udata(package, message, 1))
lines.extend([
'string s;',
'if (!m->SerializeToString(&s)) {',
'return luaL_error(L, "error serializing message");',
'}',
'lua_pushlstring(L, s.c_str(), s.length());',
'lua_pushnumber(L, s.length());',
'return 2;',
'}',
])
return lines
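# Illustrative Lua round trip through the generated functions above, assuming the host
# exposes the module table as 'pb' and a hypothetical 'foo.bar.Person' message with a
# string field 'name':
#   local p = pb.foo.bar.Person.new()
#   p:set_name("Alice")
#   local bytes, len = p:serialized()
#   local q = pb.foo.bar.Person.parsefromstring(bytes)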
def message_function_array(package, message):
'''Defines functions for Lua object type
These are defined on the Lua metatable for the message type.
These are basically constructors and static methods in Lua land.
'''
return [
'static const struct luaL_Reg %s_functions [] = {' % message,
'{"new", %snew},' % message_function_prefix(package, message),
'{"parsefromstring", %sparsefromstring},' % message_function_prefix(package, message),
'{"descriptor", %sdescriptor},' % message_function_prefix(package, message),
'{NULL, NULL}',
'};\n',
]
def message_method_array(package, descriptor):
'''Defines functions for Lua object instances
These are functions available to each instance of a message.
They take the object userdata as the first parameter.
'''
message = descriptor.name
fp = message_function_prefix(package, message)
lines = []
lines.append('static const struct luaL_Reg %s_methods [] = {' % message)
lines.append('{"serialized", %sserialized},' % fp)
lines.append('{"clear", %sclear},' % fp)
lines.append('{"__gc", %sgc},' % message_function_prefix(package, message))
for fd in descriptor.field:
name = fd.name
label = fd.label
type = fd.type
lines.append('{"clear_%s", %s},' % ( name.lower(), field_function_name(package, message, 'clear', name.lower()) ))
lines.append('{"get_%s", %s},' % ( name.lower(), field_function_name(package, message, 'get', name.lower()) ))
lines.append('{"set_%s", %s},' % ( name.lower(), field_function_name(package, message, 'set', name.lower()) ))
if label in [ FieldDescriptor.LABEL_REQUIRED, FieldDescriptor.LABEL_OPTIONAL ]:
lines.append('{"has_%s", %s},' % ( name.lower(), field_function_name(package, message, 'has', name.lower()) ))
if label == FieldDescriptor.LABEL_REPEATED:
lines.append('{"size_%s", %s},' % ( name.lower(), field_function_name(package, message, 'size', name.lower()) ))
if type == FieldDescriptor.TYPE_MESSAGE:
lines.append('{"add_%s", %s},' % ( name.lower(), field_function_name(package, message, 'add', name.lower()) ))
lines.append('{NULL, NULL},')
lines.append('};\n')
return lines
def message_open_function(package, descriptor):
'''Function definition for opening/registering a message type'''
message = descriptor.name
lines = [
'int %s(lua_State *L)' % message_open_function_name(package, message),
'{',
'luaL_checktype(L, -1, LUA_TTABLE);', #
'luaL_newmetatable(L, "%s");' % metatable(package, message),
'lua_pushvalue(L, -1);',
'lua_setfield(L, -2, "__index");',
'luaL_setfuncs(L, %s_methods, 0);' % message, ##'luaL_register(L, NULL, %s_methods);' % message,
'lua_pop(L, 1); // remove the metatable', #
'if (luaEXT_findtable(L, "%s", -1, 1)) { ' % package, #
' return luaL_error(L, "Error finding correct table");',
'}',
'luaL_newlib(L, %s_functions);' % message, ##'luaL_register(L, "%s", %s_functions);' % (lua_libname(package, message), message),
'lua_setfield(L, -2, "%s");' % message, #
'lua_pop(L, 1); //remove the returned table from findtable' #
]
for enum_descriptor in descriptor.enum_type:
lines.extend(enum_source(enum_descriptor))
lines.extend([
# this is wrong if we are calling through normal Lua module load means
#'lua_pop(L, 1);',
'return 0;',#'return 1;',
'}',
'\n',
])
return lines
def message_header(package, message_descriptor):
'''Returns the lines for a header definition of a message'''
message_name = message_descriptor.name
lines = []
lines.append('// Message %s' % message_name)
function_prefix = 'lua_protobuf_' + package.replace('.', '_') + '_'
c = cpp_class(package, message_name)
lines.extend([
'// registers the message type with Lua',
'LUA_PROTOBUF_EXPORT int %s(lua_State *L);\n' % message_open_function_name(package, message_name),
'',
'// push a copy of the message to the Lua stack',
'// caller is free to use original message however she wants, but changes will not',
'// be reflected in Lua and vice-verse',
'LUA_PROTOBUF_EXPORT bool %s%s_pushcopy(lua_State *L, const %s &msg);' % ( function_prefix, message_name, c),
'',
'// push a reference of the message to the Lua stack',
'// the 3rd and 4th arguments define a callback that can be invoked just before Lua',
'// garbage collects the message. If the 3rd argument is NULL, Lua will *NOT* free',
'// memory. If the second argument points to a function, that function is called when',
'// Lua garbage collects the object. The function is sent a pointer to the message being',
'// collected and the 4th argument to this function. If the function returns true,',
'// Lua will free the memory. If false (0), Lua will not free the memory.',
'LUA_PROTOBUF_EXPORT bool %s%s_pushreference(lua_State *L, %s *msg, lua_protobuf_gc_callback callback, void *data);' % ( function_prefix, message_name, c ),
'',
'// get a copy of the message from the Lua stack',
'// caller is free to use the new message however she wants, but changes will not',
'// be reflected in Lua and vice-verse',
'LUA_PROTOBUF_EXPORT bool %s%s_getcopy(lua_State *L, int index, %s &msg);' % ( function_prefix, message_name, c),
'',
'',
'// The following functions are called by Lua. Many people will not need them,',
'// but they are exported for those that do.',
'',
'',
'// constructor called from Lua',
'LUA_PROTOBUF_EXPORT int %s%s_new(lua_State *L);' % ( function_prefix, message_name ),
'',
'// obtain instance from a serialized string',
'LUA_PROTOBUF_EXPORT int %s%s_parsefromstring(lua_State *L);' % ( function_prefix, message_name ),
'',
'// obtain table of fields in this message',
'LUA_PROTOBUF_EXPORT int %s%s_descriptor(lua_State* L);' % ( function_prefix, message_name),
'',
'// garbage collects message instance in Lua',
'LUA_PROTOBUF_EXPORT int %s%s_gc(lua_State *L);' % ( function_prefix, message_name ),
'',
'// obtain serialized representation of instance',
'LUA_PROTOBUF_EXPORT int %s%s_serialized(lua_State *L);' % ( function_prefix, message_name ),
'',
'// clear all fields in the message',
'LUA_PROTOBUF_EXPORT int %s%s_clear(lua_State *L);' % ( function_prefix, message_name ),
'',
])
# each field defined in the message
for field_descriptor in message_descriptor.field:
field_name = field_descriptor.name
field_number = field_descriptor.number
field_label = field_descriptor.label
field_type = field_descriptor.type
field_default = field_descriptor.default_value
if field_label not in FIELD_LABEL_MAP.keys():
raise Exception('unknown field label constant: %s' % field_label)
field_label_s = FIELD_LABEL_MAP[field_label]
if field_type not in FIELD_TYPE_MAP.keys():
raise Exception('unknown field type: %s' % field_type)
field_type_s = FIELD_TYPE_MAP[field_type]
lines.append('// %s %s %s = %d' % (field_label_s, field_type_s, field_name, field_number))
lines.append('LUA_PROTOBUF_EXPORT int %s%s_clear_%s(lua_State *L);' % (function_prefix, message_name, field_name.lower()))
lines.append('LUA_PROTOBUF_EXPORT int %s%s_get_%s(lua_State *L);' % (function_prefix, message_name, field_name.lower()))
# TODO I think we can get rid of this for message types
lines.append('LUA_PROTOBUF_EXPORT int %s%s_set_%s(lua_State *L);' % (function_prefix, message_name, field_name.lower()))
if field_label in [ FieldDescriptor.LABEL_REQUIRED, FieldDescriptor.LABEL_OPTIONAL ]:
lines.append('LUA_PROTOBUF_EXPORT int %s%s_has_%s(lua_State *L);' % (function_prefix, message_name, field_name.lower()))
if field_label == FieldDescriptor.LABEL_REPEATED:
lines.append('LUA_PROTOBUF_EXPORT int %s%s_size_%s(lua_State *L);' % (function_prefix, message_name, field_name.lower()))
if field_type == FieldDescriptor.TYPE_MESSAGE:
lines.append('LUA_PROTOBUF_EXPORT int %s%s_add_%s(lua_State *L);' % ( function_prefix, message_name, field_name.lower()))
lines.append('')
lines.append('// end of message %s\n' % message_name)
return lines
def message_source(package, message_descriptor):
'''Returns lines of source code for an individual message type'''
lines = []
message = message_descriptor.name
lines.extend(message_function_array(package, message))
lines.extend(message_method_array(package, message_descriptor))
lines.extend(message_open_function(package, message_descriptor))
lines.extend(message_pushcopy_function(package, message))
lines.extend(message_pushreference_function(package, message))
lines.extend(message_getcopy_function(package, message))
lines.extend(new_message(package, message))
lines.extend(parsefromstring_message_function(package, message))
lines.extend(descriptor_message_function(package, message, message_descriptor))
lines.extend(gc_message_function(package, message))
lines.extend(clear_message_function(package, message))
lines.extend(serialized_message_function(package, message))
for descriptor in message_descriptor.field:
name = descriptor.name
# clear() is in all label types
lines.extend(field_function_start(package, message, 'clear', name))
lines.extend(clear_body(package, message, name))
lines.append('}\n')
lines.extend(field_get(package, message, descriptor))
lines.extend(field_set(package, message, descriptor))
if descriptor.label in [FieldDescriptor.LABEL_OPTIONAL, FieldDescriptor.LABEL_REQUIRED]:
# has_<field>()
lines.extend(field_function_start(package, message, 'has', name))
lines.extend(has_body(package, message, name))
lines.append('}\n')
if descriptor.label == FieldDescriptor.LABEL_REPEATED:
# size_<field>()
lines.extend(field_function_start(package, message, 'size', name))
lines.extend(size_body(package, message, name))
lines.append('}\n')
if descriptor.type == FieldDescriptor.TYPE_MESSAGE:
lines.extend(field_function_start(package, message, 'add', name))
lines.extend(add_body(package, message, name, descriptor.type_name))
lines.append('}\n')
return lines
def enum_source(descriptor):
'''Returns source code defining an enumeration type'''
# this function assumes the module/table the enum should be assigned to
# is at the top of the stack when it is called
name = descriptor.name
# enums are a little funky
# at the core, there is a table whose keys are the enum string names and
# values corresponding to the respective integer values. this table also
# has a metatable with __index to throw errors when unknown enumerations
# are accessed
#
# this table is then wrapped in a proxy table. the proxy table is empty
# but has a metatable with __index and __newindex set. __index is the
# table that actually contains the values. __newindex is a function that
# always throws an error.
#
# we need the proxy table so we can intercept all requests for writes.
# __newindex is only called for new keys, so we need an empty table so
# all writes are sent to __newindex
lines = [
'// %s enum' % name,
'lua_newtable(L); // proxy table',
'lua_newtable(L); // main table',
]
# assign enumerations to the table
for value in descriptor.value:
k = value.name
v = value.number
lines.extend([
'lua_pushnumber(L, %d);' % v,
'lua_setfield(L, -2, "%s");' % k
])
# assign the metatable
lines.extend([
'// define metatable on main table',
'lua_newtable(L);',
'lua_pushcfunction(L, lua_protobuf_enum_index);',
'lua_setfield(L, -2, "__index");',
'lua_setmetatable(L, -2);',
'',
'// define metatable on proxy table',
'lua_newtable(L);',
# proxy meta: -1; main: -2; proxy: -3
'lua_pushvalue(L, -2);',
'lua_setfield(L, -2, "__index");',
'lua_pushcfunction(L, lua_protobuf_enum_newindex);',
'lua_setfield(L, -2, "__newindex");',
'lua_remove(L, -2);',
'lua_setmetatable(L, -2);',
# proxy at top of stack now
# assign to appropriate module
'lua_setfield(L, -2, "%s");' % name,
'// end %s enum' % name
])
return lines
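# The stack manipulation emitted above builds, roughly, the following Lua structure for
# a hypothetical enum 'Color' with a single value RED = 0:
#   local values = setmetatable({ RED = 0 }, { __index = lua_protobuf_enum_index })
#   Color = setmetatable({}, { __index = values,
#                              __newindex = lua_protobuf_enum_newindex })
# Reads fall through the empty proxy to the inner table; any write hits the proxy and is
# rejected by lua_protobuf_enum_newindex.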
def file_header(file_descriptor):
filename = file_descriptor.name
package = file_descriptor.package
lines = []
lines.extend(c_header_header(filename, package))
for descriptor in file_descriptor.message_type:
lines.extend(message_header(package, descriptor))
lines.append('#ifdef __cplusplus')
lines.append('}')
lines.append('#endif')
lines.append('')
lines.append('#endif')
return '\n'.join(lines)
def file_source(file_descriptor):
'''Obtains the source code for a FileDescriptor instance'''
filename = file_descriptor.name
package = file_descriptor.package
lines = []
lines.extend(source_header(filename, package, file_descriptor))
lines.append('using ::std::string;\n')
lines.extend([
'int %sopen(lua_State *L)' % proto_function_open_name(filename),
'{',
])
# we populate enumerations as tables inside the protobuf global
# variable/module
# this is a little tricky, because we need to ensure all the parent tables
# are present
# i.e. protobuf.package.foo.enum => protobuf['package']['foo']['enum']
    # we iterate over all the tables and create missing ones, as necessary
# we cheat here and use the undocumented/internal luaL_findtable function
# we probably shouldn't rely on an "internal" API, so
# TODO don't use internal API call
lines.extend([
'luaL_checktype(L, -1, LUA_TTABLE);',
'const char *table = luaEXT_findtable(L, "%s", -1, 1);' % package,
'if (table) {',
'return luaL_error(L, "could not create parent Lua tables");',
'}',
'if (!lua_istable(L, -1)) {',
'return luaL_error(L, "could not create parent Lua tables");',
'}',
])
for descriptor in file_descriptor.enum_type:
lines.extend(enum_source(descriptor))
lines.extend([
# don't need main table on stack any more
'lua_pop(L, 1);',
# and we register this package as a module, complete with enumerations
#'luaL_Reg funcs [] = { { NULL, NULL } };',
#'luaL_register(L, "protobuf.%s", funcs);' % package,
])
for descriptor in file_descriptor.message_type:
lines.append('%s(L);' % message_open_function_name(package, descriptor.name))
lines.append('return 0;')
lines.append('}')
lines.append('\n')
for descriptor in file_descriptor.message_type:
lines.extend(message_source(package, descriptor))
# perform some hacky pretty-printing
formatted = []
indent = 0
for line in lines:
if RE_BARE_BEGIN_BRACKET.search(line):
formatted.append((' ' * indent) + line)
indent += 4
elif RE_BEGIN_BRACKET.search(line):
formatted.append((' ' * indent) + line)
indent += 4
elif RE_END_BRACKET.search(line):
if indent >= 4:
indent -= 4
formatted.append((' ' * indent) + line)
else:
formatted.append((' ' * indent) + line)
return '\n'.join(formatted)
``` |
{
"source": "jojo-31/peakdb",
"score": 3
} |
#### File: api/peaks/endpoints.py
```python
import logging
from flask import request, jsonify
import flask_cors
from flask_restplus import Resource
from api import restplus
from database import db_instance
from api import serializers, utilities
log = logging.getLogger(__name__)
ns_peaks = restplus.api.namespace("peaks", description="Operations related to peaks")
ns_ips = restplus.api.namespace("ips", description="Operations related to black IPs")
@ns_peaks.route("/")
class PeakCollection(Resource):
@restplus.api.marshal_list_with(serializers.peak)
@utilities.whitelisted
def get(self):
"""Returns the list of all peaks"""
return db_instance.get_peaks()
@restplus.api.response(204, "Peak successfully created.")
@restplus.api.expect(serializers.peak)
@restplus.api.marshal_list_with(serializers.peak)
@utilities.whitelisted
def post(self):
"""Add a new peak
* The ID won't be taken into account
* Position should be a list of 2 floats, lon/lat, in decimal degrees, eg [12.5, 41.2].
As a convenience, an equivalent string is accepted (eg, '[12.5, 41.2]')
"""
inserted_peak = db_instance.add_peak(request.json)
return inserted_peak, 204
@ns_peaks.route("/<string:peak_id>")
@restplus.api.response(404, "Peak not found.")
class PeakItem(Resource):
@restplus.api.expect(serializers.peak)
@utilities.whitelisted
@restplus.api.response(204, "Peak successfully updated.")
def put(self, peak_id: str):
"""Update a peak
* Position should be a list of 2 floats, lon/lat, in decimal degrees, eg [12.5, 41.2].
As a convenience, an equivalent string is accepted (eg, '[12.5, 41.2]')
Args:
peak_id (str): ID of the peak
"""
success = db_instance.update_peak(peak_id, request.json)
if success:
return None, 204
else:
return None, 404
@restplus.api.response(204, "Peak successfully deleted.")
@utilities.whitelisted
def delete(self, peak_id: str):
"""Delete a peak
Args:
peak_id (str): ID of the peak
"""
success = db_instance.remove_peak(peak_id)
if success:
return None, 204
else:
return None, 404
@ns_peaks.route("/<string:bottom_left>/<string:upper_right>")
class PeakSearch(Resource):
@restplus.api.marshal_list_with(serializers.peak)
@utilities.whitelisted
def get(self, bottom_left: str, upper_right: str):
"""Get all peaks within the given bounding box
The given bounding box points should be a list of 2 floats:
lon / lat, in decimal degrees, eg: [40.2, 12.5].
As a convenience, equivalent strings are accepted (eg, '[40.2, 12.5]')
Args:
bottom_left (str): Bottom left point of the bounding box
upper_right (str): Upper right point of the bounding box
"""
return db_instance.get_peaks_in_bb(bottom_left, upper_right)
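# Illustrative requests against this namespace (the '/api' prefix, host, and the JSON
# body fields are assumptions, not taken from this file; see serializers.peak for the
# actual schema; bracket/comma URL-encoding is omitted for readability):
#   GET    /api/peaks/                              -> list all peaks
#   POST   /api/peaks/        {"name": "Mont Blanc", "position": [6.86, 45.83]}
#   PUT    /api/peaks/<peak_id>                     -> update one peak
#   DELETE /api/peaks/<peak_id>                     -> remove one peak
#   GET    /api/peaks/[6.0,45.0]/[7.5,46.5]         -> peaks inside the bounding box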
@ns_ips.route("/")
class IpsCollection(Resource):
@restplus.api.marshal_list_with(serializers.ip)
def get(self):
"""Returns the list of all peaks"""
return db_instance.get_black_ips()
``` |
{
"source": "JoJo-77/ES_Discord_Bot",
"score": 3
} |
#### File: JoJo-77/ES_Discord_Bot/subscraper.py
```python
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
import time
class Scraper:
def __init__(self,url):
self.url = url
self.headerend = 41
self.driver = webdriver.Chrome("./chromedriver")
self.actions = ActionChains(self.driver)
self.workerdict = {}
self.driver.get(self.url)
time.sleep(4)
def get_web_text(self):
return self.driver.find_element_by_tag_name("body").text.split("\n")
def scrape(self):
schedule = self.get_web_text()[self.headerend:]
schedule.remove("ES Support Assistants")
schedule.remove("ES Student Operations")
for s in sorted(range(len(schedule)),reverse = True):
if schedule[s] == '':
del schedule[s]
workers = [schedule[x] for x in range(len(schedule)) if x % 2 ==0]
shifts = [schedule[x] for x in range(len(schedule)) if x % 2 !=0]
self.make_dict(workers,shifts)
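    # Descriptive note: after dropping the two header rows and blank lines, the schedule
    # text alternates one worker-name line with one shift line, which is why scrape()
    # takes even indices as workers and odd indices as shifts before pairing them in
    # make_dict().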
def make_dict(self,workers,shifts):
for x in range(len(workers)):
if workers[x] not in self.workerdict.keys():
self.workerdict[workers[x]] = [shifts[x]]
elif workers[x] in self.workerdict.keys() and shifts[x] not in self.workerdict[workers[x]]:
self.workerdict[workers[x]].append(shifts[x])
def print_dict(self):
for worker in self.workerdict.keys():
#print(worker,self.workerdict[worker])
#print('\n')
self.print_worker(worker)
def sort_shifts(self):
ret = 0
shift = self.get_shift().title()
for worker in self.workerdict.keys():
for item in self.workerdict[worker]:
if shift in item:
print(worker, self.workerdict[worker])
ret = 1
else:
continue
return ret
def print_worker(self,worker):
ret = (worker + ": ")
for shift in self.workerdict[worker]:
            ret += shift + ", "
print(ret)
def get_shift(self):
inp = input("What shift do you want to see:\n")
return inp
def close(self):
self.driver.close()
'''
Testing OOP
'''
def main():
scraper = Scraper("https://account.subitup.com/public/?5t7i0fpakJA%3d#byTimeDay")
scraper.scrape()
scraper.print_dict()
scraper.close()
retval = scraper.sort_shifts()
while (retval is not 0):
retval = scraper.sort_shifts()
if __name__ == "__main__":
main()
``` |
{
"source": "JoJo-77/SpotifyWeatherApp",
"score": 3
} |
#### File: mysite/webapp/get_weather.py
```python
from bs4 import BeautifulSoup
import urllib
#Populates a dictionary with data from the website, returns None if the connection fails
zipInput = ""
def get_data(input):
zipInput = input
data = {}
if input == "swampletics":
data["easter_egg"] = "Meet pyletics my python locked ultimate mood playlist website -man"
        input = "22030"
url = "https://www.weather.gov/" + input
try:
con = get_connection(url)
except:
return None
soup = BeautifulSoup(con.read(), "html.parser")
data["location"] = soup.find("h2", attrs={"class": "panel-title"}).text.strip()
data["weather"] = soup.find("p", attrs = {"class": "myforecast-current"}).text.strip()
data["temperature"] = soup.find("p", attrs = {"class": "myforecast-current-lrg"}).text.strip() + " / " + soup.find("p", attrs = {"class": "myforecast-current-sm"}).text.strip()
data["wind_speed"] = soup.findAll("td", attrs = {"class": None})[1].text.strip()
    if data["wind_speed"] == "Calm":
data["wind_speed"] = "0"
else:
data["wind_speed"] = str([int(s) for s in data["wind_speed"].split(" ") if s.isdigit()][0])
data["mood"] = None
return data
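# Illustrative shape of the dict returned by get_data() (values are made-up examples,
# not real scraped data):
#   {"location": "Fairfax VA", "weather": "Partly Cloudy",
#    "temperature": "72°F / 22°C", "wind_speed": "10", "mood": None}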
def get_connection(url):
req = urllib.request.Request(url, headers = {"User-Agent":"Magic Browser"})
return urllib.request.urlopen(req)
#Hardcoded if statements to find mood of playlist by looking for keywords in weather
#Returns updated dictionary of data
def get_mood(data):
#do not allow fall through for hard weathers such as rain, wind, or snow
try:
if data["easter_egg"]:
data["mood"] = "osrs"
return data
    except KeyError:
if "sunny" in str.lower(data["weather"]) or "fair" in str.lower(data["weather"]) or "a few clouds" in str.lower(data["weather"]) :
data["mood"] = "sunny"
if int(data["wind_speed"]) > 15 or "breezy" in str.lower(data["weather"]):
data["mood"] = "windy"
if "rainy" in str.lower(data["weather"]) or "overcast" in str.lower(data["weather"]):
data["mood"] = "rainy"
return data
if "snowy" in str.lower(data["weather"]):
data["mood"] = "snowy"
return data
if "cloudy" in str.lower(data["weather"]) :
data["mood"] = "cloudy"
return data
return data
```
#### File: SpotifyWeatherApp/tests/scraper_tests.py
```python
import unittest
from get_weather import get_data
from get_weather import get_mood
mood_types = ["sunny", "windy", "rainy", "snowy", "cloudy"]
class GetWeather(unittest.TestCase):
def test_local(self):
result = get_data("22031") # Fairfax
assert (("Sunny" in result["weather"]) or ("Fair" in result["weather"]) or ("Windy" in result["weather"]) or ("Rainy" in result["weather"]) or ("Snowy" in result["weather"]) or ("Cloudy" in result["weather"]))
def test_midwest(self):
result = get_data("60007") # Chicago
assert (("Sunny" in result["weather"]) or ("Fair" in result["weather"]) or ("Windy" in result["weather"]) or ("Rainy" in result["weather"]) or ("Snowy" in result["weather"]) or ("Cloudy" in result["weather"]))
def test_westcoast(self):
result = get_data("90001") # Los Angeles
assert (("Sunny" in result["weather"]) or ("Fair" in result["weather"]) or ("Windy" in result["weather"]) or ("Rainy" in result["weather"]) or ("Snowy" in result["weather"]) or ("Cloudy" in result["weather"]))
class GetMood(unittest.TestCase):
def test_local(self):
result = get_mood(get_data("22031")) # Fairfax
assert result["mood"] in mood_types
def test_midwest(self):
result = get_mood(get_data("60007")) # Chicago
assert result["mood"] in mood_types
def test_westcoast(self):
result = get_mood(get_data("90001")) # Los Angeles
assert result["mood"] in mood_types
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jojo7815/quant",
"score": 3
} |
#### File: jojo7815/quant/swensen.py
```python
from __future__ import division
import datetime
import pytz
import pandas as pd
from zipline.api import order_target_percent
import numpy as np
def initialize(context):
set_long_only()
set_symbol_lookup_date('2005-01-01') # because EEM has multiple sid's.
context.secs = symbols('TIP', 'TLT', 'VNQ', 'EEM', 'EFA', 'VTI') # Securities
context.pcts = [ 0.15, 0.15, 0.15, 0.1, 0.15, 0.3 ] # Percentages
    context.ETFs = list(zip(context.secs, context.pcts)) # list of tuples, safe to iterate repeatedly
# Change this variable if you want to rebalance less frequently
context.rebalance_days = 20 # 1 = can rebalance any day, 20 = every month
# Set the trade time, if in minute mode, we trade between 10am and 3pm.
context.rebalance_date = None
context.rebalance_hour_start = 10
context.rebalance_hour_end = 15
def handle_data(context, data):
# Get the current exchange time, in the exchange timezone
exchange_time = pd.Timestamp(get_datetime()).tz_convert('US/Eastern')
# If it's a rebalance day (defined in intialize()) then rebalance:
if context.rebalance_date == None or \
exchange_time >= context.rebalance_date + datetime.timedelta(days=context.rebalance_days):
# Do nothing if there are open orders:
if has_orders(context):
print('has open orders - doing nothing!')
return
rebalance(context, data, exchange_time)
def rebalance(context, data, exchange_time, threshold = 0.05):
"""
For every stock or cash position, if the target percent is off by the threshold
amount (5% as a default), then place orders to adjust all positions to the target
percent of the current portfolio value.
"""
# if the backtest is in minute mode
if get_environment('data_frequency') == 'minute':
# rebalance if we are in the user specified rebalance time-of-day window
if exchange_time.hour < context.rebalance_hour_start or \
exchange_time.hour > context.rebalance_hour_end:
return
need_full_rebalance = False
portfolio_value = context.portfolio.portfolio_value
# rebalance if we have too much cash
if context.portfolio.cash / portfolio_value > threshold:
need_full_rebalance = True
# or rebalance if an ETF is off by the given threshold
for sid, target in context.ETFs:
pos = context.portfolio.positions[sid]
position_pct = (pos.amount * pos.last_sale_price) / portfolio_value
# if any position is out of range then rebalance the whole portfolio
if abs(position_pct - target) > threshold:
need_full_rebalance = True
break # don't bother checking the rest
# perform the full rebalance if we flagged the need to do so
# What we should do is first sell the overs and then buy the unders.
if need_full_rebalance:
for sid, target in context.ETFs:
order_target_percent(sid, target)
log.info("Rebalanced at %s" % str(exchange_time))
context.rebalance_date = exchange_time
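# Worked example of the drift check above (hypothetical numbers): with a $100,000
# portfolio and VTI targeted at 30%, a full rebalance is triggered only when VTI's
# market value drifts outside the $25,000-$35,000 band, i.e. |position_pct - 0.30| > 0.05.
# Holding more than 5% of portfolio value in cash triggers a rebalance the same way.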
def has_orders(context):
# Return true if there are pending orders.
has_orders = False
for sec in context.secs:
orders = get_open_orders(sec)
if orders:
for oo in orders:
message = 'Open order for {amount} shares in {stock}'
message = message.format(amount=oo.amount, stock=sec)
log.info(message)
has_orders = True
return has_orders
``` |
{
"source": "jojo935/Kemono2",
"score": 2
} |
#### File: src/lib/artist.py
```python
from ..internals.cache.redis import get_conn
from ..internals.database.database import get_cursor
from ..utils.utils import get_value
import ujson
import dateutil
import copy
import datetime
def get_top_artists_by_faves(offset, count, reload = False):
redis = get_conn()
key = 'top_artists:' + str(offset) + ':' + str(count)
artists = redis.get(key)
if artists is None or reload:
cursor = get_cursor()
query = """
SELECT l.*, count(*)
FROM lookup l
INNER JOIN account_artist_favorite aaf
ON l.id = aaf.artist_id AND l.service = aaf.service
WHERE aaf.service != 'discord-channel'
GROUP BY (l.id, l.service)
ORDER BY count(*) DESC
OFFSET %s
LIMIT %s
"""
cursor.execute(query, (offset, count,))
artists = cursor.fetchall()
redis.set(key, serialize_artists(artists), ex = 3600)
else:
artists = deserialize_artists(artists)
return artists
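# Descriptive note on the cache-aside pattern used throughout this module: each getter
# builds a Redis key from its arguments, returns the cached value when present, and
# otherwise queries Postgres, stores the result with a TTL (3600s or 600s depending on
# the getter), and returns it. Passing reload=True always bypasses the cache and
# refreshes it.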
def get_count_of_artists_faved(reload = False):
redis = get_conn()
key = 'artists_faved'
count = redis.get(key)
if count is None or reload:
cursor = get_cursor()
query = """
SELECT count(distinct(l.id, l.service))
FROM lookup l
INNER JOIN account_artist_favorite aaf
ON l.id = aaf.artist_id AND l.service = aaf.service
WHERE aaf.service != 'discord-channel'
"""
cursor.execute(query)
count = cursor.fetchone()['count']
redis.set(key, count, ex = 3600)
else:
count = int(count)
return count
def get_random_artist_keys(count, reload = False):
redis = get_conn()
key = 'random_artist_keys:' + str(count)
artist_keys = redis.get(key)
if artist_keys is None or reload:
cursor = get_cursor()
query = "SELECT id, service FROM lookup WHERE service != 'discord-channel' ORDER BY random() LIMIT %s"
cursor.execute(query, (count,))
artist_keys = cursor.fetchall()
redis.set(key, ujson.dumps(artist_keys), ex = 600)
else:
artist_keys = ujson.loads(artist_keys)
return artist_keys
def get_non_discord_artist_keys(reload = False):
redis = get_conn()
key = 'non_discord_artist_keys'
artist_keys = redis.get(key)
if artist_keys is None or reload:
cursor = get_cursor()
query = "SELECT id, service FROM lookup WHERE service != 'discord-channel'"
cursor.execute(query)
artist_keys = cursor.fetchall()
redis.set(key, ujson.dumps(artist_keys), ex = 600)
else:
artist_keys = ujson.loads(artist_keys)
return artist_keys
def get_all_non_discord_artists(reload = False):
redis = get_conn()
key = 'non_discord_artists'
artists = redis.get(key)
if artists is None or reload:
cursor = get_cursor()
query = "SELECT * FROM lookup WHERE service != 'discord-channel'"
cursor.execute(query)
artists = cursor.fetchall()
redis.set(key, serialize_artists(artists), ex = 600)
else:
artists = deserialize_artists(artists)
return artists
def get_artists_by_service(service, reload = False):
redis = get_conn()
key = 'artists_by_service:' + service
artists = redis.get(key)
if artists is None or reload:
cursor = get_cursor()
query = "SELECT * FROM lookup WHERE service = %s"
cursor.execute(query, (service,))
artists = cursor.fetchall()
redis.set(key, serialize_artists(artists), ex = 600)
else:
artists = deserialize_artists(artists)
return artists
def get_artist(service, artist_id, reload = False):
redis = get_conn()
key = 'artist:' + service + ':' + str(artist_id)
artist = redis.get(key)
if artist is None or reload:
cursor = get_cursor()
query = 'SELECT * FROM lookup WHERE id = %s AND service = %s'
cursor.execute(query, (artist_id, service,))
artist = cursor.fetchone()
redis.set(key, serialize_artist(artist), ex = 600)
else:
artist = deserialize_artist(artist)
return artist
def get_artist_post_count(service, artist_id, reload = False):
redis = get_conn()
key = 'artist_post_count:' + service + ':' + str(artist_id)
count = redis.get(key)
if count is None or reload:
cursor = get_cursor()
        query = 'SELECT count(*) as count FROM posts WHERE service = %s AND "user" = %s'
        cursor.execute(query, (service, artist_id,))
count = cursor.fetchone()['count']
redis.set(key, str(count), ex = 600)
else:
count = int(count)
return count
def get_artist_last_updated(service, artist_id, reload = False):
redis = get_conn()
key = 'artist_last_updated:' + service + ':' + str(artist_id)
last_updated = redis.get(key)
if last_updated is None or reload:
cursor = get_cursor()
query = 'SELECT max(added) as max FROM posts WHERE service = %s AND "user" = %s'
cursor.execute(query, (service, artist_id,))
last_updated = cursor.fetchone()
if get_value(last_updated, 'max') is not None:
last_updated = last_updated['max']
else:
last_updated = datetime.datetime.min
redis.set(key, last_updated.isoformat(), ex = 600)
else:
last_updated = dateutil.parser.parse(last_updated)
return last_updated
def serialize_artists(artists):
artists = copy.deepcopy(artists)
return ujson.dumps(list(map(lambda artist: prepare_artist_fields(artist), artists)))
def deserialize_artists(artists_str):
artists = ujson.loads(artists_str)
return list(map(lambda artist: rebuild_artist_fields(artist), artists))
def serialize_artist(artist):
if artist is not None:
artist = prepare_artist_fields(copy.deepcopy(artist))
return ujson.dumps(artist)
def deserialize_artist(artist_str):
artist = ujson.loads(artist_str)
if artist is not None:
artist = rebuild_artist_fields(artist)
return artist
def prepare_artist_fields(artist):
artist['indexed'] = artist['indexed'].isoformat()
return artist
def rebuild_artist_fields(artist):
artist['indexed'] = dateutil.parser.parse(artist['indexed'])
return artist
```
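Every lookup helper above follows the same cache-aside pattern: try Redis first, fall back to Postgres on a miss (or when `reload` is set), then write the result back with a TTL. A minimal sketch of that pattern is below; `fetch_from_db`, the key and the TTL are illustrative stand-ins, not names from the original module.
```python
import json
import redis

r = redis.Redis()

def cached(key, fetch_from_db, ttl=600, reload=False):
    """Cache-aside: Redis first, database on a miss, write back with a TTL."""
    raw = None if reload else r.get(key)
    if raw is not None:
        return json.loads(raw)
    value = fetch_from_db()  # hits the database only on a cache miss
    r.set(key, json.dumps(value), ex=ttl)
    return value

# usage with a hypothetical query helper:
# artists = cached('artists_by_service:patreon', lambda: query_artists('patreon'))
```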
#### File: src/pages/random.py
```python
from flask import Blueprint, redirect, url_for, g
from ..utils.utils import make_cache_key
from ..internals.cache.redis import get_conn
from ..internals.cache.flask_cache import cache
from ..internals.database.database import get_cursor
from ..lib.artist import get_artist, get_random_artist_keys
from ..lib.post import get_post, get_random_posts_keys
from ..lib.ab_test import get_ab_variant
from ..utils.utils import get_value
import random as rand
random = Blueprint('random', __name__)
@random.route('/posts/random')
def random_post():
post = get_random_post()
if post is None:
return redirect('back')
return redirect(url_for('post.get', service = post['service'], artist_id = post['user'], post_id = post['id']))
@random.route('/artists/random')
def random_artist():
artist = get_random_artist()
if artist is None:
return redirect('back')
return redirect(url_for('artists.get', service = artist['service'], artist_id = artist['id']))
def get_random_post():
post_keys = get_random_posts_keys(1000)
if len(post_keys) == 0:
return None
return rand.choice(post_keys)
def get_random_artist():
artists = get_random_artist_keys(1000)
if len(artists) == 0:
return None
return rand.choice(artists)
```
#### File: src/utils/utils.py
```python
from datetime import datetime
from flask import request, g
import json
def make_cache_key(*args, **kwargs):
return request.full_path
def relative_time(date):
"""Take a datetime and return its "age" as a string.
The age can be in second, minute, hour, day, month or year. Only the
biggest unit is considered, e.g. if it's 2 days and 3 hours, "2 days" will
be returned.
Make sure date is not in the future, or else it won't work.
Original Gist by 'zhangsen' @ https://gist.github.com/zhangsen/1199964
"""
def formatn(n, s):
"""Add "s" if it's plural"""
if n == 1:
return "1 %s" % s
elif n > 1:
return "%d %ss" % (n, s)
    def qnr(a, b):
        """Return quotient and remainder (integer division)"""
        return a // b, a % b
class FormatDelta:
def __init__(self, dt):
now = datetime.now()
delta = now - dt
self.day = delta.days
self.second = delta.seconds
self.year, self.day = qnr(self.day, 365)
self.month, self.day = qnr(self.day, 30)
self.hour, self.second = qnr(self.second, 3600)
self.minute, self.second = qnr(self.second, 60)
def format(self):
for period in ['year', 'month', 'day', 'hour', 'minute', 'second']:
n = getattr(self, period)
if n >= 1:
return '{0} ago'.format(formatn(n, period))
return "just now"
return FormatDelta(date).format()
def delta_key(e):
return e['delta_date']
def allowed_file(mime, accepted):
return any(x in mime for x in accepted)
def get_value(d, key, default = None):
if key in d:
return d[key]
return default
def url_is_for_non_logged_file_extension(path):
parts = path.split('/')
if len(parts) == 0:
return False
blocked_extensions = ['js', 'css', 'ico', 'svg']
for extension in blocked_extensions:
if ('.' + extension) in parts[-1]:
return True
return False
def sort_dict_list_by(l, key, reverse = False):
return sorted(l, key=lambda v: v[key], reverse=reverse)
def restrict_value(value, allowed, default = None):
if value not in allowed:
return default
return value
def take(num, l):
if len(l) <= num:
return l
return l[:num]
def offset(num, l):
if len(l) <= num:
return []
return l[num:]
def limit_int(i, limit):
if i > limit:
return limit
return i
def parse_int(string, default = 0):
try:
return int(string)
except Exception:
return default
def render_page_data():
return json.dumps(g.page_data)
``` |
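`relative_time` reports only the largest unit of the age, so a delta of 3 days and 5 hours comes back as "3 days ago". A short illustrative call (the import path depends on the project layout and is an assumption):
```python
from datetime import datetime, timedelta
# from .utils import relative_time  # adjust to the actual module path

print(relative_time(datetime.now() - timedelta(days=3, hours=5)))  # "3 days ago"
print(relative_time(datetime.now() - timedelta(seconds=42)))       # "42 seconds ago"
print(relative_time(datetime.now()))                               # "just now"
```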
{
"source": "jojo96/exceldraw",
"score": 3
} |
#### File: exceldraw/exceldraw/drawex.py
```python
import cv2
import unicodedata
import numpy as np
import pandas as pd
import easygui
class ex():
def __init__(self):
return
def exceldraw(self):
        #Takes no path argument: a file picker (easygui) asks for the image
        #Returns: 2 dataframes (do is 1 where the image is white, dd is the inverse)
        #It works like this:
        #do, dd = ex().exceldraw()
#do.to_csv('C:\Users\admin\Downloads\file12.csv'), can change download path
#dd.to_csv('C:\Users\admin\Downloads\file22.csv')
uni_img = easygui.fileopenbox()
img_path = unicodedata.normalize('NFKD', uni_img)
img = cv2.imread(img_path)
grayImage = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
(thresh, bw) = cv2.threshold(grayImage, 127, 255, cv2.THRESH_BINARY)
bwo = cv2.resize(bw,(128,512))
do = pd.DataFrame(np.zeros((bwo.shape[0], bwo.shape[1])))
dd = pd.DataFrame(np.ones((bwo.shape[0], bwo.shape[1])))
for i in range(bwo.shape[0]):
for j in range(bwo.shape[1]):
if bwo[i][j]==255:
do.at[i,j] = 1
dd.at[i,j] = 0
return do,dd
``` |
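A usage sketch matching the comments above: the method opens its own file picker, so it takes no arguments; the output paths below are placeholders.
```python
from exceldraw.drawex import ex  # package layout as in this repository

do, dd = ex().exceldraw()        # an easygui dialog asks for the source image
do.to_csv(r"C:\temp\white_mask.csv", index=False)  # 1 where the image is white
dd.to_csv(r"C:\temp\black_mask.csv", index=False)  # 1 where the image is black
```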
{
"source": "jojo-/air-quality-lopy-pytrack-sd",
"score": 3
} |
#### File: jojo-/air-quality-lopy-pytrack-sd/L76GNSS.py
```python
from machine import Timer
import time
import gc
import binascii
import pycom
class L76GNSS:
GPS_I2CADDR = const(0x10)
def __init__(self, pytrack=None, sda='P22', scl='P21', timeout=None):
if pytrack is not None:
self.i2c = pytrack.i2c
else:
from machine import I2C
self.i2c = I2C(0, mode=I2C.MASTER, pins=(sda, scl))
self.chrono = Timer.Chrono()
self.timeout = timeout
self.timeout_status = True
self.reg = bytearray(1)
self.i2c.writeto(GPS_I2CADDR, self.reg)
def _read(self):
self.reg = self.i2c.readfrom(GPS_I2CADDR, 128) #Changed from 64 to 128 - I2C L76 says it can read till 255 bytes
return self.reg
def _convert_coords(self, gngll_s):
lat = gngll_s[1]
lat_d = (float(lat) // 100) + ((float(lat) % 100) / 60)
lon = gngll_s[3]
lon_d = (float(lon) // 100) + ((float(lon) % 100) / 60)
if gngll_s[2] == 'S':
lat_d *= -1
if gngll_s[4] == 'W':
lon_d *= -1
return(lat_d, lon_d)
#diff indexes from original - Using GGA sentence
def _convert_coords1(self, gngga_s):
lat = gngga_s[2]
lat_d = (float(lat) // 100) + ((float(lat) % 100) / 60)
lon = gngga_s[4]
lon_d = (float(lon) // 100) + ((float(lon) % 100) / 60)
if gngga_s[3] == 'S':
lat_d *= -1
if gngga_s[5] == 'W':
lon_d *= -1
return(lat_d, lon_d)
def _get_time(self, gngga_s):
gps_time = gngga_s[1]
return(gps_time)
def _get_altitude(self, gngga_s):
gps_altitude = gngga_s[9]
return(gps_altitude)
def _get_satellites(self, gngga_s):
num_satellites = gngga_s[7]
return(num_satellites)
def _fix_quality(self, gngga_s):
valid = gngga_s[6]
if valid == '1':
return True
else:
return False
#Using RMC sentence
def _get_time_rmc(self, gnrmc_s):
gps_time = gnrmc_s[1]
return(gps_time)
def _data_valid_rmc(self, gnrmc_s):
valid = gnrmc_s[2]
if valid == 'A':
return True
else:
return False
def _get_date_rmc(self, gnrmc_s):
gps_date = gnrmc_s[9]
return(gps_date)
def coordinates(self, debug=False):
lat_d, lon_d, debug_timeout = None, None, False
if self.timeout is not None:
self.chrono.reset()
self.chrono.start()
nmea = b''
while True:
if self.timeout is not None and self.chrono.read() >= self.timeout:
self.chrono.stop()
chrono_timeout = self.chrono.read()
self.chrono.reset()
self.timeout_status = False
debug_timeout = True
if not self.timeout_status:
gc.collect()
break
nmea += self._read().lstrip(b'\n\n').rstrip(b'\n\n')
gngll_idx = nmea.find(b'GNGLL')
if gngll_idx >= 0:
gngll = nmea[gngll_idx:]
e_idx = gngll.find(b'\r\n')
if e_idx >= 0:
try:
gngll = gngll[:e_idx].decode('ascii')
gngll_s = gngll.split(',')
lat_d, lon_d = self._convert_coords(gngll_s)
except Exception:
pass
finally:
nmea = nmea[(gngll_idx + e_idx):]
gc.collect()
break
else:
gc.collect()
if len(nmea) > 410: # i suppose it can be safely changed to 82, which is longest NMEA frame
nmea = nmea[-5:] # $GNGL without last L
time.sleep(0.1)
self.timeout_status = True
if debug and debug_timeout:
print('GPS timed out after %f seconds' % (chrono_timeout))
return(None, None)
else:
return(lat_d, lon_d)
#TEST functions
#Parser for GPGGA
def coordinates1(self, debug=False):
lat_d, lon_d, gps_time, valid, gps_altitude, num_satellites, debug_timeout = None, None, None, None, None, False, False
if self.timeout is not None:
self.chrono.reset()
self.chrono.start()
nmea = b''
while True:
if self.timeout is not None and self.chrono.read() >= self.timeout:
self.chrono.stop()
chrono_timeout = self.chrono.read()
self.chrono.reset()
self.timeout_status = False
debug_timeout = True
if not self.timeout_status:
gc.collect()
break
nmea += self._read().lstrip(b'\n\n').rstrip(b'\n\n')
gpgga_idx = nmea.find(b'GPGGA')
if gpgga_idx >= 0:
gpgga = nmea[gpgga_idx:]
gpgga_e_idx = gpgga.find(b'\r\n')
if gpgga_e_idx >= 0:
try:
gpgga = gpgga[:gpgga_e_idx].decode('ascii')
gpgga_s = gpgga.split(',')
lat_d, lon_d = self._convert_coords1(gpgga_s)
gps_time = self._get_time(gpgga_s)
valid = self._fix_quality(gpgga_s)
gps_altitude = self._get_altitude(gpgga_s)
num_satellites = self._get_satellites(gpgga_s)
except Exception:
pass
finally:
nmea = nmea[(gpgga_idx + gpgga_e_idx):]
gc.collect()
break
else:
gc.collect()
if len(nmea) > 410: # i suppose it can be safely changed to 82, which is longest NMEA frame
nmea = nmea[-5:] # $GNGL without last L
time.sleep(0.1)
self.timeout_status = True
if debug and debug_timeout:
print('GPS timed out after %f seconds' % (chrono_timeout))
return(None, None, None, None, False, None)
else:
return(lat_d, lon_d, gps_time, gps_altitude, valid, num_satellites)
def stop(self,pytrack):
ANSELC_ADDR = const(0x18E)
pytrack.poke_memory(ANSELC_ADDR, ~(1 << 7))
#parser for UTC time and date >> Reads GPRMC
def get_datetime(self, debug=True):
lat_d, lon_d, gps_time, valid, gps_date, rmc_idx, debug_timeout = None, None, None, None, None, -1, False
if self.timeout is not None:
self.chrono.reset()
self.chrono.start()
nmea = b''
while True:
if self.timeout is not None and self.chrono.read() >= self.timeout:
self.chrono.stop()
chrono_timeout = self.chrono.read()
self.chrono.reset()
self.timeout_status = False
debug_timeout = True
if not self.timeout_status:
gc.collect()
break
nmea += self._read().lstrip(b'\n\n').rstrip(b'\n\n')
#Since or spg or glonass could give date see which one is present -SEE page 10 GNSS protocol
#GPS only - GPRMC GPGGA
#Glonass only - GNRMC GPGGA
#GPS+GLON - GNRMC GPGGA
#No station - GPRMC GPGGA
gprmc_idx = nmea.find(b'GPRMC')
gnrmc_idx = nmea.find(b'GNRMC')
if gprmc_idx >= 0:
rmc_idx = gprmc_idx
if gnrmc_idx >= 0:
rmc_idx = gnrmc_idx
if rmc_idx >= 0:
rmc = nmea[rmc_idx:]
rmc_e_idx = rmc.find(b'\r\n')
if rmc_e_idx >= 0:
print(nmea)
try:
rmc = rmc[:rmc_e_idx].decode('ascii')
rmc_s = rmc.split(',')
lat_d, lon_d = self._convert_coords1(rmc_s[1:])
gps_time = self._get_time_rmc(rmc_s)
valid = self._data_valid_rmc(rmc_s)
gps_date = self._get_date_rmc(rmc_s)
except Exception:
pass
finally:
nmea = nmea[(rmc_idx + rmc_e_idx):]
gc.collect()
break
else:
gc.collect()
if len(nmea) > 512: # i suppose it can be safely changed to 82, which is longest NMEA frame --CHANGED to 512
nmea = nmea[-5:] # $GNGL without last L
time.sleep(0.1)
self.timeout_status = True
if debug and debug_timeout:
print('GPS timed out after %f seconds' % (chrono_timeout))
return(None, None, None, False, None)
else:
return(lat_d, lon_d, gps_time, valid, gps_date)
``` |
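The `_convert_coords*` helpers decode NMEA `ddmm.mmmm` fields into decimal degrees: the integer degrees are `value // 100`, the remaining minutes are divided by 60, and the sign flips for the S/W hemispheres. The same arithmetic as a standalone sketch (sample values are illustrative):
```python
def nmea_to_decimal(value, hemisphere):
    """Convert NMEA ddmm.mmmm (or dddmm.mmmm) plus hemisphere to decimal degrees."""
    degrees = (float(value) // 100) + ((float(value) % 100) / 60)
    return -degrees if hemisphere in ("S", "W") else degrees

print(nmea_to_decimal("4807.038", "N"))   # 48 + 7.038/60  ~ 48.1173
print(nmea_to_decimal("01131.000", "E"))  # 11 + 31.0/60   ~ 11.5167
```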
{
"source": "jojoba106/OpenPype",
"score": 2
} |
#### File: openpype/hooks/pre_add_last_workfile_arg.py
```python
import os
from openpype.lib import PreLaunchHook
class AddLastWorkfileToLaunchArgs(PreLaunchHook):
"""Add last workfile path to launch arguments.
This is not possible to do for all applications the same way.
Checks 'start_last_workfile', if set to False, it will not open last
workfile. This property is set explicitly in Launcher.
"""
# Execute after workfile template copy
order = 10
app_groups = [
"maya",
"nuke",
"nukex",
"hiero",
"nukestudio",
"blender",
"photoshop",
"tvpaint",
"afftereffects"
]
def execute(self):
if not self.data.get("start_last_workfile"):
self.log.info("It is set to not start last workfile on start.")
return
last_workfile = self.data.get("last_workfile_path")
if not last_workfile:
self.log.warning("Last workfile was not collected.")
return
if not os.path.exists(last_workfile):
self.log.info("Current context does not have any workfile yet.")
return
# Add path to workfile to arguments
self.launch_context.launch_args.append(last_workfile)
```
#### File: openpype_flame_to_ftrack/modules/app_utils.py
```python
import os
import io
import ConfigParser as CP
from xml.etree import ElementTree as ET
from contextlib import contextmanager
PLUGIN_DIR = os.path.dirname(os.path.dirname(__file__))
EXPORT_PRESETS_DIR = os.path.join(PLUGIN_DIR, "export_preset")
CONFIG_DIR = os.path.join(os.path.expanduser(
"~/.openpype"), "openpype_flame_to_ftrack")
@contextmanager
def make_temp_dir():
import tempfile
try:
dirpath = tempfile.mkdtemp()
yield dirpath
except IOError as _error:
raise IOError("Not able to create temp dir file: {}".format(_error))
finally:
pass
@contextmanager
def get_config(section=None):
cfg_file_path = os.path.join(CONFIG_DIR, "settings.ini")
# create config dir
if not os.path.exists(CONFIG_DIR):
print("making dirs at: `{}`".format(CONFIG_DIR))
os.makedirs(CONFIG_DIR, mode=0o777)
# write default data to settings.ini
if not os.path.exists(cfg_file_path):
default_cfg = cfg_default()
config = CP.RawConfigParser()
config.readfp(io.BytesIO(default_cfg))
with open(cfg_file_path, 'wb') as cfg_file:
config.write(cfg_file)
try:
config = CP.RawConfigParser()
config.read(cfg_file_path)
if section:
_cfg_data = {
k: v
for s in config.sections()
for k, v in config.items(s)
if s == section
}
else:
_cfg_data = {s: dict(config.items(s)) for s in config.sections()}
yield _cfg_data
except IOError as _error:
raise IOError('Not able to read settings.ini file: {}'.format(_error))
finally:
pass
def set_config(cfg_data, section=None):
cfg_file_path = os.path.join(CONFIG_DIR, "settings.ini")
config = CP.RawConfigParser()
config.read(cfg_file_path)
try:
if not section:
for section in cfg_data:
for key, value in cfg_data[section].items():
config.set(section, key, value)
else:
for key, value in cfg_data.items():
config.set(section, key, value)
with open(cfg_file_path, 'wb') as cfg_file:
config.write(cfg_file)
except IOError as _error:
raise IOError('Not able to write settings.ini file: {}'.format(_error))
def cfg_default():
return """
[main]
workfile_start_frame = 1001
shot_handles = 0
shot_name_template = {sequence}_{shot}
hierarchy_template = shots[Folder]/{sequence}[Sequence]
create_task_type = Compositing
"""
def configure_preset(file_path, data):
split_fp = os.path.splitext(file_path)
new_file_path = split_fp[0] + "_tmp" + split_fp[-1]
with open(file_path, "r") as datafile:
tree = ET.parse(datafile)
for key, value in data.items():
for element in tree.findall(".//{}".format(key)):
print(element)
element.text = str(value)
tree.write(new_file_path)
return new_file_path
def export_thumbnail(sequence, tempdir_path, data):
import flame
export_preset = os.path.join(
EXPORT_PRESETS_DIR,
"openpype_seg_thumbnails_jpg.xml"
)
new_path = configure_preset(export_preset, data)
poster_frame_exporter = flame.PyExporter()
poster_frame_exporter.foreground = True
poster_frame_exporter.export(sequence, new_path, tempdir_path)
def export_video(sequence, tempdir_path, data):
import flame
export_preset = os.path.join(
EXPORT_PRESETS_DIR,
"openpype_seg_video_h264.xml"
)
new_path = configure_preset(export_preset, data)
poster_frame_exporter = flame.PyExporter()
poster_frame_exporter.foreground = True
poster_frame_exporter.export(sequence, new_path, tempdir_path)
def timecode_to_frames(timecode, framerate):
def _seconds(value):
if isinstance(value, str):
_zip_ft = zip((3600, 60, 1, 1 / framerate), value.split(':'))
return sum(f * float(t) for f, t in _zip_ft)
elif isinstance(value, (int, float)):
return value / framerate
return 0
def _frames(seconds):
return seconds * framerate
def tc_to_frames(_timecode, start=None):
return _frames(_seconds(_timecode) - _seconds(start))
if '+' in timecode:
timecode = timecode.replace('+', ':')
elif '#' in timecode:
timecode = timecode.replace('#', ':')
frames = int(round(tc_to_frames(timecode, start='00:00:00:00')))
return frames
```
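`timecode_to_frames` weights the four timecode fields as hours, minutes, seconds and frames (frames count as `1 / framerate` seconds) and multiplies the total back by the frame rate. A worked example, assuming the function above is importable:
```python
# "01:00:10:12" at 24 fps:
#   seconds = 1*3600 + 0*60 + 10*1 + 12*(1/24.0) = 3610.5
#   frames  = round(3610.5 * 24) = 86652
print(timecode_to_frames("01:00:10:12", 24.0))  # 86652
```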
#### File: plugins/publish/extract_subset_resources.py
```python
import os
from pprint import pformat
from copy import deepcopy
import pyblish.api
import openpype.api
from openpype.hosts.flame import api as opfapi
class ExtractSubsetResources(openpype.api.Extractor):
"""
Extractor for transcoding files from Flame clip
"""
label = "Extract subset resources"
order = pyblish.api.ExtractorOrder
families = ["clip"]
hosts = ["flame"]
# plugin defaults
default_presets = {
"thumbnail": {
"ext": "jpg",
"xml_preset_file": "Jpeg (8-bit).xml",
"xml_preset_dir": "",
"representation_add_range": False,
"representation_tags": ["thumbnail"]
},
"ftrackpreview": {
"ext": "mov",
"xml_preset_file": "Apple iPad (1920x1080).xml",
"xml_preset_dir": "",
"representation_add_range": True,
"representation_tags": [
"review",
"delete"
]
}
}
keep_original_representation = False
# hide publisher during exporting
hide_ui_on_process = True
# settings
export_presets_mapping = {}
def process(self, instance):
if (
self.keep_original_representation
and "representations" not in instance.data
or not self.keep_original_representation
):
instance.data["representations"] = []
frame_start = instance.data["frameStart"]
handle_start = instance.data["handleStart"]
frame_start_handle = frame_start - handle_start
source_first_frame = instance.data["sourceFirstFrame"]
source_start_handles = instance.data["sourceStartH"]
source_end_handles = instance.data["sourceEndH"]
source_duration_handles = (
source_end_handles - source_start_handles) + 1
clip_data = instance.data["flameSourceClip"]
clip = clip_data["PyClip"]
in_mark = (source_start_handles - source_first_frame) + 1
out_mark = in_mark + source_duration_handles
staging_dir = self.staging_dir(instance)
# add default preset type for thumbnail and reviewable video
# update them with settings and override in case the same
# are found in there
export_presets = deepcopy(self.default_presets)
export_presets.update(self.export_presets_mapping)
# with maintained duplication loop all presets
with opfapi.maintained_object_duplication(clip) as duplclip:
# loop all preset names and
for unique_name, preset_config in export_presets.items():
kwargs = {}
preset_file = preset_config["xml_preset_file"]
preset_dir = preset_config["xml_preset_dir"]
repre_tags = preset_config["representation_tags"]
# validate xml preset file is filled
if preset_file == "":
raise ValueError(
("Check Settings for {} preset: "
"`XML preset file` is not filled").format(
unique_name)
)
# resolve xml preset dir if not filled
if preset_dir == "":
preset_dir = opfapi.get_preset_path_by_xml_name(
preset_file)
if not preset_dir:
raise ValueError(
("Check Settings for {} preset: "
"`XML preset file` {} is not found").format(
unique_name, preset_file)
)
# create preset path
preset_path = str(os.path.join(
preset_dir, preset_file
))
# define kwargs based on preset type
if "thumbnail" in unique_name:
kwargs["thumb_frame_number"] = in_mark + (
source_duration_handles / 2)
else:
kwargs.update({
"in_mark": in_mark,
"out_mark": out_mark
})
export_dir_path = str(os.path.join(
staging_dir, unique_name
))
os.makedirs(export_dir_path)
# export
opfapi.export_clip(
export_dir_path, duplclip, preset_path, **kwargs)
# create representation data
representation_data = {
"name": unique_name,
"outputName": unique_name,
"ext": preset_config["ext"],
"stagingDir": export_dir_path,
"tags": repre_tags
}
files = os.listdir(export_dir_path)
# add files to represetation but add
# imagesequence as list
if (
"movie_file" in preset_path
or unique_name == "thumbnail"
):
representation_data["files"] = files.pop()
else:
representation_data["files"] = files
# add frame range
if preset_config["representation_add_range"]:
representation_data.update({
"frameStart": frame_start_handle,
"frameEnd": (
frame_start_handle + source_duration_handles),
"fps": instance.data["fps"]
})
instance.data["representations"].append(representation_data)
# add review family if found in tags
if "review" in repre_tags:
instance.data["families"].append("review")
self.log.info("Added representation: {}".format(
representation_data))
self.log.debug("All representations: {}".format(
pformat(instance.data["representations"])))
```
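The in/out marks handed to the exporter are derived from the source clip range and the handles; with illustrative frame numbers (not from a real project) the arithmetic looks like this:
```python
# illustrative numbers only
source_first_frame = 1000
source_start_handles, source_end_handles = 1010, 1060
source_duration_handles = (source_end_handles - source_start_handles) + 1  # 51

in_mark = (source_start_handles - source_first_frame) + 1   # 11
out_mark = in_mark + source_duration_handles                # 62
thumb_frame = in_mark + source_duration_handles / 2         # frame used for the thumbnail
```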
#### File: fusion/api/pipeline.py
```python
import os
from avalon import api as avalon
from pyblish import api as pyblish
from openpype.api import Logger
import openpype.hosts.fusion
log = Logger().get_logger(__name__)
HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.fusion.__file__))
PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
def install():
"""Install fusion-specific functionality of avalon-core.
This is where you install menus and register families, data
and loaders into fusion.
It is called automatically when installing via `api.install(avalon.fusion)`
See the Maya equivalent for inspiration on how to implement this.
"""
# Disable all families except for the ones we explicitly want to see
family_states = ["imagesequence",
"camera",
"pointcache"]
avalon.data["familiesStateDefault"] = False
avalon.data["familiesStateToggled"] = family_states
log.info("openpype.hosts.fusion installed")
pyblish.register_host("fusion")
pyblish.register_plugin_path(PUBLISH_PATH)
log.info("Registering Fusion plug-ins..")
avalon.register_plugin_path(avalon.Loader, LOAD_PATH)
avalon.register_plugin_path(avalon.Creator, CREATE_PATH)
avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)
def uninstall():
"""Uninstall all that was installed
This is where you undo everything that was done in `install()`.
That means, removing menus, deregistering families and data
and everything. It should be as though `install()` was never run,
because odds are calling this function means the user is interested
in re-installing shortly afterwards. If, for example, he has been
modifying the menu or registered families.
"""
pyblish.deregister_host("fusion")
pyblish.deregister_plugin_path(PUBLISH_PATH)
log.info("Deregistering Fusion plug-ins..")
avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)
avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH)
avalon.deregister_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled)
def on_pyblish_instance_toggled(instance, new_value, old_value):
"""Toggle saver tool passthrough states on instance toggles."""
from avalon.fusion import comp_lock_and_undo_chunk
comp = instance.context.data.get("currentComp")
if not comp:
return
savers = [tool for tool in instance if
getattr(tool, "ID", None) == "Saver"]
if not savers:
return
# Whether instances should be passthrough based on new value
passthrough = not new_value
with comp_lock_and_undo_chunk(comp,
undo_queue_name="Change instance "
"active state"):
for tool in savers:
attrs = tool.GetAttrs()
current = attrs["TOOLB_PassThrough"]
if current != passthrough:
tool.SetAttrs({"TOOLB_PassThrough": passthrough})
```
#### File: plugins/create/create_exr_saver.py
```python
import os
import openpype.api
from avalon import fusion
class CreateOpenEXRSaver(openpype.api.Creator):
name = "openexrDefault"
label = "Create OpenEXR Saver"
hosts = ["fusion"]
family = "render"
def process(self):
file_format = "OpenEXRFormat"
comp = fusion.get_current_comp()
# todo: improve method of getting current environment
# todo: pref avalon.Session over os.environ
workdir = os.path.normpath(os.environ["AVALON_WORKDIR"])
filename = "{}..tiff".format(self.name)
filepath = os.path.join(workdir, "render", filename)
with fusion.comp_lock_and_undo_chunk(comp):
args = (-32768, -32768) # Magical position numbers
saver = comp.AddTool("Saver", *args)
saver.SetAttrs({"TOOLS_Name": self.name})
            # Setting input attributes is different from basic attributes;
            # not to be confused with "MainInputAttributes"
saver["Clip"] = filepath
saver["OutputFormat"] = file_format
            # Sanity check: the saver should now expose the chosen format's attrs
            if saver[file_format] is None:
                raise RuntimeError("File format is not set to OpenEXRFormat, "
                                   "this is a bug")
# Set file format attributes
saver[file_format]["Depth"] = 1 # int8 | int16 | float32 | other
saver[file_format]["SaveAlpha"] = 0
```
#### File: fusion/scripts/set_rendermode.py
```python
from Qt import QtWidgets
from avalon.vendor import qtawesome
import avalon.fusion as avalon
_help = {"local": "Render the comp on your own machine and publish "
"it from that the destination folder",
"farm": "Submit a Fusion render job to a Render farm to use all other"
" computers and add a publish job"}
class SetRenderMode(QtWidgets.QWidget):
def __init__(self, parent=None):
QtWidgets.QWidget.__init__(self, parent)
self._comp = avalon.get_current_comp()
self._comp_name = self._get_comp_name()
self.setWindowTitle("Set Render Mode")
self.setFixedSize(300, 175)
layout = QtWidgets.QVBoxLayout()
# region comp info
comp_info_layout = QtWidgets.QHBoxLayout()
update_btn = QtWidgets.QPushButton(qtawesome.icon("fa.refresh",
color="white"), "")
update_btn.setFixedWidth(25)
update_btn.setFixedHeight(25)
comp_information = QtWidgets.QLineEdit()
comp_information.setEnabled(False)
comp_info_layout.addWidget(comp_information)
comp_info_layout.addWidget(update_btn)
# endregion comp info
# region modes
mode_options = QtWidgets.QComboBox()
mode_options.addItems(_help.keys())
mode_information = QtWidgets.QTextEdit()
mode_information.setReadOnly(True)
# endregion modes
accept_btn = QtWidgets.QPushButton("Accept")
layout.addLayout(comp_info_layout)
layout.addWidget(mode_options)
layout.addWidget(mode_information)
layout.addWidget(accept_btn)
self.setLayout(layout)
self.comp_information = comp_information
self.update_btn = update_btn
self.mode_options = mode_options
self.mode_information = mode_information
self.accept_btn = accept_btn
self.connections()
self.update()
# Force updated render mode help text
self._update_rendermode_info()
def connections(self):
"""Build connections between code and buttons"""
self.update_btn.clicked.connect(self.update)
self.accept_btn.clicked.connect(self._set_comp_rendermode)
self.mode_options.currentIndexChanged.connect(
self._update_rendermode_info)
def update(self):
"""Update all information in the UI"""
self._comp = avalon.get_current_comp()
self._comp_name = self._get_comp_name()
self.comp_information.setText(self._comp_name)
# Update current comp settings
mode = self._get_comp_rendermode()
index = self.mode_options.findText(mode)
self.mode_options.setCurrentIndex(index)
def _update_rendermode_info(self):
rendermode = self.mode_options.currentText()
self.mode_information.setText(_help[rendermode])
def _get_comp_name(self):
return self._comp.GetAttrs("COMPS_Name")
def _get_comp_rendermode(self):
return self._comp.GetData("openpype.rendermode") or "local"
def _set_comp_rendermode(self):
rendermode = self.mode_options.currentText()
self._comp.SetData("openpype.rendermode", rendermode)
self._comp.Print("Updated render mode to '%s'\n" % rendermode)
self.hide()
def _validation(self):
ui_mode = self.mode_options.currentText()
comp_mode = self._get_comp_rendermode()
return comp_mode == ui_mode
```
#### File: utility_scripts/32bit/backgrounds_selected_to32bit.py
```python
from avalon.fusion import comp_lock_and_undo_chunk
from avalon import fusion
comp = fusion.get_current_comp()
def main():
"""Set all selected backgrounds to 32 bit"""
with comp_lock_and_undo_chunk(comp, 'Selected Backgrounds to 32bit'):
tools = comp.GetToolList(True, "Background").values()
for tool in tools:
tool.Depth = 5
main()
```
#### File: maya/api/customize.py
```python
import os
import logging
from functools import partial
import maya.cmds as mc
import maya.mel as mel
from openpype.api import resources
from openpype.tools.utils import host_tools
from .lib import get_main_window
log = logging.getLogger(__name__)
COMPONENT_MASK_ORIGINAL = {}
def override_component_mask_commands():
"""Override component mask ctrl+click behavior.
This implements special behavior for Maya's component
mask menu items where a ctrl+click will instantly make
it an isolated behavior disabling all others.
Tested in Maya 2016 and 2018
"""
log.info("Installing override_component_mask_commands..")
# Get all object mask buttons
buttons = mc.formLayout("objectMaskIcons",
query=True,
childArray=True)
# Skip the triangle list item
buttons = [btn for btn in buttons if btn != "objPickMenuLayout"]
def on_changed_callback(raw_command, state):
"""New callback"""
# If "control" is held force the toggled one to on and
# toggle the others based on whether any of the buttons
# was remaining active after the toggle, if not then
# enable all
if mc.getModifiers() == 4: # = CTRL
state = True
active = [mc.iconTextCheckBox(btn, query=True, value=True) for btn
in buttons]
if any(active):
mc.selectType(allObjects=False)
else:
mc.selectType(allObjects=True)
# Replace #1 with the current button state
cmd = raw_command.replace(" #1", " {}".format(int(state)))
mel.eval(cmd)
for btn in buttons:
# Store a reference to the original command so that if
# we rerun this override command it doesn't recursively
# try to implement the fix. (This also allows us to
# "uninstall" the behavior later)
if btn not in COMPONENT_MASK_ORIGINAL:
original = mc.iconTextCheckBox(btn, query=True, cc=True)
COMPONENT_MASK_ORIGINAL[btn] = original
# Assign the special callback
original = COMPONENT_MASK_ORIGINAL[btn]
new_fn = partial(on_changed_callback, original)
mc.iconTextCheckBox(btn, edit=True, cc=new_fn)
def override_toolbox_ui():
"""Add custom buttons in Toolbox as replacement for Maya web help icon."""
icons = resources.get_resource("icons")
parent_widget = get_main_window()
# Ensure the maya web icon on toolbox exists
web_button = "ToolBox|MainToolboxLayout|mayaWebButton"
if not mc.iconTextButton(web_button, query=True, exists=True):
return
mc.iconTextButton(web_button, edit=True, visible=False)
# real = 32, but 36 with padding - according to toolbox mel script
icon_size = 36
parent = web_button.rsplit("|", 1)[0]
# Ensure the parent is a formLayout
if not mc.objectTypeUI(parent) == "formLayout":
return
# Create our controls
controls = []
controls.append(
mc.iconTextButton(
"pype_toolbox_lookmanager",
annotation="Look Manager",
label="Look Manager",
image=os.path.join(icons, "lookmanager.png"),
command=host_tools.show_look_assigner,
width=icon_size,
height=icon_size,
parent=parent
)
)
controls.append(
mc.iconTextButton(
"pype_toolbox_workfiles",
annotation="Work Files",
label="Work Files",
image=os.path.join(icons, "workfiles.png"),
command=lambda: host_tools.show_workfiles(
parent=parent_widget
),
width=icon_size,
height=icon_size,
parent=parent
)
)
controls.append(
mc.iconTextButton(
"pype_toolbox_loader",
annotation="Loader",
label="Loader",
image=os.path.join(icons, "loader.png"),
command=lambda: host_tools.show_loader(
parent=parent_widget, use_context=True
),
width=icon_size,
height=icon_size,
parent=parent
)
)
controls.append(
mc.iconTextButton(
"pype_toolbox_manager",
annotation="Inventory",
label="Inventory",
image=os.path.join(icons, "inventory.png"),
command=lambda: host_tools.show_scene_inventory(
parent=parent_widget
),
width=icon_size,
height=icon_size,
parent=parent
)
)
# Add the buttons on the bottom and stack
# them above each other with side padding
controls.reverse()
for i, control in enumerate(controls):
previous = controls[i - 1] if i > 0 else web_button
mc.formLayout(parent, edit=True,
attachControl=[control, "bottom", 0, previous],
attachForm=([control, "left", 1],
[control, "right", 1]))
```
#### File: maya/api/lib_rendersetup.py
```python
from maya import cmds
import maya.api.OpenMaya as om
import logging
import maya.app.renderSetup.model.utils as utils
from maya.app.renderSetup.model import (
renderSetup
)
from maya.app.renderSetup.model.override import (
AbsOverride,
RelOverride,
UniqueOverride
)
ExactMatch = 0
ParentMatch = 1
ChildMatch = 2
DefaultRenderLayer = "defaultRenderLayer"
log = logging.getLogger(__name__)
def get_rendersetup_layer(layer):
"""Return render setup layer name.
This also converts names from legacy renderLayer node name to render setup
name.
Note: `defaultRenderLayer` is not a renderSetupLayer node but it is however
the valid layer name for Render Setup - so we return that as is.
Example:
>>> for legacy_layer in cmds.ls(type="renderLayer"):
>>> layer = get_rendersetup_layer(legacy_layer)
Returns:
str or None: Returns renderSetupLayer node name if `layer` is a valid
layer name in legacy renderlayers or render setup layers.
Returns None if the layer can't be found or Render Setup is
currently disabled.
"""
if layer == DefaultRenderLayer:
# defaultRenderLayer doesn't have a `renderSetupLayer`
return layer
if not cmds.mayaHasRenderSetup():
return None
if not cmds.objExists(layer):
return None
if cmds.nodeType(layer) == "renderSetupLayer":
return layer
# By default Render Setup renames the legacy renderlayer
# to `rs_<layername>` but lets not rely on that as the
# layer node can be renamed manually
connections = cmds.listConnections(layer + ".message",
type="renderSetupLayer",
exactType=True,
source=False,
destination=True,
plugs=True) or []
return next((conn.split(".", 1)[0] for conn in connections
if conn.endswith(".legacyRenderLayer")), None)
def get_attr_in_layer(node_attr, layer):
"""Return attribute value in Render Setup layer.
This will only work for attributes which can be
retrieved with `maya.cmds.getAttr` and for which
Relative and Absolute overrides are applicable.
Examples:
>>> get_attr_in_layer("defaultResolution.width", layer="layer1")
>>> get_attr_in_layer("defaultRenderGlobals.startFrame", layer="layer")
>>> get_attr_in_layer("transform.translate", layer="layer3")
Args:
        node_attr (str): attribute name as 'node.attribute'
layer (str): layer name
Returns:
object: attribute value in layer
"""
# Delay pymel import to here because it's slow to load
import pymel.core as pm
def _layer_needs_update(layer):
"""Return whether layer needs updating."""
# Use `getattr` as e.g. DefaultRenderLayer does not have the attribute
return getattr(layer, "needsMembershipUpdate", False) or \
getattr(layer, "needsApplyUpdate", False)
def get_default_layer_value(node_attr_):
"""Return attribute value in defaultRenderLayer"""
inputs = cmds.listConnections(node_attr_,
source=True,
destination=False,
# We want to skip conversion nodes since
# an override to `endFrame` could have
# a `unitToTimeConversion` node
# in-between
skipConversionNodes=True,
type="applyOverride") or []
if inputs:
_override = inputs[0]
history_overrides = cmds.ls(cmds.listHistory(_override,
pruneDagObjects=True),
type="applyOverride")
node = history_overrides[-1] if history_overrides else _override
node_attr_ = node + ".original"
return pm.getAttr(node_attr_, asString=True)
layer = get_rendersetup_layer(layer)
rs = renderSetup.instance()
current_layer = rs.getVisibleRenderLayer()
if current_layer.name() == layer:
# Ensure layer is up-to-date
if _layer_needs_update(current_layer):
try:
rs.switchToLayer(current_layer)
except RuntimeError:
# Some cases can cause errors on switching
# the first time with Render Setup layers
# e.g. different overrides to compounds
# and its children plugs. So we just force
# it another time. If it then still fails
# we will let it error out.
rs.switchToLayer(current_layer)
return pm.getAttr(node_attr, asString=True)
overrides = get_attr_overrides(node_attr, layer)
default_layer_value = get_default_layer_value(node_attr)
if not overrides:
return default_layer_value
value = default_layer_value
for match, layer_override, index in overrides:
if isinstance(layer_override, AbsOverride):
# Absolute override
value = pm.getAttr(layer_override.name() + ".attrValue")
if match == ExactMatch:
value = value
if match == ParentMatch:
value = value[index]
if match == ChildMatch:
value[index] = value
elif isinstance(layer_override, RelOverride):
# Relative override
# Value = Original * Multiply + Offset
multiply = pm.getAttr(layer_override.name() + ".multiply")
offset = pm.getAttr(layer_override.name() + ".offset")
if match == ExactMatch:
value = value * multiply + offset
if match == ParentMatch:
value = value * multiply[index] + offset[index]
if match == ChildMatch:
value[index] = value[index] * multiply + offset
else:
raise TypeError("Unsupported override: %s" % layer_override)
return value
def get_attr_overrides(node_attr, layer,
skip_disabled=True,
skip_local_render=True,
stop_at_absolute_override=True):
"""Return all Overrides applicable to the attribute.
Overrides are returned as a 3-tuple:
(Match, Override, Index)
Match:
This is any of ExactMatch, ParentMatch, ChildMatch
and defines whether the override is exactly on the
plug, on the parent or on a child plug.
Override:
This is the RenderSetup Override instance.
Index:
This is the Plug index under the parent or for
the child that matches. The ExactMatch index will
always be None. For ParentMatch the index is which
index the plug is under the parent plug. For ChildMatch
the index is which child index matches the plug.
Args:
node_attr (str): attribute name as 'node.attribute'
layer (str): layer name
skip_disabled (bool): exclude disabled overrides
skip_local_render (bool): exclude overrides marked
as local render.
stop_at_absolute_override: exclude overrides prior
to the last absolute override as they have
no influence on the resulting value.
Returns:
list: Ordered Overrides in order of strength
"""
def get_mplug_children(plug):
"""Return children MPlugs of compound MPlug"""
children = []
if plug.isCompound:
for i in range(plug.numChildren()):
children.append(plug.child(i))
return children
def get_mplug_names(mplug):
"""Return long and short name of MPlug"""
long_name = mplug.partialName(useLongNames=True)
short_name = mplug.partialName(useLongNames=False)
return {long_name, short_name}
def iter_override_targets(_override):
try:
for target in _override._targets():
yield target
except AssertionError:
# Workaround: There is a bug where the private `_targets()` method
# fails on some attribute plugs. For example overrides
# to the defaultRenderGlobals.endFrame
# (Tested in Maya 2020.2)
log.debug("Workaround for %s" % _override)
from maya.app.renderSetup.common.utils import findPlug
attr = _override.attributeName()
if isinstance(_override, UniqueOverride):
node = _override.targetNodeName()
yield findPlug(node, attr)
else:
nodes = _override.parent().selector().nodes()
for node in nodes:
if cmds.attributeQuery(attr, node=node, exists=True):
yield findPlug(node, attr)
# Get the MPlug for the node.attr
sel = om.MSelectionList()
sel.add(node_attr)
plug = sel.getPlug(0)
layer = get_rendersetup_layer(layer)
if layer == DefaultRenderLayer:
# DefaultRenderLayer will never have overrides
# since it's the default layer
return []
rs_layer = renderSetup.instance().getRenderLayer(layer)
if rs_layer is None:
# Renderlayer does not exist
return
# Get any parent or children plugs as we also
# want to include them in the attribute match
# for overrides
parent = plug.parent() if plug.isChild else None
parent_index = None
if parent:
parent_index = get_mplug_children(parent).index(plug)
children = get_mplug_children(plug)
# Create lookup for the attribute by both long
# and short names
attr_names = get_mplug_names(plug)
for child in children:
attr_names.update(get_mplug_names(child))
if parent:
attr_names.update(get_mplug_names(parent))
# Get all overrides of the layer
# And find those that are relevant to the attribute
plug_overrides = []
# Iterate over the overrides in reverse so we get the last
# overrides first and can "break" whenever an absolute
# override is reached
layer_overrides = list(utils.getOverridesRecursive(rs_layer))
for layer_override in reversed(layer_overrides):
if skip_disabled and not layer_override.isEnabled():
# Ignore disabled overrides
continue
if skip_local_render and layer_override.isLocalRender():
continue
# The targets list can be very large so we'll do
# a quick filter by attribute name to detect whether
# it matches the attribute name, or its parent or child
if layer_override.attributeName() not in attr_names:
continue
override_match = None
for override_plug in iter_override_targets(layer_override):
override_match = None
if plug == override_plug:
override_match = (ExactMatch, layer_override, None)
elif parent and override_plug == parent:
override_match = (ParentMatch, layer_override, parent_index)
elif children and override_plug in children:
child_index = children.index(override_plug)
override_match = (ChildMatch, layer_override, child_index)
if override_match:
plug_overrides.append(override_match)
break
if (
override_match and
stop_at_absolute_override and
isinstance(layer_override, AbsOverride) and
            # When the override is only on a child plug then it doesn't
            # override the entire value, so we do not stop at this override
not override_match[0] == ChildMatch
):
# If override is absolute override, then BREAK out
# of parent loop we don't need to look any further as
# this is the absolute override
break
return reversed(plug_overrides)
```
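`get_attr_in_layer` resolves the collected overrides in order of strength: a relative override applies `value * multiply + offset`, while an absolute override replaces the value outright (which is why the search can stop at the last absolute one). A small numeric sketch of that resolution order, independent of Maya, with hypothetical override values:
```python
original = 1920  # e.g. defaultResolution.width outside the layer
overrides = [                 # weakest first
    ("abs", 2048),            # absolute: replaces the value
    ("rel", (0.5, 0)),        # relative: value * multiply + offset
]

value = original
for kind, payload in overrides:
    if kind == "abs":
        value = payload
    else:
        multiply, offset = payload
        value = value * multiply + offset

print(value)  # 1024.0: replaced by the absolute override, then halved
```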
#### File: photoshop/api/plugin.py
```python
import re
import avalon.api
from Qt import QtWidgets
from . import lib
from .launch_logic import stub
def get_unique_layer_name(layers, asset_name, subset_name):
"""
Gets all layer names and if 'asset_name_subset_name' is present, it
increases suffix by 1 (eg. creates unique layer name - for Loader)
Args:
layers (list) of dict with layers info (name, id etc.)
asset_name (string):
subset_name (string):
Returns:
(string): name_00X (without version)
"""
name = "{}_{}".format(asset_name, subset_name)
names = {}
for layer in layers:
layer_name = re.sub(r'_\d{3}$', '', layer.name)
if layer_name in names.keys():
names[layer_name] = names[layer_name] + 1
else:
names[layer_name] = 1
occurrences = names.get(name, 0)
return "{}_{:0>3d}".format(name, occurrences + 1)
class PhotoshopLoader(avalon.api.Loader):
@staticmethod
def get_stub():
return stub()
class Creator(avalon.api.Creator):
"""Creator plugin to create instances in Photoshop
A LayerSet is created to support any number of layers in an instance. If
the selection is used, these layers will be added to the LayerSet.
"""
def process(self):
# Photoshop can have multiple LayerSets with the same name, which does
# not work with Avalon.
msg = "Instance with name \"{}\" already exists.".format(self.name)
stub = lib.stub() # only after Photoshop is up
for layer in stub.get_layers():
if self.name.lower() == layer.Name.lower():
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setText(msg)
msg.exec_()
return False
# Store selection because adding a group will change selection.
with lib.maintained_selection():
# Add selection to group.
if (self.options or {}).get("useSelection"):
group = stub.group_selected_layers(self.name)
else:
group = stub.create_group(self.name)
stub.imprint(group, self.data)
return group
```
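`get_unique_layer_name` only needs objects exposing a `.name` attribute, so it can be exercised without Photoshop; a sketch with hypothetical layer stubs:
```python
from collections import namedtuple
# from openpype.hosts.photoshop.api.plugin import get_unique_layer_name

Layer = namedtuple("Layer", ["name"])
layers = [Layer("Hero_imageMain_001"), Layer("Hero_imageMain_002"), Layer("Hero_imageBg")]

print(get_unique_layer_name(layers, "Hero", "imageMain"))  # Hero_imageMain_003
print(get_unique_layer_name(layers, "Hero", "imageFg"))    # Hero_imageFg_001
```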
#### File: resolve/api/plugin.py
```python
import re
import uuid
from avalon import api
import openpype.api as pype
from openpype.hosts import resolve
from avalon.vendor import qargparse
from . import lib
from Qt import QtWidgets, QtCore
class CreatorWidget(QtWidgets.QDialog):
# output items
items = dict()
def __init__(self, name, info, ui_inputs, parent=None):
super(CreatorWidget, self).__init__(parent)
self.setObjectName(name)
self.setWindowFlags(
QtCore.Qt.Window
| QtCore.Qt.CustomizeWindowHint
| QtCore.Qt.WindowTitleHint
| QtCore.Qt.WindowCloseButtonHint
| QtCore.Qt.WindowStaysOnTopHint
)
self.setWindowTitle(name or "OpenPype Creator Input")
self.resize(500, 700)
# Where inputs and labels are set
self.content_widget = [QtWidgets.QWidget(self)]
top_layout = QtWidgets.QFormLayout(self.content_widget[0])
top_layout.setObjectName("ContentLayout")
top_layout.addWidget(Spacer(5, self))
# first add widget tag line
top_layout.addWidget(QtWidgets.QLabel(info))
# main dynamic layout
self.scroll_area = QtWidgets.QScrollArea(self, widgetResizable=True)
self.scroll_area.setVerticalScrollBarPolicy(
QtCore.Qt.ScrollBarAsNeeded)
self.scroll_area.setVerticalScrollBarPolicy(
QtCore.Qt.ScrollBarAlwaysOn)
self.scroll_area.setHorizontalScrollBarPolicy(
QtCore.Qt.ScrollBarAlwaysOff)
self.scroll_area.setWidgetResizable(True)
self.content_widget.append(self.scroll_area)
scroll_widget = QtWidgets.QWidget(self)
in_scroll_area = QtWidgets.QVBoxLayout(scroll_widget)
self.content_layout = [in_scroll_area]
# add preset data into input widget layout
self.items = self.populate_widgets(ui_inputs)
self.scroll_area.setWidget(scroll_widget)
# Confirmation buttons
btns_widget = QtWidgets.QWidget(self)
btns_layout = QtWidgets.QHBoxLayout(btns_widget)
cancel_btn = QtWidgets.QPushButton("Cancel")
btns_layout.addWidget(cancel_btn)
ok_btn = QtWidgets.QPushButton("Ok")
btns_layout.addWidget(ok_btn)
# Main layout of the dialog
main_layout = QtWidgets.QVBoxLayout(self)
main_layout.setContentsMargins(10, 10, 10, 10)
main_layout.setSpacing(0)
# adding content widget
for w in self.content_widget:
main_layout.addWidget(w)
main_layout.addWidget(btns_widget)
ok_btn.clicked.connect(self._on_ok_clicked)
cancel_btn.clicked.connect(self._on_cancel_clicked)
stylesheet = resolve.api.menu.load_stylesheet()
self.setStyleSheet(stylesheet)
def _on_ok_clicked(self):
self.result = self.value(self.items)
self.close()
def _on_cancel_clicked(self):
self.result = None
self.close()
def value(self, data, new_data=None):
new_data = new_data or dict()
for k, v in data.items():
new_data[k] = {
"target": None,
"value": None
}
if v["type"] == "dict":
new_data[k]["target"] = v["target"]
new_data[k]["value"] = self.value(v["value"])
if v["type"] == "section":
new_data.pop(k)
new_data = self.value(v["value"], new_data)
elif getattr(v["value"], "currentText", None):
new_data[k]["target"] = v["target"]
new_data[k]["value"] = v["value"].currentText()
elif getattr(v["value"], "isChecked", None):
new_data[k]["target"] = v["target"]
new_data[k]["value"] = v["value"].isChecked()
elif getattr(v["value"], "value", None):
new_data[k]["target"] = v["target"]
new_data[k]["value"] = v["value"].value()
elif getattr(v["value"], "text", None):
new_data[k]["target"] = v["target"]
new_data[k]["value"] = v["value"].text()
return new_data
def camel_case_split(self, text):
matches = re.finditer(
'.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', text)
return " ".join([str(m.group(0)).capitalize() for m in matches])
def create_row(self, layout, type, text, **kwargs):
# get type attribute from qwidgets
attr = getattr(QtWidgets, type)
# convert label text to normal capitalized text with spaces
label_text = self.camel_case_split(text)
# assign the new text to label widget
label = QtWidgets.QLabel(label_text)
label.setObjectName("LineLabel")
# create attribute name text strip of spaces
attr_name = text.replace(" ", "")
# create attribute and assign default values
setattr(
self,
attr_name,
attr(parent=self))
# assign the created attribute to variable
item = getattr(self, attr_name)
for func, val in kwargs.items():
if getattr(item, func):
func_attr = getattr(item, func)
if isinstance(val, tuple):
func_attr(*val)
else:
func_attr(val)
# add to layout
layout.addRow(label, item)
return item
def populate_widgets(self, data, content_layout=None):
"""
Populate widget from input dict.
Each plugin has its own set of widget rows defined in dictionary
each row values should have following keys: `type`, `target`,
`label`, `order`, `value` and optionally also `toolTip`.
Args:
data (dict): widget rows or organized groups defined
by types `dict` or `section`
content_layout (QtWidgets.QFormLayout)[optional]: used when nesting
Returns:
dict: redefined data dict updated with created widgets
"""
content_layout = content_layout or self.content_layout[-1]
# fix order of process by defined order value
ordered_keys = list(data.keys())
for k, v in data.items():
try:
# try removing a key from index which should
# be filled with new
ordered_keys.pop(v["order"])
except IndexError:
pass
# add key into correct order
ordered_keys.insert(v["order"], k)
# process ordered
for k in ordered_keys:
v = data[k]
tool_tip = v.get("toolTip", "")
if v["type"] == "dict":
# adding spacer between sections
self.content_layout.append(QtWidgets.QWidget(self))
content_layout.addWidget(self.content_layout[-1])
self.content_layout[-1].setObjectName("sectionHeadline")
headline = QtWidgets.QVBoxLayout(self.content_layout[-1])
headline.addWidget(Spacer(20, self))
headline.addWidget(QtWidgets.QLabel(v["label"]))
# adding nested layout with label
self.content_layout.append(QtWidgets.QWidget(self))
self.content_layout[-1].setObjectName("sectionContent")
nested_content_layout = QtWidgets.QFormLayout(
self.content_layout[-1])
nested_content_layout.setObjectName("NestedContentLayout")
content_layout.addWidget(self.content_layout[-1])
# add nested key as label
data[k]["value"] = self.populate_widgets(
v["value"], nested_content_layout)
if v["type"] == "section":
# adding spacer between sections
self.content_layout.append(QtWidgets.QWidget(self))
content_layout.addWidget(self.content_layout[-1])
self.content_layout[-1].setObjectName("sectionHeadline")
headline = QtWidgets.QVBoxLayout(self.content_layout[-1])
headline.addWidget(Spacer(20, self))
headline.addWidget(QtWidgets.QLabel(v["label"]))
# adding nested layout with label
self.content_layout.append(QtWidgets.QWidget(self))
self.content_layout[-1].setObjectName("sectionContent")
nested_content_layout = QtWidgets.QFormLayout(
self.content_layout[-1])
nested_content_layout.setObjectName("NestedContentLayout")
content_layout.addWidget(self.content_layout[-1])
# add nested key as label
data[k]["value"] = self.populate_widgets(
v["value"], nested_content_layout)
elif v["type"] == "QLineEdit":
data[k]["value"] = self.create_row(
content_layout, "QLineEdit", v["label"],
setText=v["value"], setToolTip=tool_tip)
elif v["type"] == "QComboBox":
data[k]["value"] = self.create_row(
content_layout, "QComboBox", v["label"],
addItems=v["value"], setToolTip=tool_tip)
elif v["type"] == "QCheckBox":
data[k]["value"] = self.create_row(
content_layout, "QCheckBox", v["label"],
setChecked=v["value"], setToolTip=tool_tip)
elif v["type"] == "QSpinBox":
data[k]["value"] = self.create_row(
content_layout, "QSpinBox", v["label"],
setRange=(0, 99999),
setValue=v["value"],
setToolTip=tool_tip)
return data
class Spacer(QtWidgets.QWidget):
def __init__(self, height, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.setFixedHeight(height)
real_spacer = QtWidgets.QWidget(self)
real_spacer.setObjectName("Spacer")
real_spacer.setFixedHeight(height)
layout = QtWidgets.QVBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(real_spacer)
self.setLayout(layout)
class ClipLoader:
active_bin = None
data = dict()
def __init__(self, cls, context, **options):
""" Initialize object
Arguments:
cls (avalon.api.Loader): plugin object
context (dict): loader plugin context
options (dict)[optional]: possible keys:
projectBinPath: "path/to/binItem"
"""
self.__dict__.update(cls.__dict__)
self.context = context
self.active_project = lib.get_current_project()
# try to get value from options or evaluate key value for `handles`
self.with_handles = options.get("handles") or bool(
options.get("handles") is True)
# try to get value from options or evaluate key value for `load_to`
self.new_timeline = options.get("newTimeline") or bool(
"New timeline" in options.get("load_to", ""))
assert self._populate_data(), str(
"Cannot Load selected data, look into database "
"or call your supervisor")
# inject asset data to representation dict
self._get_asset_data()
print("__init__ self.data: `{}`".format(self.data))
# add active components to class
if self.new_timeline:
if options.get("timeline"):
# if multiselection is set then use options sequence
self.active_timeline = options["timeline"]
else:
# create new sequence
self.active_timeline = lib.get_current_timeline(new=True)
else:
self.active_timeline = lib.get_current_timeline()
cls.timeline = self.active_timeline
def _populate_data(self):
""" Gets context and convert it to self.data
data structure:
{
"name": "assetName_subsetName_representationName"
"path": "path/to/file/created/by/get_repr..",
"binPath": "projectBinPath",
}
"""
# create name
repr = self.context["representation"]
repr_cntx = repr["context"]
asset = str(repr_cntx["asset"])
subset = str(repr_cntx["subset"])
representation = str(repr_cntx["representation"])
self.data["clip_name"] = "_".join([asset, subset, representation])
self.data["versionData"] = self.context["version"]["data"]
# gets file path
file = self.fname
if not file:
repr_id = repr["_id"]
print(
"Representation id `{}` is failing to load".format(repr_id))
return None
self.data["path"] = file.replace("\\", "/")
# solve project bin structure path
hierarchy = str("/".join((
"Loader",
repr_cntx["hierarchy"].replace("\\", "/"),
asset
)))
self.data["binPath"] = hierarchy
return True
def _get_asset_data(self):
""" Get all available asset data
joint `data` key with asset.data dict into the representation
"""
asset_name = self.context["representation"]["context"]["asset"]
self.data["assetData"] = pype.get_asset(asset_name)["data"]
def load(self):
# create project bin for the media to be imported into
self.active_bin = lib.create_bin(self.data["binPath"])
# create mediaItem in active project bin
# create clip media
media_pool_item = lib.create_media_pool_item(
self.data["path"], self.active_bin)
_clip_property = media_pool_item.GetClipProperty
# get handles
handle_start = self.data["versionData"].get("handleStart")
handle_end = self.data["versionData"].get("handleEnd")
if handle_start is None:
handle_start = int(self.data["assetData"]["handleStart"])
if handle_end is None:
handle_end = int(self.data["assetData"]["handleEnd"])
source_in = int(_clip_property("Start"))
source_out = int(_clip_property("End"))
if _clip_property("Type") == "Video":
source_in += handle_start
source_out -= handle_end
# include handles
if self.with_handles:
source_in -= handle_start
source_out += handle_end
handle_start = 0
handle_end = 0
# make track item from source in bin as item
timeline_item = lib.create_timeline_item(
media_pool_item, self.active_timeline, source_in, source_out)
print("Loading clips: `{}`".format(self.data["clip_name"]))
return timeline_item
def update(self, timeline_item):
# create project bin for the media to be imported into
self.active_bin = lib.create_bin(self.data["binPath"])
# create mediaItem in active project bin
# create clip media
media_pool_item = lib.create_media_pool_item(
self.data["path"], self.active_bin)
_clip_property = media_pool_item.GetClipProperty
# get handles
handle_start = self.data["versionData"].get("handleStart")
handle_end = self.data["versionData"].get("handleEnd")
if handle_start is None:
handle_start = int(self.data["assetData"]["handleStart"])
if handle_end is None:
handle_end = int(self.data["assetData"]["handleEnd"])
source_in = int(_clip_property("Start"))
source_out = int(_clip_property("End"))
resolve.swap_clips(
timeline_item,
media_pool_item,
source_in,
source_out
)
print("Loading clips: `{}`".format(self.data["clip_name"]))
return timeline_item
class TimelineItemLoader(api.Loader):
"""A basic SequenceLoader for Resolve
This will implement the basic behavior for a loader to inherit from that
will containerize the reference and will implement the `remove` and
`update` logic.
"""
options = [
qargparse.Toggle(
"handles",
label="Include handles",
default=0,
help="Load with handles or without?"
),
qargparse.Choice(
"load_to",
label="Where to load clips",
items=[
"Current timeline",
"New timeline"
],
default=0,
help="Where do you want clips to be loaded?"
)
]
def load(
self,
context,
name=None,
namespace=None,
options=None
):
pass
def update(self, container, representation):
"""Update an existing `container`
"""
pass
def remove(self, container):
"""Remove an existing `container`
"""
pass
class Creator(pype.PypeCreatorMixin, api.Creator):
"""Creator class wrapper
"""
marker_color = "Purple"
def __init__(self, *args, **kwargs):
super(Creator, self).__init__(*args, **kwargs)
from openpype.api import get_current_project_settings
resolve_p_settings = get_current_project_settings().get("resolve")
self.presets = dict()
if resolve_p_settings:
self.presets = resolve_p_settings["create"].get(
self.__class__.__name__, {})
# adding basic current context resolve objects
self.project = resolve.get_current_project()
self.timeline = resolve.get_current_timeline()
if (self.options or {}).get("useSelection"):
self.selected = resolve.get_current_timeline_items(filter=True)
else:
self.selected = resolve.get_current_timeline_items(filter=False)
self.widget = CreatorWidget
class PublishClip:
"""
Convert a track item to publishable instance
Args:
        timeline_item (resolve timeline item): resolve timeline item object
kwargs (optional): additional data needed for rename=True (presets)
Returns:
        resolve timeline item: timeline item object with openpype tag
"""
vertical_clip_match = dict()
tag_data = dict()
types = {
"shot": "shot",
"folder": "folder",
"episode": "episode",
"sequence": "sequence",
"track": "sequence",
}
# parents search pattern
parents_search_pattern = r"\{([a-z]*?)\}"
# default templates for non-ui use
rename_default = False
hierarchy_default = "{_folder_}/{_sequence_}/{_track_}"
clip_name_default = "shot_{_trackIndex_:0>3}_{_clipIndex_:0>4}"
subset_name_default = "<track_name>"
review_track_default = "< none >"
subset_family_default = "plate"
count_from_default = 10
count_steps_default = 10
vertical_sync_default = False
driving_layer_default = ""
def __init__(self, cls, timeline_item_data, **kwargs):
# populate input cls attribute onto self.[attr]
self.__dict__.update(cls.__dict__)
# get main parent objects
self.timeline_item_data = timeline_item_data
self.timeline_item = timeline_item_data["clip"]["item"]
timeline_name = timeline_item_data["timeline"].GetName()
self.timeline_name = str(timeline_name).replace(" ", "_")
# track item (clip) main attributes
self.ti_name = self.timeline_item.GetName()
self.ti_index = int(timeline_item_data["clip"]["index"])
# get track name and index
track_name = timeline_item_data["track"]["name"]
self.track_name = str(track_name).replace(" ", "_")
self.track_index = int(timeline_item_data["track"]["index"])
# adding tag.family into tag
if kwargs.get("avalon"):
self.tag_data.update(kwargs["avalon"])
# adding ui inputs if any
self.ui_inputs = kwargs.get("ui_inputs", {})
# adding media pool folder if any
self.mp_folder = kwargs.get("mp_folder")
# populate default data before we get other attributes
self._populate_timeline_item_default_data()
# use all populated default data to create all important attributes
self._populate_attributes()
# create parents with correct types
self._create_parents()
def convert(self):
# solve track item data and add them to tag data
self._convert_to_tag_data()
# if track name is in review track name and also if driving track name
# is not in review track name: skip tag creation
if (self.track_name in self.review_layer) and (
self.driving_layer not in self.review_layer):
return
# deal with clip name
new_name = self.tag_data.pop("newClipName")
if self.rename:
self.tag_data["asset"] = new_name
else:
self.tag_data["asset"] = self.ti_name
if not lib.pype_marker_workflow:
# create compound clip workflow
lib.create_compound_clip(
self.timeline_item_data,
self.tag_data["asset"],
self.mp_folder
)
# add timeline_item_data selection to tag
self.tag_data.update({
"track_data": self.timeline_item_data["track"]
})
# create openpype tag on timeline_item and add data
lib.imprint(self.timeline_item, self.tag_data)
return self.timeline_item
def _populate_timeline_item_default_data(self):
""" Populate default formatting data from track item. """
self.timeline_item_default_data = {
"_folder_": "shots",
"_sequence_": self.timeline_name,
"_track_": self.track_name,
"_clip_": self.ti_name,
"_trackIndex_": self.track_index,
"_clipIndex_": self.ti_index
}
def _populate_attributes(self):
""" Populate main object attributes. """
# track item frame range and parent track name for vertical sync check
self.clip_in = int(self.timeline_item.GetStart())
self.clip_out = int(self.timeline_item.GetEnd())
# define ui inputs if non gui mode was used
self.shot_num = self.ti_index
print(
"____ self.shot_num: {}".format(self.shot_num))
# ui_inputs data or default values if gui was not used
self.rename = self.ui_inputs.get(
"clipRename", {}).get("value") or self.rename_default
self.clip_name = self.ui_inputs.get(
"clipName", {}).get("value") or self.clip_name_default
self.hierarchy = self.ui_inputs.get(
"hierarchy", {}).get("value") or self.hierarchy_default
self.hierarchy_data = self.ui_inputs.get(
"hierarchyData", {}).get("value") or \
self.timeline_item_default_data.copy()
self.count_from = self.ui_inputs.get(
"countFrom", {}).get("value") or self.count_from_default
self.count_steps = self.ui_inputs.get(
"countSteps", {}).get("value") or self.count_steps_default
self.subset_name = self.ui_inputs.get(
"subsetName", {}).get("value") or self.subset_name_default
self.subset_family = self.ui_inputs.get(
"subsetFamily", {}).get("value") or self.subset_family_default
self.vertical_sync = self.ui_inputs.get(
"vSyncOn", {}).get("value") or self.vertical_sync_default
self.driving_layer = self.ui_inputs.get(
"vSyncTrack", {}).get("value") or self.driving_layer_default
self.review_track = self.ui_inputs.get(
"reviewTrack", {}).get("value") or self.review_track_default
# build subset name from layer name
if self.subset_name == "<track_name>":
self.subset_name = self.track_name
# create subset for publishing
self.subset = self.subset_family + self.subset_name.capitalize()
def _replace_hash_to_expression(self, name, text):
""" Replace hash with number in correct padding. """
_spl = text.split("#")
_len = (len(_spl) - 1)
_repl = "{{{0}:0>{1}}}".format(name, _len)
new_text = text.replace(("#" * _len), _repl)
return new_text
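    # Illustrative note (editor's sketch, not part of the original plugin):
    # with name="shot" and text="sh###", the run of three hashes becomes the
    # padded expression "{shot:0>3}", so the returned template
    # "sh{shot:0>3}".format(shot=10) renders as "sh010".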
def _convert_to_tag_data(self):
""" Convert internal data to tag data.
        Populates the tag data into the internal variable self.tag_data
"""
# define vertical sync attributes
hero_track = True
self.review_layer = ""
if self.vertical_sync:
# check if track name is not in driving layer
if self.track_name not in self.driving_layer:
# if it is not then define vertical sync as None
hero_track = False
# increasing steps by index of rename iteration
self.count_steps *= self.rename_index
hierarchy_formating_data = dict()
_data = self.timeline_item_default_data.copy()
if self.ui_inputs:
# adding tag metadata from ui
for _k, _v in self.ui_inputs.items():
if _v["target"] == "tag":
self.tag_data[_k] = _v["value"]
# driving layer is set as positive match
if hero_track or self.vertical_sync:
# mark review layer
if self.review_track and (
self.review_track not in self.review_track_default):
# if review layer is defined and not the same as default
self.review_layer = self.review_track
# shot num calculate
if self.rename_index == 0:
self.shot_num = self.count_from
else:
self.shot_num = self.count_from + self.count_steps
# clip name sequence number
_data.update({"shot": self.shot_num})
# solve # in test to pythonic expression
for _k, _v in self.hierarchy_data.items():
if "#" not in _v["value"]:
continue
self.hierarchy_data[
_k]["value"] = self._replace_hash_to_expression(
_k, _v["value"])
            # fill up pythonic expressions in hierarchy data
for k, _v in self.hierarchy_data.items():
hierarchy_formating_data[k] = _v["value"].format(**_data)
else:
# if no gui mode then just pass default data
hierarchy_formating_data = self.hierarchy_data
tag_hierarchy_data = self._solve_tag_hierarchy_data(
hierarchy_formating_data
)
tag_hierarchy_data.update({"heroTrack": True})
if hero_track and self.vertical_sync:
self.vertical_clip_match.update({
(self.clip_in, self.clip_out): tag_hierarchy_data
})
if not hero_track and self.vertical_sync:
# driving layer is set as negative match
for (_in, _out), hero_data in self.vertical_clip_match.items():
hero_data.update({"heroTrack": False})
if _in == self.clip_in and _out == self.clip_out:
data_subset = hero_data["subset"]
                    # add track index in case of duplicate names in hero data
if self.subset in data_subset:
hero_data["subset"] = self.subset + str(
self.track_index)
                    # in case track name and subset name are the same, use the plain subset
if self.subset_name == self.track_name:
hero_data["subset"] = self.subset
# assign data to return hierarchy data to tag
tag_hierarchy_data = hero_data
# add data to return data dict
self.tag_data.update(tag_hierarchy_data)
# add uuid to tag data
self.tag_data["uuid"] = str(uuid.uuid4())
# add review track only to hero track
if hero_track and self.review_layer:
self.tag_data.update({"reviewTrack": self.review_layer})
else:
self.tag_data.update({"reviewTrack": None})
def _solve_tag_hierarchy_data(self, hierarchy_formating_data):
""" Solve tag data from hierarchy data and templates. """
# fill up clip name and hierarchy keys
hierarchy_filled = self.hierarchy.format(**hierarchy_formating_data)
clip_name_filled = self.clip_name.format(**hierarchy_formating_data)
return {
"newClipName": clip_name_filled,
"hierarchy": hierarchy_filled,
"parents": self.parents,
"hierarchyData": hierarchy_formating_data,
"subset": self.subset,
"family": self.subset_family,
"families": ["clip"]
}
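    # Illustrative note (editor's sketch, not part of the original plugin):
    # with a hypothetical hierarchy template "{_folder_}/{_sequence_}/{_track_}"
    # and formatting data {"_folder_": "shots", "_sequence_": "sq01",
    # "_track_": "main"}, hierarchy_filled becomes "shots/sq01/main", while the
    # clip name template "shot_{_trackIndex_:0>3}_{_clipIndex_:0>4}" with
    # _trackIndex_=1 and _clipIndex_=2 yields "shot_001_0002".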
def _convert_to_entity(self, key):
""" Converting input key to key with type. """
# convert to entity type
entity_type = self.types.get(key, None)
assert entity_type, "Missing entity type for `{}`".format(
key
)
return {
"entity_type": entity_type,
"entity_name": self.hierarchy_data[key]["value"].format(
**self.timeline_item_default_data
)
}
def _create_parents(self):
""" Create parents and return it in list. """
self.parents = []
pattern = re.compile(self.parents_search_pattern)
par_split = [pattern.findall(t).pop()
for t in self.hierarchy.split("/")]
for key in par_split:
parent = self._convert_to_entity(key)
self.parents.append(parent)
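    # Illustrative note (editor's sketch, not part of the original plugin):
    # parents_search_pattern extracts the template key from each hierarchy
    # segment, e.g. a hypothetical hierarchy "{folder}/{sequence}" yields
    # par_split == ["folder", "sequence"], and each key is then resolved to a
    # {"entity_type": ..., "entity_name": ...} dict by _convert_to_entity.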
```
#### File: plugins/publish/validate_texture_has_workfile.py
```python
import pyblish.api
import openpype.api
class ValidateTextureHasWorkfile(pyblish.api.InstancePlugin):
"""Validates that textures have appropriate workfile attached.
Workfile is optional, disable this Validator after Refresh if you are
sure it is not needed.
"""
label = "Validate Texture Has Workfile"
hosts = ["standalonepublisher"]
order = openpype.api.ValidateContentsOrder
families = ["textures"]
optional = True
def process(self, instance):
wfile = instance.data["versionData"].get("workfile")
assert wfile, "Textures are missing attached workfile"
```
#### File: plugins/publish/validate_texture_versions.py
```python
import pyblish.api
import openpype.api
class ValidateTextureBatchVersions(pyblish.api.InstancePlugin):
"""Validates that versions match in workfile and textures.
Workfile is optional, so if you are sure, you can disable this
validator after Refresh.
    Validates that only a single version is published at a time.
"""
label = "Validate Texture Batch Versions"
hosts = ["standalonepublisher"]
order = openpype.api.ValidateContentsOrder
families = ["textures"]
optional = False
def process(self, instance):
wfile = instance.data["versionData"].get("workfile")
version_str = "v{:03d}".format(instance.data["version"])
if not wfile: # no matching workfile, do not check versions
self.log.info("No workfile present for textures")
return
msg = "Not matching version: texture v{:03d} - workfile {}"
assert version_str in wfile, \
msg.format(
instance.data["version"], wfile
)
present_versions = set()
for instance in instance.context:
present_versions.add(instance.data["version"])
assert len(present_versions) == 1, "Too many versions in a batch!"
```
#### File: plugins/load/load_reference_image.py
```python
import collections
from avalon.pipeline import get_representation_context
from avalon.vendor import qargparse
from openpype.hosts.tvpaint.api import lib, pipeline, plugin
class LoadImage(plugin.Loader):
"""Load image or image sequence to TVPaint as new layer."""
families = ["render", "image", "background", "plate", "review"]
representations = ["*"]
label = "Load Image"
order = 1
icon = "image"
color = "white"
import_script = (
"filepath = '\"'\"{}\"'\"'\n"
"layer_name = \"{}\"\n"
"tv_loadsequence filepath {}PARSE layer_id\n"
"tv_layerrename layer_id layer_name"
)
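    # Illustrative note (editor's sketch, not part of the original loader):
    # the template above expands to a small George script that assigns the
    # file path and layer name to variables, calls tv_loadsequence with the
    # selected options (e.g. "STRETCH" "TIMESTRETCH" "PRELOAD") followed by
    # PARSE, and finally renames the newly created layer via tv_layerrename.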
defaults = {
"stretch": True,
"timestretch": True,
"preload": True
}
options = [
qargparse.Boolean(
"stretch",
label="Stretch to project size",
default=True,
help="Stretch loaded image/s to project resolution?"
),
qargparse.Boolean(
"timestretch",
label="Stretch to timeline length",
default=True,
help="Clip loaded image/s to timeline length?"
),
qargparse.Boolean(
"preload",
label="Preload loaded image/s",
default=True,
help="Preload image/s?"
)
]
def load(self, context, name, namespace, options):
stretch = options.get("stretch", self.defaults["stretch"])
timestretch = options.get("timestretch", self.defaults["timestretch"])
preload = options.get("preload", self.defaults["preload"])
load_options = []
if stretch:
load_options.append("\"STRETCH\"")
if timestretch:
load_options.append("\"TIMESTRETCH\"")
if preload:
load_options.append("\"PRELOAD\"")
load_options_str = ""
for load_option in load_options:
load_options_str += (load_option + " ")
# Prepare layer name
asset_name = context["asset"]["name"]
subset_name = context["subset"]["name"]
layer_name = self.get_unique_layer_name(asset_name, subset_name)
# Fill import script with filename and layer name
        # - filename must not contain backward slashes
george_script = self.import_script.format(
self.fname.replace("\\", "/"),
layer_name,
load_options_str
)
lib.execute_george_through_file(george_script)
loaded_layer = None
layers = lib.layers_data()
for layer in layers:
if layer["name"] == layer_name:
loaded_layer = layer
break
if loaded_layer is None:
raise AssertionError(
"Loading probably failed during execution of george script."
)
layer_names = [loaded_layer["name"]]
namespace = namespace or layer_name
return pipeline.containerise(
name=name,
namespace=namespace,
members=layer_names,
context=context,
loader=self.__class__.__name__
)
def _remove_layers(self, layer_names=None, layer_ids=None, layers=None):
if not layer_names and not layer_ids:
self.log.warning("Got empty layer names list.")
return
if layers is None:
layers = lib.layers_data()
available_ids = set(layer["layer_id"] for layer in layers)
if layer_ids is None:
# Backwards compatibility (layer ids were stored instead of names)
layer_names_are_ids = True
for layer_name in layer_names:
if (
not isinstance(layer_name, int)
and not layer_name.isnumeric()
):
layer_names_are_ids = False
break
if layer_names_are_ids:
layer_ids = layer_names
layer_ids_to_remove = []
if layer_ids is not None:
for layer_id in layer_ids:
if layer_id in available_ids:
layer_ids_to_remove.append(layer_id)
else:
layers_by_name = collections.defaultdict(list)
for layer in layers:
layers_by_name[layer["name"]].append(layer)
for layer_name in layer_names:
layers = layers_by_name[layer_name]
if len(layers) == 1:
layer_ids_to_remove.append(layers[0]["layer_id"])
if not layer_ids_to_remove:
self.log.warning("No layers to delete.")
return
george_script_lines = []
for layer_id in layer_ids_to_remove:
line = "tv_layerkill {}".format(layer_id)
george_script_lines.append(line)
george_script = "\n".join(george_script_lines)
lib.execute_george_through_file(george_script)
def _remove_container(self, container, members=None):
if not container:
return
representation = container["representation"]
members = self.get_members_from_container(container)
current_containers = pipeline.ls()
pop_idx = None
for idx, cur_con in enumerate(current_containers):
cur_members = self.get_members_from_container(cur_con)
if (
cur_members == members
and cur_con["representation"] == representation
):
pop_idx = idx
break
if pop_idx is None:
self.log.warning(
"Didn't found container in workfile containers. {}".format(
container
)
)
return
current_containers.pop(pop_idx)
pipeline.write_workfile_metadata(
pipeline.SECTION_NAME_CONTAINERS, current_containers
)
def remove(self, container):
members = self.get_members_from_container(container)
self.log.warning("Layers to delete {}".format(members))
self._remove_layers(members)
self._remove_container(container)
def switch(self, container, representation):
self.update(container, representation)
def update(self, container, representation):
"""Replace container with different version.
        New layers are loaded as a first step. Then data from the old layers
        is applied to the new layers. When that is done, the old layers are
        removed.
"""
# Create new containers first
context = get_representation_context(representation)
# Get layer ids from previous container
old_layer_names = self.get_members_from_container(container)
# Backwards compatibility (layer ids were stored instead of names)
old_layers_are_ids = True
for name in old_layer_names:
if isinstance(name, int) or name.isnumeric():
continue
old_layers_are_ids = False
break
old_layers = []
layers = lib.layers_data()
previous_layer_ids = set(layer["layer_id"] for layer in layers)
if old_layers_are_ids:
for layer in layers:
if layer["layer_id"] in old_layer_names:
old_layers.append(layer)
else:
layers_by_name = collections.defaultdict(list)
for layer in layers:
layers_by_name[layer["name"]].append(layer)
for layer_name in old_layer_names:
layers = layers_by_name[layer_name]
if len(layers) == 1:
old_layers.append(layers[0])
# Prepare few data
new_start_position = None
new_group_id = None
layer_ids_to_remove = set()
for layer in old_layers:
layer_ids_to_remove.add(layer["layer_id"])
position = layer["position"]
group_id = layer["group_id"]
if new_start_position is None:
new_start_position = position
elif new_start_position > position:
new_start_position = position
if new_group_id is None:
new_group_id = group_id
elif new_group_id < 0:
continue
elif new_group_id != group_id:
new_group_id = -1
# Remove old container
self._remove_container(container)
# Remove old layers
self._remove_layers(layer_ids=layer_ids_to_remove)
# Change `fname` to new representation
self.fname = self.filepath_from_context(context)
name = container["name"]
namespace = container["namespace"]
new_container = self.load(context, name, namespace, {})
new_layer_names = self.get_members_from_container(new_container)
layers = lib.layers_data()
new_layers = []
for layer in layers:
if layer["layer_id"] in previous_layer_ids:
continue
if layer["name"] in new_layer_names:
new_layers.append(layer)
george_script_lines = []
# Group new layers to same group as previous container layers had
# - all old layers must be under same group
if new_group_id is not None and new_group_id > 0:
for layer in new_layers:
line = "tv_layercolor \"set\" {} {}".format(
layer["layer_id"], new_group_id
)
george_script_lines.append(line)
# Rename new layer to have same name
# - only if both old and new have one layer
if len(old_layers) == 1 and len(new_layers) == 1:
layer_name = old_layers[0]["name"]
george_script_lines.append(
"tv_layerrename {} \"{}\"".format(
new_layers[0]["layer_id"], layer_name
)
)
# Change position of new layer
# - this must be done before remove old layers
if len(new_layers) == 1 and new_start_position is not None:
new_layer = new_layers[0]
george_script_lines.extend([
"tv_layerset {}".format(new_layer["layer_id"]),
"tv_layermove {}".format(new_start_position)
])
# Execute george scripts if there are any
if george_script_lines:
george_script = "\n".join(george_script_lines)
lib.execute_george_through_file(george_script)
```
#### File: plugins/publish/validate_layers_visibility.py
```python
import pyblish.api
class ValidateLayersVisibility(pyblish.api.InstancePlugin):
"""Validate existence of renderPass layers."""
label = "Validate Layers Visibility"
order = pyblish.api.ValidatorOrder
families = ["review", "renderPass", "renderLayer"]
def process(self, instance):
for layer in instance.data["layers"]:
if layer["visible"]:
return
raise AssertionError("All layers of instance are not visible.")
```
#### File: plugins/publish/validate_project_settings.py
```python
import json
import pyblish.api
class ValidateProjectSettings(pyblish.api.ContextPlugin):
"""Validate project settings against database.
"""
label = "Validate Project Settings"
order = pyblish.api.ValidatorOrder
optional = True
def process(self, context):
scene_data = {
"fps": context.data.get("sceneFps"),
"resolutionWidth": context.data.get("sceneWidth"),
"resolutionHeight": context.data.get("sceneHeight"),
"pixelAspect": context.data.get("scenePixelAspect")
}
invalid = {}
for k in scene_data.keys():
expected_value = context.data["assetEntity"]["data"][k]
if scene_data[k] != expected_value:
invalid[k] = {
"current": scene_data[k], "expected": expected_value
}
if invalid:
raise AssertionError(
"Project settings does not match database:\n{}".format(
json.dumps(invalid, sort_keys=True, indent=4)
)
)
```
#### File: hosts/unreal/__init__.py
```python
import os
def add_implementation_envs(env, _app):
"""Modify environments to contain all required for implementation."""
# Set AVALON_UNREAL_PLUGIN required for Unreal implementation
unreal_plugin_path = os.path.join(
os.environ["OPENPYPE_REPOS_ROOT"], "repos", "avalon-unreal-integration"
)
env["AVALON_UNREAL_PLUGIN"] = unreal_plugin_path
    # Set default environments if they are not set via settings
defaults = {
"OPENPYPE_LOG_NO_COLORS": "True"
}
for key, value in defaults.items():
if not env.get(key):
env[key] = value
```
#### File: ftrack/event_handlers_server/event_push_frame_values_to_task.py
```python
import collections
import datetime
import ftrack_api
from openpype_modules.ftrack.lib import (
BaseEvent,
query_custom_attributes
)
class PushFrameValuesToTaskEvent(BaseEvent):
# Ignore event handler by default
cust_attrs_query = (
"select id, key, object_type_id, is_hierarchical, default"
" from CustomAttributeConfiguration"
" where key in ({}) and"
" (object_type_id in ({}) or is_hierarchical is true)"
)
cust_attr_query = (
"select value, entity_id from ContextCustomAttributeValue "
"where entity_id in ({}) and configuration_id in ({})"
)
_cached_task_object_id = None
_cached_interest_object_ids = None
_cached_user_id = None
_cached_changes = []
_max_delta = 30
settings_key = "sync_hier_entity_attributes"
def session_user_id(self, session):
if self._cached_user_id is None:
user = session.query(
"User where username is \"{}\"".format(session.api_user)
).one()
self._cached_user_id = user["id"]
return self._cached_user_id
def launch(self, session, event):
filtered_entities_info = self.filter_entities_info(event)
if not filtered_entities_info:
return
for project_id, entities_info in filtered_entities_info.items():
self.process_by_project(session, event, project_id, entities_info)
def filter_entities_info(self, event):
# Filter if event contain relevant data
entities_info = event["data"].get("entities")
if not entities_info:
return
entities_info_by_project_id = {}
for entity_info in entities_info:
# Care only about tasks
if entity_info.get("entityType") != "task":
continue
            # Care only about entities that have any changes
changes = entity_info.get("changes")
if not changes:
continue
# Get project id from entity info
project_id = None
for parent_item in reversed(entity_info["parents"]):
if parent_item["entityType"] == "show":
project_id = parent_item["entityId"]
break
if project_id is None:
continue
# Skip `Task` entity type if parent didn't change
if entity_info["entity_type"].lower() == "task":
if (
"parent_id" not in changes
or changes["parent_id"]["new"] is None
):
continue
if project_id not in entities_info_by_project_id:
entities_info_by_project_id[project_id] = []
entities_info_by_project_id[project_id].append(entity_info)
return entities_info_by_project_id
def process_by_project(self, session, event, project_id, entities_info):
project_name = self.get_project_name_from_event(
session, event, project_id
)
# Load settings
project_settings = self.get_project_settings_from_event(
event, project_name
)
# Load status mapping from presets
event_settings = (
project_settings
["ftrack"]
["events"]
["sync_hier_entity_attributes"]
)
# Skip if event is not enabled
if not event_settings["enabled"]:
self.log.debug("Project \"{}\" has disabled {}".format(
project_name, self.__class__.__name__
))
return
interest_attributes = event_settings["interest_attributes"]
if not interest_attributes:
self.log.info((
"Project \"{}\" does not have filled 'interest_attributes',"
" skipping."
))
return
interest_entity_types = event_settings["interest_entity_types"]
if not interest_entity_types:
self.log.info((
"Project \"{}\" does not have filled 'interest_entity_types',"
" skipping."
))
return
interest_attributes = set(interest_attributes)
interest_entity_types = set(interest_entity_types)
# Separate value changes and task parent changes
_entities_info = []
task_parent_changes = []
for entity_info in entities_info:
if entity_info["entity_type"].lower() == "task":
task_parent_changes.append(entity_info)
else:
_entities_info.append(entity_info)
entities_info = _entities_info
# Filter entities info with changes
interesting_data, changed_keys_by_object_id = self.filter_changes(
session, event, entities_info, interest_attributes
)
if not interesting_data and not task_parent_changes:
return
# Prepare object types
object_types = session.query("select id, name from ObjectType").all()
object_types_by_name = {}
for object_type in object_types:
name_low = object_type["name"].lower()
object_types_by_name[name_low] = object_type
        # NOTE it would be nice to check that `interesting_data` does not
        # contain value changes of tasks that were created or moved
        # - but that is complex to find out
if interesting_data:
self.process_attribute_changes(
session, object_types_by_name,
interesting_data, changed_keys_by_object_id,
interest_entity_types, interest_attributes
)
if task_parent_changes:
self.process_task_parent_change(
session, object_types_by_name, task_parent_changes,
interest_entity_types, interest_attributes
)
def process_task_parent_change(
self, session, object_types_by_name, task_parent_changes,
interest_entity_types, interest_attributes
):
"""Push custom attribute values if task parent has changed.
        Parent is changed if a task is created or moved under a different
        entity. We don't care about all task changes, only about those whose
        parent is in the interest entity types (from settings).
        The task's hierarchical value should be unset or set based on the
        parent's real hierarchical value, and the non-hierarchical custom
        attribute value should be set to the hierarchical value.
"""
# Store task ids which were created or moved under parent with entity
# type defined in settings (interest_entity_types).
task_ids = set()
# Store parent ids of matching task ids
matching_parent_ids = set()
# Store all entity ids of all entities to be able query hierarchical
# values.
whole_hierarchy_ids = set()
# Store parent id of each entity id
parent_id_by_entity_id = {}
for entity_info in task_parent_changes:
            # Ignore entities with fewer than 2 parents
# NOTE entity itself is also part of "parents" value
parents = entity_info.get("parents") or []
if len(parents) < 2:
continue
parent_info = parents[1]
# Check if parent has entity type we care about.
if parent_info["entity_type"] not in interest_entity_types:
continue
task_ids.add(entity_info["entityId"])
matching_parent_ids.add(parent_info["entityId"])
            # Store the whole hierarchy of the task entity
prev_id = None
for item in parents:
item_id = item["entityId"]
whole_hierarchy_ids.add(item_id)
if prev_id is None:
prev_id = item_id
continue
parent_id_by_entity_id[prev_id] = item_id
if item["entityType"] == "show":
break
prev_id = item_id
# Just skip if nothing is interesting for our settings
if not matching_parent_ids:
return
# Query object type ids of parent ids for custom attribute
# definitions query
entities = session.query(
"select object_type_id from TypedContext where id in ({})".format(
self.join_query_keys(matching_parent_ids)
)
)
# Prepare task object id
task_object_id = object_types_by_name["task"]["id"]
# All object ids for which we're querying custom attribute definitions
object_type_ids = set()
object_type_ids.add(task_object_id)
for entity in entities:
object_type_ids.add(entity["object_type_id"])
attrs_by_obj_id, hier_attrs = self.attrs_configurations(
session, object_type_ids, interest_attributes
)
# Skip if all task attributes are not available
task_attrs = attrs_by_obj_id.get(task_object_id)
if not task_attrs:
return
        # Skip attributes that are not in both hierarchical and nonhierarchical
# TODO be able to push values if hierarchical is available
for key in interest_attributes:
if key not in hier_attrs:
task_attrs.pop(key, None)
elif key not in task_attrs:
hier_attrs.pop(key)
# Skip if nothing remained
if not task_attrs:
return
# Do some preparations for custom attribute values query
attr_key_by_id = {}
nonhier_id_by_key = {}
hier_attr_ids = []
for key, attr_id in hier_attrs.items():
attr_key_by_id[attr_id] = key
hier_attr_ids.append(attr_id)
conf_ids = list(hier_attr_ids)
for key, attr_id in task_attrs.items():
attr_key_by_id[attr_id] = key
nonhier_id_by_key[key] = attr_id
conf_ids.append(attr_id)
# Query custom attribute values
        # - the result does not contain values for all entities, only what
        #   the query callback to the ftrack server returned
result = query_custom_attributes(
session, conf_ids, whole_hierarchy_ids
)
# Prepare variables where result will be stored
        # - hierarchical values should not contain attribute with value by
# default
hier_values_by_entity_id = {
entity_id: {}
for entity_id in whole_hierarchy_ids
}
# - real values of custom attributes
values_by_entity_id = {
entity_id: {
attr_id: None
for attr_id in conf_ids
}
for entity_id in whole_hierarchy_ids
}
for item in result:
attr_id = item["configuration_id"]
entity_id = item["entity_id"]
value = item["value"]
values_by_entity_id[entity_id][attr_id] = value
if attr_id in hier_attr_ids and value is not None:
hier_values_by_entity_id[entity_id][attr_id] = value
# Prepare values for all task entities
        # - going through all parents and storing the first found value
        # - store None for those that are already known not to have any
        #   value set
for task_id in tuple(task_ids):
for attr_id in hier_attr_ids:
entity_ids = []
value = None
entity_id = task_id
while value is None:
entity_value = hier_values_by_entity_id[entity_id]
if attr_id in entity_value:
value = entity_value[attr_id]
if value is None:
break
if value is None:
entity_ids.append(entity_id)
entity_id = parent_id_by_entity_id.get(entity_id)
if entity_id is None:
break
for entity_id in entity_ids:
hier_values_by_entity_id[entity_id][attr_id] = value
# Prepare changes to commit
changes = []
for task_id in tuple(task_ids):
parent_id = parent_id_by_entity_id[task_id]
for attr_id in hier_attr_ids:
attr_key = attr_key_by_id[attr_id]
nonhier_id = nonhier_id_by_key[attr_key]
# Real value of hierarchical attribute on parent
# - If is none then should be unset
real_parent_value = values_by_entity_id[parent_id][attr_id]
# Current hierarchical value of a task
# - Will be compared to real parent value
hier_value = hier_values_by_entity_id[task_id][attr_id]
                # Parent value that can be inherited from its parent entity
parent_value = hier_values_by_entity_id[parent_id][attr_id]
# Task value of nonhierarchical custom attribute
nonhier_value = values_by_entity_id[task_id][nonhier_id]
if real_parent_value != hier_value:
changes.append({
"new_value": real_parent_value,
"attr_id": attr_id,
"entity_id": task_id,
"attr_key": attr_key
})
if parent_value != nonhier_value:
changes.append({
"new_value": parent_value,
"attr_id": nonhier_id,
"entity_id": task_id,
"attr_key": attr_key
})
self._commit_changes(session, changes)
def _commit_changes(self, session, changes):
uncommited_changes = False
for idx, item in enumerate(changes):
new_value = item["new_value"]
attr_id = item["attr_id"]
entity_id = item["entity_id"]
attr_key = item["attr_key"]
entity_key = collections.OrderedDict()
entity_key["configuration_id"] = attr_id
entity_key["entity_id"] = entity_id
self._cached_changes.append({
"attr_key": attr_key,
"entity_id": entity_id,
"value": new_value,
"time": datetime.datetime.now()
})
if new_value is None:
op = ftrack_api.operation.DeleteEntityOperation(
"CustomAttributeValue",
entity_key
)
else:
op = ftrack_api.operation.UpdateEntityOperation(
"ContextCustomAttributeValue",
entity_key,
"value",
ftrack_api.symbol.NOT_SET,
new_value
)
session.recorded_operations.push(op)
self.log.info((
"Changing Custom Attribute \"{}\" to value"
" \"{}\" on entity: {}"
).format(attr_key, new_value, entity_id))
if (idx + 1) % 20 == 0:
uncommited_changes = False
try:
session.commit()
except Exception:
session.rollback()
self.log.warning(
"Changing of values failed.", exc_info=True
)
else:
uncommited_changes = True
if uncommited_changes:
try:
session.commit()
except Exception:
session.rollback()
self.log.warning("Changing of values failed.", exc_info=True)
def process_attribute_changes(
self, session, object_types_by_name,
interesting_data, changed_keys_by_object_id,
interest_entity_types, interest_attributes
):
# Prepare task object id
task_object_id = object_types_by_name["task"]["id"]
# Collect object type ids based on settings
interest_object_ids = []
for entity_type in interest_entity_types:
_entity_type = entity_type.lower()
object_type = object_types_by_name.get(_entity_type)
if not object_type:
self.log.warning("Couldn't find object type \"{}\"".format(
entity_type
                ))
                continue
            interest_object_ids.append(object_type["id"])
# Query entities by filtered data and object ids
entities = self.get_entities(
session, interesting_data, interest_object_ids
)
if not entities:
return
# Pop not found entities from interesting data
entity_ids = set(
entity["id"]
for entity in entities
)
for entity_id in tuple(interesting_data.keys()):
if entity_id not in entity_ids:
interesting_data.pop(entity_id)
# Add task object type to list
attr_obj_ids = list(interest_object_ids)
attr_obj_ids.append(task_object_id)
attrs_by_obj_id, hier_attrs = self.attrs_configurations(
session, attr_obj_ids, interest_attributes
)
task_attrs = attrs_by_obj_id.get(task_object_id)
changed_keys = set()
        # Skip keys that are not both in hierarchical and type specific
for object_id, keys in changed_keys_by_object_id.items():
changed_keys |= set(keys)
object_id_attrs = attrs_by_obj_id.get(object_id)
for key in keys:
if key not in hier_attrs:
attrs_by_obj_id[object_id].pop(key)
continue
if (
(not object_id_attrs or key not in object_id_attrs)
and (not task_attrs or key not in task_attrs)
):
hier_attrs.pop(key)
# Clean up empty values
for key, value in tuple(attrs_by_obj_id.items()):
if not value:
attrs_by_obj_id.pop(key)
if not attrs_by_obj_id:
self.log.warning((
"There is not created Custom Attributes {} "
" for entity types: {}"
).format(
self.join_query_keys(interest_attributes),
self.join_query_keys(interest_entity_types)
))
return
# Prepare task entities
task_entities = []
# If task entity does not contain changed attribute then skip
if task_attrs:
task_entities = self.get_task_entities(session, interesting_data)
task_entity_ids = set()
parent_id_by_task_id = {}
for task_entity in task_entities:
task_id = task_entity["id"]
task_entity_ids.add(task_id)
parent_id_by_task_id[task_id] = task_entity["parent_id"]
self.finalize_attribute_changes(
session, interesting_data,
changed_keys, attrs_by_obj_id, hier_attrs,
task_entity_ids, parent_id_by_task_id
)
def finalize_attribute_changes(
self, session, interesting_data,
changed_keys, attrs_by_obj_id, hier_attrs,
task_entity_ids, parent_id_by_task_id
):
attr_id_to_key = {}
for attr_confs in attrs_by_obj_id.values():
for key in changed_keys:
custom_attr_id = attr_confs.get(key)
if custom_attr_id:
attr_id_to_key[custom_attr_id] = key
for key in changed_keys:
custom_attr_id = hier_attrs.get(key)
if custom_attr_id:
attr_id_to_key[custom_attr_id] = key
entity_ids = (
set(interesting_data.keys()) | task_entity_ids
)
attr_ids = set(attr_id_to_key.keys())
current_values_by_id = self.current_values(
session, attr_ids, entity_ids, task_entity_ids, hier_attrs
)
changes = []
for entity_id, current_values in current_values_by_id.items():
parent_id = parent_id_by_task_id.get(entity_id)
if not parent_id:
parent_id = entity_id
values = interesting_data[parent_id]
for attr_id, old_value in current_values.items():
attr_key = attr_id_to_key.get(attr_id)
if not attr_key:
continue
# Convert new value from string
new_value = values.get(attr_key)
if new_value is not None and old_value is not None:
try:
new_value = type(old_value)(new_value)
except Exception:
self.log.warning((
"Couldn't convert from {} to {}."
" Skipping update values."
).format(type(new_value), type(old_value)))
if new_value == old_value:
continue
changes.append({
"new_value": new_value,
"attr_id": attr_id,
"entity_id": entity_id,
"attr_key": attr_key
})
self._commit_changes(session, changes)
def filter_changes(
self, session, event, entities_info, interest_attributes
):
session_user_id = self.session_user_id(session)
user_data = event["data"].get("user")
changed_by_session = False
if user_data and user_data.get("userid") == session_user_id:
changed_by_session = True
current_time = datetime.datetime.now()
interesting_data = {}
changed_keys_by_object_id = {}
for entity_info in entities_info:
            # Care only about changes of specific keys
entity_changes = {}
changes = entity_info["changes"]
for key in interest_attributes:
if key in changes:
entity_changes[key] = changes[key]["new"]
entity_id = entity_info["entityId"]
if changed_by_session:
for key, new_value in tuple(entity_changes.items()):
for cached in tuple(self._cached_changes):
if (
cached["entity_id"] != entity_id
or cached["attr_key"] != key
):
continue
cached_value = cached["value"]
try:
new_value = type(cached_value)(new_value)
except Exception:
pass
if cached_value == new_value:
self._cached_changes.remove(cached)
entity_changes.pop(key)
break
delta = (current_time - cached["time"]).seconds
if delta > self._max_delta:
self._cached_changes.remove(cached)
if not entity_changes:
continue
entity_id = entity_info["entityId"]
object_id = entity_info["objectTypeId"]
interesting_data[entity_id] = entity_changes
if object_id not in changed_keys_by_object_id:
changed_keys_by_object_id[object_id] = set()
changed_keys_by_object_id[object_id] |= set(entity_changes.keys())
return interesting_data, changed_keys_by_object_id
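    # Editor's note (not part of the original handler): filter_changes drops
    # value changes that this handler itself committed recently - when an
    # entry in _cached_changes matches the entity, key and new value, the
    # change is skipped; cached entries older than _max_delta (30) seconds
    # are discarded. This prevents the handler from reacting to its own
    # commits.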
def current_values(
self, session, attr_ids, entity_ids, task_entity_ids, hier_attrs
):
current_values_by_id = {}
if not attr_ids or not entity_ids:
return current_values_by_id
joined_conf_ids = self.join_query_keys(attr_ids)
joined_entity_ids = self.join_query_keys(entity_ids)
call_expr = [{
"action": "query",
"expression": self.cust_attr_query.format(
joined_entity_ids, joined_conf_ids
)
}]
if hasattr(session, "call"):
[values] = session.call(call_expr)
else:
[values] = session._call(call_expr)
for item in values["data"]:
entity_id = item["entity_id"]
attr_id = item["configuration_id"]
if entity_id in task_entity_ids and attr_id in hier_attrs:
continue
if entity_id not in current_values_by_id:
current_values_by_id[entity_id] = {}
current_values_by_id[entity_id][attr_id] = item["value"]
return current_values_by_id
def get_entities(self, session, interesting_data, interest_object_ids):
return session.query((
"select id from TypedContext"
" where id in ({}) and object_type_id in ({})"
).format(
self.join_query_keys(interesting_data.keys()),
self.join_query_keys(interest_object_ids)
)).all()
def get_task_entities(self, session, interesting_data):
return session.query(
"select id, parent_id from Task where parent_id in ({})".format(
self.join_query_keys(interesting_data.keys())
)
).all()
def attrs_configurations(self, session, object_ids, interest_attributes):
attrs = session.query(self.cust_attrs_query.format(
self.join_query_keys(interest_attributes),
self.join_query_keys(object_ids)
)).all()
output = {}
        hierarchical = {}
for attr in attrs:
if attr["is_hierarchical"]:
hiearchical[attr["key"]] = attr["id"]
continue
obj_id = attr["object_type_id"]
if obj_id not in output:
output[obj_id] = {}
output[obj_id][attr["key"]] = attr["id"]
        return output, hierarchical
def register(session):
PushFrameValuesToTaskEvent(session).register()
```
#### File: ftrack/event_handlers_user/action_create_cust_attrs.py
```python
import collections
import json
import arrow
import ftrack_api
from openpype_modules.ftrack.lib import (
BaseAction,
statics_icon,
CUST_ATTR_ID_KEY,
CUST_ATTR_GROUP,
CUST_ATTR_TOOLS,
CUST_ATTR_APPLICATIONS,
CUST_ATTR_INTENT,
default_custom_attributes_definition,
app_definitions_from_app_manager,
tool_definitions_from_app_manager
)
from openpype.api import get_system_settings
from openpype.lib import ApplicationManager
"""
This action creates/updates custom attributes.
## First part take care about special attributes
- `avalon_mongo_id` for storing Avalon MongoID
- `applications` based on applications usages
- `tools` based on tools usages
## Second part is based on json file in ftrack module.
File location: `~/OpenPype/pype/modules/ftrack/ftrack_custom_attributes.json`
Data in the json file is a nested dictionary. Keys in the first dictionary
level represent the Ftrack entity type (task, show, assetversion, user, list,
asset) and the dictionary value defines the attribute.
There is a special key for hierarchical attributes: `is_hierarchical`.
The entity type `task` requires the task object type (Folder, Shot, Sequence,
Task, Library, Milestone, Episode, Asset Build, etc.) to be defined at the
second dictionary level, so task attributes are nested one level deeper.
*** Not Changeable *********************************************************
group (string)
- name of group
- based on attribute `openpype_modules.ftrack.lib.CUST_ATTR_GROUP`
- "pype" by default
*** Required ***************************************************************
label (string)
- label that will show in ftrack
key (string)
- must contain only chars [<KEY>
type (string)
- type of custom attribute
- possibilities:
text, boolean, date, enumerator, dynamic enumerator, number
*** Required with conditions ***********************************************
config (dictionary)
- for each attribute type different requirements and possibilities:
- enumerator:
multiSelect = True/False(default: False)
data = {key_1:value_1,key_2:value_2,..,key_n:value_n}
- 'data' is Required value with enumerator
- 'key' must contain only chars [a-z0-9_]
- number:
isdecimal = True/False(default: False)
- text:
markdown = True/False(default: False)
*** Presetable keys **********************************************************
write_security_roles/read_security_roles (array of strings)
- default: ["ALL"]
- strings should be role names (e.g.: ["API", "Administrator"])
- if set to ["ALL"] - all roles will be availabled
- if first is 'except' - roles will be set to all except roles in array
    - Warning: Be careful with except - role names can differ between companies
- example:
write_security_roles = ["except", "User"]
read_security_roles = ["ALL"] # (User is can only read)
default
- default: None
- sets default value for custom attribute:
- text -> string
- number -> integer
- enumerator -> array with string of key/s
- boolean -> bool true/false
- date -> string in format: 'YYYY.MM.DD' or 'YYYY.MM.DD HH:mm:ss'
- example: "2018.12.24" / "2018.1.1 6:0:0"
- dynamic enumerator -> DON'T HAVE DEFAULT VALUE!!!
Example:
```
"show": {
"avalon_auto_sync": {
"label": "Avalon auto-sync",
"type": "boolean",
"write_security_roles": ["API", "Administrator"],
"read_security_roles": ["API", "Administrator"]
}
},
"is_hierarchical": {
"fps": {
"label": "FPS",
"type": "number",
"config": {"isdecimal": true}
}
},
"task": {
"library": {
"my_attr_name": {
"label": "My Attr",
"type": "number"
}
}
}
```
"""
class CustAttrException(Exception):
pass
class CustomAttributes(BaseAction):
'''Edit meta data action.'''
#: Action identifier.
identifier = 'create.update.attributes'
#: Action label.
label = "OpenPype Admin"
variant = '- Create/Update Avalon Attributes'
#: Action description.
description = 'Creates Avalon/Mongo ID for double check'
icon = statics_icon("ftrack", "action_icons", "OpenPypeAdmin.svg")
settings_key = "create_update_attributes"
required_keys = ("key", "label", "type")
presetable_keys = (
"default",
"write_security_roles",
"read_security_roles"
)
hierarchical_key = "is_hierarchical"
type_posibilities = (
"text", "boolean", "date", "enumerator",
"dynamic enumerator", "number"
)
def discover(self, session, entities, event):
'''
Validation
- action is only for Administrators
'''
return self.valid_roles(session, entities, event)
def launch(self, session, entities, event):
# JOB SETTINGS
userId = event['source']['user']['id']
user = session.query('User where id is ' + userId).one()
job = session.create('Job', {
'user': user,
'status': 'running',
'data': json.dumps({
'description': 'Custom Attribute creation.'
})
})
session.commit()
self.app_manager = ApplicationManager()
try:
self.prepare_global_data(session)
self.avalon_mongo_id_attributes(session, event)
self.applications_attribute(event)
self.tools_attribute(event)
self.intent_attribute(event)
self.custom_attributes_from_file(event)
job['status'] = 'done'
session.commit()
except Exception:
session.rollback()
job["status"] = "failed"
session.commit()
self.log.error(
"Creating custom attributes failed ({})", exc_info=True
)
return True
def prepare_global_data(self, session):
self.types_per_name = {
attr_type["name"].lower(): attr_type
for attr_type in session.query("CustomAttributeType").all()
}
self.security_roles = {
role["name"].lower(): role
for role in session.query("SecurityRole").all()
}
object_types = session.query("ObjectType").all()
self.object_types_per_id = {
object_type["id"]: object_type for object_type in object_types
}
self.object_types_per_name = {
object_type["name"].lower(): object_type
for object_type in object_types
}
self.groups = {}
self.ftrack_settings = get_system_settings()["modules"]["ftrack"]
self.attrs_settings = self.prepare_attribute_settings()
def prepare_attribute_settings(self):
output = {}
attr_settings = self.ftrack_settings["custom_attributes"]
for entity_type, attr_data in attr_settings.items():
# Lower entity type
entity_type = entity_type.lower()
# Just store if entity type is not "task"
if entity_type != "task":
output[entity_type] = attr_data
continue
# Prepare empty dictionary for entity type if not set yet
if entity_type not in output:
output[entity_type] = {}
# Store presets per lowered object type
for obj_type, _preset in attr_data.items():
output[entity_type][obj_type.lower()] = _preset
return output
def avalon_mongo_id_attributes(self, session, event):
self.create_hierarchical_mongo_attr(session, event)
hierarchical_attr, object_type_attrs = (
self.mongo_id_custom_attributes(session)
)
if object_type_attrs:
self.convert_mongo_id_to_hierarchical(
hierarchical_attr, object_type_attrs, session, event
)
def mongo_id_custom_attributes(self, session):
cust_attrs_query = (
"select id, entity_type, object_type_id, is_hierarchical, default"
" from CustomAttributeConfiguration"
" where key = \"{}\""
).format(CUST_ATTR_ID_KEY)
mongo_id_avalon_attr = session.query(cust_attrs_query).all()
        hierarchical_attr = None
object_type_attrs = []
for cust_attr in mongo_id_avalon_attr:
if cust_attr["is_hierarchical"]:
                hierarchical_attr = cust_attr
else:
object_type_attrs.append(cust_attr)
        return hierarchical_attr, object_type_attrs
def create_hierarchical_mongo_attr(self, session, event):
# Set security roles for attribute
data = {
"key": CUST_ATTR_ID_KEY,
"label": "Avalon/Mongo ID",
"type": "text",
"default": "",
"group": CUST_ATTR_GROUP,
"is_hierarchical": True,
"config": {"markdown": False}
}
self.process_attr_data(data, event)
def convert_mongo_id_to_hierarchical(
self, hierarchical_attr, object_type_attrs, session, event
):
user_msg = "Converting old custom attributes. This may take some time."
self.show_message(event, user_msg, True)
self.log.info(user_msg)
object_types_per_id = {
object_type["id"]: object_type
for object_type in session.query("ObjectType").all()
}
cust_attr_query = (
"select value, entity_id from ContextCustomAttributeValue "
"where configuration_id is {}"
)
for attr_def in object_type_attrs:
attr_ent_type = attr_def["entity_type"]
if attr_ent_type == "show":
entity_type_label = "Project"
elif attr_ent_type == "task":
entity_type_label = (
object_types_per_id[attr_def["object_type_id"]]["name"]
)
else:
self.log.warning(
"Unsupported entity type: \"{}\". Skipping.".format(
attr_ent_type
)
)
continue
self.log.debug((
"Converting Avalon MongoID attr for Entity type \"{}\"."
).format(entity_type_label))
call_expr = [{
"action": "query",
"expression": cust_attr_query.format(attr_def["id"])
}]
if hasattr(session, "call"):
[values] = session.call(call_expr)
else:
[values] = session._call(call_expr)
for value in values["data"]:
table_values = collections.OrderedDict({
"configuration_id": hierarchical_attr["id"],
"entity_id": value["entity_id"]
})
session.recorded_operations.push(
ftrack_api.operation.UpdateEntityOperation(
"ContextCustomAttributeValue",
table_values,
"value",
ftrack_api.symbol.NOT_SET,
value["value"]
)
)
try:
session.commit()
except Exception:
session.rollback()
self.log.warning(
(
"Couldn't transfer Avalon Mongo ID"
" attribute for entity type \"{}\"."
).format(entity_type_label),
exc_info=True
)
try:
session.delete(attr_def)
session.commit()
except Exception:
session.rollback()
self.log.warning(
(
"Couldn't delete Avalon Mongo ID"
" attribute for entity type \"{}\"."
).format(entity_type_label),
exc_info=True
)
def applications_attribute(self, event):
apps_data = app_definitions_from_app_manager(self.app_manager)
applications_custom_attr_data = {
"label": "Applications",
"key": CUST_ATTR_APPLICATIONS,
"type": "enumerator",
"entity_type": "show",
"group": CUST_ATTR_GROUP,
"config": {
"multiselect": True,
"data": apps_data
}
}
self.process_attr_data(applications_custom_attr_data, event)
def tools_attribute(self, event):
tools_data = tool_definitions_from_app_manager(self.app_manager)
tools_custom_attr_data = {
"label": "Tools",
"key": CUST_ATTR_TOOLS,
"type": "enumerator",
"is_hierarchical": True,
"group": CUST_ATTR_GROUP,
"config": {
"multiselect": True,
"data": tools_data
}
}
self.process_attr_data(tools_custom_attr_data, event)
def intent_attribute(self, event):
intent_key_values = self.ftrack_settings["intent"]["items"]
intent_values = []
for key, label in intent_key_values.items():
if not key or not label:
self.log.info((
"Skipping intent row: {{\"{}\": \"{}\"}}"
" because of empty key or label."
).format(key, label))
continue
intent_values.append({key: label})
if not intent_values:
return
intent_custom_attr_data = {
"label": "Intent",
"key": CUST_ATTR_INTENT,
"type": "enumerator",
"entity_type": "assetversion",
"group": CUST_ATTR_GROUP,
"config": {
"multiselect": False,
"data": intent_values
}
}
self.process_attr_data(intent_custom_attr_data, event)
def custom_attributes_from_file(self, event):
# Load json with custom attributes configurations
cust_attr_def = default_custom_attributes_definition()
attrs_data = []
# Prepare data of hierarchical attributes
hierarchical_attrs = cust_attr_def.pop(self.hierarchical_key, {})
for key, cust_attr_data in hierarchical_attrs.items():
cust_attr_data["key"] = key
cust_attr_data["is_hierarchical"] = True
attrs_data.append(cust_attr_data)
# Prepare data of entity specific attributes
for entity_type, cust_attr_datas in cust_attr_def.items():
if entity_type.lower() != "task":
for key, cust_attr_data in cust_attr_datas.items():
cust_attr_data["key"] = key
cust_attr_data["entity_type"] = entity_type
attrs_data.append(cust_attr_data)
continue
# Task should have nested level for object type
for object_type, _cust_attr_datas in cust_attr_datas.items():
for key, cust_attr_data in _cust_attr_datas.items():
cust_attr_data["key"] = key
cust_attr_data["entity_type"] = entity_type
cust_attr_data["object_type"] = object_type
attrs_data.append(cust_attr_data)
# Process prepared data
for cust_attr_data in attrs_data:
# Add group
cust_attr_data["group"] = CUST_ATTR_GROUP
self.process_attr_data(cust_attr_data, event)
def presets_for_attr_data(self, attr_data):
output = {}
attr_key = attr_data["key"]
if attr_data.get("is_hierarchical"):
entity_key = self.hierarchical_key
else:
entity_key = attr_data["entity_type"]
entity_settings = self.attrs_settings.get(entity_key) or {}
if entity_key.lower() == "task":
object_type = attr_data["object_type"]
entity_settings = entity_settings.get(object_type.lower()) or {}
key_settings = entity_settings.get(attr_key) or {}
for key, value in key_settings.items():
if key in self.presetable_keys and value:
output[key] = value
return output
def process_attr_data(self, cust_attr_data, event):
attr_settings = self.presets_for_attr_data(cust_attr_data)
cust_attr_data.update(attr_settings)
try:
data = {}
# Get key, label, type
data.update(self.get_required(cust_attr_data))
            # Get hierarchical / entity_type / object_id
data.update(self.get_entity_type(cust_attr_data))
# Get group, default, security roles
data.update(self.get_optional(cust_attr_data))
# Process data
self.process_attribute(data)
except CustAttrException as cae:
cust_attr_name = cust_attr_data.get("label", cust_attr_data["key"])
if cust_attr_name:
msg = 'Custom attribute error "{}" - {}'.format(
cust_attr_name, str(cae)
)
else:
msg = 'Custom attribute error - {}'.format(str(cae))
self.log.warning(msg, exc_info=True)
self.show_message(event, msg)
def process_attribute(self, data):
existing_attrs = self.session.query(
"CustomAttributeConfiguration"
).all()
matching = []
for attr in existing_attrs:
if (
attr["key"] != data["key"] or
attr["type"]["name"] != data["type"]["name"]
):
continue
if data.get("is_hierarchical") is True:
if attr["is_hierarchical"] is True:
matching.append(attr)
elif "object_type_id" in data:
if (
attr["entity_type"] == data["entity_type"] and
attr["object_type_id"] == data["object_type_id"]
):
matching.append(attr)
else:
if attr["entity_type"] == data["entity_type"]:
matching.append(attr)
if len(matching) == 0:
self.session.create("CustomAttributeConfiguration", data)
self.session.commit()
self.log.debug(
"Custom attribute \"{}\" created".format(data["label"])
)
elif len(matching) == 1:
attr_update = matching[0]
for key in data:
if key not in (
"is_hierarchical", "entity_type", "object_type_id"
):
attr_update[key] = data[key]
self.session.commit()
self.log.debug(
"Custom attribute \"{}\" updated".format(data["label"])
)
else:
raise CustAttrException((
"Custom attribute is duplicated. Key: \"{}\" Type: \"{}\""
).format(data["key"], data["type"]["name"]))
def get_required(self, attr):
output = {}
for key in self.required_keys:
if key not in attr:
raise CustAttrException(
"BUG: Key \"{}\" is required".format(key)
)
if attr['type'].lower() not in self.type_posibilities:
raise CustAttrException(
'Type {} is not valid'.format(attr['type'])
)
output['key'] = attr['key']
output['label'] = attr['label']
type_name = attr['type'].lower()
output['type'] = self.types_per_name[type_name]
config = None
if type_name == 'number':
config = self.get_number_config(attr)
elif type_name == 'text':
config = self.get_text_config(attr)
elif type_name == 'enumerator':
config = self.get_enumerator_config(attr)
if config is not None:
output['config'] = config
return output
def get_number_config(self, attr):
if 'config' in attr and 'isdecimal' in attr['config']:
isdecimal = attr['config']['isdecimal']
else:
isdecimal = False
config = json.dumps({'isdecimal': isdecimal})
return config
def get_text_config(self, attr):
if 'config' in attr and 'markdown' in attr['config']:
markdown = attr['config']['markdown']
else:
markdown = False
config = json.dumps({'markdown': markdown})
return config
def get_enumerator_config(self, attr):
if 'config' not in attr:
raise CustAttrException('Missing config with data')
if 'data' not in attr['config']:
raise CustAttrException('Missing data in config')
data = []
for item in attr['config']['data']:
item_data = {}
for key in item:
# TODO key check by regex
item_data['menu'] = item[key]
item_data['value'] = key
data.append(item_data)
multiSelect = False
for k in attr['config']:
if k.lower() == 'multiselect':
if isinstance(attr['config'][k], bool):
multiSelect = attr['config'][k]
else:
raise CustAttrException('Multiselect must be boolean')
break
config = json.dumps({
'multiSelect': multiSelect,
'data': json.dumps(data)
})
return config
def get_group(self, attr):
if isinstance(attr, dict):
group_name = attr['group'].lower()
else:
group_name = attr
if group_name in self.groups:
return self.groups[group_name]
query = 'CustomAttributeGroup where name is "{}"'.format(group_name)
groups = self.session.query(query).all()
if len(groups) == 1:
group = groups[0]
self.groups[group_name] = group
return group
elif len(groups) < 1:
group = self.session.create('CustomAttributeGroup', {
'name': group_name,
})
self.session.commit()
return group
else:
raise CustAttrException(
'Found more than one group "{}"'.format(group_name)
)
def get_security_roles(self, security_roles):
security_roles_lowered = tuple(name.lower() for name in security_roles)
if (
len(security_roles_lowered) == 0
or "all" in security_roles_lowered
):
return list(self.security_roles.values())
output = []
if security_roles_lowered[0] == "except":
excepts = security_roles_lowered[1:]
for role_name, role in self.security_roles.items():
if role_name not in excepts:
output.append(role)
else:
for role_name in security_roles_lowered:
if role_name in self.security_roles:
output.append(self.security_roles[role_name])
else:
raise CustAttrException((
"Securit role \"{}\" was not found in Ftrack."
).format(role_name))
return output
def get_default(self, attr):
type = attr['type']
default = attr['default']
if default is None:
return default
err_msg = 'Default value is not'
if type == 'number':
if isinstance(default, (str)) and default.isnumeric():
default = float(default)
if not isinstance(default, (float, int)):
raise CustAttrException('{} integer'.format(err_msg))
elif type == 'text':
if not isinstance(default, str):
raise CustAttrException('{} string'.format(err_msg))
elif type == 'boolean':
if not isinstance(default, bool):
raise CustAttrException('{} boolean'.format(err_msg))
elif type == 'enumerator':
if not isinstance(default, list):
raise CustAttrException(
'{} array with strings'.format(err_msg)
)
# TODO check if multiSelect is available
# and if default is one of data menu
if not isinstance(default[0], str):
raise CustAttrException('{} array of strings'.format(err_msg))
elif type == 'date':
date_items = default.split(' ')
try:
if len(date_items) == 1:
default = arrow.get(default, 'YY.M.D')
elif len(date_items) == 2:
default = arrow.get(default, 'YY.M.D H:m:s')
else:
raise Exception
except Exception:
raise CustAttrException('Date is not in proper format')
elif type == 'dynamic enumerator':
raise CustAttrException('Dynamic enumerator can\'t have default')
return default
def get_optional(self, attr):
output = {}
if "group" in attr:
output["group"] = self.get_group(attr)
if "default" in attr:
output["default"] = self.get_default(attr)
roles_read = []
roles_write = []
if "read_security_roles" in attr:
roles_read = attr["read_security_roles"]
if "write_security_roles" in attr:
roles_write = attr["write_security_roles"]
output["read_security_roles"] = self.get_security_roles(roles_read)
output["write_security_roles"] = self.get_security_roles(roles_write)
return output
def get_entity_type(self, attr):
if attr.get("is_hierarchical", False):
return {
"is_hierarchical": True,
"entity_type": attr.get("entity_type") or "show"
}
if 'entity_type' not in attr:
raise CustAttrException('Missing entity_type')
if attr['entity_type'].lower() != 'task':
return {'entity_type': attr['entity_type']}
if 'object_type' not in attr:
raise CustAttrException('Missing object_type')
object_type_name = attr['object_type']
object_type_name_low = object_type_name.lower()
object_type = self.object_types_per_name.get(object_type_name_low)
if not object_type:
raise CustAttrException((
'Object type with name "{}" doesn\'t exist'
).format(object_type_name))
return {
'entity_type': attr['entity_type'],
'object_type_id': object_type["id"]
}
def register(session):
'''Register plugin. Called when used as an plugin.'''
CustomAttributes(session).register()
```
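As a quick illustration of the data this handler validates, here is a hypothetical attribute definition that would pass `get_required`, `get_entity_type` and `get_optional` above. The field names and values are assumptions inferred from the validation code, not an official settings schema.
```python
# Hypothetical custom attribute definitions, inferred from the validation
# logic above (get_required / get_entity_type / get_optional).
cust_attr_data = {
    # required by get_required()
    "key": "fps",
    "label": "FPS",
    "type": "number",               # must be one of the known type names
    "config": {"isdecimal": True},  # consumed by get_number_config()
    # entity targeting handled by get_entity_type()
    "entity_type": "task",
    "object_type": "Shot",          # required because entity_type is "task"
    # optional keys handled by get_optional()
    "group": "pipeline",
    "default": 25,
    "write_security_roles": ["API", "Administrator"],
    "read_security_roles": [],      # empty list means "all" roles
}

# An enumerator attribute carries its options in config["data"] instead:
enum_attr_data = {
    "key": "review_status",
    "label": "Review status",
    "type": "enumerator",
    "is_hierarchical": True,        # hierarchical attrs skip object_type
    "config": {
        "multiselect": False,
        "data": [
            {"wip": "Work in progress"},
            {"approved": "Approved"},
        ],
    },
}
```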
#### File: ftrack/event_handlers_user/action_rv.py
```python
import os
import subprocess
import traceback
import json
from openpype_modules.ftrack.lib import BaseAction, statics_icon
import ftrack_api
from avalon import io, api
class RVAction(BaseAction):
""" Launch RV action """
identifier = "rv.launch.action"
label = "rv"
description = "rv Launcher"
icon = statics_icon("ftrack", "action_icons", "RV.png")
type = 'Application'
allowed_types = ["img", "mov", "exr", "mp4"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# QUESTION load RV application data from ApplicationManager?
rv_path = None
# RV_HOME should be set if properly installed
if os.environ.get('RV_HOME'):
rv_path = os.path.join(
os.environ.get('RV_HOME'),
'bin',
'rv'
)
if not os.path.exists(rv_path):
rv_path = None
if not rv_path:
self.log.info("RV path was not found.")
self.ignore_me = True
self.rv_path = rv_path
def discover(self, session, entities, event):
"""Return available actions based on *event*. """
return True
def preregister(self):
if self.rv_path is None:
return (
'RV is not installed or paths in presets are not set correctly'
)
return True
def get_components_from_entity(self, session, entity, components):
"""Get components from various entity types.
The components dictionary is modified in place, so nothing is returned.
Args:
entity (Ftrack entity)
components (dict)
"""
if entity.entity_type.lower() == "assetversion":
for component in entity["components"]:
if component["file_type"][1:] not in self.allowed_types:
continue
try:
components[entity["asset"]["parent"]["name"]].append(
component
)
except KeyError:
components[entity["asset"]["parent"]["name"]] = [component]
return
if entity.entity_type.lower() == "task":
query = "AssetVersion where task_id is '{0}'".format(entity["id"])
for assetversion in session.query(query):
self.get_components_from_entity(
session, assetversion, components
)
return
if entity.entity_type.lower() == "shot":
query = "AssetVersion where asset.parent.id is '{0}'".format(
entity["id"]
)
for assetversion in session.query(query):
self.get_components_from_entity(
session, assetversion, components
)
return
raise NotImplementedError(
"\"{}\" entity type is not implemented yet.".format(
entity.entity_type
)
)
def interface(self, session, entities, event):
if event['data'].get('values', {}):
return
user = session.query(
"User where username is '{0}'".format(
os.environ["FTRACK_API_USER"]
)
).one()
job = session.create(
"Job",
{
"user": user,
"status": "running",
"data": json.dumps({
"description": "RV: Collecting components."
})
}
)
# Commit to feedback to user.
session.commit()
items = []
try:
items = self.get_interface_items(session, entities)
except Exception:
self.log.error(traceback.format_exc())
job["status"] = "failed"
else:
job["status"] = "done"
# Commit to end job.
session.commit()
return {"items": items}
def get_interface_items(self, session, entities):
components = {}
for entity in entities:
self.get_components_from_entity(session, entity, components)
# Sort by version
for parent_name, entities in components.items():
version_mapping = {}
for entity in entities:
try:
version_mapping[entity["version"]["version"]].append(
entity
)
except KeyError:
version_mapping[entity["version"]["version"]] = [entity]
# Sort same versions by date.
for version, entities in version_mapping.items():
version_mapping[version] = sorted(
entities, key=lambda x: x["version"]["date"], reverse=True
)
components[parent_name] = []
for version in reversed(sorted(version_mapping.keys())):
components[parent_name].extend(version_mapping[version])
# Items to present to user.
items = []
label = "{} - v{} - {}"
for parent_name, entities in components.items():
data = []
for entity in entities:
data.append(
{
"label": label.format(
entity["version"]["asset"]["name"],
str(entity["version"]["version"]).zfill(3),
entity["file_type"][1:]
),
"value": entity["id"]
}
)
items.append(
{
"label": parent_name,
"type": "enumerator",
"name": parent_name,
"data": data,
"value": data[0]["value"]
}
)
return items
def launch(self, session, entities, event):
"""Callback method for RV action."""
# Launching application
if "values" not in event["data"]:
return
user = session.query(
"User where username is '{0}'".format(
os.environ["FTRACK_API_USER"]
)
).one()
job = session.create(
"Job",
{
"user": user,
"status": "running",
"data": json.dumps({
"description": "RV: Collecting file paths."
})
}
)
# Commit to feedback to user.
session.commit()
paths = []
try:
paths = self.get_file_paths(session, event)
except Exception:
self.log.error(traceback.format_exc())
job["status"] = "failed"
else:
job["status"] = "done"
# Commit to end job.
session.commit()
args = [os.path.normpath(self.rv_path)]
fps = entities[0].get("custom_attributes", {}).get("fps", None)
if fps is not None:
args.extend(["-fps", str(fps)])
args.extend(paths)
self.log.info("Running rv: {}".format(args))
subprocess.Popen(args)
return True
def get_file_paths(self, session, event):
"""Get file paths from selected components."""
link = session.get(
"Component", list(event["data"]["values"].values())[0]
)["version"]["asset"]["parent"]["link"][0]
project = session.get(link["type"], link["id"])
os.environ["AVALON_PROJECT"] = project["name"]
api.Session["AVALON_PROJECT"] = project["name"]
io.install()
location = ftrack_api.Session().pick_location()
paths = []
for parent_name in sorted(event["data"]["values"].keys()):
component = session.get(
"Component", event["data"]["values"][parent_name]
)
# Newer publishes have the source referenced in Ftrack.
online_source = False
for neighbour_component in component["version"]["components"]:
if neighbour_component["name"] != "ftrackreview-mp4_src":
continue
paths.append(
location.get_filesystem_path(neighbour_component)
)
online_source = True
if online_source:
continue
asset = io.find_one({"type": "asset", "name": parent_name})
subset = io.find_one(
{
"type": "subset",
"name": component["version"]["asset"]["name"],
"parent": asset["_id"]
}
)
version = io.find_one(
{
"type": "version",
"name": component["version"]["version"],
"parent": subset["_id"]
}
)
representation = io.find_one(
{
"type": "representation",
"parent": version["_id"],
"name": component["file_type"][1:]
}
)
if representation is None:
representation = io.find_one(
{
"type": "representation",
"parent": version["_id"],
"name": "preview"
}
)
paths.append(api.get_representation_path(representation))
return paths
def register(session):
"""Register hooks."""
RVAction(session).register()
```
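The enumerator items built by `get_interface_items` follow the ftrack form-widget convention; a rough sketch of the resulting payload, with made-up names and ids, might look like this:
```python
# Rough sketch of the payload returned by interface() above. Names and ids
# are made up for illustration; the structure mirrors get_interface_items().
example_interface = {
    "items": [
        {
            "label": "sh010",            # parent (shot) name
            "type": "enumerator",
            "name": "sh010",
            "data": [
                {"label": "renderMain - v003 - exr", "value": "component-id-1"},
                {"label": "renderMain - v002 - mov", "value": "component-id-2"},
            ],
            # preselect the first (newest) component
            "value": "component-id-1",
        }
    ]
}
```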
#### File: openpype/modules/interfaces.py
```python
from abc import abstractmethod
from openpype import resources
from openpype.modules import OpenPypeInterface
class IPluginPaths(OpenPypeInterface):
"""Module has plugin paths to return.
Expected result is dictionary with keys "publish", "create", "load" or
"actions" and values as list or string.
{
"publish": ["path/to/publish_plugins"]
}
"""
# TODO validation of an output
@abstractmethod
def get_plugin_paths(self):
pass
class ILaunchHookPaths(OpenPypeInterface):
"""Module has launch hook paths to return.
Expected result is list of paths.
["path/to/launch_hooks_dir"]
"""
@abstractmethod
def get_launch_hook_paths(self):
pass
class ITrayModule(OpenPypeInterface):
"""Module has special procedures when used in Pype Tray.
IMPORTANT:
The module must still be usable when it is not used in the tray,
even if it then does nothing.
"""
tray_initialized = False
_tray_manager = None
@abstractmethod
def tray_init(self):
"""Initialization part of tray implementation.
Triggered between `initialization` and `connect_with_modules`.
This is where GUIs should be loaded or tray specific parts should be
prepared.
"""
pass
@abstractmethod
def tray_menu(self, tray_menu):
"""Add module's action to tray menu."""
pass
@abstractmethod
def tray_start(self):
"""Start procedure in Pype tray."""
pass
@abstractmethod
def tray_exit(self):
"""Cleanup method which is executed on tray shutdown.
This is place where all threads should be shut.
"""
pass
def execute_in_main_thread(self, callback):
""" Pushes callback to the queue or process 'callback' on a main thread
Some callbacks need to be processed on main thread (menu actions
must be added on main thread or they won't get triggered etc.)
"""
if not self.tray_initialized:
# TODO Called without initialized tray, still main thread needed
try:
callback()
except Exception:
self.log.warning(
"Failed to execute {} in main thread".format(callback),
exc_info=True)
return
self.manager.tray_manager.execute_in_main_thread(callback)
def show_tray_message(self, title, message, icon=None, msecs=None):
"""Show tray message.
Args:
title (str): Title of message.
message (str): Content of message.
icon (QSystemTrayIcon.MessageIcon): Message's icon. Default is
Information icon, may differ by Qt version.
msecs (int): Duration of message visibility in milliseconds.
Default is 10000 msecs, may differ by Qt version.
"""
if self._tray_manager:
self._tray_manager.show_tray_message(title, message, icon, msecs)
def add_doubleclick_callback(self, callback):
if hasattr(self.manager, "add_doubleclick_callback"):
self.manager.add_doubleclick_callback(self, callback)
class ITrayAction(ITrayModule):
"""Implementation of Tray action.
Add action to tray menu which will trigger `on_action_trigger`.
It is expected to be used for showing tools.
Methods `tray_start`, `tray_exit` and `connect_with_modules` are overridden
as it's not expected that action will use them. But it is possible if
necessary.
"""
admin_action = False
_admin_submenu = None
@property
@abstractmethod
def label(self):
"""Service label showed in menu."""
pass
@abstractmethod
def on_action_trigger(self):
"""What happens on actions click."""
pass
def tray_menu(self, tray_menu):
from Qt import QtWidgets
if self.admin_action:
menu = self.admin_submenu(tray_menu)
action = QtWidgets.QAction(self.label, menu)
menu.addAction(action)
if not menu.menuAction().isVisible():
menu.menuAction().setVisible(True)
else:
action = QtWidgets.QAction(self.label, tray_menu)
tray_menu.addAction(action)
action.triggered.connect(self.on_action_trigger)
def tray_start(self):
return
def tray_exit(self):
return
@staticmethod
def admin_submenu(tray_menu):
if ITrayAction._admin_submenu is None:
from Qt import QtWidgets
admin_submenu = QtWidgets.QMenu("Admin", tray_menu)
admin_submenu.menuAction().setVisible(False)
ITrayAction._admin_submenu = admin_submenu
return ITrayAction._admin_submenu
class ITrayService(ITrayModule):
# Module's property
menu_action = None
# Class properties
_services_submenu = None
_icon_failed = None
_icon_running = None
_icon_idle = None
@property
@abstractmethod
def label(self):
"""Service label showed in menu."""
pass
# TODO be able to get any sort of information to show/print
# @abstractmethod
# def get_service_info(self):
# pass
@staticmethod
def services_submenu(tray_menu):
if ITrayService._services_submenu is None:
from Qt import QtWidgets
services_submenu = QtWidgets.QMenu("Services", tray_menu)
services_submenu.menuAction().setVisible(False)
ITrayService._services_submenu = services_submenu
return ITrayService._services_submenu
@staticmethod
def add_service_action(action):
ITrayService._services_submenu.addAction(action)
if not ITrayService._services_submenu.menuAction().isVisible():
ITrayService._services_submenu.menuAction().setVisible(True)
@staticmethod
def _load_service_icons():
from Qt import QtGui
ITrayService._icon_failed = QtGui.QIcon(
resources.get_resource("icons", "circle_red.png")
)
ITrayService._icon_running = QtGui.QIcon(
resources.get_resource("icons", "circle_green.png")
)
ITrayService._icon_idle = QtGui.QIcon(
resources.get_resource("icons", "circle_orange.png")
)
@staticmethod
def get_icon_running():
if ITrayService._icon_running is None:
ITrayService._load_service_icons()
return ITrayService._icon_running
@staticmethod
def get_icon_idle():
if ITrayService._icon_idle is None:
ITrayService._load_service_icons()
return ITrayService._icon_idle
@staticmethod
def get_icon_failed():
if ITrayService._icon_failed is None:
ITrayService._load_service_icons()
return ITrayService._icon_failed
def tray_menu(self, tray_menu):
from Qt import QtWidgets
action = QtWidgets.QAction(
self.label,
self.services_submenu(tray_menu)
)
self.menu_action = action
self.add_service_action(action)
self.set_service_running_icon()
def set_service_running_icon(self):
"""Change icon of an QAction to green circle."""
if self.menu_action:
self.menu_action.setIcon(self.get_icon_running())
def set_service_failed_icon(self):
"""Change icon of an QAction to red circle."""
if self.menu_action:
self.menu_action.setIcon(self.get_icon_failed())
def set_service_idle_icon(self):
"""Change icon of an QAction to orange circle."""
if self.menu_action:
self.menu_action.setIcon(self.get_icon_idle())
class ISettingsChangeListener(OpenPypeInterface):
"""Module has plugin paths to return.
Expected result is dictionary with keys "publish", "create", "load" or
"actions" and values as list or string.
{
"publish": ["path/to/publish_plugins"]
}
"""
@abstractmethod
def on_system_settings_save(
self, old_value, new_value, changes, new_value_metadata
):
pass
@abstractmethod
def on_project_settings_save(
self, old_value, new_value, changes, project_name, new_value_metadata
):
pass
@abstractmethod
def on_project_anatomy_save(
self, old_value, new_value, changes, project_name, new_value_metadata
):
pass
```
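A minimal sketch of a module implementing two of these interfaces might look like the following; the base class import and method names are assumptions used only to illustrate the contracts documented above, not a definitive implementation.
```python
# Hypothetical module combining IPluginPaths and ITrayAction.
import os

from openpype.modules import OpenPypeModule  # assumed base class
from openpype.modules.interfaces import IPluginPaths, ITrayAction


class ExampleModule(OpenPypeModule, IPluginPaths, ITrayAction):
    name = "example_module"
    label = "Example tool"  # shown in the tray menu

    def initialize(self, module_settings):
        self.enabled = True

    def tray_init(self):
        # Nothing to prepare for this simple action.
        pass

    def get_plugin_paths(self):
        # Keys must be "publish", "create", "load" or "actions".
        current_dir = os.path.dirname(os.path.abspath(__file__))
        return {
            "publish": [os.path.join(current_dir, "plugins", "publish")]
        }

    def on_action_trigger(self):
        # Called when the action added by ITrayAction.tray_menu is clicked.
        self.log.info("Example tool triggered")
```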
#### File: plugins/publish/collect_slack_family.py
```python
from avalon import io
import pyblish.api
from openpype.lib.profiles_filtering import filter_profiles
class CollectSlackFamilies(pyblish.api.InstancePlugin):
"""Collect family for Slack notification
Expects configured profile in
Project settings > Slack > Publish plugins > Notification to Slack
Add Slack family to those instance that should be messaged to Slack
"""
order = pyblish.api.CollectorOrder + 0.4999
label = 'Collect Slack family'
profiles = None
def process(self, instance):
task_name = io.Session.get("AVALON_TASK")
family = self.main_family_from_instance(instance)
key_values = {
"families": family,
"tasks": task_name,
"hosts": instance.data["anatomyData"]["app"],
}
profile = filter_profiles(self.profiles, key_values,
logger=self.log)
# make slack publishable
if profile:
if instance.data.get('families'):
instance.data['families'].append('slack')
else:
instance.data['families'] = ['slack']
instance.data["slack_channel_message_profiles"] = \
profile["channel_messages"]
slack_token = (instance.context.data["project_settings"]
["slack"]
["token"])
instance.data["slack_token"] = slack_token
def main_family_from_instance(self, instance): # TODO yank from integrate
"""Returns main family of entered instance."""
family = instance.data.get("family")
if not family:
family = instance.data["families"][0]
return family
```
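The `profiles` attribute is filled from project settings; based only on the keys this collector reads (`families`, `tasks` and `hosts` for filtering, `channel_messages` from the matched profile), a hedged example of a profile list could look like this. The inner message structure is an assumption, not the exact settings schema.
```python
# Hypothetical profile list as this collector consumes it. Channel names and
# message text are placeholders.
profiles = [
    {
        "families": ["render", "review"],
        "tasks": ["compositing"],
        "hosts": ["nuke"],
        "channel_messages": [
            {
                "channels": ["#project-updates"],
                "message": "New version published",
            }
        ],
    }
]
```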
#### File: plugins/load/add_site.py
```python
from avalon import api
from openpype.modules import ModulesManager
class AddSyncSite(api.Loader):
"""Add sync site to representation"""
representations = ["*"]
families = ["*"]
label = "Add Sync Site"
order = 2 # lower means better
icon = "download"
color = "#999999"
def load(self, context, name=None, namespace=None, data=None):
self.log.info("Adding {} to representation: {}".format(
data["site_name"], data["_id"]))
self.add_site_to_representation(data["project_name"],
data["_id"],
data["site_name"])
self.log.debug("Site added.")
@staticmethod
def add_site_to_representation(project_name, representation_id, site_name):
"""Adds new site to representation_id, resets if exists"""
manager = ModulesManager()
sync_server = manager.modules_by_name["sync_server"]
sync_server.add_site(project_name, representation_id, site_name,
force=True)
def filepath_from_context(self, context):
"""No real file loading"""
return ""
```
#### File: tools/experimental_tools/dialog.py
```python
from Qt import QtWidgets, QtCore, QtGui
from openpype.style import (
load_stylesheet,
app_icon_path
)
from .tools_def import ExperimentalTools
class ToolButton(QtWidgets.QPushButton):
triggered = QtCore.Signal(str)
def __init__(self, identifier, *args, **kwargs):
super(ToolButton, self).__init__(*args, **kwargs)
self._identifier = identifier
self.clicked.connect(self._on_click)
def _on_click(self):
self.triggered.emit(self._identifier)
class ExperimentalToolsDialog(QtWidgets.QDialog):
refresh_interval = 3000
def __init__(self, parent=None):
super(ExperimentalToolsDialog, self).__init__(parent)
self.setWindowTitle("OpenPype Experimental tools")
icon = QtGui.QIcon(app_icon_path())
self.setWindowIcon(icon)
self.setStyleSheet(load_stylesheet())
# Widgets for cases there are not available experimental tools
empty_widget = QtWidgets.QWidget(self)
empty_label = QtWidgets.QLabel(
"There are no experimental tools available...", empty_widget
)
empty_btns_layout = QtWidgets.QHBoxLayout()
ok_btn = QtWidgets.QPushButton("OK", empty_widget)
empty_btns_layout.setContentsMargins(0, 0, 0, 0)
empty_btns_layout.addStretch(1)
empty_btns_layout.addWidget(ok_btn, 0)
empty_layout = QtWidgets.QVBoxLayout(empty_widget)
empty_layout.setContentsMargins(0, 0, 0, 0)
empty_layout.addWidget(empty_label)
empty_layout.addStretch(1)
empty_layout.addLayout(empty_btns_layout)
# Content of Experimental tools
# Layout where buttons are added
content_layout = QtWidgets.QVBoxLayout()
content_layout.setContentsMargins(0, 0, 0, 0)
# Separator line
separator_widget = QtWidgets.QWidget(self)
separator_widget.setObjectName("Separator")
separator_widget.setMinimumHeight(2)
separator_widget.setMaximumHeight(2)
# Label describing how to turn off tools
tool_btns_widget = QtWidgets.QWidget(self)
tool_btns_label = QtWidgets.QLabel(
(
"You can enable these features in"
"<br><b>OpenPype tray -> Settings -> Experimental tools</b>"
),
tool_btns_widget
)
tool_btns_label.setAlignment(QtCore.Qt.AlignCenter)
tool_btns_layout = QtWidgets.QVBoxLayout(tool_btns_widget)
tool_btns_layout.setContentsMargins(0, 0, 0, 0)
tool_btns_layout.addLayout(content_layout)
tool_btns_layout.addStretch(1)
tool_btns_layout.addWidget(separator_widget, 0)
tool_btns_layout.addWidget(tool_btns_label, 0)
experimental_tools = ExperimentalTools(
parent=parent, filter_hosts=True
)
# Main layout
layout = QtWidgets.QVBoxLayout(self)
layout.addWidget(empty_widget, 1)
layout.addWidget(tool_btns_widget, 1)
refresh_timer = QtCore.QTimer()
refresh_timer.setInterval(self.refresh_interval)
refresh_timer.timeout.connect(self._on_refresh_timeout)
ok_btn.clicked.connect(self._on_ok_click)
self._empty_widget = empty_widget
self._tool_btns_widget = tool_btns_widget
self._content_layout = content_layout
self._experimental_tools = experimental_tools
self._buttons_by_tool_identifier = {}
self._refresh_timer = refresh_timer
# Is dialog first shown
self._first_show = True
# Trigger refresh when window gets activity
self._refresh_on_active = True
# Is window active
self._window_is_active = False
def refresh(self):
self._experimental_tools.refresh_availability()
buttons_to_remove = set(self._buttons_by_tool_identifier.keys())
for idx, tool in enumerate(self._experimental_tools.tools):
identifier = tool.identifier
if identifier in buttons_to_remove:
buttons_to_remove.remove(identifier)
is_new = False
button = self._buttons_by_tool_identifier[identifier]
else:
is_new = True
button = ToolButton(identifier, self._tool_btns_widget)
button.triggered.connect(self._on_btn_trigger)
self._buttons_by_tool_identifier[identifier] = button
self._content_layout.insertWidget(idx, button)
if button.text() != tool.label:
button.setText(tool.label)
if tool.enabled:
button.setToolTip(tool.tooltip)
elif is_new or button.isEnabled():
button.setToolTip((
"You can enable this tool in local settings."
"\n\nOpenPype Tray > Settings > Experimental Tools"
))
if tool.enabled != button.isEnabled():
button.setEnabled(tool.enabled)
for identifier in buttons_to_remove:
button = self._buttons_by_tool_identifier.pop(identifier)
button.setVisible(False)
idx = self._content_layout.indexOf(button)
self._content_layout.takeAt(idx)
button.deleteLater()
self._set_visibility()
def _is_content_visible(self):
return len(self._buttons_by_tool_identifier) > 0
def _set_visibility(self):
content_visible = self._is_content_visible()
self._tool_btns_widget.setVisible(content_visible)
self._empty_widget.setVisible(not content_visible)
def _on_ok_click(self):
self.close()
def _on_btn_trigger(self, identifier):
tool = self._experimental_tools.tools_by_identifier.get(identifier)
if tool is not None:
tool.execute()
def showEvent(self, event):
super(ExperimentalToolsDialog, self).showEvent(event)
if self._refresh_on_active:
# Start/Restart timer
self._refresh_timer.start()
# Refresh
self.refresh()
elif not self._refresh_timer.isActive():
self._refresh_timer.start()
if self._first_show:
self._first_show = False
# Set stylesheet
self.setStyleSheet(load_stylesheet())
# Resize dialog if there is not content
if not self._is_content_visible():
size = self.size()
size.setWidth(size.width() + size.width() / 3)
self.resize(size)
def changeEvent(self, event):
if event.type() == QtCore.QEvent.ActivationChange:
self._window_is_active = self.isActiveWindow()
if self._window_is_active and self._refresh_on_active:
self._refresh_timer.start()
self.refresh()
super(ExperimentalToolsDialog, self).changeEvent(event)
def _on_refresh_timeout(self):
# Stop timer if window is not visible
if not self.isVisible():
self._refresh_on_active = True
self._refresh_timer.stop()
# Skip refreshing if window is not active
elif not self._window_is_active:
self._refresh_on_active = True
# Window is active and visible so we're refreshing buttons
else:
self.refresh()
```
#### File: tools/experimental_tools/tools_def.py
```python
import os
from openpype.settings import get_local_settings
# Constant key under which local settings are stored
LOCAL_EXPERIMENTAL_KEY = "experimental_tools"
class ExperimentalTool:
"""Definition of experimental tool.
Definition is used in local settings and in experimental tools dialog.
Args:
identifier (str): String identifier of tool (unique).
label (str): Label shown in UI.
callback (function): Callback for UI button.
tooltip (str): Tooltip showed on button.
hosts_filter (list): List of host names for which the tool is available.
Some tools may not be available in all hosts.
"""
def __init__(
self, identifier, label, callback, tooltip, hosts_filter=None
):
self.identifier = identifier
self.label = label
self.callback = callback
self.tooltip = tooltip
self.hosts_filter = hosts_filter
self._enabled = True
def is_available_for_host(self, host_name):
if self.hosts_filter:
return host_name in self.hosts_filter
return True
@property
def enabled(self):
"""Is tool enabled and button is clickable."""
return self._enabled
def set_enabled(self, enabled=True):
"""Change if tool is enabled."""
self._enabled = enabled
def execute(self):
"""Trigger registered callback."""
self.callback()
class ExperimentalTools:
"""Wrapper around experimental tools.
To add/remove experimental tool just add/remove tool to
`experimental_tools` variable in __init__ function.
Args:
parent (QtWidgets.QWidget): Parent widget for tools.
host_name (str): Name of host in which context we're now. Environment
value 'AVALON_APP' is used when not passed.
filter_hosts (bool): Whether tools should be filtered by host name.
Defaults to 'True' when 'host_name' is passed and is always 'False'
when 'host_name' is not defined.
"""
def __init__(self, parent=None, host_name=None, filter_hosts=None):
# Definition of experimental tools
experimental_tools = [
ExperimentalTool(
"publisher",
"New publisher",
self._show_publisher,
"Combined creation and publishing into one tool."
)
]
# --- Example tool (callback will just print on click) ---
# def example_callback(*args):
# print("Triggered tool")
#
# experimental_tools = [
# ExperimentalTool(
# "example",
# "Example experimental tool",
# example_callback,
# "Example tool tooltip."
# )
# ]
# Try to get host name from env variable `AVALON_APP`
if not host_name:
host_name = os.environ.get("AVALON_APP")
# Decide if filtering by host name should happen
if filter_hosts is None:
filter_hosts = host_name is not None
if filter_hosts and not host_name:
filter_hosts = False
# Filter tools by host name
if filter_hosts:
experimental_tools = [
tool
for tool in experimental_tools
if tool.is_available_for_host(host_name)
]
# Store tools by identifier
tools_by_identifier = {}
for tool in experimental_tools:
if tool.identifier in tools_by_identifier:
raise KeyError((
"Duplicated experimental tool identifier \"{}\""
).format(tool.identifier))
tools_by_identifier[tool.identifier] = tool
self._tools_by_identifier = tools_by_identifier
self._tools = experimental_tools
self._parent_widget = parent
self._publisher_tool = None
@property
def tools(self):
"""Tools in list.
Returns:
list: Tools filtered by host name if filtering was enabled
on initialization.
"""
return self._tools
@property
def tools_by_identifier(self):
"""Tools by their identifier.
Returns:
dict: Tools by identifier filtered by host name if filtering
was enabled on initialization.
"""
return self._tools_by_identifier
def refresh_availability(self):
"""Reload local settings and check if any tool changed ability."""
local_settings = get_local_settings()
experimental_settings = (
local_settings.get(LOCAL_EXPERIMENTAL_KEY)
) or {}
for identifier, experimental_tool in self.tools_by_identifier.items():
enabled = experimental_settings.get(identifier, False)
experimental_tool.set_enabled(enabled)
def _show_publisher(self):
if self._publisher_tool is None:
from openpype.tools import publisher
self._publisher_tool = publisher.PublisherWindow(
parent=self._parent_widget
)
self._publisher_tool.show()
```
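Outside of the dialog, the same wrapper can be driven directly. A minimal sketch, assuming OpenPype local settings are reachable, `AVALON_APP` is set and the import path follows the file location above:
```python
# Minimal sketch of using ExperimentalTools without the Qt dialog.
from openpype.tools.experimental_tools.tools_def import ExperimentalTools


def list_enabled_tools(host_name=None):
    tools = ExperimentalTools(host_name=host_name)
    # Re-read local settings so the `enabled` flags are up to date.
    tools.refresh_availability()
    return [tool.identifier for tool in tools.tools if tool.enabled]


if __name__ == "__main__":
    print("Enabled experimental tools:", list_enabled_tools())
```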
#### File: tools/launcher/widgets.py
```python
import copy
import time
import collections
from Qt import QtWidgets, QtCore, QtGui
from avalon.vendor import qtawesome
from .delegates import ActionDelegate
from . import lib
from .actions import ApplicationAction
from .models import ActionModel
from openpype.tools.flickcharm import FlickCharm
from .constants import (
ACTION_ROLE,
GROUP_ROLE,
VARIANT_GROUP_ROLE,
ACTION_ID_ROLE,
ANIMATION_START_ROLE,
ANIMATION_STATE_ROLE,
ANIMATION_LEN,
FORCE_NOT_OPEN_WORKFILE_ROLE
)
class ProjectBar(QtWidgets.QWidget):
def __init__(self, project_handler, parent=None):
super(ProjectBar, self).__init__(parent)
project_combobox = QtWidgets.QComboBox(self)
# Change delegate so stylesheets are applied
project_delegate = QtWidgets.QStyledItemDelegate(project_combobox)
project_combobox.setItemDelegate(project_delegate)
project_combobox.setModel(project_handler.model)
project_combobox.setRootModelIndex(QtCore.QModelIndex())
layout = QtWidgets.QHBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(project_combobox)
self.setSizePolicy(
QtWidgets.QSizePolicy.MinimumExpanding,
QtWidgets.QSizePolicy.Maximum
)
self.project_handler = project_handler
self.project_delegate = project_delegate
self.project_combobox = project_combobox
# Signals
self.project_combobox.currentIndexChanged.connect(self.on_index_change)
project_handler.project_changed.connect(self._on_project_change)
# Set current project by default if it's set.
project_name = project_handler.current_project
if project_name:
self.set_project(project_name)
def _on_project_change(self, project_name):
if self.get_current_project() == project_name:
return
self.set_project(project_name)
def get_current_project(self):
return self.project_combobox.currentText()
def set_project(self, project_name):
index = self.project_combobox.findText(project_name)
if index < 0:
# Try refresh combobox model
self.project_handler.refresh_model()
index = self.project_combobox.findText(project_name)
if index >= 0:
self.project_combobox.setCurrentIndex(index)
def on_index_change(self, idx):
if not self.isVisible():
return
project_name = self.get_current_project()
self.project_handler.set_project(project_name)
class ActionBar(QtWidgets.QWidget):
"""Launcher interface"""
action_clicked = QtCore.Signal(object)
def __init__(self, project_handler, dbcon, parent=None):
super(ActionBar, self).__init__(parent)
self.project_handler = project_handler
self.dbcon = dbcon
view = QtWidgets.QListView(self)
view.setProperty("mode", "icon")
view.setObjectName("IconView")
view.setViewMode(QtWidgets.QListView.IconMode)
view.setResizeMode(QtWidgets.QListView.Adjust)
view.setSelectionMode(QtWidgets.QListView.NoSelection)
view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
view.setEditTriggers(QtWidgets.QListView.NoEditTriggers)
view.setWrapping(True)
view.setGridSize(QtCore.QSize(70, 75))
view.setIconSize(QtCore.QSize(30, 30))
view.setSpacing(0)
view.setWordWrap(True)
model = ActionModel(self.dbcon, self)
view.setModel(model)
# TODO better group delegate
delegate = ActionDelegate(
[GROUP_ROLE, VARIANT_GROUP_ROLE],
self
)
view.setItemDelegate(delegate)
layout = QtWidgets.QHBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(view)
self.model = model
self.view = view
self._animated_items = set()
animation_timer = QtCore.QTimer()
animation_timer.setInterval(50)
animation_timer.timeout.connect(self._on_animation)
self._animation_timer = animation_timer
# Make view flickable
flick = FlickCharm(parent=view)
flick.activateOn(view)
self.set_row_height(1)
project_handler.projects_refreshed.connect(self._on_projects_refresh)
view.clicked.connect(self.on_clicked)
view.customContextMenuRequested.connect(self.on_context_menu)
self._context_menu = None
self._discover_on_menu = False
def discover_actions(self):
if self._context_menu is not None:
self._discover_on_menu = True
return
if self._animation_timer.isActive():
self._animation_timer.stop()
self.model.discover()
def filter_actions(self):
if self._animation_timer.isActive():
self._animation_timer.stop()
self.model.filter_actions()
def set_row_height(self, rows):
self.setMinimumHeight(rows * 75)
def _on_projects_refresh(self):
self.discover_actions()
def _on_animation(self):
time_now = time.time()
for action_id in tuple(self._animated_items):
item = self.model.items_by_id.get(action_id)
if not item:
self._animated_items.remove(action_id)
continue
start_time = item.data(ANIMATION_START_ROLE)
if (time_now - start_time) > ANIMATION_LEN:
item.setData(0, ANIMATION_STATE_ROLE)
self._animated_items.remove(action_id)
if not self._animated_items:
self._animation_timer.stop()
self.update()
def _start_animation(self, index):
# Offset refresh timeout
self.project_handler.start_timer()
action_id = index.data(ACTION_ID_ROLE)
item = self.model.items_by_id.get(action_id)
if item:
item.setData(time.time(), ANIMATION_START_ROLE)
item.setData(1, ANIMATION_STATE_ROLE)
self._animated_items.add(action_id)
self._animation_timer.start()
def on_context_menu(self, point):
"""Creates menu to force skip opening last workfile."""
index = self.view.indexAt(point)
if not index.isValid():
return
action_item = index.data(ACTION_ROLE)
if not self.model.is_application_action(action_item):
return
menu = QtWidgets.QMenu(self.view)
checkbox = QtWidgets.QCheckBox("Skip opening last workfile.",
menu)
if index.data(FORCE_NOT_OPEN_WORKFILE_ROLE):
checkbox.setChecked(True)
action_id = index.data(ACTION_ID_ROLE)
checkbox.stateChanged.connect(
lambda: self.on_checkbox_changed(checkbox.isChecked(),
action_id))
action = QtWidgets.QWidgetAction(menu)
action.setDefaultWidget(checkbox)
menu.addAction(action)
self._context_menu = menu
global_point = self.mapToGlobal(point)
menu.exec_(global_point)
self._context_menu = None
if self._discover_on_menu:
self._discover_on_menu = False
self.discover_actions()
def on_checkbox_changed(self, is_checked, action_id):
self.model.update_force_not_open_workfile_settings(is_checked,
action_id)
self.view.update()
if self._context_menu is not None:
self._context_menu.close()
def on_clicked(self, index):
if not index or not index.isValid():
return
is_group = index.data(GROUP_ROLE)
is_variant_group = index.data(VARIANT_GROUP_ROLE)
if not is_group and not is_variant_group:
action = index.data(ACTION_ROLE)
# Change data of application action
if issubclass(action, ApplicationAction):
if index.data(FORCE_NOT_OPEN_WORKFILE_ROLE):
action.data["start_last_workfile"] = False
else:
action.data.pop("start_last_workfile", None)
self._start_animation(index)
self.action_clicked.emit(action)
return
# Offset refresh timeout
self.project_handler.start_timer()
actions = index.data(ACTION_ROLE)
menu = QtWidgets.QMenu(self)
actions_mapping = {}
if is_variant_group:
for action in actions:
menu_action = QtWidgets.QAction(
lib.get_action_label(action)
)
menu.addAction(menu_action)
actions_mapping[menu_action] = action
else:
by_variant_label = collections.defaultdict(list)
orders = []
for action in actions:
# Label variants
label = getattr(action, "label", None)
label_variant = getattr(action, "label_variant", None)
if label_variant and not label:
label_variant = None
if not label_variant:
orders.append(action)
continue
if label not in orders:
orders.append(label)
by_variant_label[label].append(action)
for action_item in orders:
actions = by_variant_label.get(action_item)
if not actions:
action = action_item
elif len(actions) == 1:
action = actions[0]
else:
action = None
if action:
menu_action = QtWidgets.QAction(
lib.get_action_label(action)
)
menu.addAction(menu_action)
actions_mapping[menu_action] = action
continue
sub_menu = QtWidgets.QMenu(label, menu)
for action in actions:
menu_action = QtWidgets.QAction(
lib.get_action_label(action)
)
sub_menu.addAction(menu_action)
actions_mapping[menu_action] = action
menu.addMenu(sub_menu)
result = menu.exec_(QtGui.QCursor.pos())
if result:
action = actions_mapping[result]
self._start_animation(index)
self.action_clicked.emit(action)
class ActionHistory(QtWidgets.QPushButton):
trigger_history = QtCore.Signal(tuple)
def __init__(self, parent=None):
super(ActionHistory, self).__init__(parent=parent)
self.max_history = 15
self.setFixedWidth(25)
self.setFixedHeight(25)
self.setIcon(qtawesome.icon("fa.history", color="#CCCCCC"))
self.setIconSize(QtCore.QSize(15, 15))
self._history = []
self.clicked.connect(self.show_history)
def show_history(self):
# Show history popup
if not self._history:
return
widget = QtWidgets.QListWidget()
widget.setSelectionMode(widget.NoSelection)
widget.setStyleSheet("""
* {
font-family: "Courier New";
}
""")
largest_label_num_chars = 0
largest_action_label = max(len(x[0].label) for x in self._history)
action_session_role = QtCore.Qt.UserRole + 1
for action, session in reversed(self._history):
project = session.get("AVALON_PROJECT")
asset = session.get("AVALON_ASSET")
task = session.get("AVALON_TASK")
breadcrumb = " > ".join(x for x in [project, asset, task] if x)
m = "{{action:{0}}} | {{breadcrumb}}".format(largest_action_label)
label = m.format(action=action.label, breadcrumb=breadcrumb)
icon = lib.get_action_icon(action)
item = QtWidgets.QListWidgetItem(icon, label)
item.setData(action_session_role, (action, session))
largest_label_num_chars = max(largest_label_num_chars, len(label))
widget.addItem(item)
# Show history
dialog = QtWidgets.QDialog(parent=self)
dialog.setWindowTitle("Action History")
dialog.setWindowFlags(
QtCore.Qt.FramelessWindowHint | QtCore.Qt.Popup
)
dialog.setSizePolicy(
QtWidgets.QSizePolicy.Ignored,
QtWidgets.QSizePolicy.Ignored
)
layout = QtWidgets.QVBoxLayout(dialog)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(widget)
def on_clicked(index):
data = index.data(action_session_role)
self.trigger_history.emit(data)
dialog.close()
widget.clicked.connect(on_clicked)
# padding + icon + text
width = 40 + (largest_label_num_chars * 7)
entry_height = 21
height = entry_height * len(self._history)
point = QtGui.QCursor().pos()
dialog.setGeometry(
point.x() - width,
point.y() - height,
width,
height
)
dialog.exec_()
self.widget_popup = widget
def add_action(self, action, session):
key = (action, copy.deepcopy(session))
# Remove entry if already exists
if key in self._history:
self._history.remove(key)
self._history.append(key)
# Slice the end of the list if we exceed the max history
if len(self._history) > self.max_history:
self._history = self._history[-self.max_history:]
def clear_history(self):
self._history.clear()
class SlidePageWidget(QtWidgets.QStackedWidget):
"""Stacked widget that nicely slides between its pages"""
directions = {
"left": QtCore.QPoint(-1, 0),
"right": QtCore.QPoint(1, 0),
"up": QtCore.QPoint(0, 1),
"down": QtCore.QPoint(0, -1)
}
def slide_view(self, index, direction="right"):
if self.currentIndex() == index:
return
offset_direction = self.directions.get(direction)
if offset_direction is None:
print("BUG: invalid slide direction: {}".format(direction))
return
width = self.frameRect().width()
height = self.frameRect().height()
offset = QtCore.QPoint(
offset_direction.x() * width,
offset_direction.y() * height
)
new_page = self.widget(index)
new_page.setGeometry(0, 0, width, height)
curr_pos = new_page.pos()
new_page.move(curr_pos + offset)
new_page.show()
new_page.raise_()
current_page = self.currentWidget()
b_pos = QtCore.QByteArray(b"pos")
anim_old = QtCore.QPropertyAnimation(current_page, b_pos, self)
anim_old.setDuration(250)
anim_old.setStartValue(curr_pos)
anim_old.setEndValue(curr_pos - offset)
anim_old.setEasingCurve(QtCore.QEasingCurve.OutQuad)
anim_new = QtCore.QPropertyAnimation(new_page, b_pos, self)
anim_new.setDuration(250)
anim_new.setStartValue(curr_pos + offset)
anim_new.setEndValue(curr_pos)
anim_new.setEasingCurve(QtCore.QEasingCurve.OutQuad)
anim_group = QtCore.QParallelAnimationGroup(self)
anim_group.addAnimation(anim_old)
anim_group.addAnimation(anim_new)
def slide_finished():
self.setCurrentWidget(new_page)
anim_group.finished.connect(slide_finished)
anim_group.start()
```
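`SlidePageWidget` has no launcher-specific dependencies of its own, so it can be exercised in isolation. A hypothetical standalone sketch, assuming the import path matches the file location above and a Qt binding is available:
```python
# Standalone sketch of SlidePageWidget; pages and timings are placeholders.
import sys

from Qt import QtWidgets, QtCore

from openpype.tools.launcher.widgets import SlidePageWidget


def main():
    app = QtWidgets.QApplication(sys.argv)

    stack = SlidePageWidget()
    stack.resize(400, 300)
    stack.addWidget(QtWidgets.QLabel("Page one"))
    stack.addWidget(QtWidgets.QLabel("Page two"))
    stack.show()

    # Slide to the second page shortly after the window is shown.
    QtCore.QTimer.singleShot(1000, lambda: stack.slide_view(1, direction="left"))
    sys.exit(app.exec_())


if __name__ == "__main__":
    main()
```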
#### File: tools/mayalookassigner/commands.py
```python
from collections import defaultdict
import logging
import os
import maya.cmds as cmds
from openpype.hosts.maya.api import lib
from avalon import io, api
from .vray_proxies import get_alembic_ids_cache
log = logging.getLogger(__name__)
def get_workfile():
path = cmds.file(query=True, sceneName=True) or "untitled"
return os.path.basename(path)
def get_workfolder():
return os.path.dirname(cmds.file(query=True, sceneName=True))
def select(nodes):
cmds.select(nodes)
def get_namespace_from_node(node):
"""Get the namespace from the given node
Args:
node (str): name of the node
Returns:
namespace (str)
"""
parts = node.rsplit("|", 1)[-1].rsplit(":", 1)
return parts[0] if len(parts) > 1 else u":"
def list_descendents(nodes):
"""Include full descendant hierarchy of given nodes.
This is a workaround to cmds.listRelatives(allDescendents=True) because
this way correctly keeps children instance paths (see Maya documentation)
This fixes LKD-26: assignments not working as expected on instanced shapes.
Return:
list: List of children descendents of nodes
"""
result = []
while True:
nodes = cmds.listRelatives(nodes,
fullPath=True)
if nodes:
result.extend(nodes)
else:
return result
def get_selected_nodes():
"""Get information from current selection"""
selection = cmds.ls(selection=True, long=True)
hierarchy = list_descendents(selection)
return list(set(selection + hierarchy))
def get_all_asset_nodes():
"""Get all assets from the scene, container based
Returns:
list: list of dictionaries
"""
host = api.registered_host()
nodes = []
for container in host.ls():
# We are not interested in looks but assets!
if container["loader"] == "LookLoader":
continue
# Gather all information
container_name = container["objectName"]
nodes += cmds.sets(container_name, query=True, nodesOnly=True) or []
nodes = list(set(nodes))
return nodes
def create_asset_id_hash(nodes):
"""Create a hash based on cbId attribute value
Args:
nodes (list): a list of nodes
Returns:
dict
"""
node_id_hash = defaultdict(list)
for node in nodes:
# iterate over content of reference node
if cmds.nodeType(node) == "reference":
ref_hashes = create_asset_id_hash(
list(set(cmds.referenceQuery(node, nodes=True, dp=True))))
for asset_id, ref_nodes in ref_hashes.items():
node_id_hash[asset_id] += ref_nodes
elif cmds.pluginInfo('vrayformaya', query=True,
loaded=True) and cmds.nodeType(
node) == "VRayProxy":
path = cmds.getAttr("{}.fileName".format(node))
ids = get_alembic_ids_cache(path)
for k, _ in ids.items():
pid = k.split(":")[0]
if node not in node_id_hash[pid]:
node_id_hash[pid].append(node)
else:
value = lib.get_id(node)
if value is None:
continue
asset_id = value.split(":")[0]
node_id_hash[asset_id].append(node)
return dict(node_id_hash)
def create_items_from_nodes(nodes):
"""Create an item for the view based the container and content of it
It fetches the look document based on the asset ID found in the content.
The item will contain all important information for the tool to work.
If there is an asset ID which is not registered in the project's collection
it will log a warning message.
Args:
nodes (list): list of maya nodes
Returns:
list of dicts
"""
asset_view_items = []
id_hashes = create_asset_id_hash(nodes)
if not id_hashes:
log.warning("No id hashes")
return asset_view_items
for _id, id_nodes in id_hashes.items():
asset = io.find_one({"_id": io.ObjectId(_id)},
projection={"name": True})
# Skip if asset id is not found
if not asset:
log.warning("Id not found in the database, skipping '%s'." % _id)
log.warning("Nodes: %s" % id_nodes)
continue
# Collect available look subsets for this asset
looks = lib.list_looks(asset["_id"])
# Collect namespaces the asset is found in
namespaces = set()
for node in id_nodes:
namespace = get_namespace_from_node(node)
namespaces.add(namespace)
asset_view_items.append({"label": asset["name"],
"asset": asset,
"looks": looks,
"namespaces": namespaces})
return asset_view_items
def remove_unused_looks():
"""Removes all loaded looks for which none of the shaders are used.
This will cleanup all loaded "LookLoader" containers that are unused in
the current scene.
"""
host = api.registered_host()
unused = []
for container in host.ls():
if container['loader'] == "LookLoader":
members = cmds.sets(container['objectName'], query=True)
look_sets = cmds.ls(members, type="objectSet")
for look_set in look_sets:
# If the set is used then we consider this look *in use*
if cmds.sets(look_set, query=True):
break
else:
unused.append(container)
for container in unused:
log.info("Removing unused look container: %s", container['objectName'])
api.remove(container)
log.info("Finished removing unused looks. (see log for details)")
```
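Inside a Maya session these helpers chain naturally. A rough sketch of listing looks for the current selection; the import path is assumed from the file location above and each look document is assumed to expose a "name" field:
```python
# Rough sketch, to be run inside Maya with an active avalon session.
# It only chains the helpers defined above.
from openpype.tools.mayalookassigner import commands


def print_selected_looks():
    nodes = commands.get_selected_nodes()
    for item in commands.create_items_from_nodes(nodes):
        look_names = [look["name"] for look in item["looks"]]
        print("{}: {}".format(item["label"], ", ".join(look_names) or "<no looks>"))
```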
#### File: tools/mayalookassigner/views.py
```python
from Qt import QtWidgets, QtCore
DEFAULT_COLOR = "#fb9c15"
class View(QtWidgets.QTreeView):
data_changed = QtCore.Signal()
def __init__(self, parent=None):
super(View, self).__init__(parent=parent)
# view settings
self.setAlternatingRowColors(False)
self.setSortingEnabled(True)
self.setSelectionMode(self.ExtendedSelection)
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
def get_indices(self):
"""Get the selected rows"""
selection_model = self.selectionModel()
return selection_model.selectedRows()
def extend_to_children(self, indices):
"""Extend the indices to the children indices.
Top-level indices are extended to its children indices. Sub-items
are kept as is.
:param indices: The indices to extend.
:type indices: list
:return: The children indices
:rtype: list
"""
subitems = set()
for i in indices:
valid_parent = i.parent().isValid()
if valid_parent and i not in subitems:
subitems.add(i)
else:
# is top level node
model = i.model()
rows = model.rowCount(parent=i)
for row in range(rows):
child = model.index(row, 0, parent=i)
subitems.add(child)
return list(subitems)
```
#### File: tools/mayalookassigner/widgets.py
```python
import logging
from collections import defaultdict
from Qt import QtWidgets, QtCore
# TODO: expose this better in avalon core
from avalon.tools import lib
from avalon.tools.models import TreeModel
from .models import (
AssetModel,
LookModel
)
from . import commands
from . import views
from maya import cmds
MODELINDEX = QtCore.QModelIndex()
class AssetOutliner(QtWidgets.QWidget):
refreshed = QtCore.Signal()
selection_changed = QtCore.Signal()
def __init__(self, parent=None):
QtWidgets.QWidget.__init__(self, parent)
layout = QtWidgets.QVBoxLayout()
title = QtWidgets.QLabel("Assets")
title.setAlignment(QtCore.Qt.AlignCenter)
title.setStyleSheet("font-weight: bold; font-size: 12px")
model = AssetModel()
view = views.View()
view.setModel(model)
view.customContextMenuRequested.connect(self.right_mouse_menu)
view.setSortingEnabled(False)
view.setHeaderHidden(True)
view.setIndentation(10)
from_all_asset_btn = QtWidgets.QPushButton("Get All Assets")
from_selection_btn = QtWidgets.QPushButton("Get Assets From Selection")
layout.addWidget(title)
layout.addWidget(from_all_asset_btn)
layout.addWidget(from_selection_btn)
layout.addWidget(view)
# Build connections
from_selection_btn.clicked.connect(self.get_selected_assets)
from_all_asset_btn.clicked.connect(self.get_all_assets)
selection_model = view.selectionModel()
selection_model.selectionChanged.connect(self.selection_changed)
self.view = view
self.model = model
self.setLayout(layout)
self.log = logging.getLogger(__name__)
def clear(self):
self.model.clear()
# fix looks remaining visible when no items present after "refresh"
# todo: figure out why this workaround is needed.
self.selection_changed.emit()
def add_items(self, items):
"""Add new items to the outliner"""
self.model.add_items(items)
self.refreshed.emit()
def get_selected_items(self):
"""Get current selected items from view
Returns:
list: list of dictionaries
"""
selection_model = self.view.selectionModel()
return [row.data(TreeModel.ItemRole)
for row in selection_model.selectedRows(0)]
def get_all_assets(self):
"""Add all items from the current scene"""
items = []
with lib.preserve_expanded_rows(self.view):
with lib.preserve_selection(self.view):
self.clear()
nodes = commands.get_all_asset_nodes()
items = commands.create_items_from_nodes(nodes)
self.add_items(items)
return len(items) > 0
def get_selected_assets(self):
"""Add all selected items from the current scene"""
with lib.preserve_expanded_rows(self.view):
with lib.preserve_selection(self.view):
self.clear()
nodes = commands.get_selected_nodes()
items = commands.create_items_from_nodes(nodes)
self.add_items(items)
def get_nodes(self, selection=False):
"""Find the nodes in the current scene per asset."""
items = self.get_selected_items()
# Collect all nodes by hash (optimization)
if not selection:
nodes = cmds.ls(dag=True, long=True)
else:
nodes = commands.get_selected_nodes()
id_nodes = commands.create_asset_id_hash(nodes)
# Collect the asset item entries per asset
# and collect the namespaces we'd like to apply
assets = {}
asset_namespaces = defaultdict(set)
for item in items:
asset_id = str(item["asset"]["_id"])
asset_name = item["asset"]["name"]
asset_namespaces[asset_name].add(item.get("namespace"))
if asset_name in assets:
continue
assets[asset_name] = item
assets[asset_name]["nodes"] = id_nodes.get(asset_id, [])
# Filter nodes to namespace (if only namespaces were selected)
for asset_name in assets:
namespaces = asset_namespaces[asset_name]
# When None is present there should be no filtering
if None in namespaces:
continue
# Else only namespaces are selected and *not* the top entry so
# we should filter to only those namespaces.
nodes = assets[asset_name]["nodes"]
nodes = [node for node in nodes if
commands.get_namespace_from_node(node) in namespaces]
assets[asset_name]["nodes"] = nodes
return assets
def select_asset_from_items(self):
"""Select nodes from listed asset"""
items = self.get_nodes(selection=False)
nodes = []
for item in items.values():
nodes.extend(item["nodes"])
commands.select(nodes)
def right_mouse_menu(self, pos):
"""Build RMB menu for asset outliner"""
active = self.view.currentIndex() # index under mouse
active = active.sibling(active.row(), 0) # get first column
globalpos = self.view.viewport().mapToGlobal(pos)
menu = QtWidgets.QMenu(self.view)
# Direct assignment
apply_action = QtWidgets.QAction(menu, text="Select nodes")
apply_action.triggered.connect(self.select_asset_from_items)
if not active.isValid():
apply_action.setEnabled(False)
menu.addAction(apply_action)
menu.exec_(globalpos)
class LookOutliner(QtWidgets.QWidget):
menu_apply_action = QtCore.Signal()
def __init__(self, parent=None):
QtWidgets.QWidget.__init__(self, parent)
# look manager layout
layout = QtWidgets.QVBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(10)
# Looks from database
title = QtWidgets.QLabel("Looks")
title.setAlignment(QtCore.Qt.AlignCenter)
title.setStyleSheet("font-weight: bold; font-size: 12px")
title.setAlignment(QtCore.Qt.AlignCenter)
model = LookModel()
# Proxy for dynamic sorting
proxy = QtCore.QSortFilterProxyModel()
proxy.setSourceModel(model)
view = views.View()
view.setModel(proxy)
view.setMinimumHeight(180)
view.setToolTip("Use right mouse button menu for direct actions")
view.customContextMenuRequested.connect(self.right_mouse_menu)
view.sortByColumn(0, QtCore.Qt.AscendingOrder)
layout.addWidget(title)
layout.addWidget(view)
self.view = view
self.model = model
def clear(self):
self.model.clear()
def add_items(self, items):
self.model.add_items(items)
def get_selected_items(self):
"""Get current selected items from view
Returns:
list: list of dictionaries
"""
items = [i.data(TreeModel.ItemRole) for i in self.view.get_indices()]
return [item for item in items if item is not None]
def right_mouse_menu(self, pos):
"""Build RMB menu for look view"""
active = self.view.currentIndex() # index under mouse
active = active.sibling(active.row(), 0) # get first column
globalpos = self.view.viewport().mapToGlobal(pos)
if not active.isValid():
return
menu = QtWidgets.QMenu(self.view)
# Direct assignment
apply_action = QtWidgets.QAction(menu, text="Assign looks..")
apply_action.triggered.connect(self.menu_apply_action)
menu.addAction(apply_action)
menu.exec_(globalpos)
```
#### File: tests/lib/assert_classes.py
```python
class DBAssert:
@classmethod
def count_of_types(cls, dbcon, queried_type, expected, **kwargs):
"""Queries 'dbcon' and counts documents of type 'queried_type'
Args:
dbcon (AvalonMongoDB)
queried_type (str): type of document ("asset", "version"...)
expected (int): number of documents found
any number of additional keyword arguments
special handling of argument additional_args (dict)
with additional args like
{"context.subset": "XXX"}
"""
args = {"type": queried_type}
for key, val in kwargs.items():
if key == "additional_args":
args.update(val)
else:
args[key] = val
msg = None
no_of_docs = dbcon.count_documents(args)
if expected != no_of_docs:
msg = "Not expected no of versions. "\
"Expected {}, found {}".format(expected, no_of_docs)
args.pop("type")
detail_str = " "
if args:
detail_str = " with {}".format(args)
status = "successful"
if msg:
status = "failed"
print("Comparing count of {}{} {}".format(queried_type,
detail_str,
status))
return msg
``` |
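A hedged usage sketch of the helper above inside a pytest-style test; the `dbcon` fixture and the expected counts are placeholders:
```python
# Hypothetical pytest-style usage of DBAssert.count_of_types.
from tests.lib.assert_classes import DBAssert


def test_publish_created_expected_documents(dbcon):
    failures = []
    failures.append(DBAssert.count_of_types(dbcon, "version", 2))
    failures.append(
        DBAssert.count_of_types(
            dbcon,
            "representation",
            1,
            additional_args={"context.subset": "renderMain", "context.ext": "exr"},
        )
    )
    # count_of_types returns a message only on mismatch, otherwise None
    assert all(failure is None for failure in failures)
```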
{
"source": "jojobrogess/script.advanced.settings.editor",
"score": 2
} |
#### File: resources/lib/utils.py
```python
import xbmc,xbmcvfs
import xbmcgui
import xbmcaddon
__addon_id__ = 'script.advanced.settings.editor'
__Addon = xbmcaddon.Addon(__addon_id__)
def data_dir():
return __Addon.getAddonInfo('profile')
def addon_dir():
return __Addon.getAddonInfo('path')
def log(message,loglevel=xbmc.LOGDEBUG):
xbmc.log(__addon_id__ + "-" + __Addon.getAddonInfo('version') + ": " + message, level=loglevel)
def showNotification(message):
xbmcgui.Dialog().notification(getString(30000), message, time=4000, icon=xbmcvfs.translatePath(__Addon.getAddonInfo('path') + "/icon.png"))
def setSetting(name,value):
__Addon.setSetting(name,value)
def getSetting(name):
return __Addon.getSetting(name)
def getString(string_id):
return __Addon.getLocalizedString(string_id)
``` |
{
"source": "jojochuang/determined",
"score": 2
} |
#### File: aws/deployment_types/simple.py
```python
import boto3
from determined_deploy.aws import aws, constants
from determined_deploy.aws.deployment_types import base
class Simple(base.DeterminedDeployment):
ssh_command = "SSH to master Instance: ssh -i <pem-file> ubuntu@{master_ip}"
det_ui = (
"Configure the Determined CLI: export DET_MASTER={master_ip}\n"
"View the Determined UI: http://{master_ip}:8080\n"
"View Logs at: https://{region}.console.aws.amazon.com/cloudwatch/home?"
"region={region}#logStream:group={log_group}"
)
template = "simple.yaml"
template_parameter_keys = [
constants.cloudformation.KEYPAIR,
constants.cloudformation.MASTER_INSTANCE_TYPE,
constants.cloudformation.AGENT_INSTANCE_TYPE,
constants.cloudformation.INBOUND_CIDR,
constants.cloudformation.VERSION,
constants.cloudformation.DB_PASSWORD,
constants.cloudformation.MAX_IDLE_AGENT_PERIOD,
constants.cloudformation.MAX_AGENT_STARTING_PERIOD,
constants.cloudformation.MAX_DYNAMIC_AGENTS,
]
def deploy(self) -> None:
cfn_parameters = self.consolidate_parameters()
self.before_deploy_print()
with open(self.template_path) as f:
template = f.read()
aws.deploy_stack(
stack_name=self.parameters[constants.cloudformation.CLUSTER_ID],
template_body=template,
keypair=self.parameters[constants.cloudformation.KEYPAIR],
boto3_session=self.parameters[constants.cloudformation.BOTO3_SESSION],
parameters=cfn_parameters,
)
self.print_results(
self.parameters[constants.cloudformation.CLUSTER_ID],
self.parameters[constants.cloudformation.BOTO3_SESSION],
)
def print_results(self, stack_name: str, boto3_session: boto3.session.Session) -> None:
output = aws.get_output(stack_name, boto3_session)
master_ip = output[constants.cloudformation.DET_ADDRESS]
region = output[constants.cloudformation.REGION]
log_group = output[constants.cloudformation.LOG_GROUP]
ui_command = self.det_ui.format(master_ip=master_ip, region=region, log_group=log_group)
print(ui_command)
ssh_command = self.ssh_command.format(master_ip=master_ip)
print(ssh_command)
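# Rough usage sketch (assumptions: the base DeterminedDeployment constructor accepts a
# parameters dict and provides template_path/consolidate_parameters; the cluster id,
# keypair name, and region below are made up):
#
# session = boto3.Session(region_name="us-west-2")
# Simple({
#     constants.cloudformation.CLUSTER_ID: "det-demo",
#     constants.cloudformation.KEYPAIR: "my-keypair",
#     constants.cloudformation.BOTO3_SESSION: session,
# }).deploy()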
```
#### File: tests/command/test_run.py
```python
import re
import subprocess
import tempfile
import time
from pathlib import Path
from typing import Any, List
import docker
import docker.errors
import pytest
import yaml
from tests import command as cmd
from tests import config as conf
from tests.filetree import FileTree
@pytest.mark.slow # type: ignore
@pytest.mark.e2e_cpu # type: ignore
def test_cold_and_warm_start(tmp_path: Path) -> None:
for _ in range(3):
subprocess.check_call(
["det", "-m", conf.make_master_url(), "cmd", "run", "echo", "hello", "world"]
)
def _run_and_return_real_exit_status(args: List[str], **kwargs: Any) -> None:
"""
    Wraps subprocess.check_output and raises CalledProcessError if the output
    reports a failed exit status.
"""
# TODO(#2903): remove this once exit status are propagated through cli
output = subprocess.check_output(args, **kwargs)
if re.search(b"finished command \\S+ task failed with exit code", output):
raise subprocess.CalledProcessError(1, " ".join(args), output=output)
def _run_and_verify_exit_code_zero(args: List[str], **kwargs: Any) -> None:
"""Wraps subprocess.check_output and verifies a successful exit code."""
# TODO(#2903): remove this once exit status are propagated through cli
output = subprocess.check_output(args, **kwargs)
assert re.search(b"command exited successfully", output) is not None
def _run_and_verify_failure(args: List[str], message: str, **kwargs: Any) -> None:
output = subprocess.check_output(args, **kwargs)
if re.search(message.encode(), output):
raise subprocess.CalledProcessError(1, " ".join(args), output=output)
@pytest.mark.e2e_cpu # type: ignore
def test_exit_code_reporting() -> None:
"""
Confirm that failed commands are not reported as successful, and confirm
that our test infrastructure is valid.
"""
with pytest.raises(AssertionError):
_run_and_verify_exit_code_zero(["det", "-m", conf.make_master_url(), "cmd", "run", "false"])
@pytest.mark.slow # type: ignore
@pytest.mark.e2e_cpu # type: ignore
def test_basic_workflows(tmp_path: Path) -> None:
with FileTree(tmp_path, {"hello.py": "print('hello world')"}) as tree:
_run_and_verify_exit_code_zero(
[
"det",
"-m",
conf.make_master_url(),
"cmd",
"run",
"--context",
str(tree),
"python",
"hello.py",
]
)
with FileTree(tmp_path, {"hello.py": "print('hello world')"}) as tree:
link = tree.joinpath("hello-link.py")
link.symlink_to(tree.joinpath("hello.py"))
_run_and_verify_exit_code_zero(
[
"det",
"-m",
conf.make_master_url(),
"cmd",
"run",
"--context",
str(tree),
"python",
"hello-link.py",
]
)
_run_and_verify_exit_code_zero(
["det", "-m", conf.make_master_url(), "cmd", "run", "python", "-c", "print('hello world')"]
)
with pytest.raises(subprocess.CalledProcessError):
_run_and_return_real_exit_status(
[
"det",
"-m",
conf.make_master_url(),
"cmd",
"run",
"--context",
"non-existent-path-here",
"python",
"hello.py",
]
)
@pytest.mark.slow # type: ignore
@pytest.mark.e2e_cpu # type: ignore
def test_large_uploads(tmp_path: Path) -> None:
with pytest.raises(subprocess.CalledProcessError):
with FileTree(tmp_path, {"hello.py": "print('hello world')"}) as tree:
large = tree.joinpath("large-file.bin")
large.touch()
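            # Seek far past the end and write a single byte to create a sparse
            # ~120 MB file; the oversized context is expected to make the upload fail.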
f = large.open(mode="w")
f.seek(1024 * 1024 * 120)
f.write("\0")
f.close()
_run_and_return_real_exit_status(
[
"det",
"-m",
conf.make_master_url(),
"cmd",
"run",
"--context",
str(tree),
"python",
"hello.py",
]
)
with FileTree(tmp_path, {"hello.py": "print('hello world')", ".detignore": "*.bin"}) as tree:
large = tree.joinpath("large-file.bin")
large.touch()
f = large.open(mode="w")
f.seek(1024 * 1024 * 120)
f.write("\0")
f.close()
_run_and_verify_exit_code_zero(
[
"det",
"-m",
conf.make_master_url(),
"cmd",
"run",
"--context",
str(tree),
"python",
"hello.py",
]
)
@pytest.mark.slow # type: ignore
@pytest.mark.e2e_cpu # type: ignore
def test_configs(tmp_path: Path) -> None:
with FileTree(
tmp_path,
{
"config.yaml": """
resources:
slots: 1
environment:
environment_variables:
- TEST=TEST
"""
},
) as tree:
config_path = tree.joinpath("config.yaml")
_run_and_verify_exit_code_zero(
[
"det",
"-m",
conf.make_master_url(),
"cmd",
"run",
"--config-file",
str(config_path),
"python",
"-c",
"""
import os
import sys
test = os.environ["TEST"]
if test != "TEST":
print("{} != {}".format(test, "TEST"))
sys.exit(1)
""",
]
)
@pytest.mark.slow # type: ignore
@pytest.mark.e2e_cpu # type: ignore
def test_singleton_command() -> None:
_run_and_verify_exit_code_zero(
["det", "-m", conf.make_master_url(), "cmd", "run", "echo hello && echo world"]
)
@pytest.mark.slow # type: ignore
@pytest.mark.e2e_cpu # type: ignore
def test_absolute_bind_mount(tmp_path: Path) -> None:
_run_and_verify_exit_code_zero(
[
"det",
"-m",
conf.make_master_url(),
"cmd",
"run",
"--volume",
"/bin:/foo-bar",
"ls",
"/foo-bar",
]
)
with FileTree(
tmp_path,
{
"config.yaml": """
bind_mounts:
- host_path: /bin
container_path: /foo-bar
"""
},
) as tree:
config_path = tree.joinpath("config.yaml")
_run_and_verify_exit_code_zero(
[
"det",
"-m",
conf.make_master_url(),
"cmd",
"run",
"--volume",
"/bin:/foo-bar2",
"--config-file",
str(config_path),
"ls",
"/foo-bar",
"/foo-bar2",
]
)
@pytest.mark.slow # type: ignore
@pytest.mark.e2e_cpu # type: ignore
def test_relative_bind_mount(tmp_path: Path) -> None:
_run_and_verify_exit_code_zero(
[
"det",
"-m",
conf.make_master_url(),
"cmd",
"run",
"--volume",
"/bin:foo-bar",
"ls",
"foo-bar",
]
)
with FileTree(
tmp_path,
{
"config.yaml": """
bind_mounts:
- host_path: /bin
container_path: foo-bar
"""
},
) as tree:
config_path = tree.joinpath("config.yaml")
_run_and_verify_exit_code_zero(
[
"det",
"-m",
conf.make_master_url(),
"cmd",
"run",
"--volume",
"/bin:foo-bar2",
"--config-file",
str(config_path),
"ls",
"foo-bar",
"foo-bar2",
]
)
@pytest.mark.slow # type: ignore
@pytest.mark.e2e_cpu # type: ignore
def test_cmd_kill() -> None:
"""Start a command, extract its task ID, and then kill it."""
with cmd.interactive_command(
"command", "run", "echo hello world; echo hello world; sleep infinity"
) as command:
assert command.task_id is not None
for line in command.stdout:
if "hello world" in line:
assert cmd.get_num_running_commands() == 1
break
@pytest.mark.slow # type: ignore
@pytest.mark.e2e_cpu # type: ignore
def test_image_pull_after_remove() -> None:
"""
Remove pulled image and verify that it will be pulled again with auth.
"""
client = docker.from_env()
try:
client.images.remove("alpine:3.10")
except docker.errors.ImageNotFound:
pass
_run_and_verify_exit_code_zero(
[
"det",
"-m",
conf.make_master_url(),
"cmd",
"run",
"--config",
"environment.image=alpine:3.10",
"sleep 3; echo hello world",
]
)
@pytest.mark.slow # type: ignore
@pytest.mark.e2e_cpu # type: ignore
def test_killed_pending_command_terminates() -> None:
# Specify an outrageous number of slots to be sure that it can't be scheduled.
with cmd.interactive_command(
"cmd", "run", "--config", "resources.slots=1048576", "sleep infinity"
) as command:
for _ in range(10):
assert cmd.get_command(command.task_id)["state"] == "PENDING"
time.sleep(1)
# The command is killed when the context is exited; now it should reach TERMINATED soon.
for _ in range(5):
if cmd.get_command(command.task_id)["state"] == "TERMINATED":
break
time.sleep(1)
else:
state = cmd.get_command(command.task_id)["state"]
raise AssertionError(f"Task was in state {state} rather than TERMINATED")
@pytest.mark.e2e_gpu # type: ignore
def test_k8_mount(using_k8s: bool) -> None:
if not using_k8s:
pytest.skip("only need to run test on kubernetes")
mount_path = "/ci/"
with pytest.raises(subprocess.CalledProcessError):
_run_and_verify_failure(
["det", "-m", conf.make_master_url(), "cmd", "run", f"sleep 3; touch {mount_path}"],
"No such file or directory",
)
with tempfile.NamedTemporaryFile() as tf:
config = {
"environment": {
"pod_spec": {
"spec": {
"containers": [
{"volumeMounts": [{"name": "temp1", "mountPath": mount_path}]}
],
"volumes": [{"name": "temp1", "emptyDir": {}}],
}
}
}
}
with open(tf.name, "w") as f:
yaml.dump(config, f)
_run_and_verify_exit_code_zero(
[
"det",
"-m",
conf.make_master_url(),
"cmd",
"run",
"--config-file",
tf.name,
f"sleep 3; touch {mount_path}",
]
)
```
#### File: fixtures/pytorch-rng-saver/model_def.py
```python
import random
from typing import Any, Dict, Tuple
import numpy as np
import torch
from torch import nn
from determined import pytorch
class OnesDataset(torch.utils.data.Dataset):
def __len__(self) -> int:
return 64
def __getitem__(self, index: int) -> Tuple:
return torch.Tensor([float(1)])
class NoopPytorchTrial(pytorch.PyTorchTrial):
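    # This fixture trains a trivial linear model on constant data and reports values
    # drawn from the numpy, random, torch CPU, and torch GPU RNGs, apparently so the
    # surrounding tests can verify that RNG state is saved and restored across
    # checkpoints (hence the fixture name "pytorch-rng-saver").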
def __init__(self, context):
self.context = context
model = nn.Linear(1, 1, False)
model.weight.data.fill_(0)
self.model = context.wrap_model(model)
opt = torch.optim.SGD(self.model.parameters(), 0.1)
self.opt = context.wrap_optimizer(opt)
def train_batch(
self, batch: pytorch.TorchData, epoch_idx: int, batch_idx: int
) -> Dict[str, torch.Tensor]:
w_real = self.model.weight.data[0]
loss = torch.nn.MSELoss()(self.model(batch), batch)
self.context.backward(loss)
self.context.step_optimizer(self.opt)
return {"loss": loss, "w_real": w_real}
def evaluate_batch(self, batch: pytorch.TorchData) -> Dict[str, Any]:
val = batch[0]
np_rand = np.random.randint(1, 1000)
rand_rand = random.randint(0, 1000)
torch_rand = torch.randint(1000, (1,))
gpu_rand = torch.randint(1000, (1,), device=self.context.device)
return {
"validation_error": val,
"np_rand": np_rand,
"rand_rand": rand_rand,
"torch_rand": torch_rand,
"gpu_rand": gpu_rand,
}
def build_training_data_loader(self):
return pytorch.DataLoader(OnesDataset(), batch_size=self.context.get_per_slot_batch_size())
def build_validation_data_loader(self):
return pytorch.DataLoader(OnesDataset(), batch_size=self.context.get_per_slot_batch_size())
```
#### File: e2e_tests/tests/test_system.py
```python
import json
import operator
import os
import subprocess
import tempfile
import time
from typing import Dict, Set
import botocore.exceptions
import numpy as np
import pytest
import yaml
from determined.experimental import Determined, ModelSortBy
from determined_common import check, storage
from tests import config as conf
from tests import experiment as exp
from tests.fixtures.metric_maker.metric_maker import structure_equal, structure_to_metrics
@pytest.mark.e2e_cpu # type: ignore
def test_trial_error() -> None:
exp.run_failure_test(
conf.fixtures_path("trial_error/const.yaml"),
conf.fixtures_path("trial_error"),
"NotImplementedError",
)
@pytest.mark.e2e_cpu # type: ignore
def test_invalid_experiment() -> None:
completed_process = exp.maybe_create_experiment(
conf.fixtures_path("invalid_experiment/const.yaml"), conf.official_examples_path("mnist_tf")
)
assert completed_process.returncode != 0
@pytest.mark.e2e_cpu # type: ignore
def test_metric_gathering() -> None:
"""
Confirm that metrics are gathered from the trial the way that we expect.
"""
experiment_id = exp.run_basic_test(
conf.fixtures_path("metric_maker/const.yaml"), conf.fixtures_path("metric_maker"), 1
)
trials = exp.experiment_trials(experiment_id)
assert len(trials) == 1
# Read the structure of the metrics directly from the config file
config = conf.load_config(conf.fixtures_path("metric_maker/const.yaml"))
base_value = config["hyperparameters"]["starting_base_value"]
gain_per_batch = config["hyperparameters"]["gain_per_batch"]
training_structure = config["hyperparameters"]["training_structure"]["val"]
validation_structure = config["hyperparameters"]["validation_structure"]["val"]
scheduling_unit = 100
# Check training metrics.
full_trial_metrics = exp.trial_metrics(trials[0]["id"])
for step in full_trial_metrics["steps"]:
metrics = step["metrics"]
assert metrics["num_inputs"] == scheduling_unit
actual = metrics["batch_metrics"]
assert len(actual) == scheduling_unit
first_base_value = base_value + (step["id"] - 1) * scheduling_unit
batch_values = first_base_value + gain_per_batch * np.arange(scheduling_unit)
expected = [structure_to_metrics(value, training_structure) for value in batch_values]
assert structure_equal(expected, actual)
# Check validation metrics.
for step in trials[0]["steps"]:
validation = step["validation"]
metrics = validation["metrics"]
actual = metrics["validation_metrics"]
value = base_value + step["id"] * scheduling_unit
expected = structure_to_metrics(value, validation_structure)
assert structure_equal(expected, actual)
@pytest.mark.e2e_gpu # type: ignore
def test_gc_checkpoints_s3(secrets: Dict[str, str]) -> None:
config = exp.s3_checkpoint_config(secrets)
run_gc_checkpoints_test(config)
@pytest.mark.e2e_cpu # type: ignore
def test_gc_checkpoints_lfs() -> None:
run_gc_checkpoints_test(exp.shared_fs_checkpoint_config())
def run_gc_checkpoints_test(checkpoint_storage: Dict[str, str]) -> None:
fixtures = [
(
conf.fixtures_path("no_op/gc_checkpoints_decreasing.yaml"),
{"COMPLETED": {8, 9, 10}, "DELETED": {1, 2, 3, 4, 5, 6, 7}},
),
(
conf.fixtures_path("no_op/gc_checkpoints_increasing.yaml"),
{"COMPLETED": {1, 2, 3, 9, 10}, "DELETED": {4, 5, 6, 7, 8}},
),
]
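    # Each fixture pairs an experiment config with the checkpoint states expected
    # after garbage collection, as a mapping of state -> set of step ids.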
all_checkpoints = []
for base_conf_path, result in fixtures:
config = conf.load_config(str(base_conf_path))
config["checkpoint_storage"].update(checkpoint_storage)
with tempfile.NamedTemporaryFile() as tf:
with open(tf.name, "w") as f:
yaml.dump(config, f)
experiment_id = exp.create_experiment(tf.name, conf.fixtures_path("no_op"))
exp.wait_for_experiment_state(experiment_id, "COMPLETED")
# Checkpoints are not marked as deleted until gc_checkpoint task starts.
retries = 5
for retry in range(retries):
trials = exp.experiment_trials(experiment_id)
assert len(trials) == 1
checkpoints = sorted(
(step["checkpoint"] for step in trials[0]["steps"]),
key=operator.itemgetter("step_id"),
)
assert len(checkpoints) == 10
by_state = {} # type: Dict[str, Set[int]]
for checkpoint in checkpoints:
by_state.setdefault(checkpoint["state"], set()).add(checkpoint["step_id"])
if by_state == result:
all_checkpoints.append((config, checkpoints))
break
if retry + 1 == retries:
assert by_state == result
time.sleep(1)
# Check that the actual checkpoint storage (for shared_fs) reflects the
# deletions. We want to wait for the GC containers to exit, so check
# repeatedly with a timeout.
max_checks = 30
for i in range(max_checks):
time.sleep(1)
try:
for config, checkpoints in all_checkpoints:
checkpoint_config = config["checkpoint_storage"]
if checkpoint_config["type"] == "shared_fs":
deleted_exception = check.CheckFailedError
elif checkpoint_config["type"] == "s3":
deleted_exception = botocore.exceptions.ClientError
else:
raise NotImplementedError(
f'unsupported storage type {checkpoint_config["type"]}'
)
storage_manager = storage.build(checkpoint_config, container_path=None)
for checkpoint in checkpoints:
metadata = storage.StorageMetadata.from_json(checkpoint)
if checkpoint["state"] == "COMPLETED":
with storage_manager.restore_path(metadata):
pass
elif checkpoint["state"] == "DELETED":
try:
with storage_manager.restore_path(metadata):
raise AssertionError("checkpoint not deleted")
except deleted_exception:
pass
except AssertionError:
if i == max_checks - 1:
raise
else:
break
@pytest.mark.e2e_cpu # type: ignore
def test_experiment_delete() -> None:
subprocess.check_call(["det", "-m", conf.make_master_url(), "user", "whoami"])
experiment_id = exp.run_basic_test(
conf.fixtures_path("no_op/single.yaml"), conf.fixtures_path("no_op"), 1
)
subprocess.check_call(
["det", "-m", conf.make_master_url(), "experiment", "delete", str(experiment_id), "--yes"],
env={**os.environ, "DET_ADMIN": "1"},
)
# "det experiment describe" call should fail, because the
# experiment is no longer in the database.
with pytest.raises(subprocess.CalledProcessError):
subprocess.check_call(
["det", "-m", conf.make_master_url(), "experiment", "describe", str(experiment_id)]
)
@pytest.mark.e2e_cpu # type: ignore
def test_experiment_archive_unarchive() -> None:
experiment_id = exp.create_experiment(
conf.fixtures_path("no_op/single.yaml"), conf.fixtures_path("no_op"), ["--paused"]
)
describe_args = [
"det",
"-m",
conf.make_master_url(),
"experiment",
"describe",
"--json",
str(experiment_id),
]
# Check that the experiment is initially unarchived.
infos = json.loads(subprocess.check_output(describe_args))
assert len(infos) == 1
assert not infos[0]["archived"]
# Check that archiving a non-terminal experiment fails, then terminate it.
with pytest.raises(subprocess.CalledProcessError):
subprocess.check_call(
["det", "-m", conf.make_master_url(), "experiment", "archive", str(experiment_id)]
)
subprocess.check_call(
["det", "-m", conf.make_master_url(), "experiment", "cancel", str(experiment_id)]
)
# Check that we can archive and unarchive the experiment and see the expected effects.
subprocess.check_call(
["det", "-m", conf.make_master_url(), "experiment", "archive", str(experiment_id)]
)
infos = json.loads(subprocess.check_output(describe_args))
assert len(infos) == 1
assert infos[0]["archived"]
subprocess.check_call(
["det", "-m", conf.make_master_url(), "experiment", "unarchive", str(experiment_id)]
)
infos = json.loads(subprocess.check_output(describe_args))
assert len(infos) == 1
assert not infos[0]["archived"]
@pytest.mark.e2e_cpu # type: ignore
def test_create_test_mode() -> None:
# test-mode should succeed with a valid experiment.
command = [
"det",
"-m",
conf.make_master_url(),
"experiment",
"create",
"--test-mode",
conf.fixtures_path("mnist_pytorch/adaptive_short.yaml"),
conf.official_examples_path("trial/mnist_pytorch"),
]
output = subprocess.check_output(command, universal_newlines=True)
assert "Model definition test succeeded" in output
# test-mode should fail when an error is introduced into the trial
# implementation.
command = [
"det",
"-m",
conf.make_master_url(),
"experiment",
"create",
"--test-mode",
conf.fixtures_path("trial_error/const.yaml"),
conf.fixtures_path("trial_error"),
]
with pytest.raises(subprocess.CalledProcessError):
subprocess.check_call(command)
@pytest.mark.e2e_cpu # type: ignore
def test_trial_logs() -> None:
experiment_id = exp.run_basic_test(
conf.fixtures_path("no_op/single.yaml"), conf.fixtures_path("no_op"), 1
)
trial_id = exp.experiment_trials(experiment_id)[0]["id"]
subprocess.check_call(["det", "-m", conf.make_master_url(), "trial", "logs", str(trial_id)])
subprocess.check_call(
["det", "-m", conf.make_master_url(), "trial", "logs", "--head", "10", str(trial_id)],
)
subprocess.check_call(
["det", "-m", conf.make_master_url(), "trial", "logs", "--tail", "10", str(trial_id)],
)
@pytest.mark.e2e_cpu # type: ignore
def test_labels() -> None:
experiment_id = exp.create_experiment(
conf.fixtures_path("no_op/single-one-short-step.yaml"), conf.fixtures_path("no_op"), None
)
label = "__det_test_dummy_label__"
# Add a label and check that it shows up.
subprocess.check_call(
["det", "-m", conf.make_master_url(), "e", "label", "add", str(experiment_id), label]
)
output = subprocess.check_output(
["det", "-m", conf.make_master_url(), "e", "describe", str(experiment_id)]
).decode()
assert label in output
# Remove the label and check that it doesn't show up.
subprocess.check_call(
["det", "-m", conf.make_master_url(), "e", "label", "remove", str(experiment_id), label]
)
output = subprocess.check_output(
["det", "-m", conf.make_master_url(), "e", "describe", str(experiment_id)]
).decode()
assert label not in output
@pytest.mark.e2e_cpu # type: ignore
def test_end_to_end_adaptive() -> None:
exp_id = exp.run_basic_test(
conf.fixtures_path("mnist_pytorch/adaptive_short.yaml"),
conf.official_examples_path("trial/mnist_pytorch"),
None,
)
    # Check that validation accuracy looks sane (more than 93% on MNIST).
trials = exp.experiment_trials(exp_id)
best = None
for trial in trials:
assert len(trial["steps"])
last_step = trial["steps"][-1]
accuracy = last_step["validation"]["metrics"]["validation_metrics"]["accuracy"]
if not best or accuracy > best:
best = accuracy
assert best is not None
assert best > 0.93
# Check that ExperimentReference returns a sorted order of top checkpoints
# without gaps. The top 2 checkpoints should be the first 2 of the top k
# checkpoints if sorting is stable.
d = Determined(conf.make_master_url())
exp_ref = d.get_experiment(exp_id)
top_2 = exp_ref.top_n_checkpoints(2)
top_k = exp_ref.top_n_checkpoints(len(trials))
top_2_uuids = [c.uuid for c in top_2]
top_k_uuids = [c.uuid for c in top_k]
assert top_2_uuids == top_k_uuids[:2]
# Check that metrics are truly in sorted order.
metrics = [c.validation["metrics"]["validationMetrics"]["validation_loss"] for c in top_k]
assert metrics == sorted(metrics)
# Check that changing smaller is better reverses the checkpoint ordering.
top_k_reversed = exp_ref.top_n_checkpoints(
len(trials), sort_by="validation_loss", smaller_is_better=False
)
top_k_reversed_uuids = [c.uuid for c in top_k_reversed]
assert top_k_uuids == top_k_reversed_uuids[::-1]
checkpoint = top_k[0]
checkpoint.add_metadata({"testing": "metadata"})
db_check = d.get_checkpoint(checkpoint.uuid)
# Make sure the checkpoint metadata is correct and correctly saved to the db.
assert checkpoint.metadata == {"testing": "metadata"}
assert checkpoint.metadata == db_check.metadata
checkpoint.add_metadata({"some_key": "some_value"})
db_check = d.get_checkpoint(checkpoint.uuid)
assert checkpoint.metadata == {"testing": "metadata", "some_key": "some_value"}
assert checkpoint.metadata == db_check.metadata
checkpoint.add_metadata({"testing": "override"})
db_check = d.get_checkpoint(checkpoint.uuid)
assert checkpoint.metadata == {"testing": "override", "some_key": "some_value"}
assert checkpoint.metadata == db_check.metadata
checkpoint.remove_metadata(["some_key"])
db_check = d.get_checkpoint(checkpoint.uuid)
assert checkpoint.metadata == {"testing": "override"}
assert checkpoint.metadata == db_check.metadata
@pytest.mark.e2e_cpu # type: ignore
def test_model_registry() -> None:
exp_id = exp.run_basic_test(
conf.fixtures_path("mnist_pytorch/const-pytorch11.yaml"),
conf.official_examples_path("trial/mnist_pytorch"),
None,
)
d = Determined(conf.make_master_url())
mnist = d.create_model("mnist", "simple computer vision model")
assert mnist.metadata == {}
mnist.add_metadata({"testing": "metadata"})
db_model = d.get_model("mnist")
# Make sure the model metadata is correct and correctly saved to the db.
assert mnist.metadata == db_model.metadata
assert mnist.metadata == {"testing": "metadata"}
mnist.add_metadata({"some_key": "some_value"})
db_model = d.get_model("mnist")
assert mnist.metadata == db_model.metadata
assert mnist.metadata == {"testing": "metadata", "some_key": "some_value"}
mnist.add_metadata({"testing": "override"})
db_model = d.get_model("mnist")
assert mnist.metadata == db_model.metadata
assert mnist.metadata == {"testing": "override", "some_key": "some_value"}
mnist.remove_metadata(["some_key"])
db_model = d.get_model("mnist")
assert mnist.metadata == db_model.metadata
assert mnist.metadata == {"testing": "override"}
checkpoint = d.get_experiment(exp_id).top_checkpoint()
model_version = mnist.register_version(checkpoint.uuid)
assert model_version.model_version == 1
latest_version = mnist.get_version()
assert latest_version is not None
assert latest_version.uuid == checkpoint.uuid
d.create_model("transformer", "all you need is attention")
d.create_model("object-detection", "a bounding box model")
models = d.get_models(sort_by=ModelSortBy.NAME)
assert [m.name for m in models] == ["mnist", "object-detection", "transformer"]
@pytest.mark.e2e_cpu # type: ignore
def test_log_null_bytes() -> None:
config_obj = conf.load_config(conf.fixtures_path("no_op/single.yaml"))
config_obj["hyperparameters"]["write_null"] = True
config_obj["max_restarts"] = 0
config_obj["searcher"]["max_length"] = {"batches": 1}
experiment_id = exp.run_basic_test_with_temp_config(config_obj, conf.fixtures_path("no_op"), 1)
trials = exp.experiment_trials(experiment_id)
assert len(trials) == 1
logs = exp.trial_logs(trials[0]["id"])
assert len(logs) > 0
@pytest.mark.e2e_cpu # type: ignore
def test_graceful_trial_termination() -> None:
config_obj = conf.load_config(conf.fixtures_path("no_op/grid-graceful-trial-termination.yaml"))
exp.run_basic_test_with_temp_config(config_obj, conf.fixtures_path("no_op"), 2)
@pytest.mark.e2e_gpu # type: ignore
def test_s3_no_creds(secrets: Dict[str, str]) -> None:
pytest.skip("Temporarily skipping this until we find a more secure way of testing this.")
config = conf.load_config(conf.official_examples_path("trial/mnist_pytorch/const.yaml"))
config["checkpoint_storage"] = exp.s3_checkpoint_config_no_creds()
config.setdefault("environment", {})
config["environment"].setdefault("environment_variables", [])
config["environment"]["environment_variables"] += [
f"AWS_ACCESS_KEY_ID={secrets['INTEGRATIONS_S3_ACCESS_KEY']}",
f"AWS_SECRET_ACCESS_KEY={secrets['INTEGRATIONS_S3_SECRET_KEY']}",
]
exp.run_basic_test_with_temp_config(
config, conf.official_examples_path("trial/mnist_pytorch"), 1
)
@pytest.mark.parallel # type: ignore
def test_pytorch_parallel() -> None:
config = conf.load_config(conf.official_examples_path("trial/mnist_pytorch/const.yaml"))
config = conf.set_slots_per_trial(config, 8)
config = conf.set_native_parallel(config, False)
config = conf.set_max_length(config, {"batches": 200})
config = conf.set_tensor_auto_tuning(config, True)
config = conf.set_perform_initial_validation(config, True)
exp_id = exp.run_basic_test_with_temp_config(
config, conf.official_examples_path("trial/mnist_pytorch"), 1, has_zeroth_step=True
)
exp.assert_performed_initial_validation(exp_id)
@pytest.mark.e2e_cpu # type: ignore
def test_fail_on_first_validation() -> None:
error_log = "failed on first validation"
config_obj = conf.load_config(conf.fixtures_path("no_op/single.yaml"))
config_obj["hyperparameters"]["fail_on_first_validation"] = error_log
exp.run_failure_test_with_temp_config(
config_obj,
conf.fixtures_path("no_op"),
error_log,
)
@pytest.mark.e2e_cpu # type: ignore
def test_fail_on_checkpoint_save() -> None:
error_log = "failed on checkpoint save"
config_obj = conf.load_config(conf.fixtures_path("no_op/single.yaml"))
config_obj["hyperparameters"]["fail_on_chechpoint_save"] = error_log
exp.run_failure_test_with_temp_config(
config_obj,
conf.fixtures_path("no_op"),
error_log,
)
@pytest.mark.e2e_cpu # type: ignore
def test_perform_initial_validation() -> None:
config = conf.load_config(conf.fixtures_path("no_op/single.yaml"))
config = conf.set_max_length(config, {"batches": 1})
config = conf.set_perform_initial_validation(config, True)
exp_id = exp.run_basic_test_with_temp_config(
config, conf.fixtures_path("no_op"), 1, has_zeroth_step=True
)
exp.assert_performed_initial_validation(exp_id)
```
#### File: trial/rsws_nas/model_def.py
```python
import logging
import math
import os
import pickle as pkl
from typing import Dict, Sequence, Union
import numpy as np
import randomNAS_files.genotypes as genotypes
import randomNAS_files.data_util as data_util
import torch
from randomNAS_files.model import RNNModel
from torch import nn
from torch.optim.lr_scheduler import _LRScheduler
from determined.pytorch import (
ClipGradsL2Norm,
DataLoader,
PyTorchCallback,
PyTorchTrial,
PyTorchTrialContext,
LRScheduler,
)
import data
TorchData = Union[Dict[str, torch.Tensor], Sequence[torch.Tensor], torch.Tensor]
PTB_NUMBER_TOKENS = 10000
class MyLR(_LRScheduler):
def __init__(self, optimizer, hparams, last_epoch=-1):
"""
        Custom LR scheduler that scales the learning rate with the current sequence length
"""
self.hparams = hparams
self.seq_len = hparams["bptt"]
self.start_lr = hparams["learning_rate"]
super(MyLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
ret = list(self.base_lrs)
self.base_lrs = [
self.start_lr * self.seq_len / self.hparams["bptt"]
for base_lr in self.base_lrs
]
return ret
def set_seq_len(self, seq_len):
self.seq_len = seq_len
class NASModel(PyTorchTrial):
def __init__(self, context: PyTorchTrialContext) -> None:
self.context = context
# Create a unique download directory for each rank so they don't overwrite each other.
self.download_directory = f"/tmp/data-rank{self.context.distributed.get_rank()}"
self.data_downloaded = False
# Initialize the model
arch_to_use = self.context.get_hparam("arch_to_use")
if hasattr(genotypes, arch_to_use):
self.arch = getattr(genotypes, arch_to_use)
logging.info("using genotype.{0}".format(self.arch))
else:
self.arch = self.sample_arch()
logging.info("using random arch.{0}".format(self.arch))
model = RNNModel(
PTB_NUMBER_TOKENS,
self.context.get_hparam("emsize"),
self.context.get_hparam("nhid"),
self.context.get_hparam("nhidlast"),
self.context.get_hparam("dropout"),
self.context.get_hparam("dropouth"),
self.context.get_hparam("dropoutx"),
self.context.get_hparam("dropouti"),
self.context.get_hparam("dropoute"),
genotype=self.arch,
)
        # Supports stacking multiple cells; with the default depth of 1, this loop
        # body never runs.
for _ in range(
self.context.get_hparam("depth") - 1
): # minus 1 because 1 gets auto added by the main model
new_cell = model.cell_cls(
self.context.get_hparam("emsize"),
self.context.get_hparam("nhid"),
self.context.get_hparam("dropouth"),
self.context.get_hparam("dropoutx"),
self.arch,
self.context.get_hparam("init_op"),
)
model.rnns.append(new_cell)
model.batch_size = self.context.get_per_slot_batch_size()
self.model = self.context.wrap_model(model)
self.optimizer = self.context.wrap_optimizer(
torch.optim.SGD(
self.model.parameters(),
lr=self.context.get_hparam("learning_rate"),
weight_decay=self.context.get_hparam("wdecay"),
)
)
myLR = MyLR(self.optimizer, self.context.get_hparams())
step_mode = LRScheduler.StepMode.MANUAL_STEP
if self.context.get_hparam("step_every_batch"):
step_mode = LRScheduler.StepMode.STEP_EVERY_BATCH
elif self.context.get_hparam("step_every_epoch"):
step_mode = LRScheduler.StepMode.STEP_EVERY_EPOCH
self.myLR = self.context.wrap_lr_scheduler(myLR, step_mode=step_mode)
def sample_arch(self):
"""
        Samples a random recurrent cell architecture.
        Returns: a genotypes.Genotype describing the sampled cell
"""
n_nodes = genotypes.STEPS
n_ops = len(genotypes.PRIMITIVES)
arch = []
for i in range(n_nodes):
op = np.random.choice(range(1, n_ops))
node_in = np.random.choice(range(i + 1))
arch.append((genotypes.PRIMITIVES[op], node_in))
concat = range(1, 9)
genotype = genotypes.Genotype(recurrent=arch, concat=concat)
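        # Illustrative example of a sampled genotype (operation names depend on
        # genotypes.PRIMITIVES and are made up here):
        # Genotype(recurrent=[("tanh", 0), ("relu", 1), ...], concat=range(1, 9))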
return genotype
def update_and_step_lr(self, seq_len):
"""
Updates and steps the learning rate
"""
self.myLR.set_seq_len(seq_len)
self.myLR.step()
def train_batch(self, batch: TorchData, epoch_idx: int, batch_idx: int):
"""
Trains the provided batch.
Returns: Dictionary of the calculated Metrics
"""
features, labels = batch
self.update_and_step_lr(features.shape[0])
# set hidden if it's the first run
if batch_idx == 0:
self.hidden = self.model.init_hidden(self.context.get_per_slot_batch_size())
        # detach to prevent backpropagating too far
for i in range(len(self.hidden)):
self.hidden[i] = self.hidden[i].detach()
log_prob, self.hidden, rnn_hs, dropped_rnn_hs = self.model(
features, self.hidden, return_h=True
)
loss = nn.functional.nll_loss(
log_prob.view(-1, log_prob.size(2)), labels.contiguous().view(-1)
)
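        # The two penalty terms below appear to be the activation regularization
        # (alpha * mean squared dropped hidden state) and temporal activation
        # regularization (beta * mean squared difference between consecutive hidden
        # states) used in AWD-LSTM-style language models.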
if self.context.get_hparam("alpha") > 0:
loss = loss + sum(
self.context.get_hparam("alpha") * dropped_rnn_h.pow(2).mean()
for dropped_rnn_h in dropped_rnn_hs[-1:]
)
loss = (
loss
+ sum(
self.context.get_hparam("beta") * (rnn_h[1:] - rnn_h[:-1]).pow(2).mean()
for rnn_h in rnn_hs[-1:]
)
) * 1.0
try:
perplexity = math.exp(loss / len(features))
except Exception as e:
logging.error("Calculating perplexity failed with error: %s", e)
perplexity = 100000
if math.isnan(perplexity):
perplexity = 100000
self.context.backward(loss)
self.context.step_optimizer(
self.optimizer,
clip_grads=lambda params: torch.nn.utils.clip_grad_norm_(
params, self.context.get_hparam("clip_gradients_l2_norm")
),
)
return {"loss": loss, "perplexity": perplexity}
def evaluate_full_dataset(self, data_loader: torch.utils.data.DataLoader):
"""
        Determines whether multiple architectures should be evaluated and dispatches to the appropriate path
Returns: the results of the evaluated dataset or the best result from multiple evaluations
"""
eval_same_arch = self.context.get_hparam("eval_same_arch")
if eval_same_arch: # evaluate the same architecture
res = self.evaluate_dataset(data_loader, self.arch)
else:
res = self.evaluate_multiple_archs(data_loader)
return res
def evaluate_multiple_archs(self, data_loader):
"""
Helper that randomly selects architectures and evaluates their performance
This function is only called if eval_same_arch is False and should not be used for
the primary NAS search
"""
num_archs_to_eval = self.context.get_hparam("num_archs_to_eval")
sample_vals = []
for _ in range(num_archs_to_eval):
arch = self.sample_arch()
res = self.evaluate_dataset(data_loader, arch)
perplexity = res["perplexity"]
loss = res["loss"]
sample_vals.append((arch, perplexity, loss))
sample_vals = sorted(sample_vals, key=lambda x: x[1])
logging.info("best arch found: ", sample_vals[0])
self.save_archs(sample_vals)
return {"loss": sample_vals[0][2], "perplexity": sample_vals[0][1]}
def evaluate_dataset(self, data_loader, arch, split=None):
"""
Evaluates the full dataset against the given arch
"""
hidden = self.model.init_hidden(self.context.get_hparam("eval_batch_size"))
model = self.set_model_arch(arch, self.model)
total_loss = 0
num_samples_seen = 0
for i, batch in enumerate(data_loader):
features, targets = batch
features, targets = features.cuda(), targets.cuda()
log_prob, hidden = model(features, hidden)
loss = nn.functional.nll_loss(
log_prob.view(-1, log_prob.size(2)), targets
).data
total_loss += loss * len(features)
for i in range(len(hidden)):
hidden[i] = hidden[i].detach()
num_samples_seen += features.shape[0]
try:
perplexity = math.exp(total_loss.item() / num_samples_seen)
except Exception as e:
logging.error("Calculating perplexity failed with error: %s", e)
perplexity = 100000
if math.isnan(perplexity):
perplexity = 100000
if math.isnan(loss):
loss = 100000
return {"loss": total_loss, "perplexity": perplexity}
def save_archs(self, data):
out_file = self.context.get_data_config().get(
"out_file"
) + self.context.get_hparam("seed")
with open(os.path.join(out_file), "wb+") as f:
pkl.dump(data, f)
def set_model_arch(self, arch, model):
for rnn in model.rnns:
rnn.genotype = arch
return model
def build_training_data_loader(self) -> DataLoader:
if not self.data_downloaded:
data.download_data(self.download_directory)
self.data_downloaded = True
corpus = data_util.Corpus(self.download_directory)
train_dataset = data.PTBData(
corpus.train,
self.context.get_hparam("seq_len"),
self.context.get_per_slot_batch_size(),
self.context.get_hparam("bptt"),
self.context.get_hparam("max_seq_length_delta"),
)
return DataLoader(
train_dataset,
batch_sampler=data.BatchSamp(
train_dataset,
self.context.get_hparam("bptt"),
self.context.get_hparam("max_seq_length_delta"),
),
collate_fn=data.PadSequence(),
)
def build_validation_data_loader(self) -> DataLoader:
if not self.data_downloaded:
data.download_data(self.download_directory)
self.data_downloaded = True
corpus = data_util.Corpus(self.download_directory)
test_dataset = data.PTBData(
corpus.valid,
self.context.get_hparam("seq_len"),
self.context.get_hparam("eval_batch_size"),
self.context.get_hparam("bptt"),
self.context.get_hparam("max_seq_length_delta"),
)
return DataLoader(
test_dataset,
batch_sampler=data.BatchSamp(
test_dataset,
self.context.get_hparam("bptt"),
self.context.get_hparam("max_seq_length_delta"),
valid=True,
),
collate_fn=data.PadSequence(),
)
``` |