| prompt | completion |
| --- | --- |
| large_string (lengths 72 to 9.34k) | large_string (lengths 0 to 7.61k) |
<|file_name|>agent3.py<|end_file_name|><|fim▁begin|>import random
from datetime import datetime
from multiprocessing import Pool
import numpy as np
from scipy.optimize import minimize
def worker_func(args):
self = args[0]
m = args[1]
k = args[2]
r = args[3]
return (self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) ** 2
def optimized_func_i_der(args):
"""
The derivative of the optimized function with respect to the
ith component of the vector r
"""
self = args[0]
r = args[1]
i = args[2]
result = 0
M = len(self.data)
for m in range(M):
Nm = self.data[m].shape[0] - 1
for k in range(Nm + 1):
result += ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
return result
def worker_func_der(args):
self = args[0]
m = args[1]
k = args[2]
r = args[3]
i = args[4]
return ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
class Agent:
num_features = 22
def __init__(self):
self.lf = 0.2 # Learning factor lambda
self.data = [] # The features' values for all the games
self.rewards = [] # Reward values for moving from 1 state to the next
self.rt = np.array([])
self.max_iter = 50
def set_learning_factor(self, learning_factor):
assert(learning_factor >= 0 and learning_factor <= 1)
self.lf = learning_factor
def set_rt(self, rt):
assert(len(rt) == self.num_features)
self.rt = rt
def set_iter(self, max_iter):
self.max_iter = max_iter
def set_data(self, data):
self.data = []
self.rewards = []
for game in data:
game = np.vstack((game, np.zeros(self.num_features + 1)))
self.data.append(game[:, :-1])
self.rewards.append(game[:, -1:])
def eval_func(self, m, k, r):
"""
The evaluation function value for the set of weights (vector) r
at the mth game and kth board state """
return np.dot(r, self.data[m][k])
def <|fim_middle|>(self, m, k, r, i):
"""
Find the derivative of the evaluation function with respect
to the ith component of the vector r
"""
return self.data[m][k][i]
def get_reward(self, m, s):
"""
Get reward for moving from state s to state (s + 1)
"""
return self.rewards[m][s + 1][0]
def temporal_diff(self, m, s):
"""
The temporal difference value for state s to state (s+1) in the mth game
"""
return (self.get_reward(m, s) + self.eval_func(m, s + 1, self.rt) -
self.eval_func(m, s, self.rt))
def temporal_diff_sum(self, m, k):
Nm = self.data[m].shape[0] - 1
result = 0
for s in range(k, Nm):
result += self.lf**(s - k) * self.temporal_diff(m, s)
return result
def optimized_func(self, r):
result = 0
M = len(self.data)
pool = Pool(processes=4)
for m in range(M):
Nm = self.data[m].shape[0] - 1
k_args = range(Nm + 1)
self_args = [self] * len(k_args)
m_args = [m] * len(k_args)
r_args = [r] * len(k_args)
result += sum(pool.map(worker_func,
zip(self_args, m_args, k_args, r_args)))
return result
def optimized_func_i_der(self, r, i):
"""
The derivative of the optimized function with respect to the
ith component of the vector r
"""
result = 0
M = len(self.data)
for m in range(M):
Nm = self.data[m].shape[0] - 1
for k in range(Nm + 1):
result += ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
return result
def optimized_func_der(self, r):
p = Pool(processes=4)
self_args = [self] * len(r)
i_args = range(len(r))
r_args = [r] * len(r)
return np.array(p.map(optimized_func_i_der,
zip(self_args, r_args, i_args)))
def callback(self, r):
print("Iteration %d completed at %s" %
(self.cur_iter, datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
self.cur_iter += 1
def compute_next_rt(self):
print("Start computing at %s" %
(datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
self.cur_iter = 1
r0 = np.array([random.randint(-10, 10)
for i in range(self.num_features)])
res = minimize(self.optimized_func, r0, method='BFGS',
jac=self.optimized_func_der,
options={'maxiter': self.max_iter, 'disp': True},
callback=self.callback)
return res.x
<|fim▁end|> | eval_func_der |
<|file_name|>agent3.py<|end_file_name|><|fim▁begin|>import random
from datetime import datetime
from multiprocessing import Pool
import numpy as np
from scipy.optimize import minimize
def worker_func(args):
self = args[0]
m = args[1]
k = args[2]
r = args[3]
return (self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) ** 2
def optimized_func_i_der(args):
"""
The derivative of the optimized function with respect to the
ith component of the vector r
"""
self = args[0]
r = args[1]
i = args[2]
result = 0
M = len(self.data)
for m in range(M):
Nm = self.data[m].shape[0] - 1
for k in range(Nm + 1):
result += ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
return result
def worker_func_der(args):
self = args[0]
m = args[1]
k = args[2]
r = args[3]
i = args[4]
return ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
class Agent:
num_features = 22
def __init__(self):
self.lf = 0.2 # Learning factor lambda
self.data = [] # The features' values for all the games
self.rewards = [] # Reward values for moving from 1 state to the next
self.rt = np.array([])
self.max_iter = 50
def set_learning_factor(self, learning_factor):
assert(learning_factor >= 0 and learning_factor <= 1)
self.lf = learning_factor
def set_rt(self, rt):
assert(len(rt) == self.num_features)
self.rt = rt
def set_iter(self, max_iter):
self.max_iter = max_iter
def set_data(self, data):
self.data = []
self.rewards = []
for game in data:
game = np.vstack((game, np.zeros(self.num_features + 1)))
self.data.append(game[:, :-1])
self.rewards.append(game[:, -1:])
def eval_func(self, m, k, r):
"""
The evaluation function value for the set of weights (vector) r
at the mth game and kth board state """
return np.dot(r, self.data[m][k])
def eval_func_der(self, m, k, r, i):
"""
Find the derivative of the evaluation function with respect
to the ith component of the vector r
"""
return self.data[m][k][i]
def <|fim_middle|>(self, m, s):
"""
Get reward for moving from state s to state (s + 1)
"""
return self.rewards[m][s + 1][0]
def temporal_diff(self, m, s):
"""
The temporal difference value for state s to state (s+1) in the mth game
"""
return (self.get_reward(m, s) + self.eval_func(m, s + 1, self.rt) -
self.eval_func(m, s, self.rt))
def temporal_diff_sum(self, m, k):
Nm = self.data[m].shape[0] - 1
result = 0
for s in range(k, Nm):
result += self.lf**(s - k) * self.temporal_diff(m, s)
return result
def optimized_func(self, r):
result = 0
M = len(self.data)
pool = Pool(processes=4)
for m in range(M):
Nm = self.data[m].shape[0] - 1
k_args = range(Nm + 1)
self_args = [self] * len(k_args)
m_args = [m] * len(k_args)
r_args = [r] * len(k_args)
result += sum(pool.map(worker_func,
zip(self_args, m_args, k_args, r_args)))
return result
def optimized_func_i_der(self, r, i):
"""
The derivative of the optimized function with respect to the
ith component of the vector r
"""
result = 0
M = len(self.data)
for m in range(M):
Nm = self.data[m].shape[0] - 1
for k in range(Nm + 1):
result += ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
return result
def optimized_func_der(self, r):
p = Pool(processes=4)
self_args = [self] * len(r)
i_args = range(len(r))
r_args = [r] * len(r)
return np.array(p.map(optimized_func_i_der,
zip(self_args, r_args, i_args)))
def callback(self, r):
print("Iteration %d completed at %s" %
(self.cur_iter, datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
self.cur_iter += 1
def compute_next_rt(self):
print("Start computing at %s" %
(datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
self.cur_iter = 1
r0 = np.array([random.randint(-10, 10)
for i in range(self.num_features)])
res = minimize(self.optimized_func, r0, method='BFGS',
jac=self.optimized_func_der,
options={'maxiter': self.max_iter, 'disp': True},
callback=self.callback)
return res.x
<|fim▁end|> | get_reward |
<|file_name|>agent3.py<|end_file_name|><|fim▁begin|>import random
from datetime import datetime
from multiprocessing import Pool
import numpy as np
from scipy.optimize import minimize
def worker_func(args):
self = args[0]
m = args[1]
k = args[2]
r = args[3]
return (self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) ** 2
def optimized_func_i_der(args):
"""
The derivative of the optimized function with respect to the
ith component of the vector r
"""
self = args[0]
r = args[1]
i = args[2]
result = 0
M = len(self.data)
for m in range(M):
Nm = self.data[m].shape[0] - 1
for k in range(Nm + 1):
result += ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
return result
def worker_func_der(args):
self = args[0]
m = args[1]
k = args[2]
r = args[3]
i = args[4]
return ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
class Agent:
num_features = 22
def __init__(self):
self.lf = 0.2 # Learning factor lambda
self.data = [] # The features' values for all the games
self.rewards = [] # Reward values for moving from 1 state to the next
self.rt = np.array([])
self.max_iter = 50
def set_learning_factor(self, learning_factor):
assert(learning_factor >= 0 and learning_factor <= 1)
self.lf = learning_factor
def set_rt(self, rt):
assert(len(rt) == self.num_features)
self.rt = rt
def set_iter(self, max_iter):
self.max_iter = max_iter
def set_data(self, data):
self.data = []
self.rewards = []
for game in data:
game = np.vstack((game, np.zeros(self.num_features + 1)))
self.data.append(game[:, :-1])
self.rewards.append(game[:, -1:])
def eval_func(self, m, k, r):
"""
The evaluation function value for the set of weights (vector) r
at the mth game and kth board state """
return np.dot(r, self.data[m][k])
def eval_func_der(self, m, k, r, i):
"""
Find the derivative of the evaluation function with respect
to the ith component of the vector r
"""
return self.data[m][k][i]
def get_reward(self, m, s):
"""
Get reward for moving from state s to state (s + 1)
"""
return self.rewards[m][s + 1][0]
def <|fim_middle|>(self, m, s):
"""
The temporal difference value for state s to state (s+1) in the mth game
"""
return (self.get_reward(m, s) + self.eval_func(m, s + 1, self.rt) -
self.eval_func(m, s, self.rt))
def temporal_diff_sum(self, m, k):
Nm = self.data[m].shape[0] - 1
result = 0
for s in range(k, Nm):
result += self.lf**(s - k) * self.temporal_diff(m, s)
return result
def optimized_func(self, r):
result = 0
M = len(self.data)
pool = Pool(processes=4)
for m in range(M):
Nm = self.data[m].shape[0] - 1
k_args = range(Nm + 1)
self_args = [self] * len(k_args)
m_args = [m] * len(k_args)
r_args = [r] * len(k_args)
result += sum(pool.map(worker_func,
zip(self_args, m_args, k_args, r_args)))
return result
def optimized_func_i_der(self, r, i):
"""
The derivative of the optimized function with respect to the
ith component of the vector r
"""
result = 0
M = len(self.data)
for m in range(M):
Nm = self.data[m].shape[0] - 1
for k in range(Nm + 1):
result += ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
return result
def optimized_func_der(self, r):
p = Pool(processes=4)
self_args = [self] * len(r)
i_args = range(len(r))
r_args = [r] * len(r)
return np.array(p.map(optimized_func_i_der,
zip(self_args, r_args, i_args)))
def callback(self, r):
print("Iteration %d completed at %s" %
(self.cur_iter, datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
self.cur_iter += 1
def compute_next_rt(self):
print("Start computing at %s" %
(datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
self.cur_iter = 1
r0 = np.array([random.randint(-10, 10)
for i in range(self.num_features)])
res = minimize(self.optimized_func, r0, method='BFGS',
jac=self.optimized_func_der,
options={'maxiter': self.max_iter, 'disp': True},
callback=self.callback)
return res.x
<|fim▁end|> | temporal_diff |
<|file_name|>agent3.py<|end_file_name|><|fim▁begin|>import random
from datetime import datetime
from multiprocessing import Pool
import numpy as np
from scipy.optimize import minimize
def worker_func(args):
self = args[0]
m = args[1]
k = args[2]
r = args[3]
return (self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) ** 2
def optimized_func_i_der(args):
"""
The derivative of the optimized function with respect to the
ith component of the vector r
"""
self = args[0]
r = args[1]
i = args[2]
result = 0
M = len(self.data)
for m in range(M):
Nm = self.data[m].shape[0] - 1
for k in range(Nm + 1):
result += ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
return result
def worker_func_der(args):
self = args[0]
m = args[1]
k = args[2]
r = args[3]
i = args[4]
return ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
class Agent:
num_features = 22
def __init__(self):
self.lf = 0.2 # Learning factor lambda
self.data = [] # The features' values for all the games
self.rewards = [] # Reward values for moving from 1 state to the next
self.rt = np.array([])
self.max_iter = 50
def set_learning_factor(self, learning_factor):
assert(learning_factor >= 0 and learning_factor <= 1)
self.lf = learning_factor
def set_rt(self, rt):
assert(len(rt) == self.num_features)
self.rt = rt
def set_iter(self, max_iter):
self.max_iter = max_iter
def set_data(self, data):
self.data = []
self.rewards = []
for game in data:
game = np.vstack((game, np.zeros(self.num_features + 1)))
self.data.append(game[:, :-1])
self.rewards.append(game[:, -1:])
def eval_func(self, m, k, r):
"""
The evaluation function value for the set of weights (vector) r
at the mth game and kth board state """
return np.dot(r, self.data[m][k])
def eval_func_der(self, m, k, r, i):
"""
Find the derivative of the evaluation function with respect
to the ith component of the vector r
"""
return self.data[m][k][i]
def get_reward(self, m, s):
"""
Get reward for moving from state s to state (s + 1)
"""
return self.rewards[m][s + 1][0]
def temporal_diff(self, m, s):
"""
The temporal difference value for state s to state (s+1) in the mth game
"""
return (self.get_reward(m, s) + self.eval_func(m, s + 1, self.rt) -
self.eval_func(m, s, self.rt))
def <|fim_middle|>(self, m, k):
Nm = self.data[m].shape[0] - 1
result = 0
for s in range(k, Nm):
result += self.lf**(s - k) * self.temporal_diff(m, s)
return result
def optimized_func(self, r):
result = 0
M = len(self.data)
pool = Pool(processes=4)
for m in range(M):
Nm = self.data[m].shape[0] - 1
k_args = range(Nm + 1)
self_args = [self] * len(k_args)
m_args = [m] * len(k_args)
r_args = [r] * len(k_args)
result += sum(pool.map(worker_func,
zip(self_args, m_args, k_args, r_args)))
return result
def optimized_func_i_der(self, r, i):
"""
The derivative of the optimized function with respect to the
ith component of the vector r
"""
result = 0
M = len(self.data)
for m in range(M):
Nm = self.data[m].shape[0] - 1
for k in range(Nm + 1):
result += ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
return result
def optimized_func_der(self, r):
p = Pool(processes=4)
self_args = [self] * len(r)
i_args = range(len(r))
r_args = [r] * len(r)
return np.array(p.map(optimized_func_i_der,
zip(self_args, r_args, i_args)))
def callback(self, r):
print("Iteration %d completed at %s" %
(self.cur_iter, datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
self.cur_iter += 1
def compute_next_rt(self):
print("Start computing at %s" %
(datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
self.cur_iter = 1
r0 = np.array([random.randint(-10, 10)
for i in range(self.num_features)])
res = minimize(self.optimized_func, r0, method='BFGS',
jac=self.optimized_func_der,
options={'maxiter': self.max_iter, 'disp': True},
callback=self.callback)
return res.x
<|fim▁end|> | temporal_diff_sum |
<|file_name|>agent3.py<|end_file_name|><|fim▁begin|>import random
from datetime import datetime
from multiprocessing import Pool
import numpy as np
from scipy.optimize import minimize
def worker_func(args):
self = args[0]
m = args[1]
k = args[2]
r = args[3]
return (self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) ** 2
def optimized_func_i_der(args):
"""
The derivative of the optimized function with respect to the
ith component of the vector r
"""
self = args[0]
r = args[1]
i = args[2]
result = 0
M = len(self.data)
for m in range(M):
Nm = self.data[m].shape[0] - 1
for k in range(Nm + 1):
result += ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
return result
def worker_func_der(args):
self = args[0]
m = args[1]
k = args[2]
r = args[3]
i = args[4]
return ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
class Agent:
num_features = 22
def __init__(self):
self.lf = 0.2 # Learning factor lambda
self.data = [] # The features' values for all the games
self.rewards = [] # Reward values for moving from 1 state to the next
self.rt = np.array([])
self.max_iter = 50
def set_learning_factor(self, learning_factor):
assert(learning_factor >= 0 and learning_factor <= 1)
self.lf = learning_factor
def set_rt(self, rt):
assert(len(rt) == self.num_features)
self.rt = rt
def set_iter(self, max_iter):
self.max_iter = max_iter
def set_data(self, data):
self.data = []
self.rewards = []
for game in data:
game = np.vstack((game, np.zeros(self.num_features + 1)))
self.data.append(game[:, :-1])
self.rewards.append(game[:, -1:])
def eval_func(self, m, k, r):
"""
The evaluation function value for the set of weights (vector) r
at the mth game and kth board state """
return np.dot(r, self.data[m][k])
def eval_func_der(self, m, k, r, i):
"""
Find the derivative of the evaluation function with respect
to the ith component of the vector r
"""
return self.data[m][k][i]
def get_reward(self, m, s):
"""
Get reward for moving from state s to state (s + 1)
"""
return self.rewards[m][s + 1][0]
def temporal_diff(self, m, s):
"""
The temporal difference value for state s to state (s+1) in the mth game
"""
return (self.get_reward(m, s) + self.eval_func(m, s + 1, self.rt) -
self.eval_func(m, s, self.rt))
def temporal_diff_sum(self, m, k):
Nm = self.data[m].shape[0] - 1
result = 0
for s in range(k, Nm):
result += self.lf**(s - k) * self.temporal_diff(m, s)
return result
def <|fim_middle|>(self, r):
result = 0
M = len(self.data)
pool = Pool(processes=4)
for m in range(M):
Nm = self.data[m].shape[0] - 1
k_args = range(Nm + 1)
self_args = [self] * len(k_args)
m_args = [m] * len(k_args)
r_args = [r] * len(k_args)
result += sum(pool.map(worker_func,
zip(self_args, m_args, k_args, r_args)))
return result
def optimized_func_i_der(self, r, i):
"""
The derivative of the optimized function with respect to the
ith component of the vector r
"""
result = 0
M = len(self.data)
for m in range(M):
Nm = self.data[m].shape[0] - 1
for k in range(Nm + 1):
result += ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
return result
def optimized_func_der(self, r):
p = Pool(processes=4)
self_args = [self] * len(r)
i_args = range(len(r))
r_args = [r] * len(r)
return np.array(p.map(optimized_func_i_der,
zip(self_args, r_args, i_args)))
def callback(self, r):
print("Iteration %d completed at %s" %
(self.cur_iter, datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
self.cur_iter += 1
def compute_next_rt(self):
print("Start computing at %s" %
(datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
self.cur_iter = 1
r0 = np.array([random.randint(-10, 10)
for i in range(self.num_features)])
res = minimize(self.optimized_func, r0, method='BFGS',
jac=self.optimized_func_der,
options={'maxiter': self.max_iter, 'disp': True},
callback=self.callback)
return res.x
<|fim▁end|> | optimized_func |
<|file_name|>agent3.py<|end_file_name|><|fim▁begin|>import random
from datetime import datetime
from multiprocessing import Pool
import numpy as np
from scipy.optimize import minimize
def worker_func(args):
self = args[0]
m = args[1]
k = args[2]
r = args[3]
return (self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) ** 2
def optimized_func_i_der(args):
"""
The derivative of the optimized function with respect to the
ith component of the vector r
"""
self = args[0]
r = args[1]
i = args[2]
result = 0
M = len(self.data)
for m in range(M):
Nm = self.data[m].shape[0] - 1
for k in range(Nm + 1):
result += ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
return result
def worker_func_der(args):
self = args[0]
m = args[1]
k = args[2]
r = args[3]
i = args[4]
return ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
class Agent:
num_features = 22
def __init__(self):
self.lf = 0.2 # Learning factor lambda
self.data = [] # The features' values for all the games
self.rewards = [] # Reward values for moving from 1 state to the next
self.rt = np.array([])
self.max_iter = 50
def set_learning_factor(self, learning_factor):
assert(learning_factor >= 0 and learning_factor <= 1)
self.lf = learning_factor
def set_rt(self, rt):
assert(len(rt) == self.num_features)
self.rt = rt
def set_iter(self, max_iter):
self.max_iter = max_iter
def set_data(self, data):
self.data = []
self.rewards = []
for game in data:
game = np.vstack((game, np.zeros(self.num_features + 1)))
self.data.append(game[:, :-1])
self.rewards.append(game[:, -1:])
def eval_func(self, m, k, r):
"""
The evaluation function value for the set of weights (vector) r
at the mth game and kth board state """
return np.dot(r, self.data[m][k])
def eval_func_der(self, m, k, r, i):
"""
Find the derivative of the evaluation function with respect
to the ith component of the vector r
"""
return self.data[m][k][i]
def get_reward(self, m, s):
"""
Get reward for moving from state s to state (s + 1)
"""
return self.rewards[m][s + 1][0]
def temporal_diff(self, m, s):
"""
The temporal difference value for state s to state (s+1) in the mth game
"""
return (self.get_reward(m, s) + self.eval_func(m, s + 1, self.rt) -
self.eval_func(m, s, self.rt))
def temporal_diff_sum(self, m, k):
Nm = self.data[m].shape[0] - 1
result = 0
for s in range(k, Nm):
result += self.lf**(s - k) * self.temporal_diff(m, s)
return result
def optimized_func(self, r):
result = 0
M = len(self.data)
pool = Pool(processes=4)
for m in range(M):
Nm = self.data[m].shape[0] - 1
k_args = range(Nm + 1)
self_args = [self] * len(k_args)
m_args = [m] * len(k_args)
r_args = [r] * len(k_args)
result += sum(pool.map(worker_func,
zip(self_args, m_args, k_args, r_args)))
return result
def <|fim_middle|>(self, r, i):
"""
The derivative of the optimized function with respect to the
ith component of the vector r
"""
result = 0
M = len(self.data)
for m in range(M):
Nm = self.data[m].shape[0] - 1
for k in range(Nm + 1):
result += ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
return result
def optimized_func_der(self, r):
p = Pool(processes=4)
self_args = [self] * len(r)
i_args = range(len(r))
r_args = [r] * len(r)
return np.array(p.map(optimized_func_i_der,
zip(self_args, r_args, i_args)))
def callback(self, r):
print("Iteration %d completed at %s" %
(self.cur_iter, datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
self.cur_iter += 1
def compute_next_rt(self):
print("Start computing at %s" %
(datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
self.cur_iter = 1
r0 = np.array([random.randint(-10, 10)
for i in range(self.num_features)])
res = minimize(self.optimized_func, r0, method='BFGS',
jac=self.optimized_func_der,
options={'maxiter': self.max_iter, 'disp': True},
callback=self.callback)
return res.x
<|fim▁end|> | optimized_func_i_der |
<|file_name|>agent3.py<|end_file_name|><|fim▁begin|>import random
from datetime import datetime
from multiprocessing import Pool
import numpy as np
from scipy.optimize import minimize
def worker_func(args):
self = args[0]
m = args[1]
k = args[2]
r = args[3]
return (self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) ** 2
def optimized_func_i_der(args):
"""
The derivative of the optimized function with respect to the
ith component of the vector r
"""
self = args[0]
r = args[1]
i = args[2]
result = 0
M = len(self.data)
for m in range(M):
Nm = self.data[m].shape[0] - 1
for k in range(Nm + 1):
result += ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
return result
def worker_func_der(args):
self = args[0]
m = args[1]
k = args[2]
r = args[3]
i = args[4]
return ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
class Agent:
num_features = 22
def __init__(self):
self.lf = 0.2 # Learning factor lambda
self.data = [] # The features' values for all the games
self.rewards = [] # Reward values for moving from 1 state to the next
self.rt = np.array([])
self.max_iter = 50
def set_learning_factor(self, learning_factor):
assert(learning_factor >= 0 and learning_factor <= 1)
self.lf = learning_factor
def set_rt(self, rt):
assert(len(rt) == self.num_features)
self.rt = rt
def set_iter(self, max_iter):
self.max_iter = max_iter
def set_data(self, data):
self.data = []
self.rewards = []
for game in data:
game = np.vstack((game, np.zeros(self.num_features + 1)))
self.data.append(game[:, :-1])
self.rewards.append(game[:, -1:])
def eval_func(self, m, k, r):
"""
The evaluation function value for the set of weights (vector) r
at the mth game and kth board state """
return np.dot(r, self.data[m][k])
def eval_func_der(self, m, k, r, i):
"""
Find the derivative of the evaluation function with respect
to the ith component of the vector r
"""
return self.data[m][k][i]
def get_reward(self, m, s):
"""
Get reward for moving from state s to state (s + 1)
"""
return self.rewards[m][s + 1][0]
def temporal_diff(self, m, s):
"""
The temporal difference value for state s to state (s+1) in the mth game
"""
return (self.get_reward(m, s) + self.eval_func(m, s + 1, self.rt) -
self.eval_func(m, s, self.rt))
def temporal_diff_sum(self, m, k):
Nm = self.data[m].shape[0] - 1
result = 0
for s in range(k, Nm):
result += self.lf**(s - k) * self.temporal_diff(m, s)
return result
def optimized_func(self, r):
result = 0
M = len(self.data)
pool = Pool(processes=4)
for m in range(M):
Nm = self.data[m].shape[0] - 1
k_args = range(Nm + 1)
self_args = [self] * len(k_args)
m_args = [m] * len(k_args)
r_args = [r] * len(k_args)
result += sum(pool.map(worker_func,
zip(self_args, m_args, k_args, r_args)))
return result
def optimized_func_i_der(self, r, i):
"""
The derivative of the optimized function with respect to the
ith component of the vector r
"""
result = 0
M = len(self.data)
for m in range(M):
Nm = self.data[m].shape[0] - 1
for k in range(Nm + 1):
result += ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
return result
def <|fim_middle|>(self, r):
p = Pool(processes=4)
self_args = [self] * len(r)
i_args = range(len(r))
r_args = [r] * len(r)
return np.array(p.map(optimized_func_i_der,
zip(self_args, r_args, i_args)))
def callback(self, r):
print("Iteration %d completed at %s" %
(self.cur_iter, datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
self.cur_iter += 1
def compute_next_rt(self):
print("Start computing at %s" %
(datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
self.cur_iter = 1
r0 = np.array([random.randint(-10, 10)
for i in range(self.num_features)])
res = minimize(self.optimized_func, r0, method='BFGS',
jac=self.optimized_func_der,
options={'maxiter': self.max_iter, 'disp': True},
callback=self.callback)
return res.x
<|fim▁end|> | optimized_func_der |
<|file_name|>agent3.py<|end_file_name|><|fim▁begin|>import random
from datetime import datetime
from multiprocessing import Pool
import numpy as np
from scipy.optimize import minimize
def worker_func(args):
self = args[0]
m = args[1]
k = args[2]
r = args[3]
return (self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) ** 2
def optimized_func_i_der(args):
"""
The derivative of the optimized function with respect to the
ith component of the vector r
"""
self = args[0]
r = args[1]
i = args[2]
result = 0
M = len(self.data)
for m in range(M):
Nm = self.data[m].shape[0] - 1
for k in range(Nm + 1):
result += ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
return result
def worker_func_der(args):
self = args[0]
m = args[1]
k = args[2]
r = args[3]
i = args[4]
return ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
class Agent:
num_features = 22
def __init__(self):
self.lf = 0.2 # Learning factor lambda
self.data = [] # The features' values for all the games
self.rewards = [] # Reward values for moving from 1 state to the next
self.rt = np.array([])
self.max_iter = 50
def set_learning_factor(self, learning_factor):
assert(learning_factor >= 0 and learning_factor <= 1)
self.lf = learning_factor
def set_rt(self, rt):
assert(len(rt) == self.num_features)
self.rt = rt
def set_iter(self, max_iter):
self.max_iter = max_iter
def set_data(self, data):
self.data = []
self.rewards = []
for game in data:
game = np.vstack((game, np.zeros(self.num_features + 1)))
self.data.append(game[:, :-1])
self.rewards.append(game[:, -1:])
def eval_func(self, m, k, r):
"""
The evaluation function value for the set of weights (vector) r
at the mth game and kth board state """
return np.dot(r, self.data[m][k])
def eval_func_der(self, m, k, r, i):
"""
Find the derivative of the evaluation function with respect
to the ith component of the vector r
"""
return self.data[m][k][i]
def get_reward(self, m, s):
"""
Get reward for moving from state s to state (s + 1)
"""
return self.rewards[m][s + 1][0]
def temporal_diff(self, m, s):
"""
The temporal difference value for state s to state (s+1) in the mth game
"""
return (self.get_reward(m, s) + self.eval_func(m, s + 1, self.rt) -
self.eval_func(m, s, self.rt))
def temporal_diff_sum(self, m, k):
Nm = self.data[m].shape[0] - 1
result = 0
for s in range(k, Nm):
result += self.lf**(s - k) * self.temporal_diff(m, s)
return result
def optimized_func(self, r):
result = 0
M = len(self.data)
pool = Pool(processes=4)
for m in range(M):
Nm = self.data[m].shape[0] - 1
k_args = range(Nm + 1)
self_args = [self] * len(k_args)
m_args = [m] * len(k_args)
r_args = [r] * len(k_args)
result += sum(pool.map(worker_func,
zip(self_args, m_args, k_args, r_args)))
return result
def optimized_func_i_der(self, r, i):
"""
The derivative of the optimized function with respect to the
ith component of the vector r
"""
result = 0
M = len(self.data)
for m in range(M):
Nm = self.data[m].shape[0] - 1
for k in range(Nm + 1):
result += ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
return result
def optimized_func_der(self, r):
p = Pool(processes=4)
self_args = [self] * len(r)
i_args = range(len(r))
r_args = [r] * len(r)
return np.array(p.map(optimized_func_i_der,
zip(self_args, r_args, i_args)))
def <|fim_middle|>(self, r):
print("Iteration %d completed at %s" %
(self.cur_iter, datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
self.cur_iter += 1
def compute_next_rt(self):
print("Start computing at %s" %
(datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
self.cur_iter = 1
r0 = np.array([random.randint(-10, 10)
for i in range(self.num_features)])
res = minimize(self.optimized_func, r0, method='BFGS',
jac=self.optimized_func_der,
options={'maxiter': self.max_iter, 'disp': True},
callback=self.callback)
return res.x
<|fim▁end|> | callback |
<|file_name|>agent3.py<|end_file_name|><|fim▁begin|>import random
from datetime import datetime
from multiprocessing import Pool
import numpy as np
from scipy.optimize import minimize
def worker_func(args):
self = args[0]
m = args[1]
k = args[2]
r = args[3]
return (self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) ** 2
def optimized_func_i_der(args):
"""
The derivative of the optimized function with respect to the
ith component of the vector r
"""
self = args[0]
r = args[1]
i = args[2]
result = 0
M = len(self.data)
for m in range(M):
Nm = self.data[m].shape[0] - 1
for k in range(Nm + 1):
result += ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
return result
def worker_func_der(args):
self = args[0]
m = args[1]
k = args[2]
r = args[3]
i = args[4]
return ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
class Agent:
num_features = 22
def __init__(self):
self.lf = 0.2 # Learning factor lambda
self.data = [] # The features' values for all the games
self.rewards = [] # Reward values for moving from 1 state to the next
self.rt = np.array([])
self.max_iter = 50
def set_learning_factor(self, learning_factor):
assert(learning_factor >= 0 and learning_factor <= 1)
self.lf = learning_factor
def set_rt(self, rt):
assert(len(rt) == self.num_features)
self.rt = rt
def set_iter(self, max_iter):
self.max_iter = max_iter
def set_data(self, data):
self.data = []
self.rewards = []
for game in data:
game = np.vstack((game, np.zeros(self.num_features + 1)))
self.data.append(game[:, :-1])
self.rewards.append(game[:, -1:])
def eval_func(self, m, k, r):
"""
The evaluation function value for the set of weights (vector) r
at the mth game and kth board state """
return np.dot(r, self.data[m][k])
def eval_func_der(self, m, k, r, i):
"""
Find the derivative of the evaluation function with respect
to the ith component of the vector r
"""
return self.data[m][k][i]
def get_reward(self, m, s):
"""
Get reward for moving from state s to state (s + 1)
"""
return self.rewards[m][s + 1][0]
def temporal_diff(self, m, s):
"""
The temporal difference value for state s to state (s+1) in the mth game
"""
return (self.get_reward(m, s) + self.eval_func(m, s + 1, self.rt) -
self.eval_func(m, s, self.rt))
def temporal_diff_sum(self, m, k):
Nm = self.data[m].shape[0] - 1
result = 0
for s in range(k, Nm):
result += self.lf**(s - k) * self.temporal_diff(m, s)
return result
def optimized_func(self, r):
result = 0
M = len(self.data)
pool = Pool(processes=4)
for m in range(M):
Nm = self.data[m].shape[0] - 1
k_args = range(Nm + 1)
self_args = [self] * len(k_args)
m_args = [m] * len(k_args)
r_args = [r] * len(k_args)
result += sum(pool.map(worker_func,
zip(self_args, m_args, k_args, r_args)))
return result
def optimized_func_i_der(self, r, i):
"""
The derivative of the optimized function with respect to the
ith component of the vector r
"""
result = 0
M = len(self.data)
for m in range(M):
Nm = self.data[m].shape[0] - 1
for k in range(Nm + 1):
result += ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
return result
def optimized_func_der(self, r):
p = Pool(processes=4)
self_args = [self] * len(r)
i_args = range(len(r))
r_args = [r] * len(r)
return np.array(p.map(optimized_func_i_der,
zip(self_args, r_args, i_args)))
def callback(self, r):
print("Iteration %d completed at %s" %
(self.cur_iter, datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
self.cur_iter += 1
def <|fim_middle|>(self):
print("Start computing at %s" %
(datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
self.cur_iter = 1
r0 = np.array([random.randint(-10, 10)
for i in range(self.num_features)])
res = minimize(self.optimized_func, r0, method='BFGS',
jac=self.optimized_func_der,
options={'maxiter': self.max_iter, 'disp': True},
callback=self.callback)
return res.x
<|fim▁end|> | compute_next_rt |
<|file_name|>2d4882d39dbb_add_graphql_acl_to_users.py<|end_file_name|><|fim▁begin|>"""add graphql ACL to users
Revision ID: 2d4882d39dbb
Revises: c4d0e9ec46a9
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2d4882d39dbb'
down_revision = 'dc2848563b53'
POLICY_NAME = 'wazo_default_user_policy'
ACL_TEMPLATES = ['dird.graphql.me']
policy_table = sa.sql.table(
'auth_policy', sa.Column('uuid', sa.String(38)), sa.Column('name', sa.String(80))
)
acl_template_table = sa.sql.table(
'auth_acl_template', sa.Column('id', sa.Integer), sa.Column('template', sa.Text)
)
policy_template = sa.sql.table(
'auth_policy_template',
sa.Column('policy_uuid', sa.String(38)),
sa.Column('template_id', sa.Integer),
)
def _find_acl_template(conn, acl_template):
query = (
sa.sql.select([acl_template_table.c.id])
.where(acl_template_table.c.template == acl_template)
.limit(1)
)
return conn.execute(query).scalar()
def _find_acl_templates(conn, acl_templates):<|fim▁hole|> acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_policy_uuid(conn, policy_name):
policy_query = sa.sql.select([policy_table.c.uuid]).where(
policy_table.c.name == policy_name
)
for policy in conn.execute(policy_query).fetchall():
return policy[0]
def _insert_acl_template(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if not acl_template_id:
query = (
acl_template_table.insert()
.returning(acl_template_table.c.id)
.values(template=acl_template)
)
acl_template_id = conn.execute(query).scalar()
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_acl_template_ids(conn, policy_uuid):
query = sa.sql.select([policy_template.c.template_id]).where(
policy_template.c.policy_uuid == policy_uuid
)
return [acl_template_id for (acl_template_id,) in conn.execute(query).fetchall()]
def upgrade():
conn = op.get_bind()
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
acl_template_ids = _insert_acl_template(conn, ACL_TEMPLATES)
acl_template_ids_already_associated = _get_acl_template_ids(conn, policy_uuid)
for template_id in set(acl_template_ids) - set(acl_template_ids_already_associated):
query = policy_template.insert().values(
policy_uuid=policy_uuid, template_id=template_id
)
conn.execute(query)
def downgrade():
conn = op.get_bind()
acl_template_ids = _find_acl_templates(conn, ACL_TEMPLATES)
if not acl_template_ids:
return
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
delete_query = policy_template.delete().where(
sa.sql.and_(
policy_template.c.policy_uuid == policy_uuid,
policy_template.c.template_id.in_(acl_template_ids),
)
)
op.execute(delete_query)<|fim▁end|> | acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if acl_template_id: |
<|file_name|>2d4882d39dbb_add_graphql_acl_to_users.py<|end_file_name|><|fim▁begin|>"""add graphql ACL to users
Revision ID: 2d4882d39dbb
Revises: c4d0e9ec46a9
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2d4882d39dbb'
down_revision = 'dc2848563b53'
POLICY_NAME = 'wazo_default_user_policy'
ACL_TEMPLATES = ['dird.graphql.me']
policy_table = sa.sql.table(
'auth_policy', sa.Column('uuid', sa.String(38)), sa.Column('name', sa.String(80))
)
acl_template_table = sa.sql.table(
'auth_acl_template', sa.Column('id', sa.Integer), sa.Column('template', sa.Text)
)
policy_template = sa.sql.table(
'auth_policy_template',
sa.Column('policy_uuid', sa.String(38)),
sa.Column('template_id', sa.Integer),
)
def _find_acl_template(conn, acl_template):
<|fim_middle|>
def _find_acl_templates(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if acl_template_id:
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_policy_uuid(conn, policy_name):
policy_query = sa.sql.select([policy_table.c.uuid]).where(
policy_table.c.name == policy_name
)
for policy in conn.execute(policy_query).fetchall():
return policy[0]
def _insert_acl_template(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if not acl_template_id:
query = (
acl_template_table.insert()
.returning(acl_template_table.c.id)
.values(template=acl_template)
)
acl_template_id = conn.execute(query).scalar()
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_acl_template_ids(conn, policy_uuid):
query = sa.sql.select([policy_template.c.template_id]).where(
policy_template.c.policy_uuid == policy_uuid
)
return [acl_template_id for (acl_template_id,) in conn.execute(query).fetchall()]
def upgrade():
conn = op.get_bind()
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
acl_template_ids = _insert_acl_template(conn, ACL_TEMPLATES)
acl_template_ids_already_associated = _get_acl_template_ids(conn, policy_uuid)
for template_id in set(acl_template_ids) - set(acl_template_ids_already_associated):
query = policy_template.insert().values(
policy_uuid=policy_uuid, template_id=template_id
)
conn.execute(query)
def downgrade():
conn = op.get_bind()
acl_template_ids = _find_acl_templates(conn, ACL_TEMPLATES)
if not acl_template_ids:
return
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
delete_query = policy_template.delete().where(
sa.sql.and_(
policy_template.c.policy_uuid == policy_uuid,
policy_template.c.template_id.in_(acl_template_ids),
)
)
op.execute(delete_query)
<|fim▁end|> | query = (
sa.sql.select([acl_template_table.c.id])
.where(acl_template_table.c.template == acl_template)
.limit(1)
)
return conn.execute(query).scalar() |
<|file_name|>2d4882d39dbb_add_graphql_acl_to_users.py<|end_file_name|><|fim▁begin|>"""add graphql ACL to users
Revision ID: 2d4882d39dbb
Revises: c4d0e9ec46a9
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2d4882d39dbb'
down_revision = 'dc2848563b53'
POLICY_NAME = 'wazo_default_user_policy'
ACL_TEMPLATES = ['dird.graphql.me']
policy_table = sa.sql.table(
'auth_policy', sa.Column('uuid', sa.String(38)), sa.Column('name', sa.String(80))
)
acl_template_table = sa.sql.table(
'auth_acl_template', sa.Column('id', sa.Integer), sa.Column('template', sa.Text)
)
policy_template = sa.sql.table(
'auth_policy_template',
sa.Column('policy_uuid', sa.String(38)),
sa.Column('template_id', sa.Integer),
)
def _find_acl_template(conn, acl_template):
query = (
sa.sql.select([acl_template_table.c.id])
.where(acl_template_table.c.template == acl_template)
.limit(1)
)
return conn.execute(query).scalar()
def _find_acl_templates(conn, acl_templates):
<|fim_middle|>
def _get_policy_uuid(conn, policy_name):
policy_query = sa.sql.select([policy_table.c.uuid]).where(
policy_table.c.name == policy_name
)
for policy in conn.execute(policy_query).fetchall():
return policy[0]
def _insert_acl_template(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if not acl_template_id:
query = (
acl_template_table.insert()
.returning(acl_template_table.c.id)
.values(template=acl_template)
)
acl_template_id = conn.execute(query).scalar()
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_acl_template_ids(conn, policy_uuid):
query = sa.sql.select([policy_template.c.template_id]).where(
policy_template.c.policy_uuid == policy_uuid
)
return [acl_template_id for (acl_template_id,) in conn.execute(query).fetchall()]
def upgrade():
conn = op.get_bind()
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
acl_template_ids = _insert_acl_template(conn, ACL_TEMPLATES)
acl_template_ids_already_associated = _get_acl_template_ids(conn, policy_uuid)
for template_id in set(acl_template_ids) - set(acl_template_ids_already_associated):
query = policy_template.insert().values(
policy_uuid=policy_uuid, template_id=template_id
)
conn.execute(query)
def downgrade():
conn = op.get_bind()
acl_template_ids = _find_acl_templates(conn, ACL_TEMPLATES)
if not acl_template_ids:
return
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
delete_query = policy_template.delete().where(
sa.sql.and_(
policy_template.c.policy_uuid == policy_uuid,
policy_template.c.template_id.in_(acl_template_ids),
)
)
op.execute(delete_query)
<|fim▁end|> | acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if acl_template_id:
acl_template_ids.append(acl_template_id)
return acl_template_ids |
<|file_name|>2d4882d39dbb_add_graphql_acl_to_users.py<|end_file_name|><|fim▁begin|>"""add graphql ACL to users
Revision ID: 2d4882d39dbb
Revises: c4d0e9ec46a9
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2d4882d39dbb'
down_revision = 'dc2848563b53'
POLICY_NAME = 'wazo_default_user_policy'
ACL_TEMPLATES = ['dird.graphql.me']
policy_table = sa.sql.table(
'auth_policy', sa.Column('uuid', sa.String(38)), sa.Column('name', sa.String(80))
)
acl_template_table = sa.sql.table(
'auth_acl_template', sa.Column('id', sa.Integer), sa.Column('template', sa.Text)
)
policy_template = sa.sql.table(
'auth_policy_template',
sa.Column('policy_uuid', sa.String(38)),
sa.Column('template_id', sa.Integer),
)
def _find_acl_template(conn, acl_template):
query = (
sa.sql.select([acl_template_table.c.id])
.where(acl_template_table.c.template == acl_template)
.limit(1)
)
return conn.execute(query).scalar()
def _find_acl_templates(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if acl_template_id:
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_policy_uuid(conn, policy_name):
<|fim_middle|>
def _insert_acl_template(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if not acl_template_id:
query = (
acl_template_table.insert()
.returning(acl_template_table.c.id)
.values(template=acl_template)
)
acl_template_id = conn.execute(query).scalar()
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_acl_template_ids(conn, policy_uuid):
query = sa.sql.select([policy_template.c.template_id]).where(
policy_template.c.policy_uuid == policy_uuid
)
return [acl_template_id for (acl_template_id,) in conn.execute(query).fetchall()]
def upgrade():
conn = op.get_bind()
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
acl_template_ids = _insert_acl_template(conn, ACL_TEMPLATES)
acl_template_ids_already_associated = _get_acl_template_ids(conn, policy_uuid)
for template_id in set(acl_template_ids) - set(acl_template_ids_already_associated):
query = policy_template.insert().values(
policy_uuid=policy_uuid, template_id=template_id
)
conn.execute(query)
def downgrade():
conn = op.get_bind()
acl_template_ids = _find_acl_templates(conn, ACL_TEMPLATES)
if not acl_template_ids:
return
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
delete_query = policy_template.delete().where(
sa.sql.and_(
policy_template.c.policy_uuid == policy_uuid,
policy_template.c.template_id.in_(acl_template_ids),
)
)
op.execute(delete_query)
<|fim▁end|> | policy_query = sa.sql.select([policy_table.c.uuid]).where(
policy_table.c.name == policy_name
)
for policy in conn.execute(policy_query).fetchall():
return policy[0] |
<|file_name|>2d4882d39dbb_add_graphql_acl_to_users.py<|end_file_name|><|fim▁begin|>"""add graphql ACL to users
Revision ID: 2d4882d39dbb
Revises: c4d0e9ec46a9
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2d4882d39dbb'
down_revision = 'dc2848563b53'
POLICY_NAME = 'wazo_default_user_policy'
ACL_TEMPLATES = ['dird.graphql.me']
policy_table = sa.sql.table(
'auth_policy', sa.Column('uuid', sa.String(38)), sa.Column('name', sa.String(80))
)
acl_template_table = sa.sql.table(
'auth_acl_template', sa.Column('id', sa.Integer), sa.Column('template', sa.Text)
)
policy_template = sa.sql.table(
'auth_policy_template',
sa.Column('policy_uuid', sa.String(38)),
sa.Column('template_id', sa.Integer),
)
def _find_acl_template(conn, acl_template):
query = (
sa.sql.select([acl_template_table.c.id])
.where(acl_template_table.c.template == acl_template)
.limit(1)
)
return conn.execute(query).scalar()
def _find_acl_templates(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if acl_template_id:
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_policy_uuid(conn, policy_name):
policy_query = sa.sql.select([policy_table.c.uuid]).where(
policy_table.c.name == policy_name
)
for policy in conn.execute(policy_query).fetchall():
return policy[0]
def _insert_acl_template(conn, acl_templates):
<|fim_middle|>
def _get_acl_template_ids(conn, policy_uuid):
query = sa.sql.select([policy_template.c.template_id]).where(
policy_template.c.policy_uuid == policy_uuid
)
return [acl_template_id for (acl_template_id,) in conn.execute(query).fetchall()]
def upgrade():
conn = op.get_bind()
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
acl_template_ids = _insert_acl_template(conn, ACL_TEMPLATES)
acl_template_ids_already_associated = _get_acl_template_ids(conn, policy_uuid)
for template_id in set(acl_template_ids) - set(acl_template_ids_already_associated):
query = policy_template.insert().values(
policy_uuid=policy_uuid, template_id=template_id
)
conn.execute(query)
def downgrade():
conn = op.get_bind()
acl_template_ids = _find_acl_templates(conn, ACL_TEMPLATES)
if not acl_template_ids:
return
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
delete_query = policy_template.delete().where(
sa.sql.and_(
policy_template.c.policy_uuid == policy_uuid,
policy_template.c.template_id.in_(acl_template_ids),
)
)
op.execute(delete_query)
<|fim▁end|> | acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if not acl_template_id:
query = (
acl_template_table.insert()
.returning(acl_template_table.c.id)
.values(template=acl_template)
)
acl_template_id = conn.execute(query).scalar()
acl_template_ids.append(acl_template_id)
return acl_template_ids |
<|file_name|>2d4882d39dbb_add_graphql_acl_to_users.py<|end_file_name|><|fim▁begin|>"""add graphql ACL to users
Revision ID: 2d4882d39dbb
Revises: c4d0e9ec46a9
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2d4882d39dbb'
down_revision = 'dc2848563b53'
POLICY_NAME = 'wazo_default_user_policy'
ACL_TEMPLATES = ['dird.graphql.me']
policy_table = sa.sql.table(
'auth_policy', sa.Column('uuid', sa.String(38)), sa.Column('name', sa.String(80))
)
acl_template_table = sa.sql.table(
'auth_acl_template', sa.Column('id', sa.Integer), sa.Column('template', sa.Text)
)
policy_template = sa.sql.table(
'auth_policy_template',
sa.Column('policy_uuid', sa.String(38)),
sa.Column('template_id', sa.Integer),
)
def _find_acl_template(conn, acl_template):
query = (
sa.sql.select([acl_template_table.c.id])
.where(acl_template_table.c.template == acl_template)
.limit(1)
)
return conn.execute(query).scalar()
def _find_acl_templates(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if acl_template_id:
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_policy_uuid(conn, policy_name):
policy_query = sa.sql.select([policy_table.c.uuid]).where(
policy_table.c.name == policy_name
)
for policy in conn.execute(policy_query).fetchall():
return policy[0]
def _insert_acl_template(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if not acl_template_id:
query = (
acl_template_table.insert()
.returning(acl_template_table.c.id)
.values(template=acl_template)
)
acl_template_id = conn.execute(query).scalar()
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_acl_template_ids(conn, policy_uuid):
<|fim_middle|>
def upgrade():
conn = op.get_bind()
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
acl_template_ids = _insert_acl_template(conn, ACL_TEMPLATES)
acl_template_ids_already_associated = _get_acl_template_ids(conn, policy_uuid)
for template_id in set(acl_template_ids) - set(acl_template_ids_already_associated):
query = policy_template.insert().values(
policy_uuid=policy_uuid, template_id=template_id
)
conn.execute(query)
def downgrade():
conn = op.get_bind()
acl_template_ids = _find_acl_templates(conn, ACL_TEMPLATES)
if not acl_template_ids:
return
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
delete_query = policy_template.delete().where(
sa.sql.and_(
policy_template.c.policy_uuid == policy_uuid,
policy_template.c.template_id.in_(acl_template_ids),
)
)
op.execute(delete_query)
<|fim▁end|> | query = sa.sql.select([policy_template.c.template_id]).where(
policy_template.c.policy_uuid == policy_uuid
)
return [acl_template_id for (acl_template_id,) in conn.execute(query).fetchall()] |
<|file_name|>2d4882d39dbb_add_graphql_acl_to_users.py<|end_file_name|><|fim▁begin|>"""add graphql ACL to users
Revision ID: 2d4882d39dbb
Revises: c4d0e9ec46a9
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2d4882d39dbb'
down_revision = 'dc2848563b53'
POLICY_NAME = 'wazo_default_user_policy'
ACL_TEMPLATES = ['dird.graphql.me']
policy_table = sa.sql.table(
'auth_policy', sa.Column('uuid', sa.String(38)), sa.Column('name', sa.String(80))
)
acl_template_table = sa.sql.table(
'auth_acl_template', sa.Column('id', sa.Integer), sa.Column('template', sa.Text)
)
policy_template = sa.sql.table(
'auth_policy_template',
sa.Column('policy_uuid', sa.String(38)),
sa.Column('template_id', sa.Integer),
)
def _find_acl_template(conn, acl_template):
query = (
sa.sql.select([acl_template_table.c.id])
.where(acl_template_table.c.template == acl_template)
.limit(1)
)
return conn.execute(query).scalar()
def _find_acl_templates(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if acl_template_id:
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_policy_uuid(conn, policy_name):
policy_query = sa.sql.select([policy_table.c.uuid]).where(
policy_table.c.name == policy_name
)
for policy in conn.execute(policy_query).fetchall():
return policy[0]
def _insert_acl_template(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if not acl_template_id:
query = (
acl_template_table.insert()
.returning(acl_template_table.c.id)
.values(template=acl_template)
)
acl_template_id = conn.execute(query).scalar()
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_acl_template_ids(conn, policy_uuid):
query = sa.sql.select([policy_template.c.template_id]).where(
policy_template.c.policy_uuid == policy_uuid
)
return [acl_template_id for (acl_template_id,) in conn.execute(query).fetchall()]
def upgrade():
<|fim_middle|>
def downgrade():
conn = op.get_bind()
acl_template_ids = _find_acl_templates(conn, ACL_TEMPLATES)
if not acl_template_ids:
return
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
delete_query = policy_template.delete().where(
sa.sql.and_(
policy_template.c.policy_uuid == policy_uuid,
policy_template.c.template_id.in_(acl_template_ids),
)
)
op.execute(delete_query)
<|fim▁end|> | conn = op.get_bind()
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
acl_template_ids = _insert_acl_template(conn, ACL_TEMPLATES)
acl_template_ids_already_associated = _get_acl_template_ids(conn, policy_uuid)
for template_id in set(acl_template_ids) - set(acl_template_ids_already_associated):
query = policy_template.insert().values(
policy_uuid=policy_uuid, template_id=template_id
)
conn.execute(query) |
<|file_name|>2d4882d39dbb_add_graphql_acl_to_users.py<|end_file_name|><|fim▁begin|>"""add graphql ACL to users
Revision ID: 2d4882d39dbb
Revises: dc2848563b53
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2d4882d39dbb'
down_revision = 'dc2848563b53'
POLICY_NAME = 'wazo_default_user_policy'
ACL_TEMPLATES = ['dird.graphql.me']
policy_table = sa.sql.table(
'auth_policy', sa.Column('uuid', sa.String(38)), sa.Column('name', sa.String(80))
)
acl_template_table = sa.sql.table(
'auth_acl_template', sa.Column('id', sa.Integer), sa.Column('template', sa.Text)
)
policy_template = sa.sql.table(
'auth_policy_template',
sa.Column('policy_uuid', sa.String(38)),
sa.Column('template_id', sa.Integer),
)
def _find_acl_template(conn, acl_template):
query = (
sa.sql.select([acl_template_table.c.id])
.where(acl_template_table.c.template == acl_template)
.limit(1)
)
return conn.execute(query).scalar()
def _find_acl_templates(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if acl_template_id:
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_policy_uuid(conn, policy_name):
policy_query = sa.sql.select([policy_table.c.uuid]).where(
policy_table.c.name == policy_name
)
for policy in conn.execute(policy_query).fetchall():
return policy[0]
def _insert_acl_template(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if not acl_template_id:
query = (
acl_template_table.insert()
.returning(acl_template_table.c.id)
.values(template=acl_template)
)
acl_template_id = conn.execute(query).scalar()
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_acl_template_ids(conn, policy_uuid):
query = sa.sql.select([policy_template.c.template_id]).where(
policy_template.c.policy_uuid == policy_uuid
)
return [acl_template_id for (acl_template_id,) in conn.execute(query).fetchall()]
def upgrade():
conn = op.get_bind()
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
acl_template_ids = _insert_acl_template(conn, ACL_TEMPLATES)
acl_template_ids_already_associated = _get_acl_template_ids(conn, policy_uuid)
for template_id in set(acl_template_ids) - set(acl_template_ids_already_associated):
query = policy_template.insert().values(
policy_uuid=policy_uuid, template_id=template_id
)
conn.execute(query)
def downgrade():
<|fim_middle|>
<|fim▁end|> | conn = op.get_bind()
acl_template_ids = _find_acl_templates(conn, ACL_TEMPLATES)
if not acl_template_ids:
return
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
delete_query = policy_template.delete().where(
sa.sql.and_(
policy_template.c.policy_uuid == policy_uuid,
policy_template.c.template_id.in_(acl_template_ids),
)
)
op.execute(delete_query) |
<|file_name|>2d4882d39dbb_add_graphql_acl_to_users.py<|end_file_name|><|fim▁begin|>"""add graphql ACL to users
Revision ID: 2d4882d39dbb
Revises: dc2848563b53
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2d4882d39dbb'
down_revision = 'dc2848563b53'
POLICY_NAME = 'wazo_default_user_policy'
ACL_TEMPLATES = ['dird.graphql.me']
policy_table = sa.sql.table(
'auth_policy', sa.Column('uuid', sa.String(38)), sa.Column('name', sa.String(80))
)
acl_template_table = sa.sql.table(
'auth_acl_template', sa.Column('id', sa.Integer), sa.Column('template', sa.Text)
)
policy_template = sa.sql.table(
'auth_policy_template',
sa.Column('policy_uuid', sa.String(38)),
sa.Column('template_id', sa.Integer),
)
def _find_acl_template(conn, acl_template):
query = (
sa.sql.select([acl_template_table.c.id])
.where(acl_template_table.c.template == acl_template)
.limit(1)
)
return conn.execute(query).scalar()
def _find_acl_templates(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if acl_template_id:
<|fim_middle|>
return acl_template_ids
def _get_policy_uuid(conn, policy_name):
policy_query = sa.sql.select([policy_table.c.uuid]).where(
policy_table.c.name == policy_name
)
for policy in conn.execute(policy_query).fetchall():
return policy[0]
def _insert_acl_template(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if not acl_template_id:
query = (
acl_template_table.insert()
.returning(acl_template_table.c.id)
.values(template=acl_template)
)
acl_template_id = conn.execute(query).scalar()
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_acl_template_ids(conn, policy_uuid):
query = sa.sql.select([policy_template.c.template_id]).where(
policy_template.c.policy_uuid == policy_uuid
)
return [acl_template_id for (acl_template_id,) in conn.execute(query).fetchall()]
def upgrade():
conn = op.get_bind()
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
acl_template_ids = _insert_acl_template(conn, ACL_TEMPLATES)
acl_template_ids_already_associated = _get_acl_template_ids(conn, policy_uuid)
for template_id in set(acl_template_ids) - set(acl_template_ids_already_associated):
query = policy_template.insert().values(
policy_uuid=policy_uuid, template_id=template_id
)
conn.execute(query)
def downgrade():
conn = op.get_bind()
acl_template_ids = _find_acl_templates(conn, ACL_TEMPLATES)
if not acl_template_ids:
return
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
delete_query = policy_template.delete().where(
sa.sql.and_(
policy_template.c.policy_uuid == policy_uuid,
policy_template.c.template_id.in_(acl_template_ids),
)
)
op.execute(delete_query)
<|fim▁end|> | acl_template_ids.append(acl_template_id) |
<|file_name|>2d4882d39dbb_add_graphql_acl_to_users.py<|end_file_name|><|fim▁begin|>"""add graphql ACL to users
Revision ID: 2d4882d39dbb
Revises: dc2848563b53
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2d4882d39dbb'
down_revision = 'dc2848563b53'
POLICY_NAME = 'wazo_default_user_policy'
ACL_TEMPLATES = ['dird.graphql.me']
policy_table = sa.sql.table(
'auth_policy', sa.Column('uuid', sa.String(38)), sa.Column('name', sa.String(80))
)
acl_template_table = sa.sql.table(
'auth_acl_template', sa.Column('id', sa.Integer), sa.Column('template', sa.Text)
)
policy_template = sa.sql.table(
'auth_policy_template',
sa.Column('policy_uuid', sa.String(38)),
sa.Column('template_id', sa.Integer),
)
def _find_acl_template(conn, acl_template):
query = (
sa.sql.select([acl_template_table.c.id])
.where(acl_template_table.c.template == acl_template)
.limit(1)
)
return conn.execute(query).scalar()
def _find_acl_templates(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if acl_template_id:
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_policy_uuid(conn, policy_name):
policy_query = sa.sql.select([policy_table.c.uuid]).where(
policy_table.c.name == policy_name
)
for policy in conn.execute(policy_query).fetchall():
return policy[0]
def _insert_acl_template(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if not acl_template_id:
<|fim_middle|>
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_acl_template_ids(conn, policy_uuid):
query = sa.sql.select([policy_template.c.template_id]).where(
policy_template.c.policy_uuid == policy_uuid
)
return [acl_template_id for (acl_template_id,) in conn.execute(query).fetchall()]
def upgrade():
conn = op.get_bind()
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
acl_template_ids = _insert_acl_template(conn, ACL_TEMPLATES)
acl_template_ids_already_associated = _get_acl_template_ids(conn, policy_uuid)
for template_id in set(acl_template_ids) - set(acl_template_ids_already_associated):
query = policy_template.insert().values(
policy_uuid=policy_uuid, template_id=template_id
)
conn.execute(query)
def downgrade():
conn = op.get_bind()
acl_template_ids = _find_acl_templates(conn, ACL_TEMPLATES)
if not acl_template_ids:
return
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
delete_query = policy_template.delete().where(
sa.sql.and_(
policy_template.c.policy_uuid == policy_uuid,
policy_template.c.template_id.in_(acl_template_ids),
)
)
op.execute(delete_query)
<|fim▁end|> | query = (
acl_template_table.insert()
.returning(acl_template_table.c.id)
.values(template=acl_template)
)
acl_template_id = conn.execute(query).scalar() |
<|file_name|>2d4882d39dbb_add_graphql_acl_to_users.py<|end_file_name|><|fim▁begin|>"""add graphql ACL to users
Revision ID: 2d4882d39dbb
Revises: dc2848563b53
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2d4882d39dbb'
down_revision = 'dc2848563b53'
POLICY_NAME = 'wazo_default_user_policy'
ACL_TEMPLATES = ['dird.graphql.me']
policy_table = sa.sql.table(
'auth_policy', sa.Column('uuid', sa.String(38)), sa.Column('name', sa.String(80))
)
acl_template_table = sa.sql.table(
'auth_acl_template', sa.Column('id', sa.Integer), sa.Column('template', sa.Text)
)
policy_template = sa.sql.table(
'auth_policy_template',
sa.Column('policy_uuid', sa.String(38)),
sa.Column('template_id', sa.Integer),
)
def _find_acl_template(conn, acl_template):
query = (
sa.sql.select([acl_template_table.c.id])
.where(acl_template_table.c.template == acl_template)
.limit(1)
)
return conn.execute(query).scalar()
def _find_acl_templates(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if acl_template_id:
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_policy_uuid(conn, policy_name):
policy_query = sa.sql.select([policy_table.c.uuid]).where(
policy_table.c.name == policy_name
)
for policy in conn.execute(policy_query).fetchall():
return policy[0]
def _insert_acl_template(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if not acl_template_id:
query = (
acl_template_table.insert()
.returning(acl_template_table.c.id)
.values(template=acl_template)
)
acl_template_id = conn.execute(query).scalar()
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_acl_template_ids(conn, policy_uuid):
query = sa.sql.select([policy_template.c.template_id]).where(
policy_template.c.policy_uuid == policy_uuid
)
return [acl_template_id for (acl_template_id,) in conn.execute(query).fetchall()]
def upgrade():
conn = op.get_bind()
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
<|fim_middle|>
acl_template_ids = _insert_acl_template(conn, ACL_TEMPLATES)
acl_template_ids_already_associated = _get_acl_template_ids(conn, policy_uuid)
for template_id in set(acl_template_ids) - set(acl_template_ids_already_associated):
query = policy_template.insert().values(
policy_uuid=policy_uuid, template_id=template_id
)
conn.execute(query)
def downgrade():
conn = op.get_bind()
acl_template_ids = _find_acl_templates(conn, ACL_TEMPLATES)
if not acl_template_ids:
return
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
delete_query = policy_template.delete().where(
sa.sql.and_(
policy_template.c.policy_uuid == policy_uuid,
policy_template.c.template_id.in_(acl_template_ids),
)
)
op.execute(delete_query)
<|fim▁end|> | return |
<|file_name|>2d4882d39dbb_add_graphql_acl_to_users.py<|end_file_name|><|fim▁begin|>"""add graphql ACL to users
Revision ID: 2d4882d39dbb
Revises: dc2848563b53
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2d4882d39dbb'
down_revision = 'dc2848563b53'
POLICY_NAME = 'wazo_default_user_policy'
ACL_TEMPLATES = ['dird.graphql.me']
policy_table = sa.sql.table(
'auth_policy', sa.Column('uuid', sa.String(38)), sa.Column('name', sa.String(80))
)
acl_template_table = sa.sql.table(
'auth_acl_template', sa.Column('id', sa.Integer), sa.Column('template', sa.Text)
)
policy_template = sa.sql.table(
'auth_policy_template',
sa.Column('policy_uuid', sa.String(38)),
sa.Column('template_id', sa.Integer),
)
def _find_acl_template(conn, acl_template):
query = (
sa.sql.select([acl_template_table.c.id])
.where(acl_template_table.c.template == acl_template)
.limit(1)
)
return conn.execute(query).scalar()
def _find_acl_templates(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if acl_template_id:
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_policy_uuid(conn, policy_name):
policy_query = sa.sql.select([policy_table.c.uuid]).where(
policy_table.c.name == policy_name
)
for policy in conn.execute(policy_query).fetchall():
return policy[0]
def _insert_acl_template(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if not acl_template_id:
query = (
acl_template_table.insert()
.returning(acl_template_table.c.id)
.values(template=acl_template)
)
acl_template_id = conn.execute(query).scalar()
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_acl_template_ids(conn, policy_uuid):
query = sa.sql.select([policy_template.c.template_id]).where(
policy_template.c.policy_uuid == policy_uuid
)
return [acl_template_id for (acl_template_id,) in conn.execute(query).fetchall()]
def upgrade():
conn = op.get_bind()
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
acl_template_ids = _insert_acl_template(conn, ACL_TEMPLATES)
acl_template_ids_already_associated = _get_acl_template_ids(conn, policy_uuid)
for template_id in set(acl_template_ids) - set(acl_template_ids_already_associated):
query = policy_template.insert().values(
policy_uuid=policy_uuid, template_id=template_id
)
conn.execute(query)
def downgrade():
conn = op.get_bind()
acl_template_ids = _find_acl_templates(conn, ACL_TEMPLATES)
if not acl_template_ids:
<|fim_middle|>
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
delete_query = policy_template.delete().where(
sa.sql.and_(
policy_template.c.policy_uuid == policy_uuid,
policy_template.c.template_id.in_(acl_template_ids),
)
)
op.execute(delete_query)
<|fim▁end|> | return |
<|file_name|>2d4882d39dbb_add_graphql_acl_to_users.py<|end_file_name|><|fim▁begin|>"""add graphql ACL to users
Revision ID: 2d4882d39dbb
Revises: dc2848563b53
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2d4882d39dbb'
down_revision = 'dc2848563b53'
POLICY_NAME = 'wazo_default_user_policy'
ACL_TEMPLATES = ['dird.graphql.me']
policy_table = sa.sql.table(
'auth_policy', sa.Column('uuid', sa.String(38)), sa.Column('name', sa.String(80))
)
acl_template_table = sa.sql.table(
'auth_acl_template', sa.Column('id', sa.Integer), sa.Column('template', sa.Text)
)
policy_template = sa.sql.table(
'auth_policy_template',
sa.Column('policy_uuid', sa.String(38)),
sa.Column('template_id', sa.Integer),
)
def _find_acl_template(conn, acl_template):
query = (
sa.sql.select([acl_template_table.c.id])
.where(acl_template_table.c.template == acl_template)
.limit(1)
)
return conn.execute(query).scalar()
def _find_acl_templates(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if acl_template_id:
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_policy_uuid(conn, policy_name):
policy_query = sa.sql.select([policy_table.c.uuid]).where(
policy_table.c.name == policy_name
)
for policy in conn.execute(policy_query).fetchall():
return policy[0]
def _insert_acl_template(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if not acl_template_id:
query = (
acl_template_table.insert()
.returning(acl_template_table.c.id)
.values(template=acl_template)
)
acl_template_id = conn.execute(query).scalar()
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_acl_template_ids(conn, policy_uuid):
query = sa.sql.select([policy_template.c.template_id]).where(
policy_template.c.policy_uuid == policy_uuid
)
return [acl_template_id for (acl_template_id,) in conn.execute(query).fetchall()]
def upgrade():
conn = op.get_bind()
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
acl_template_ids = _insert_acl_template(conn, ACL_TEMPLATES)
acl_template_ids_already_associated = _get_acl_template_ids(conn, policy_uuid)
for template_id in set(acl_template_ids) - set(acl_template_ids_already_associated):
query = policy_template.insert().values(
policy_uuid=policy_uuid, template_id=template_id
)
conn.execute(query)
def downgrade():
conn = op.get_bind()
acl_template_ids = _find_acl_templates(conn, ACL_TEMPLATES)
if not acl_template_ids:
return
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
<|fim_middle|>
delete_query = policy_template.delete().where(
sa.sql.and_(
policy_template.c.policy_uuid == policy_uuid,
policy_template.c.template_id.in_(acl_template_ids),
)
)
op.execute(delete_query)
<|fim▁end|> | return |
<|file_name|>2d4882d39dbb_add_graphql_acl_to_users.py<|end_file_name|><|fim▁begin|>"""add graphql ACL to users
Revision ID: 2d4882d39dbb
Revises: dc2848563b53
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2d4882d39dbb'
down_revision = 'dc2848563b53'
POLICY_NAME = 'wazo_default_user_policy'
ACL_TEMPLATES = ['dird.graphql.me']
policy_table = sa.sql.table(
'auth_policy', sa.Column('uuid', sa.String(38)), sa.Column('name', sa.String(80))
)
acl_template_table = sa.sql.table(
'auth_acl_template', sa.Column('id', sa.Integer), sa.Column('template', sa.Text)
)
policy_template = sa.sql.table(
'auth_policy_template',
sa.Column('policy_uuid', sa.String(38)),
sa.Column('template_id', sa.Integer),
)
def <|fim_middle|>(conn, acl_template):
query = (
sa.sql.select([acl_template_table.c.id])
.where(acl_template_table.c.template == acl_template)
.limit(1)
)
return conn.execute(query).scalar()
def _find_acl_templates(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if acl_template_id:
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_policy_uuid(conn, policy_name):
policy_query = sa.sql.select([policy_table.c.uuid]).where(
policy_table.c.name == policy_name
)
for policy in conn.execute(policy_query).fetchall():
return policy[0]
def _insert_acl_template(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if not acl_template_id:
query = (
acl_template_table.insert()
.returning(acl_template_table.c.id)
.values(template=acl_template)
)
acl_template_id = conn.execute(query).scalar()
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_acl_template_ids(conn, policy_uuid):
query = sa.sql.select([policy_template.c.template_id]).where(
policy_template.c.policy_uuid == policy_uuid
)
return [acl_template_id for (acl_template_id,) in conn.execute(query).fetchall()]
def upgrade():
conn = op.get_bind()
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
acl_template_ids = _insert_acl_template(conn, ACL_TEMPLATES)
acl_template_ids_already_associated = _get_acl_template_ids(conn, policy_uuid)
for template_id in set(acl_template_ids) - set(acl_template_ids_already_associated):
query = policy_template.insert().values(
policy_uuid=policy_uuid, template_id=template_id
)
conn.execute(query)
def downgrade():
conn = op.get_bind()
acl_template_ids = _find_acl_templates(conn, ACL_TEMPLATES)
if not acl_template_ids:
return
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
delete_query = policy_template.delete().where(
sa.sql.and_(
policy_template.c.policy_uuid == policy_uuid,
policy_template.c.template_id.in_(acl_template_ids),
)
)
op.execute(delete_query)
<|fim▁end|> | _find_acl_template |
<|file_name|>2d4882d39dbb_add_graphql_acl_to_users.py<|end_file_name|><|fim▁begin|>"""add graphql ACL to users
Revision ID: 2d4882d39dbb
Revises: dc2848563b53
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2d4882d39dbb'
down_revision = 'dc2848563b53'
POLICY_NAME = 'wazo_default_user_policy'
ACL_TEMPLATES = ['dird.graphql.me']
policy_table = sa.sql.table(
'auth_policy', sa.Column('uuid', sa.String(38)), sa.Column('name', sa.String(80))
)
acl_template_table = sa.sql.table(
'auth_acl_template', sa.Column('id', sa.Integer), sa.Column('template', sa.Text)
)
policy_template = sa.sql.table(
'auth_policy_template',
sa.Column('policy_uuid', sa.String(38)),
sa.Column('template_id', sa.Integer),
)
def _find_acl_template(conn, acl_template):
query = (
sa.sql.select([acl_template_table.c.id])
.where(acl_template_table.c.template == acl_template)
.limit(1)
)
return conn.execute(query).scalar()
def <|fim_middle|>(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if acl_template_id:
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_policy_uuid(conn, policy_name):
policy_query = sa.sql.select([policy_table.c.uuid]).where(
policy_table.c.name == policy_name
)
for policy in conn.execute(policy_query).fetchall():
return policy[0]
def _insert_acl_template(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if not acl_template_id:
query = (
acl_template_table.insert()
.returning(acl_template_table.c.id)
.values(template=acl_template)
)
acl_template_id = conn.execute(query).scalar()
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_acl_template_ids(conn, policy_uuid):
query = sa.sql.select([policy_template.c.template_id]).where(
policy_template.c.policy_uuid == policy_uuid
)
return [acl_template_id for (acl_template_id,) in conn.execute(query).fetchall()]
def upgrade():
conn = op.get_bind()
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
acl_template_ids = _insert_acl_template(conn, ACL_TEMPLATES)
acl_template_ids_already_associated = _get_acl_template_ids(conn, policy_uuid)
for template_id in set(acl_template_ids) - set(acl_template_ids_already_associated):
query = policy_template.insert().values(
policy_uuid=policy_uuid, template_id=template_id
)
conn.execute(query)
def downgrade():
conn = op.get_bind()
acl_template_ids = _find_acl_templates(conn, ACL_TEMPLATES)
if not acl_template_ids:
return
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
delete_query = policy_template.delete().where(
sa.sql.and_(
policy_template.c.policy_uuid == policy_uuid,
policy_template.c.template_id.in_(acl_template_ids),
)
)
op.execute(delete_query)
<|fim▁end|> | _find_acl_templates |
<|file_name|>2d4882d39dbb_add_graphql_acl_to_users.py<|end_file_name|><|fim▁begin|>"""add graphql ACL to users
Revision ID: 2d4882d39dbb
Revises: dc2848563b53
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2d4882d39dbb'
down_revision = 'dc2848563b53'
POLICY_NAME = 'wazo_default_user_policy'
ACL_TEMPLATES = ['dird.graphql.me']
policy_table = sa.sql.table(
'auth_policy', sa.Column('uuid', sa.String(38)), sa.Column('name', sa.String(80))
)
acl_template_table = sa.sql.table(
'auth_acl_template', sa.Column('id', sa.Integer), sa.Column('template', sa.Text)
)
policy_template = sa.sql.table(
'auth_policy_template',
sa.Column('policy_uuid', sa.String(38)),
sa.Column('template_id', sa.Integer),
)
def _find_acl_template(conn, acl_template):
query = (
sa.sql.select([acl_template_table.c.id])
.where(acl_template_table.c.template == acl_template)
.limit(1)
)
return conn.execute(query).scalar()
def _find_acl_templates(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if acl_template_id:
acl_template_ids.append(acl_template_id)
return acl_template_ids
def <|fim_middle|>(conn, policy_name):
policy_query = sa.sql.select([policy_table.c.uuid]).where(
policy_table.c.name == policy_name
)
for policy in conn.execute(policy_query).fetchall():
return policy[0]
def _insert_acl_template(conn, acl_templates):
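    # Find-or-create: reuse an existing template row when one is present;
    # otherwise insert it and capture the generated id via RETURNING.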
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if not acl_template_id:
query = (
acl_template_table.insert()
.returning(acl_template_table.c.id)
.values(template=acl_template)
)
acl_template_id = conn.execute(query).scalar()
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_acl_template_ids(conn, policy_uuid):
query = sa.sql.select([policy_template.c.template_id]).where(
policy_template.c.policy_uuid == policy_uuid
)
return [acl_template_id for (acl_template_id,) in conn.execute(query).fetchall()]
def upgrade():
conn = op.get_bind()
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
acl_template_ids = _insert_acl_template(conn, ACL_TEMPLATES)
acl_template_ids_already_associated = _get_acl_template_ids(conn, policy_uuid)
for template_id in set(acl_template_ids) - set(acl_template_ids_already_associated):
query = policy_template.insert().values(
policy_uuid=policy_uuid, template_id=template_id
)
conn.execute(query)
def downgrade():
conn = op.get_bind()
acl_template_ids = _find_acl_templates(conn, ACL_TEMPLATES)
if not acl_template_ids:
return
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
delete_query = policy_template.delete().where(
sa.sql.and_(
policy_template.c.policy_uuid == policy_uuid,
policy_template.c.template_id.in_(acl_template_ids),
)
)
op.execute(delete_query)
<|fim▁end|> | _get_policy_uuid |
<|file_name|>2d4882d39dbb_add_graphql_acl_to_users.py<|end_file_name|><|fim▁begin|>"""add graphql ACL to users
Revision ID: 2d4882d39dbb
Revises: dc2848563b53
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2d4882d39dbb'
down_revision = 'dc2848563b53'
POLICY_NAME = 'wazo_default_user_policy'
ACL_TEMPLATES = ['dird.graphql.me']
policy_table = sa.sql.table(
'auth_policy', sa.Column('uuid', sa.String(38)), sa.Column('name', sa.String(80))
)
acl_template_table = sa.sql.table(
'auth_acl_template', sa.Column('id', sa.Integer), sa.Column('template', sa.Text)
)
policy_template = sa.sql.table(
'auth_policy_template',
sa.Column('policy_uuid', sa.String(38)),
sa.Column('template_id', sa.Integer),
)
def _find_acl_template(conn, acl_template):
query = (
sa.sql.select([acl_template_table.c.id])
.where(acl_template_table.c.template == acl_template)
.limit(1)
)
return conn.execute(query).scalar()
def _find_acl_templates(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if acl_template_id:
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_policy_uuid(conn, policy_name):
policy_query = sa.sql.select([policy_table.c.uuid]).where(
policy_table.c.name == policy_name
)
for policy in conn.execute(policy_query).fetchall():
return policy[0]
def <|fim_middle|>(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if not acl_template_id:
query = (
acl_template_table.insert()
.returning(acl_template_table.c.id)
.values(template=acl_template)
)
acl_template_id = conn.execute(query).scalar()
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_acl_template_ids(conn, policy_uuid):
query = sa.sql.select([policy_template.c.template_id]).where(
policy_template.c.policy_uuid == policy_uuid
)
return [acl_template_id for (acl_template_id,) in conn.execute(query).fetchall()]
def upgrade():
conn = op.get_bind()
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
acl_template_ids = _insert_acl_template(conn, ACL_TEMPLATES)
acl_template_ids_already_associated = _get_acl_template_ids(conn, policy_uuid)
for template_id in set(acl_template_ids) - set(acl_template_ids_already_associated):
query = policy_template.insert().values(
policy_uuid=policy_uuid, template_id=template_id
)
conn.execute(query)
def downgrade():
conn = op.get_bind()
acl_template_ids = _find_acl_templates(conn, ACL_TEMPLATES)
if not acl_template_ids:
return
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
delete_query = policy_template.delete().where(
sa.sql.and_(
policy_template.c.policy_uuid == policy_uuid,
policy_template.c.template_id.in_(acl_template_ids),
)
)
op.execute(delete_query)
<|fim▁end|> | _insert_acl_template |
<|file_name|>2d4882d39dbb_add_graphql_acl_to_users.py<|end_file_name|><|fim▁begin|>"""add graphql ACL to users
Revision ID: 2d4882d39dbb
Revises: dc2848563b53
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2d4882d39dbb'
down_revision = 'dc2848563b53'
POLICY_NAME = 'wazo_default_user_policy'
ACL_TEMPLATES = ['dird.graphql.me']
policy_table = sa.sql.table(
'auth_policy', sa.Column('uuid', sa.String(38)), sa.Column('name', sa.String(80))
)
acl_template_table = sa.sql.table(
'auth_acl_template', sa.Column('id', sa.Integer), sa.Column('template', sa.Text)
)
policy_template = sa.sql.table(
'auth_policy_template',
sa.Column('policy_uuid', sa.String(38)),
sa.Column('template_id', sa.Integer),
)
def _find_acl_template(conn, acl_template):
query = (
sa.sql.select([acl_template_table.c.id])
.where(acl_template_table.c.template == acl_template)
.limit(1)
)
return conn.execute(query).scalar()
def _find_acl_templates(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if acl_template_id:
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_policy_uuid(conn, policy_name):
policy_query = sa.sql.select([policy_table.c.uuid]).where(
policy_table.c.name == policy_name
)
for policy in conn.execute(policy_query).fetchall():
return policy[0]
def _insert_acl_template(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if not acl_template_id:
query = (
acl_template_table.insert()
.returning(acl_template_table.c.id)
.values(template=acl_template)
)
acl_template_id = conn.execute(query).scalar()
acl_template_ids.append(acl_template_id)
return acl_template_ids
def <|fim_middle|>(conn, policy_uuid):
query = sa.sql.select([policy_template.c.template_id]).where(
policy_template.c.policy_uuid == policy_uuid
)
return [acl_template_id for (acl_template_id,) in conn.execute(query).fetchall()]
def upgrade():
conn = op.get_bind()
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
acl_template_ids = _insert_acl_template(conn, ACL_TEMPLATES)
acl_template_ids_already_associated = _get_acl_template_ids(conn, policy_uuid)
for template_id in set(acl_template_ids) - set(acl_template_ids_already_associated):
query = policy_template.insert().values(
policy_uuid=policy_uuid, template_id=template_id
)
conn.execute(query)
def downgrade():
conn = op.get_bind()
acl_template_ids = _find_acl_templates(conn, ACL_TEMPLATES)
if not acl_template_ids:
return
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
delete_query = policy_template.delete().where(
sa.sql.and_(
policy_template.c.policy_uuid == policy_uuid,
policy_template.c.template_id.in_(acl_template_ids),
)
)
op.execute(delete_query)
<|fim▁end|> | _get_acl_template_ids |
<|file_name|>2d4882d39dbb_add_graphql_acl_to_users.py<|end_file_name|><|fim▁begin|>"""add graphql ACL to users
Revision ID: 2d4882d39dbb
Revises: dc2848563b53
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2d4882d39dbb'
down_revision = 'dc2848563b53'
POLICY_NAME = 'wazo_default_user_policy'
ACL_TEMPLATES = ['dird.graphql.me']
policy_table = sa.sql.table(
'auth_policy', sa.Column('uuid', sa.String(38)), sa.Column('name', sa.String(80))
)
acl_template_table = sa.sql.table(
'auth_acl_template', sa.Column('id', sa.Integer), sa.Column('template', sa.Text)
)
policy_template = sa.sql.table(
'auth_policy_template',
sa.Column('policy_uuid', sa.String(38)),
sa.Column('template_id', sa.Integer),
)
def _find_acl_template(conn, acl_template):
query = (
sa.sql.select([acl_template_table.c.id])
.where(acl_template_table.c.template == acl_template)
.limit(1)
)
return conn.execute(query).scalar()
def _find_acl_templates(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if acl_template_id:
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_policy_uuid(conn, policy_name):
policy_query = sa.sql.select([policy_table.c.uuid]).where(
policy_table.c.name == policy_name
)
for policy in conn.execute(policy_query).fetchall():
return policy[0]
def _insert_acl_template(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if not acl_template_id:
query = (
acl_template_table.insert()
.returning(acl_template_table.c.id)
.values(template=acl_template)
)
acl_template_id = conn.execute(query).scalar()
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_acl_template_ids(conn, policy_uuid):
query = sa.sql.select([policy_template.c.template_id]).where(
policy_template.c.policy_uuid == policy_uuid
)
return [acl_template_id for (acl_template_id,) in conn.execute(query).fetchall()]
def <|fim_middle|>():
conn = op.get_bind()
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
acl_template_ids = _insert_acl_template(conn, ACL_TEMPLATES)
acl_template_ids_already_associated = _get_acl_template_ids(conn, policy_uuid)
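    # Associate only the templates not yet linked to the policy, so
    # re-running the migration cannot create duplicate association rows.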
for template_id in set(acl_template_ids) - set(acl_template_ids_already_associated):
query = policy_template.insert().values(
policy_uuid=policy_uuid, template_id=template_id
)
conn.execute(query)
def downgrade():
conn = op.get_bind()
acl_template_ids = _find_acl_templates(conn, ACL_TEMPLATES)
if not acl_template_ids:
return
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
delete_query = policy_template.delete().where(
sa.sql.and_(
policy_template.c.policy_uuid == policy_uuid,
policy_template.c.template_id.in_(acl_template_ids),
)
)
op.execute(delete_query)
<|fim▁end|> | upgrade |
<|file_name|>2d4882d39dbb_add_graphql_acl_to_users.py<|end_file_name|><|fim▁begin|>"""add graphql ACL to users
Revision ID: 2d4882d39dbb
Revises: dc2848563b53
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2d4882d39dbb'
down_revision = 'dc2848563b53'
POLICY_NAME = 'wazo_default_user_policy'
ACL_TEMPLATES = ['dird.graphql.me']
policy_table = sa.sql.table(
'auth_policy', sa.Column('uuid', sa.String(38)), sa.Column('name', sa.String(80))
)
acl_template_table = sa.sql.table(
'auth_acl_template', sa.Column('id', sa.Integer), sa.Column('template', sa.Text)
)
policy_template = sa.sql.table(
'auth_policy_template',
sa.Column('policy_uuid', sa.String(38)),
sa.Column('template_id', sa.Integer),
)
def _find_acl_template(conn, acl_template):
query = (
sa.sql.select([acl_template_table.c.id])
.where(acl_template_table.c.template == acl_template)
.limit(1)
)
return conn.execute(query).scalar()
def _find_acl_templates(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if acl_template_id:
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_policy_uuid(conn, policy_name):
policy_query = sa.sql.select([policy_table.c.uuid]).where(
policy_table.c.name == policy_name
)
for policy in conn.execute(policy_query).fetchall():
return policy[0]
def _insert_acl_template(conn, acl_templates):
acl_template_ids = []
for acl_template in acl_templates:
acl_template_id = _find_acl_template(conn, acl_template)
if not acl_template_id:
query = (
acl_template_table.insert()
.returning(acl_template_table.c.id)
.values(template=acl_template)
)
acl_template_id = conn.execute(query).scalar()
acl_template_ids.append(acl_template_id)
return acl_template_ids
def _get_acl_template_ids(conn, policy_uuid):
query = sa.sql.select([policy_template.c.template_id]).where(
policy_template.c.policy_uuid == policy_uuid
)
return [acl_template_id for (acl_template_id,) in conn.execute(query).fetchall()]
def upgrade():
conn = op.get_bind()
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
acl_template_ids = _insert_acl_template(conn, ACL_TEMPLATES)
acl_template_ids_already_associated = _get_acl_template_ids(conn, policy_uuid)
for template_id in set(acl_template_ids) - set(acl_template_ids_already_associated):
query = policy_template.insert().values(
policy_uuid=policy_uuid, template_id=template_id
)
conn.execute(query)
def <|fim_middle|>():
conn = op.get_bind()
acl_template_ids = _find_acl_templates(conn, ACL_TEMPLATES)
if not acl_template_ids:
return
policy_uuid = _get_policy_uuid(conn, POLICY_NAME)
if not policy_uuid:
return
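    # Remove only the (policy, template) associations added by upgrade();
    # the ACL template rows themselves are left in place.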
delete_query = policy_template.delete().where(
sa.sql.and_(
policy_template.c.policy_uuid == policy_uuid,
policy_template.c.template_id.in_(acl_template_ids),
)
)
op.execute(delete_query)
<|fim▁end|> | downgrade |
<|file_name|>eggie.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python<|fim▁hole|>#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
#ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#The views and conclusions contained in the software and documentation are those
#of the authors and should not be interpreted as representing official policies,
#either expressed or implied, of the FreeBSD Project.
'''
Created on Dec 16, 2014
@author: Jaakko Leppakangas
'''
import sys
from PyQt4 import QtGui
from ui.preprocessDialog import PreprocessDialog
def main():
app = QtGui.QApplication(sys.argv)
    window = PreprocessDialog()
window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()<|fim▁end|> |
#Copyright (c) <2015>, <Jaakko Leppakangas> |
<|file_name|>eggie.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#Copyright (c) <2015>, <Jaakko Leppakangas>
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
#ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#The views and conclusions contained in the software and documentation are those
#of the authors and should not be interpreted as representing official policies,
#either expressed or implied, of the FreeBSD Project.
'''
Created on Dec 16, 2014
@author: Jaakko Leppakangas
'''
import sys
from PyQt4 import QtGui
from ui.preprocessDialog import PreprocessDialog
def main():
<|fim_middle|>
if __name__ == '__main__':
main()
<|fim▁end|> | app = QtGui.QApplication(sys.argv)
window = PreprocessDialog()
window.show()
sys.exit(app.exec_()) |
<|file_name|>eggie.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#Copyright (c) <2015>, <Jaakko Leppakangas>
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
#ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#The views and conclusions contained in the software and documentation are those
#of the authors and should not be interpreted as representing official policies,
#either expressed or implied, of the FreeBSD Project.
'''
Created on Dec 16, 2014
@author: Jaakko Leppakangas
'''
import sys
from PyQt4 import QtGui
from ui.preprocessDialog import PreprocessDialog
def main():
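    # Minimal Qt bootstrap: create the application, show the preprocessing
    # dialog, and run the event loop until the window is closed.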
app = QtGui.QApplication(sys.argv)
    window = PreprocessDialog()
window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
<|fim_middle|>
<|fim▁end|> | main() |
<|file_name|>eggie.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#Copyright (c) <2015>, <Jaakko Leppakangas>
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
#ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#The views and conclusions contained in the software and documentation are those
#of the authors and should not be interpreted as representing official policies,
#either expressed or implied, of the FreeBSD Project.
'''
Created on Dec 16, 2014
@author: Jaakko Leppakangas
'''
import sys
from PyQt4 import QtGui
from ui.preprocessDialog import PreprocessDialog
def <|fim_middle|>():
app = QtGui.QApplication(sys.argv)
    window = PreprocessDialog()
window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
<|fim▁end|> | main |
<|file_name|>test_tctototal_streaming.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
from essentia.streaming import TCToTotal as sTCToTotal
class TestTCToTotal(TestCase):
def testEmpty(self):
gen = VectorInput([])
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
run(gen)
self.assertRaises(KeyError, lambda: p['lowlevel.tctototal'])
def testOneValue(self):
gen = VectorInput([1])
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
self.assertRaises(RuntimeError, lambda: run(gen))
def testRegression(self):
envelope = range(22050)
envelope.reverse()
envelope = range(22050) + envelope
gen = VectorInput(envelope)
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
run(gen)
self.assertAlmostEqual(p['lowlevel.tctototal'],
<|fim▁hole|>suite = allTests(TestTCToTotal)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)<|fim▁end|> | TCToTotal()(envelope))
|
<|file_name|>test_tctototal_streaming.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
from essentia.streaming import TCToTotal as sTCToTotal
class TestTCToTotal(TestCase):
<|fim_middle|>
suite = allTests(TestTCToTotal)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
<|fim▁end|> | def testEmpty(self):
gen = VectorInput([])
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
run(gen)
self.assertRaises(KeyError, lambda: p['lowlevel.tctototal'])
def testOneValue(self):
gen = VectorInput([1])
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
self.assertRaises(RuntimeError, lambda: run(gen))
def testRegression(self):
envelope = range(22050)
envelope.reverse()
envelope = range(22050) + envelope
gen = VectorInput(envelope)
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
run(gen)
self.assertAlmostEqual(p['lowlevel.tctototal'],
TCToTotal()(envelope)) |
<|file_name|>test_tctototal_streaming.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
from essentia.streaming import TCToTotal as sTCToTotal
class TestTCToTotal(TestCase):
def testEmpty(self):
<|fim_middle|>
def testOneValue(self):
gen = VectorInput([1])
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
self.assertRaises(RuntimeError, lambda: run(gen))
def testRegression(self):
envelope = range(22050)
envelope.reverse()
envelope = range(22050) + envelope
gen = VectorInput(envelope)
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
run(gen)
self.assertAlmostEqual(p['lowlevel.tctototal'],
TCToTotal()(envelope))
suite = allTests(TestTCToTotal)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
<|fim▁end|> | gen = VectorInput([])
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
run(gen)
self.assertRaises(KeyError, lambda: p['lowlevel.tctototal']) |
<|file_name|>test_tctototal_streaming.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
from essentia.streaming import TCToTotal as sTCToTotal
class TestTCToTotal(TestCase):
def testEmpty(self):
gen = VectorInput([])
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
run(gen)
self.assertRaises(KeyError, lambda: p['lowlevel.tctototal'])
def testOneValue(self):
<|fim_middle|>
def testRegression(self):
envelope = range(22050)
envelope.reverse()
envelope = range(22050) + envelope
gen = VectorInput(envelope)
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
run(gen)
self.assertAlmostEqual(p['lowlevel.tctototal'],
TCToTotal()(envelope))
suite = allTests(TestTCToTotal)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
<|fim▁end|> | gen = VectorInput([1])
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
self.assertRaises(RuntimeError, lambda: run(gen)) |
<|file_name|>test_tctototal_streaming.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
from essentia.streaming import TCToTotal as sTCToTotal
class TestTCToTotal(TestCase):
def testEmpty(self):
gen = VectorInput([])
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
run(gen)
self.assertRaises(KeyError, lambda: p['lowlevel.tctototal'])
def testOneValue(self):
gen = VectorInput([1])
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
self.assertRaises(RuntimeError, lambda: run(gen))
def testRegression(self):
<|fim_middle|>
suite = allTests(TestTCToTotal)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
<|fim▁end|> | envelope = range(22050)
envelope.reverse()
envelope = range(22050) + envelope
gen = VectorInput(envelope)
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
run(gen)
self.assertAlmostEqual(p['lowlevel.tctototal'],
TCToTotal()(envelope)) |
<|file_name|>test_tctototal_streaming.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
from essentia.streaming import TCToTotal as sTCToTotal
class TestTCToTotal(TestCase):
def testEmpty(self):
gen = VectorInput([])
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
run(gen)
self.assertRaises(KeyError, lambda: p['lowlevel.tctototal'])
def testOneValue(self):
gen = VectorInput([1])
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
self.assertRaises(RuntimeError, lambda: run(gen))
def testRegression(self):
envelope = range(22050)
envelope.reverse()
envelope = range(22050) + envelope
gen = VectorInput(envelope)
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
run(gen)
self.assertAlmostEqual(p['lowlevel.tctototal'],
TCToTotal()(envelope))
suite = allTests(TestTCToTotal)
if __name__ == '__main__':
<|fim_middle|>
<|fim▁end|> | TextTestRunner(verbosity=2).run(suite) |
<|file_name|>test_tctototal_streaming.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
from essentia.streaming import TCToTotal as sTCToTotal
class TestTCToTotal(TestCase):
def <|fim_middle|>(self):
gen = VectorInput([])
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
run(gen)
self.assertRaises(KeyError, lambda: p['lowlevel.tctototal'])
def testOneValue(self):
gen = VectorInput([1])
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
self.assertRaises(RuntimeError, lambda: run(gen))
def testRegression(self):
        envelope = list(range(22050))
envelope.reverse()
        envelope = list(range(22050)) + envelope
gen = VectorInput(envelope)
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
run(gen)
self.assertAlmostEqual(p['lowlevel.tctototal'],
TCToTotal()(envelope))
suite = allTests(TestTCToTotal)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
<|fim▁end|> | testEmpty |
<|file_name|>test_tctototal_streaming.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
from essentia.streaming import TCToTotal as sTCToTotal
class TestTCToTotal(TestCase):
def testEmpty(self):
gen = VectorInput([])
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
run(gen)
self.assertRaises(KeyError, lambda: p['lowlevel.tctototal'])
def <|fim_middle|>(self):
gen = VectorInput([1])
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
self.assertRaises(RuntimeError, lambda: run(gen))
def testRegression(self):
        envelope = list(range(22050))
envelope.reverse()
        envelope = list(range(22050)) + envelope
gen = VectorInput(envelope)
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
run(gen)
self.assertAlmostEqual(p['lowlevel.tctototal'],
TCToTotal()(envelope))
suite = allTests(TestTCToTotal)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
<|fim▁end|> | testOneValue |
<|file_name|>test_tctototal_streaming.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
from essentia.streaming import TCToTotal as sTCToTotal
class TestTCToTotal(TestCase):
def testEmpty(self):
gen = VectorInput([])
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
run(gen)
self.assertRaises(KeyError, lambda: p['lowlevel.tctototal'])
def testOneValue(self):
gen = VectorInput([1])
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
self.assertRaises(RuntimeError, lambda: run(gen))
def <|fim_middle|>(self):
        envelope = list(range(22050))
envelope.reverse()
        envelope = list(range(22050)) + envelope
gen = VectorInput(envelope)
tcToTotal = sTCToTotal()
p = Pool()
gen.data >> tcToTotal.envelope
tcToTotal.TCToTotal >> (p, 'lowlevel.tctototal')
run(gen)
self.assertAlmostEqual(p['lowlevel.tctototal'],
TCToTotal()(envelope))
suite = allTests(TestTCToTotal)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
<|fim▁end|> | testRegression |
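A note on the regression rows above: TCToTotal relates the temporal centroid of an envelope to its total duration, so the symmetric triangular envelope built in testRegression should score about 0.5. The sketch below computes that quantity under the assumption that TCToTotal(env) equals centroid(env) / (len(env) - 1); the formula is inferred from the algorithm's name, not taken from the Essentia sources.
import numpy as np

def tc_to_total(envelope):
    """Temporal centroid of an envelope, normalized by its total duration (assumed formula)."""
    env = np.asarray(envelope, dtype=float)
    t = np.arange(len(env))
    centroid = (t * env).sum() / env.sum()  # temporal centroid, in samples
    return centroid / (len(env) - 1)        # normalized to [0, 1] by total length

triangle = list(range(22050)) + list(reversed(range(22050)))
print(tc_to_total(triangle))  # ~0.5 for a symmetric triangular envelope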
<|file_name|>gcp.py<|end_file_name|><|fim▁begin|>""" Launcher functionality for the Google Compute Engine (GCE)
"""
import json
import logging
import os
from dcos_launch import onprem, util
from dcos_launch.platforms import gcp
from dcos_test_utils.helpers import Host
from googleapiclient.errors import HttpError
log = logging.getLogger(__name__)
def get_credentials(env=None) -> tuple:
path = None
if env is None:
env = os.environ.copy()
if 'GCE_CREDENTIALS' in env:
json_credentials = env['GCE_CREDENTIALS']
elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:
path = env['GOOGLE_APPLICATION_CREDENTIALS']
json_credentials = util.read_file(path)
else:
raise util.LauncherError(
'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')
return json_credentials, path
class OnPremLauncher(onprem.AbstractOnpremLauncher):
# Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS
def __init__(self, config: dict, env=None):
creds_string, _ = get_credentials(env)
self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))
self.config = config
@property
def deployment(self):
""" Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the
corresponding real deployment (active machines) exists and doesn't contain any errors.
"""
try:
deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],
self.config['gce_zone'])
info = deployment.get_info()
errors = info['operation'].get('error')
if errors:
raise util.LauncherError('DeploymentContainsErrors', str(errors))
return deployment
except HttpError as e:
if e.resp.status == 404:
raise util.LauncherError('DeploymentNotFound',
"The deployment you are trying to access doesn't exist") from e
raise e
def create(self) -> dict:
self.key_helper()
node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']
+ self.config['num_private_agents'])
gcp.BareClusterDeployment.create(
self.gcp_wrapper,
self.config['deployment_name'],
self.config['gce_zone'],
node_count,
self.config['disk_size'],
self.config['disk_type'],
self.config['source_image'],
self.config['machine_type'],<|fim▁hole|> self.config['ssh_user'],
self.config['ssh_public_key'],
self.config['disable_updates'],
self.config['use_preemptible_vms'],
tags=self.config.get('tags'))
return self.config
def key_helper(self):
""" Generates a public key and a private key and stores them in the config. The public key will be applied to
all the instances in the deployment later on when wait() is called.
"""
if self.config['key_helper']:
private_key, public_key = util.generate_rsa_keypair()
self.config['ssh_private_key'] = private_key.decode()
self.config['ssh_public_key'] = public_key.decode()
def get_cluster_hosts(self) -> [Host]:
return list(self.deployment.hosts)[1:]
def get_bootstrap_host(self) -> Host:
return list(self.deployment.hosts)[0]
def wait(self):
""" Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once
the network is deployed, a firewall for the network and an instance template are deployed. Finally,
once the instance template is deployed, an instance group manager and all its instances are deployed.
"""
self.deployment.wait_for_completion()
def delete(self):
""" Deletes all the resources associated with the deployment (instance template, network, firewall, instance
group manager and all its instances.
"""
self.deployment.delete()<|fim▁end|> | self.config['image_project'], |
<|file_name|>gcp.py<|end_file_name|><|fim▁begin|>""" Launcher functionality for the Google Compute Engine (GCE)
"""
import json
import logging
import os
from dcos_launch import onprem, util
from dcos_launch.platforms import gcp
from dcos_test_utils.helpers import Host
from googleapiclient.errors import HttpError
log = logging.getLogger(__name__)
def get_credentials(env=None) -> tuple:
<|fim_middle|>
class OnPremLauncher(onprem.AbstractOnpremLauncher):
# Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS
def __init__(self, config: dict, env=None):
creds_string, _ = get_credentials(env)
self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))
self.config = config
@property
def deployment(self):
""" Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the
corresponding real deployment (active machines) exists and doesn't contain any errors.
"""
try:
deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],
self.config['gce_zone'])
info = deployment.get_info()
errors = info['operation'].get('error')
if errors:
raise util.LauncherError('DeploymentContainsErrors', str(errors))
return deployment
except HttpError as e:
if e.resp.status == 404:
raise util.LauncherError('DeploymentNotFound',
"The deployment you are trying to access doesn't exist") from e
raise e
def create(self) -> dict:
self.key_helper()
node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']
+ self.config['num_private_agents'])
gcp.BareClusterDeployment.create(
self.gcp_wrapper,
self.config['deployment_name'],
self.config['gce_zone'],
node_count,
self.config['disk_size'],
self.config['disk_type'],
self.config['source_image'],
self.config['machine_type'],
self.config['image_project'],
self.config['ssh_user'],
self.config['ssh_public_key'],
self.config['disable_updates'],
self.config['use_preemptible_vms'],
tags=self.config.get('tags'))
return self.config
def key_helper(self):
""" Generates a public key and a private key and stores them in the config. The public key will be applied to
all the instances in the deployment later on when wait() is called.
"""
if self.config['key_helper']:
private_key, public_key = util.generate_rsa_keypair()
self.config['ssh_private_key'] = private_key.decode()
self.config['ssh_public_key'] = public_key.decode()
def get_cluster_hosts(self) -> [Host]:
return list(self.deployment.hosts)[1:]
def get_bootstrap_host(self) -> Host:
return list(self.deployment.hosts)[0]
def wait(self):
""" Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once
the network is deployed, a firewall for the network and an instance template are deployed. Finally,
once the instance template is deployed, an instance group manager and all its instances are deployed.
"""
self.deployment.wait_for_completion()
def delete(self):
""" Deletes all the resources associated with the deployment (instance template, network, firewall, instance
group manager and all its instances.
"""
self.deployment.delete()
<|fim▁end|> | path = None
if env is None:
env = os.environ.copy()
if 'GCE_CREDENTIALS' in env:
json_credentials = env['GCE_CREDENTIALS']
elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:
path = env['GOOGLE_APPLICATION_CREDENTIALS']
json_credentials = util.read_file(path)
else:
raise util.LauncherError(
'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')
return json_credentials, path |
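The lookup order in get_credentials is worth spelling out: inline JSON in GCE_CREDENTIALS wins, GOOGLE_APPLICATION_CREDENTIALS falls back to reading the named file, and anything else raises a LauncherError. A small usage sketch; the env dicts and the file path are made-up placeholders:
creds, path = get_credentials({'GCE_CREDENTIALS': '{"type": "service_account"}'})
# creds is the JSON string itself, path is None
creds, path = get_credentials({'GOOGLE_APPLICATION_CREDENTIALS': '/tmp/sa.json'})
# path is '/tmp/sa.json', creds is whatever util.read_file(path) returned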
<|file_name|>gcp.py<|end_file_name|><|fim▁begin|>""" Launcher functionality for the Google Compute Engine (GCE)
"""
import json
import logging
import os
from dcos_launch import onprem, util
from dcos_launch.platforms import gcp
from dcos_test_utils.helpers import Host
from googleapiclient.errors import HttpError
log = logging.getLogger(__name__)
def get_credentials(env=None) -> tuple:
path = None
if env is None:
env = os.environ.copy()
if 'GCE_CREDENTIALS' in env:
json_credentials = env['GCE_CREDENTIALS']
elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:
path = env['GOOGLE_APPLICATION_CREDENTIALS']
json_credentials = util.read_file(path)
else:
raise util.LauncherError(
'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')
return json_credentials, path
class OnPremLauncher(onprem.AbstractOnpremLauncher):
# Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS
<|fim_middle|>
<|fim▁end|> | def __init__(self, config: dict, env=None):
creds_string, _ = get_credentials(env)
self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))
self.config = config
@property
def deployment(self):
""" Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the
corresponding real deployment (active machines) exists and doesn't contain any errors.
"""
try:
deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],
self.config['gce_zone'])
info = deployment.get_info()
errors = info['operation'].get('error')
if errors:
raise util.LauncherError('DeploymentContainsErrors', str(errors))
return deployment
except HttpError as e:
if e.resp.status == 404:
raise util.LauncherError('DeploymentNotFound',
"The deployment you are trying to access doesn't exist") from e
raise e
def create(self) -> dict:
self.key_helper()
node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']
+ self.config['num_private_agents'])
gcp.BareClusterDeployment.create(
self.gcp_wrapper,
self.config['deployment_name'],
self.config['gce_zone'],
node_count,
self.config['disk_size'],
self.config['disk_type'],
self.config['source_image'],
self.config['machine_type'],
self.config['image_project'],
self.config['ssh_user'],
self.config['ssh_public_key'],
self.config['disable_updates'],
self.config['use_preemptible_vms'],
tags=self.config.get('tags'))
return self.config
def key_helper(self):
""" Generates a public key and a private key and stores them in the config. The public key will be applied to
all the instances in the deployment later on when wait() is called.
"""
if self.config['key_helper']:
private_key, public_key = util.generate_rsa_keypair()
self.config['ssh_private_key'] = private_key.decode()
self.config['ssh_public_key'] = public_key.decode()
def get_cluster_hosts(self) -> [Host]:
return list(self.deployment.hosts)[1:]
def get_bootstrap_host(self) -> Host:
return list(self.deployment.hosts)[0]
def wait(self):
""" Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once
the network is deployed, a firewall for the network and an instance template are deployed. Finally,
once the instance template is deployed, an instance group manager and all its instances are deployed.
"""
self.deployment.wait_for_completion()
def delete(self):
""" Deletes all the resources associated with the deployment (instance template, network, firewall, instance
group manager and all its instances.
"""
self.deployment.delete() |
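Taken together, the class methods imply a create, wait, inspect, delete lifecycle. A hedged end-to-end sketch follows; the config keys are the ones the methods above actually read, but every value is a placeholder:
config = {
    'deployment_name': 'dcos-demo',
    'gce_zone': 'us-central1-a',
    'num_masters': 1, 'num_public_agents': 1, 'num_private_agents': 2,
    'disk_size': 42, 'disk_type': 'pd-ssd',
    'source_image': 'global/images/my-coreos-image',  # placeholder image
    'machine_type': 'n1-standard-4', 'image_project': 'my-project',
    'ssh_user': 'core', 'disable_updates': True, 'use_preemptible_vms': False,
    'key_helper': True,  # let create() generate the SSH keypair itself
}
launcher = OnPremLauncher(config)  # expects GCE_CREDENTIALS in the environment
launcher.create()                  # network, firewall, template, instances
launcher.wait()                    # block until the deployment completes
bootstrap = launcher.get_bootstrap_host()  # first host in the deployment
agents = launcher.get_cluster_hosts()      # every host after the bootstrap
launcher.delete()                  # tear the whole deployment down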
<|file_name|>gcp.py<|end_file_name|><|fim▁begin|>""" Launcher functionality for the Google Compute Engine (GCE)
"""
import json
import logging
import os
from dcos_launch import onprem, util
from dcos_launch.platforms import gcp
from dcos_test_utils.helpers import Host
from googleapiclient.errors import HttpError
log = logging.getLogger(__name__)
def get_credentials(env=None) -> tuple:
path = None
if env is None:
env = os.environ.copy()
if 'GCE_CREDENTIALS' in env:
json_credentials = env['GCE_CREDENTIALS']
elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:
path = env['GOOGLE_APPLICATION_CREDENTIALS']
json_credentials = util.read_file(path)
else:
raise util.LauncherError(
'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')
return json_credentials, path
class OnPremLauncher(onprem.AbstractOnpremLauncher):
# Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS
def __init__(self, config: dict, env=None):
<|fim_middle|>
@property
def deployment(self):
""" Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the
corresponding real deployment (active machines) exists and doesn't contain any errors.
"""
try:
deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],
self.config['gce_zone'])
info = deployment.get_info()
errors = info['operation'].get('error')
if errors:
raise util.LauncherError('DeploymentContainsErrors', str(errors))
return deployment
except HttpError as e:
if e.resp.status == 404:
raise util.LauncherError('DeploymentNotFound',
"The deployment you are trying to access doesn't exist") from e
raise e
def create(self) -> dict:
self.key_helper()
node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']
+ self.config['num_private_agents'])
gcp.BareClusterDeployment.create(
self.gcp_wrapper,
self.config['deployment_name'],
self.config['gce_zone'],
node_count,
self.config['disk_size'],
self.config['disk_type'],
self.config['source_image'],
self.config['machine_type'],
self.config['image_project'],
self.config['ssh_user'],
self.config['ssh_public_key'],
self.config['disable_updates'],
self.config['use_preemptible_vms'],
tags=self.config.get('tags'))
return self.config
def key_helper(self):
""" Generates a public key and a private key and stores them in the config. The public key will be applied to
all the instances in the deployment later on when wait() is called.
"""
if self.config['key_helper']:
private_key, public_key = util.generate_rsa_keypair()
self.config['ssh_private_key'] = private_key.decode()
self.config['ssh_public_key'] = public_key.decode()
def get_cluster_hosts(self) -> [Host]:
return list(self.deployment.hosts)[1:]
def get_bootstrap_host(self) -> Host:
return list(self.deployment.hosts)[0]
def wait(self):
""" Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once
the network is deployed, a firewall for the network and an instance template are deployed. Finally,
once the instance template is deployed, an instance group manager and all its instances are deployed.
"""
self.deployment.wait_for_completion()
def delete(self):
""" Deletes all the resources associated with the deployment (instance template, network, firewall, instance
group manager and all its instances.
"""
self.deployment.delete()
<|fim▁end|> | creds_string, _ = get_credentials(env)
self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))
self.config = config |
<|file_name|>gcp.py<|end_file_name|><|fim▁begin|>""" Launcher functionality for the Google Compute Engine (GCE)
"""
import json
import logging
import os
from dcos_launch import onprem, util
from dcos_launch.platforms import gcp
from dcos_test_utils.helpers import Host
from googleapiclient.errors import HttpError
log = logging.getLogger(__name__)
def get_credentials(env=None) -> tuple:
path = None
if env is None:
env = os.environ.copy()
if 'GCE_CREDENTIALS' in env:
json_credentials = env['GCE_CREDENTIALS']
elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:
path = env['GOOGLE_APPLICATION_CREDENTIALS']
json_credentials = util.read_file(path)
else:
raise util.LauncherError(
'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')
return json_credentials, path
class OnPremLauncher(onprem.AbstractOnpremLauncher):
# Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS
def __init__(self, config: dict, env=None):
creds_string, _ = get_credentials(env)
self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))
self.config = config
@property
def deployment(self):
<|fim_middle|>
def create(self) -> dict:
self.key_helper()
node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']
+ self.config['num_private_agents'])
gcp.BareClusterDeployment.create(
self.gcp_wrapper,
self.config['deployment_name'],
self.config['gce_zone'],
node_count,
self.config['disk_size'],
self.config['disk_type'],
self.config['source_image'],
self.config['machine_type'],
self.config['image_project'],
self.config['ssh_user'],
self.config['ssh_public_key'],
self.config['disable_updates'],
self.config['use_preemptible_vms'],
tags=self.config.get('tags'))
return self.config
def key_helper(self):
""" Generates a public key and a private key and stores them in the config. The public key will be applied to
all the instances in the deployment later on when wait() is called.
"""
if self.config['key_helper']:
private_key, public_key = util.generate_rsa_keypair()
self.config['ssh_private_key'] = private_key.decode()
self.config['ssh_public_key'] = public_key.decode()
def get_cluster_hosts(self) -> [Host]:
return list(self.deployment.hosts)[1:]
def get_bootstrap_host(self) -> Host:
return list(self.deployment.hosts)[0]
def wait(self):
""" Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once
the network is deployed, a firewall for the network and an instance template are deployed. Finally,
once the instance template is deployed, an instance group manager and all its instances are deployed.
"""
self.deployment.wait_for_completion()
def delete(self):
""" Deletes all the resources associated with the deployment (instance template, network, firewall, instance
group manager and all its instances.
"""
self.deployment.delete()
<|fim▁end|> | """ Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the
corresponding real deployment (active machines) exists and doesn't contain any errors.
"""
try:
deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],
self.config['gce_zone'])
info = deployment.get_info()
errors = info['operation'].get('error')
if errors:
raise util.LauncherError('DeploymentContainsErrors', str(errors))
return deployment
except HttpError as e:
if e.resp.status == 404:
raise util.LauncherError('DeploymentNotFound',
"The deployment you are trying to access doesn't exist") from e
raise e |
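One design point encoded in the deployment property: a GCP-side HTTP 404 becomes LauncherError('DeploymentNotFound', ...) and a deployment whose operation reports errors becomes LauncherError('DeploymentContainsErrors', ...), while any other HttpError is re-raised untouched. A minimal sketch of the resulting calling pattern:
try:
    deployment = launcher.deployment
except util.LauncherError as e:
    # covers 'DeploymentNotFound' (HTTP 404) and 'DeploymentContainsErrors'
    log.error('deployment unavailable: %s', e)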
<|file_name|>gcp.py<|end_file_name|><|fim▁begin|>""" Launcher functionality for the Google Compute Engine (GCE)
"""
import json
import logging
import os
from dcos_launch import onprem, util
from dcos_launch.platforms import gcp
from dcos_test_utils.helpers import Host
from googleapiclient.errors import HttpError
log = logging.getLogger(__name__)
def get_credentials(env=None) -> tuple:
path = None
if env is None:
env = os.environ.copy()
if 'GCE_CREDENTIALS' in env:
json_credentials = env['GCE_CREDENTIALS']
elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:
path = env['GOOGLE_APPLICATION_CREDENTIALS']
json_credentials = util.read_file(path)
else:
raise util.LauncherError(
'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')
return json_credentials, path
class OnPremLauncher(onprem.AbstractOnpremLauncher):
# Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS
def __init__(self, config: dict, env=None):
creds_string, _ = get_credentials(env)
self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))
self.config = config
@property
def deployment(self):
""" Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the
corresponding real deployment (active machines) exists and doesn't contain any errors.
"""
try:
deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],
self.config['gce_zone'])
info = deployment.get_info()
errors = info['operation'].get('error')
if errors:
raise util.LauncherError('DeploymentContainsErrors', str(errors))
return deployment
except HttpError as e:
if e.resp.status == 404:
raise util.LauncherError('DeploymentNotFound',
"The deployment you are trying to access doesn't exist") from e
raise e
def create(self) -> dict:
<|fim_middle|>
def key_helper(self):
""" Generates a public key and a private key and stores them in the config. The public key will be applied to
all the instances in the deployment later on when wait() is called.
"""
if self.config['key_helper']:
private_key, public_key = util.generate_rsa_keypair()
self.config['ssh_private_key'] = private_key.decode()
self.config['ssh_public_key'] = public_key.decode()
def get_cluster_hosts(self) -> [Host]:
return list(self.deployment.hosts)[1:]
def get_bootstrap_host(self) -> Host:
return list(self.deployment.hosts)[0]
def wait(self):
""" Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once
the network is deployed, a firewall for the network and an instance template are deployed. Finally,
once the instance template is deployed, an instance group manager and all its instances are deployed.
"""
self.deployment.wait_for_completion()
def delete(self):
""" Deletes all the resources associated with the deployment (instance template, network, firewall, instance
group manager and all its instances.
"""
self.deployment.delete()
<|fim▁end|> | self.key_helper()
node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']
+ self.config['num_private_agents'])
gcp.BareClusterDeployment.create(
self.gcp_wrapper,
self.config['deployment_name'],
self.config['gce_zone'],
node_count,
self.config['disk_size'],
self.config['disk_type'],
self.config['source_image'],
self.config['machine_type'],
self.config['image_project'],
self.config['ssh_user'],
self.config['ssh_public_key'],
self.config['disable_updates'],
self.config['use_preemptible_vms'],
tags=self.config.get('tags'))
return self.config |
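Worth noting how create() sizes the deployment: node_count = 1 + (num_masters + num_public_agents + num_private_agents), where the extra machine is the bootstrap node. With 1 master, 1 public agent and 2 private agents, node_count = 1 + (1 + 1 + 2) = 5; get_bootstrap_host() then returns hosts[0] and get_cluster_hosts() returns the remaining four, hosts[1:].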
<|file_name|>gcp.py<|end_file_name|><|fim▁begin|>""" Launcher functionality for the Google Compute Engine (GCE)
"""
import json
import logging
import os
from dcos_launch import onprem, util
from dcos_launch.platforms import gcp
from dcos_test_utils.helpers import Host
from googleapiclient.errors import HttpError
log = logging.getLogger(__name__)
def get_credentials(env=None) -> tuple:
path = None
if env is None:
env = os.environ.copy()
if 'GCE_CREDENTIALS' in env:
json_credentials = env['GCE_CREDENTIALS']
elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:
path = env['GOOGLE_APPLICATION_CREDENTIALS']
json_credentials = util.read_file(path)
else:
raise util.LauncherError(
'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')
return json_credentials, path
class OnPremLauncher(onprem.AbstractOnpremLauncher):
# Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS
def __init__(self, config: dict, env=None):
creds_string, _ = get_credentials(env)
self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))
self.config = config
@property
def deployment(self):
""" Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the
corresponding real deployment (active machines) exists and doesn't contain any errors.
"""
try:
deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],
self.config['gce_zone'])
info = deployment.get_info()
errors = info['operation'].get('error')
if errors:
raise util.LauncherError('DeploymentContainsErrors', str(errors))
return deployment
except HttpError as e:
if e.resp.status == 404:
raise util.LauncherError('DeploymentNotFound',
"The deployment you are trying to access doesn't exist") from e
raise e
def create(self) -> dict:
self.key_helper()
node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']
+ self.config['num_private_agents'])
gcp.BareClusterDeployment.create(
self.gcp_wrapper,
self.config['deployment_name'],
self.config['gce_zone'],
node_count,
self.config['disk_size'],
self.config['disk_type'],
self.config['source_image'],
self.config['machine_type'],
self.config['image_project'],
self.config['ssh_user'],
self.config['ssh_public_key'],
self.config['disable_updates'],
self.config['use_preemptible_vms'],
tags=self.config.get('tags'))
return self.config
def key_helper(self):
<|fim_middle|>
def get_cluster_hosts(self) -> [Host]:
return list(self.deployment.hosts)[1:]
def get_bootstrap_host(self) -> Host:
return list(self.deployment.hosts)[0]
def wait(self):
""" Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once
the network is deployed, a firewall for the network and an instance template are deployed. Finally,
once the instance template is deployed, an instance group manager and all its instances are deployed.
"""
self.deployment.wait_for_completion()
def delete(self):
""" Deletes all the resources associated with the deployment (instance template, network, firewall, instance
group manager and all its instances.
"""
self.deployment.delete()
<|fim▁end|> | """ Generates a public key and a private key and stores them in the config. The public key will be applied to
all the instances in the deployment later on when wait() is called.
"""
if self.config['key_helper']:
private_key, public_key = util.generate_rsa_keypair()
self.config['ssh_private_key'] = private_key.decode()
self.config['ssh_public_key'] = public_key.decode() |
<|file_name|>gcp.py<|end_file_name|><|fim▁begin|>""" Launcher functionality for the Google Compute Engine (GCE)
"""
import json
import logging
import os
from dcos_launch import onprem, util
from dcos_launch.platforms import gcp
from dcos_test_utils.helpers import Host
from googleapiclient.errors import HttpError
log = logging.getLogger(__name__)
def get_credentials(env=None) -> tuple:
path = None
if env is None:
env = os.environ.copy()
if 'GCE_CREDENTIALS' in env:
json_credentials = env['GCE_CREDENTIALS']
elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:
path = env['GOOGLE_APPLICATION_CREDENTIALS']
json_credentials = util.read_file(path)
else:
raise util.LauncherError(
'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')
return json_credentials, path
class OnPremLauncher(onprem.AbstractOnpremLauncher):
# Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS
def __init__(self, config: dict, env=None):
creds_string, _ = get_credentials(env)
self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))
self.config = config
@property
def deployment(self):
""" Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the
corresponding real deployment (active machines) exists and doesn't contain any errors.
"""
try:
deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],
self.config['gce_zone'])
info = deployment.get_info()
errors = info['operation'].get('error')
if errors:
raise util.LauncherError('DeploymentContainsErrors', str(errors))
return deployment
except HttpError as e:
if e.resp.status == 404:
raise util.LauncherError('DeploymentNotFound',
"The deployment you are trying to access doesn't exist") from e
raise e
def create(self) -> dict:
self.key_helper()
node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']
+ self.config['num_private_agents'])
gcp.BareClusterDeployment.create(
self.gcp_wrapper,
self.config['deployment_name'],
self.config['gce_zone'],
node_count,
self.config['disk_size'],
self.config['disk_type'],
self.config['source_image'],
self.config['machine_type'],
self.config['image_project'],
self.config['ssh_user'],
self.config['ssh_public_key'],
self.config['disable_updates'],
self.config['use_preemptible_vms'],
tags=self.config.get('tags'))
return self.config
def key_helper(self):
""" Generates a public key and a private key and stores them in the config. The public key will be applied to
all the instances in the deployment later on when wait() is called.
"""
if self.config['key_helper']:
private_key, public_key = util.generate_rsa_keypair()
self.config['ssh_private_key'] = private_key.decode()
self.config['ssh_public_key'] = public_key.decode()
def get_cluster_hosts(self) -> [Host]:
<|fim_middle|>
def get_bootstrap_host(self) -> Host:
return list(self.deployment.hosts)[0]
def wait(self):
""" Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once
the network is deployed, a firewall for the network and an instance template are deployed. Finally,
once the instance template is deployed, an instance group manager and all its instances are deployed.
"""
self.deployment.wait_for_completion()
def delete(self):
""" Deletes all the resources associated with the deployment (instance template, network, firewall, instance
group manager and all its instances.
"""
self.deployment.delete()
<|fim▁end|> | return list(self.deployment.hosts)[1:] |
<|file_name|>gcp.py<|end_file_name|><|fim▁begin|>""" Launcher functionality for the Google Compute Engine (GCE)
"""
import json
import logging
import os
from dcos_launch import onprem, util
from dcos_launch.platforms import gcp
from dcos_test_utils.helpers import Host
from googleapiclient.errors import HttpError
log = logging.getLogger(__name__)
def get_credentials(env=None) -> tuple:
path = None
if env is None:
env = os.environ.copy()
if 'GCE_CREDENTIALS' in env:
json_credentials = env['GCE_CREDENTIALS']
elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:
path = env['GOOGLE_APPLICATION_CREDENTIALS']
json_credentials = util.read_file(path)
else:
raise util.LauncherError(
'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')
return json_credentials, path
class OnPremLauncher(onprem.AbstractOnpremLauncher):
# Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS
def __init__(self, config: dict, env=None):
creds_string, _ = get_credentials(env)
self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))
self.config = config
@property
def deployment(self):
""" Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the
corresponding real deployment (active machines) exists and doesn't contain any errors.
"""
try:
deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],
self.config['gce_zone'])
info = deployment.get_info()
errors = info['operation'].get('error')
if errors:
raise util.LauncherError('DeploymentContainsErrors', str(errors))
return deployment
except HttpError as e:
if e.resp.status == 404:
raise util.LauncherError('DeploymentNotFound',
"The deployment you are trying to access doesn't exist") from e
raise e
def create(self) -> dict:
self.key_helper()
node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']
+ self.config['num_private_agents'])
gcp.BareClusterDeployment.create(
self.gcp_wrapper,
self.config['deployment_name'],
self.config['gce_zone'],
node_count,
self.config['disk_size'],
self.config['disk_type'],
self.config['source_image'],
self.config['machine_type'],
self.config['image_project'],
self.config['ssh_user'],
self.config['ssh_public_key'],
self.config['disable_updates'],
self.config['use_preemptible_vms'],
tags=self.config.get('tags'))
return self.config
def key_helper(self):
""" Generates a public key and a private key and stores them in the config. The public key will be applied to
all the instances in the deployment later on when wait() is called.
"""
if self.config['key_helper']:
private_key, public_key = util.generate_rsa_keypair()
self.config['ssh_private_key'] = private_key.decode()
self.config['ssh_public_key'] = public_key.decode()
def get_cluster_hosts(self) -> [Host]:
return list(self.deployment.hosts)[1:]
def get_bootstrap_host(self) -> Host:
<|fim_middle|>
def wait(self):
""" Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once
the network is deployed, a firewall for the network and an instance template are deployed. Finally,
once the instance template is deployed, an instance group manager and all its instances are deployed.
"""
self.deployment.wait_for_completion()
def delete(self):
""" Deletes all the resources associated with the deployment (instance template, network, firewall, instance
group manager and all its instances.
"""
self.deployment.delete()
<|fim▁end|> | return list(self.deployment.hosts)[0] |
<|file_name|>gcp.py<|end_file_name|><|fim▁begin|>""" Launcher functionality for the Google Compute Engine (GCE)
"""
import json
import logging
import os
from dcos_launch import onprem, util
from dcos_launch.platforms import gcp
from dcos_test_utils.helpers import Host
from googleapiclient.errors import HttpError
log = logging.getLogger(__name__)
def get_credentials(env=None) -> tuple:
path = None
if env is None:
env = os.environ.copy()
if 'GCE_CREDENTIALS' in env:
json_credentials = env['GCE_CREDENTIALS']
elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:
path = env['GOOGLE_APPLICATION_CREDENTIALS']
json_credentials = util.read_file(path)
else:
raise util.LauncherError(
'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')
return json_credentials, path
class OnPremLauncher(onprem.AbstractOnpremLauncher):
# Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS
def __init__(self, config: dict, env=None):
creds_string, _ = get_credentials(env)
self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))
self.config = config
@property
def deployment(self):
""" Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the
corresponding real deployment (active machines) exists and doesn't contain any errors.
"""
try:
deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],
self.config['gce_zone'])
info = deployment.get_info()
errors = info['operation'].get('error')
if errors:
raise util.LauncherError('DeploymentContainsErrors', str(errors))
return deployment
except HttpError as e:
if e.resp.status == 404:
raise util.LauncherError('DeploymentNotFound',
"The deployment you are trying to access doesn't exist") from e
raise e
def create(self) -> dict:
self.key_helper()
node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']
+ self.config['num_private_agents'])
gcp.BareClusterDeployment.create(
self.gcp_wrapper,
self.config['deployment_name'],
self.config['gce_zone'],
node_count,
self.config['disk_size'],
self.config['disk_type'],
self.config['source_image'],
self.config['machine_type'],
self.config['image_project'],
self.config['ssh_user'],
self.config['ssh_public_key'],
self.config['disable_updates'],
self.config['use_preemptible_vms'],
tags=self.config.get('tags'))
return self.config
def key_helper(self):
""" Generates a public key and a private key and stores them in the config. The public key will be applied to
all the instances in the deployment later on when wait() is called.
"""
if self.config['key_helper']:
private_key, public_key = util.generate_rsa_keypair()
self.config['ssh_private_key'] = private_key.decode()
self.config['ssh_public_key'] = public_key.decode()
def get_cluster_hosts(self) -> [Host]:
return list(self.deployment.hosts)[1:]
def get_bootstrap_host(self) -> Host:
return list(self.deployment.hosts)[0]
def wait(self):
<|fim_middle|>
def delete(self):
""" Deletes all the resources associated with the deployment (instance template, network, firewall, instance
group manager and all its instances.
"""
self.deployment.delete()
<|fim▁end|> | """ Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once
the network is deployed, a firewall for the network and an instance template are deployed. Finally,
once the instance template is deployed, an instance group manager and all its instances are deployed.
"""
self.deployment.wait_for_completion() |
<|file_name|>gcp.py<|end_file_name|><|fim▁begin|>""" Launcher functionality for the Google Compute Engine (GCE)
"""
import json
import logging
import os
from dcos_launch import onprem, util
from dcos_launch.platforms import gcp
from dcos_test_utils.helpers import Host
from googleapiclient.errors import HttpError
log = logging.getLogger(__name__)
def get_credentials(env=None) -> tuple:
path = None
if env is None:
env = os.environ.copy()
if 'GCE_CREDENTIALS' in env:
json_credentials = env['GCE_CREDENTIALS']
elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:
path = env['GOOGLE_APPLICATION_CREDENTIALS']
json_credentials = util.read_file(path)
else:
raise util.LauncherError(
'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')
return json_credentials, path
class OnPremLauncher(onprem.AbstractOnpremLauncher):
# Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS
def __init__(self, config: dict, env=None):
creds_string, _ = get_credentials(env)
self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))
self.config = config
@property
def deployment(self):
""" Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the
corresponding real deployment (active machines) exists and doesn't contain any errors.
"""
try:
deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],
self.config['gce_zone'])
info = deployment.get_info()
errors = info['operation'].get('error')
if errors:
raise util.LauncherError('DeploymentContainsErrors', str(errors))
return deployment
except HttpError as e:
if e.resp.status == 404:
raise util.LauncherError('DeploymentNotFound',
"The deployment you are trying to access doesn't exist") from e
raise e
def create(self) -> dict:
self.key_helper()
node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']
+ self.config['num_private_agents'])
gcp.BareClusterDeployment.create(
self.gcp_wrapper,
self.config['deployment_name'],
self.config['gce_zone'],
node_count,
self.config['disk_size'],
self.config['disk_type'],
self.config['source_image'],
self.config['machine_type'],
self.config['image_project'],
self.config['ssh_user'],
self.config['ssh_public_key'],
self.config['disable_updates'],
self.config['use_preemptible_vms'],
tags=self.config.get('tags'))
return self.config
def key_helper(self):
""" Generates a public key and a private key and stores them in the config. The public key will be applied to
all the instances in the deployment later on when wait() is called.
"""
if self.config['key_helper']:
private_key, public_key = util.generate_rsa_keypair()
self.config['ssh_private_key'] = private_key.decode()
self.config['ssh_public_key'] = public_key.decode()
def get_cluster_hosts(self) -> [Host]:
return list(self.deployment.hosts)[1:]
def get_bootstrap_host(self) -> Host:
return list(self.deployment.hosts)[0]
def wait(self):
""" Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once
the network is deployed, a firewall for the network and an instance template are deployed. Finally,
once the instance template is deployed, an instance group manager and all its instances are deployed.
"""
self.deployment.wait_for_completion()
def delete(self):
<|fim_middle|>
<|fim▁end|> | """ Deletes all the resources associated with the deployment (instance template, network, firewall, instance
group manager and all its instances.
"""
self.deployment.delete() |
<|file_name|>gcp.py<|end_file_name|><|fim▁begin|>""" Launcher functionality for the Google Compute Engine (GCE)
"""
import json
import logging
import os
from dcos_launch import onprem, util
from dcos_launch.platforms import gcp
from dcos_test_utils.helpers import Host
from googleapiclient.errors import HttpError
log = logging.getLogger(__name__)
def get_credentials(env=None) -> tuple:
path = None
if env is None:
<|fim_middle|>
if 'GCE_CREDENTIALS' in env:
json_credentials = env['GCE_CREDENTIALS']
elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:
path = env['GOOGLE_APPLICATION_CREDENTIALS']
json_credentials = util.read_file(path)
else:
raise util.LauncherError(
'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')
return json_credentials, path
class OnPremLauncher(onprem.AbstractOnpremLauncher):
# Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS
def __init__(self, config: dict, env=None):
creds_string, _ = get_credentials(env)
self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))
self.config = config
@property
def deployment(self):
""" Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the
corresponding real deployment (active machines) exists and doesn't contain any errors.
"""
try:
deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],
self.config['gce_zone'])
info = deployment.get_info()
errors = info['operation'].get('error')
if errors:
raise util.LauncherError('DeploymentContainsErrors', str(errors))
return deployment
except HttpError as e:
if e.resp.status == 404:
raise util.LauncherError('DeploymentNotFound',
"The deployment you are trying to access doesn't exist") from e
raise e
def create(self) -> dict:
self.key_helper()
node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']
+ self.config['num_private_agents'])
gcp.BareClusterDeployment.create(
self.gcp_wrapper,
self.config['deployment_name'],
self.config['gce_zone'],
node_count,
self.config['disk_size'],
self.config['disk_type'],
self.config['source_image'],
self.config['machine_type'],
self.config['image_project'],
self.config['ssh_user'],
self.config['ssh_public_key'],
self.config['disable_updates'],
self.config['use_preemptible_vms'],
tags=self.config.get('tags'))
return self.config
def key_helper(self):
""" Generates a public key and a private key and stores them in the config. The public key will be applied to
all the instances in the deployment later on when wait() is called.
"""
if self.config['key_helper']:
private_key, public_key = util.generate_rsa_keypair()
self.config['ssh_private_key'] = private_key.decode()
self.config['ssh_public_key'] = public_key.decode()
def get_cluster_hosts(self) -> [Host]:
return list(self.deployment.hosts)[1:]
def get_bootstrap_host(self) -> Host:
return list(self.deployment.hosts)[0]
def wait(self):
""" Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once
the network is deployed, a firewall for the network and an instance template are deployed. Finally,
once the instance template is deployed, an instance group manager and all its instances are deployed.
"""
self.deployment.wait_for_completion()
def delete(self):
""" Deletes all the resources associated with the deployment (instance template, network, firewall, instance
group manager and all its instances.
"""
self.deployment.delete()
<|fim▁end|> | env = os.environ.copy() |
<|file_name|>gcp.py<|end_file_name|><|fim▁begin|>""" Launcher functionality for the Google Compute Engine (GCE)
"""
import json
import logging
import os
from dcos_launch import onprem, util
from dcos_launch.platforms import gcp
from dcos_test_utils.helpers import Host
from googleapiclient.errors import HttpError
log = logging.getLogger(__name__)
def get_credentials(env=None) -> tuple:
path = None
if env is None:
env = os.environ.copy()
if 'GCE_CREDENTIALS' in env:
<|fim_middle|>
elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:
path = env['GOOGLE_APPLICATION_CREDENTIALS']
json_credentials = util.read_file(path)
else:
raise util.LauncherError(
'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')
return json_credentials, path
class OnPremLauncher(onprem.AbstractOnpremLauncher):
# Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS
def __init__(self, config: dict, env=None):
creds_string, _ = get_credentials(env)
self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))
self.config = config
@property
def deployment(self):
""" Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the
corresponding real deployment (active machines) exists and doesn't contain any errors.
"""
try:
deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],
self.config['gce_zone'])
info = deployment.get_info()
errors = info['operation'].get('error')
if errors:
raise util.LauncherError('DeploymentContainsErrors', str(errors))
return deployment
except HttpError as e:
if e.resp.status == 404:
raise util.LauncherError('DeploymentNotFound',
"The deployment you are trying to access doesn't exist") from e
raise e
def create(self) -> dict:
self.key_helper()
node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']
+ self.config['num_private_agents'])
gcp.BareClusterDeployment.create(
self.gcp_wrapper,
self.config['deployment_name'],
self.config['gce_zone'],
node_count,
self.config['disk_size'],
self.config['disk_type'],
self.config['source_image'],
self.config['machine_type'],
self.config['image_project'],
self.config['ssh_user'],
self.config['ssh_public_key'],
self.config['disable_updates'],
self.config['use_preemptible_vms'],
tags=self.config.get('tags'))
return self.config
def key_helper(self):
""" Generates a public key and a private key and stores them in the config. The public key will be applied to
all the instances in the deployment later on when wait() is called.
"""
if self.config['key_helper']:
private_key, public_key = util.generate_rsa_keypair()
self.config['ssh_private_key'] = private_key.decode()
self.config['ssh_public_key'] = public_key.decode()
def get_cluster_hosts(self) -> [Host]:
return list(self.deployment.hosts)[1:]
def get_bootstrap_host(self) -> Host:
return list(self.deployment.hosts)[0]
def wait(self):
""" Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once
the network is deployed, a firewall for the network and an instance template are deployed. Finally,
once the instance template is deployed, an instance group manager and all its instances are deployed.
"""
self.deployment.wait_for_completion()
def delete(self):
""" Deletes all the resources associated with the deployment (instance template, network, firewall, instance
group manager and all its instances.
"""
self.deployment.delete()
<|fim▁end|> | json_credentials = env['GCE_CREDENTIALS'] |
<|file_name|>gcp.py<|end_file_name|><|fim▁begin|>""" Launcher functionality for the Google Compute Engine (GCE)
"""
import json
import logging
import os
from dcos_launch import onprem, util
from dcos_launch.platforms import gcp
from dcos_test_utils.helpers import Host
from googleapiclient.errors import HttpError
log = logging.getLogger(__name__)
def get_credentials(env=None) -> tuple:
path = None
if env is None:
env = os.environ.copy()
if 'GCE_CREDENTIALS' in env:
json_credentials = env['GCE_CREDENTIALS']
elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:
<|fim_middle|>
else:
raise util.LauncherError(
'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')
return json_credentials, path
class OnPremLauncher(onprem.AbstractOnpremLauncher):
# Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS
def __init__(self, config: dict, env=None):
creds_string, _ = get_credentials(env)
self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))
self.config = config
@property
def deployment(self):
""" Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the
corresponding real deployment (active machines) exists and doesn't contain any errors.
"""
try:
deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],
self.config['gce_zone'])
info = deployment.get_info()
errors = info['operation'].get('error')
if errors:
raise util.LauncherError('DeploymentContainsErrors', str(errors))
return deployment
except HttpError as e:
if e.resp.status == 404:
raise util.LauncherError('DeploymentNotFound',
"The deployment you are trying to access doesn't exist") from e
raise e
def create(self) -> dict:
self.key_helper()
node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']
+ self.config['num_private_agents'])
gcp.BareClusterDeployment.create(
self.gcp_wrapper,
self.config['deployment_name'],
self.config['gce_zone'],
node_count,
self.config['disk_size'],
self.config['disk_type'],
self.config['source_image'],
self.config['machine_type'],
self.config['image_project'],
self.config['ssh_user'],
self.config['ssh_public_key'],
self.config['disable_updates'],
self.config['use_preemptible_vms'],
tags=self.config.get('tags'))
return self.config
def key_helper(self):
""" Generates a public key and a private key and stores them in the config. The public key will be applied to
all the instances in the deployment later on when wait() is called.
"""
if self.config['key_helper']:
private_key, public_key = util.generate_rsa_keypair()
self.config['ssh_private_key'] = private_key.decode()
self.config['ssh_public_key'] = public_key.decode()
def get_cluster_hosts(self) -> [Host]:
return list(self.deployment.hosts)[1:]
def get_bootstrap_host(self) -> Host:
return list(self.deployment.hosts)[0]
def wait(self):
""" Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once
the network is deployed, a firewall for the network and an instance template are deployed. Finally,
once the instance template is deployed, an instance group manager and all its instances are deployed.
"""
self.deployment.wait_for_completion()
def delete(self):
""" Deletes all the resources associated with the deployment (instance template, network, firewall, instance
group manager and all its instances.
"""
self.deployment.delete()
<|fim▁end|> | path = env['GOOGLE_APPLICATION_CREDENTIALS']
json_credentials = util.read_file(path) |
<|file_name|>gcp.py<|end_file_name|><|fim▁begin|>""" Launcher functionality for the Google Compute Engine (GCE)
"""
import json
import logging
import os
from dcos_launch import onprem, util
from dcos_launch.platforms import gcp
from dcos_test_utils.helpers import Host
from googleapiclient.errors import HttpError
log = logging.getLogger(__name__)
def get_credentials(env=None) -> tuple:
path = None
if env is None:
env = os.environ.copy()
if 'GCE_CREDENTIALS' in env:
json_credentials = env['GCE_CREDENTIALS']
elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:
path = env['GOOGLE_APPLICATION_CREDENTIALS']
json_credentials = util.read_file(path)
else:
<|fim_middle|>
return json_credentials, path
class OnPremLauncher(onprem.AbstractOnpremLauncher):
# Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS
def __init__(self, config: dict, env=None):
creds_string, _ = get_credentials(env)
self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))
self.config = config
@property
def deployment(self):
""" Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the
corresponding real deployment (active machines) exists and doesn't contain any errors.
"""
try:
deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],
self.config['gce_zone'])
info = deployment.get_info()
errors = info['operation'].get('error')
if errors:
raise util.LauncherError('DeploymentContainsErrors', str(errors))
return deployment
except HttpError as e:
if e.resp.status == 404:
raise util.LauncherError('DeploymentNotFound',
"The deployment you are trying to access doesn't exist") from e
raise e
def create(self) -> dict:
self.key_helper()
node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']
+ self.config['num_private_agents'])
gcp.BareClusterDeployment.create(
self.gcp_wrapper,
self.config['deployment_name'],
self.config['gce_zone'],
node_count,
self.config['disk_size'],
self.config['disk_type'],
self.config['source_image'],
self.config['machine_type'],
self.config['image_project'],
self.config['ssh_user'],
self.config['ssh_public_key'],
self.config['disable_updates'],
self.config['use_preemptible_vms'],
tags=self.config.get('tags'))
return self.config
def key_helper(self):
""" Generates a public key and a private key and stores them in the config. The public key will be applied to
all the instances in the deployment later on when wait() is called.
"""
if self.config['key_helper']:
private_key, public_key = util.generate_rsa_keypair()
self.config['ssh_private_key'] = private_key.decode()
self.config['ssh_public_key'] = public_key.decode()
def get_cluster_hosts(self) -> [Host]:
return list(self.deployment.hosts)[1:]
def get_bootstrap_host(self) -> Host:
return list(self.deployment.hosts)[0]
def wait(self):
""" Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once
the network is deployed, a firewall for the network and an instance template are deployed. Finally,
once the instance template is deployed, an instance group manager and all its instances are deployed.
"""
self.deployment.wait_for_completion()
def delete(self):
""" Deletes all the resources associated with the deployment (instance template, network, firewall, instance
group manager and all its instances).
"""
self.deployment.delete()
<|fim▁end|> | raise util.LauncherError(
'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env') |
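A minimal usage sketch of the lookup order in get_credentials above; the env dict and the JSON string are illustrative stand-ins, and passing an explicit dict keeps os.environ untouched:

# Assumes get_credentials from the row above is in scope.
env = {'GCE_CREDENTIALS': '{"type": "service_account"}'}  # stand-in JSON
creds, path = get_credentials(env)
assert creds == '{"type": "service_account"}'
assert path is None  # path is only set on the GOOGLE_APPLICATION_CREDENTIALS branch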
<|file_name|>gcp.py<|end_file_name|><|fim▁begin|>""" Launcher functionality for the Google Compute Engine (GCE)
"""
import json
import logging
import os
from dcos_launch import onprem, util
from dcos_launch.platforms import gcp
from dcos_test_utils.helpers import Host
from googleapiclient.errors import HttpError
log = logging.getLogger(__name__)
def get_credentials(env=None) -> tuple:
path = None
if env is None:
env = os.environ.copy()
if 'GCE_CREDENTIALS' in env:
json_credentials = env['GCE_CREDENTIALS']
elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:
path = env['GOOGLE_APPLICATION_CREDENTIALS']
json_credentials = util.read_file(path)
else:
raise util.LauncherError(
'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')
return json_credentials, path
class OnPremLauncher(onprem.AbstractOnpremLauncher):
# Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS
def __init__(self, config: dict, env=None):
creds_string, _ = get_credentials(env)
self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))
self.config = config
@property
def deployment(self):
""" Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the
corresponding real deployment (active machines) exists and doesn't contain any errors.
"""
try:
deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],
self.config['gce_zone'])
info = deployment.get_info()
errors = info['operation'].get('error')
if errors:
<|fim_middle|>
return deployment
except HttpError as e:
if e.resp.status == 404:
raise util.LauncherError('DeploymentNotFound',
"The deployment you are trying to access doesn't exist") from e
raise e
def create(self) -> dict:
self.key_helper()
node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']
+ self.config['num_private_agents'])
gcp.BareClusterDeployment.create(
self.gcp_wrapper,
self.config['deployment_name'],
self.config['gce_zone'],
node_count,
self.config['disk_size'],
self.config['disk_type'],
self.config['source_image'],
self.config['machine_type'],
self.config['image_project'],
self.config['ssh_user'],
self.config['ssh_public_key'],
self.config['disable_updates'],
self.config['use_preemptible_vms'],
tags=self.config.get('tags'))
return self.config
def key_helper(self):
""" Generates a public key and a private key and stores them in the config. The public key will be applied to
all the instances in the deployment later on when wait() is called.
"""
if self.config['key_helper']:
private_key, public_key = util.generate_rsa_keypair()
self.config['ssh_private_key'] = private_key.decode()
self.config['ssh_public_key'] = public_key.decode()
def get_cluster_hosts(self) -> [Host]:
return list(self.deployment.hosts)[1:]
def get_bootstrap_host(self) -> Host:
return list(self.deployment.hosts)[0]
def wait(self):
""" Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once
the network is deployed, a firewall for the network and an instance template are deployed. Finally,
once the instance template is deployed, an instance group manager and all its instances are deployed.
"""
self.deployment.wait_for_completion()
def delete(self):
""" Deletes all the resources associated with the deployment (instance template, network, firewall, instance
group manager and all its instances).
"""
self.deployment.delete()
<|fim▁end|> | raise util.LauncherError('DeploymentContainsErrors', str(errors)) |
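A hedged sketch of how a caller might react to the two failure modes the deployment property raises; launcher stands for an existing OnPremLauncher instance and is an assumption of this snippet:

try:
    deployment = launcher.deployment  # may raise util.LauncherError
except util.LauncherError as exc:
    # Covers both 'DeploymentNotFound' and 'DeploymentContainsErrors'.
    log.error('deployment unavailable: %s', exc)
    raise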
<|file_name|>gcp.py<|end_file_name|><|fim▁begin|>""" Launcher functionality for the Google Compute Engine (GCE)
"""
import json
import logging
import os
from dcos_launch import onprem, util
from dcos_launch.platforms import gcp
from dcos_test_utils.helpers import Host
from googleapiclient.errors import HttpError
log = logging.getLogger(__name__)
def get_credentials(env=None) -> tuple:
path = None
if env is None:
env = os.environ.copy()
if 'GCE_CREDENTIALS' in env:
json_credentials = env['GCE_CREDENTIALS']
elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:
path = env['GOOGLE_APPLICATION_CREDENTIALS']
json_credentials = util.read_file(path)
else:
raise util.LauncherError(
'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')
return json_credentials, path
class OnPremLauncher(onprem.AbstractOnpremLauncher):
# Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS
def __init__(self, config: dict, env=None):
creds_string, _ = get_credentials(env)
self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))
self.config = config
@property
def deployment(self):
""" Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the
corresponding real deployment (active machines) exists and doesn't contain any errors.
"""
try:
deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],
self.config['gce_zone'])
info = deployment.get_info()
errors = info['operation'].get('error')
if errors:
raise util.LauncherError('DeploymentContainsErrors', str(errors))
return deployment
except HttpError as e:
if e.resp.status == 404:
<|fim_middle|>
raise e
def create(self) -> dict:
self.key_helper()
node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']
+ self.config['num_private_agents'])
gcp.BareClusterDeployment.create(
self.gcp_wrapper,
self.config['deployment_name'],
self.config['gce_zone'],
node_count,
self.config['disk_size'],
self.config['disk_type'],
self.config['source_image'],
self.config['machine_type'],
self.config['image_project'],
self.config['ssh_user'],
self.config['ssh_public_key'],
self.config['disable_updates'],
self.config['use_preemptible_vms'],
tags=self.config.get('tags'))
return self.config
def key_helper(self):
""" Generates a public key and a private key and stores them in the config. The public key will be applied to
all the instances in the deployment later on when wait() is called.
"""
if self.config['key_helper']:
private_key, public_key = util.generate_rsa_keypair()
self.config['ssh_private_key'] = private_key.decode()
self.config['ssh_public_key'] = public_key.decode()
def get_cluster_hosts(self) -> [Host]:
return list(self.deployment.hosts)[1:]
def get_bootstrap_host(self) -> Host:
return list(self.deployment.hosts)[0]
def wait(self):
""" Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once
the network is deployed, a firewall for the network and an instance template are deployed. Finally,
once the instance template is deployed, an instance group manager and all its instances are deployed.
"""
self.deployment.wait_for_completion()
def delete(self):
""" Deletes all the resources associated with the deployment (instance template, network, firewall, instance
group manager and all its instances).
"""
self.deployment.delete()
<|fim▁end|> | raise util.LauncherError('DeploymentNotFound',
"The deployment you are trying to access doesn't exist") from e |
<|file_name|>gcp.py<|end_file_name|><|fim▁begin|>""" Launcher functionality for the Google Compute Engine (GCE)
"""
import json
import logging
import os
from dcos_launch import onprem, util
from dcos_launch.platforms import gcp
from dcos_test_utils.helpers import Host
from googleapiclient.errors import HttpError
log = logging.getLogger(__name__)
def get_credentials(env=None) -> tuple:
path = None
if env is None:
env = os.environ.copy()
if 'GCE_CREDENTIALS' in env:
json_credentials = env['GCE_CREDENTIALS']
elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:
path = env['GOOGLE_APPLICATION_CREDENTIALS']
json_credentials = util.read_file(path)
else:
raise util.LauncherError(
'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')
return json_credentials, path
class OnPremLauncher(onprem.AbstractOnpremLauncher):
# Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS
def __init__(self, config: dict, env=None):
creds_string, _ = get_credentials(env)
self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))
self.config = config
@property
def deployment(self):
""" Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the
corresponding real deployment (active machines) exists and doesn't contain any errors.
"""
try:
deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],
self.config['gce_zone'])
info = deployment.get_info()
errors = info['operation'].get('error')
if errors:
raise util.LauncherError('DeploymentContainsErrors', str(errors))
return deployment
except HttpError as e:
if e.resp.status == 404:
raise util.LauncherError('DeploymentNotFound',
"The deployment you are trying to access doesn't exist") from e
raise e
def create(self) -> dict:
self.key_helper()
node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']
+ self.config['num_private_agents'])
gcp.BareClusterDeployment.create(
self.gcp_wrapper,
self.config['deployment_name'],
self.config['gce_zone'],
node_count,
self.config['disk_size'],
self.config['disk_type'],
self.config['source_image'],
self.config['machine_type'],
self.config['image_project'],
self.config['ssh_user'],
self.config['ssh_public_key'],
self.config['disable_updates'],
self.config['use_preemptible_vms'],
tags=self.config.get('tags'))
return self.config
def key_helper(self):
""" Generates a public key and a private key and stores them in the config. The public key will be applied to
all the instances in the deployment later on when wait() is called.
"""
if self.config['key_helper']:
<|fim_middle|>
def get_cluster_hosts(self) -> [Host]:
return list(self.deployment.hosts)[1:]
def get_bootstrap_host(self) -> Host:
return list(self.deployment.hosts)[0]
def wait(self):
""" Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once
the network is deployed, a firewall for the network and an instance template are deployed. Finally,
once the instance template is deployed, an instance group manager and all its instances are deployed.
"""
self.deployment.wait_for_completion()
def delete(self):
""" Deletes all the resources associated with the deployment (instance template, network, firewall, instance
group manager and all its instances).
"""
self.deployment.delete()
<|fim▁end|> | private_key, public_key = util.generate_rsa_keypair()
self.config['ssh_private_key'] = private_key.decode()
self.config['ssh_public_key'] = public_key.decode() |
<|file_name|>gcp.py<|end_file_name|><|fim▁begin|>""" Launcher functionality for the Google Compute Engine (GCE)
"""
import json
import logging
import os
from dcos_launch import onprem, util
from dcos_launch.platforms import gcp
from dcos_test_utils.helpers import Host
from googleapiclient.errors import HttpError
log = logging.getLogger(__name__)
def <|fim_middle|>(env=None) -> tuple:
path = None
if env is None:
env = os.environ.copy()
if 'GCE_CREDENTIALS' in env:
json_credentials = env['GCE_CREDENTIALS']
elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:
path = env['GOOGLE_APPLICATION_CREDENTIALS']
json_credentials = util.read_file(path)
else:
raise util.LauncherError(
'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')
return json_credentials, path
class OnPremLauncher(onprem.AbstractOnpremLauncher):
# Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS
def __init__(self, config: dict, env=None):
creds_string, _ = get_credentials(env)
self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))
self.config = config
@property
def deployment(self):
""" Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the
corresponding real deployment (active machines) exists and doesn't contain any errors.
"""
try:
deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],
self.config['gce_zone'])
info = deployment.get_info()
errors = info['operation'].get('error')
if errors:
raise util.LauncherError('DeploymentContainsErrors', str(errors))
return deployment
except HttpError as e:
if e.resp.status == 404:
raise util.LauncherError('DeploymentNotFound',
"The deployment you are trying to access doesn't exist") from e
raise e
def create(self) -> dict:
self.key_helper()
node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']
+ self.config['num_private_agents'])
gcp.BareClusterDeployment.create(
self.gcp_wrapper,
self.config['deployment_name'],
self.config['gce_zone'],
node_count,
self.config['disk_size'],
self.config['disk_type'],
self.config['source_image'],
self.config['machine_type'],
self.config['image_project'],
self.config['ssh_user'],
self.config['ssh_public_key'],
self.config['disable_updates'],
self.config['use_preemptible_vms'],
tags=self.config.get('tags'))
return self.config
def key_helper(self):
""" Generates a public key and a private key and stores them in the config. The public key will be applied to
all the instances in the deployment later on when wait() is called.
"""
if self.config['key_helper']:
private_key, public_key = util.generate_rsa_keypair()
self.config['ssh_private_key'] = private_key.decode()
self.config['ssh_public_key'] = public_key.decode()
def get_cluster_hosts(self) -> [Host]:
return list(self.deployment.hosts)[1:]
def get_bootstrap_host(self) -> Host:
return list(self.deployment.hosts)[0]
def wait(self):
""" Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once
the network is deployed, a firewall for the network and an instance template are deployed. Finally,
once the instance template is deployed, an instance group manager and all its instances are deployed.
"""
self.deployment.wait_for_completion()
def delete(self):
""" Deletes all the resources associated with the deployment (instance template, network, firewall, instance
group manager and all its instances).
"""
self.deployment.delete()
<|fim▁end|> | get_credentials |
<|file_name|>gcp.py<|end_file_name|><|fim▁begin|>""" Launcher functionality for the Google Compute Engine (GCE)
"""
import json
import logging
import os
from dcos_launch import onprem, util
from dcos_launch.platforms import gcp
from dcos_test_utils.helpers import Host
from googleapiclient.errors import HttpError
log = logging.getLogger(__name__)
def get_credentials(env=None) -> tuple:
path = None
if env is None:
env = os.environ.copy()
if 'GCE_CREDENTIALS' in env:
json_credentials = env['GCE_CREDENTIALS']
elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:
path = env['GOOGLE_APPLICATION_CREDENTIALS']
json_credentials = util.read_file(path)
else:
raise util.LauncherError(
'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')
return json_credentials, path
class OnPremLauncher(onprem.AbstractOnpremLauncher):
# Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS
def <|fim_middle|>(self, config: dict, env=None):
creds_string, _ = get_credentials(env)
self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))
self.config = config
@property
def deployment(self):
""" Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the
corresponding real deployment (active machines) exists and doesn't contain any errors.
"""
try:
deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],
self.config['gce_zone'])
info = deployment.get_info()
errors = info['operation'].get('error')
if errors:
raise util.LauncherError('DeploymentContainsErrors', str(errors))
return deployment
except HttpError as e:
if e.resp.status == 404:
raise util.LauncherError('DeploymentNotFound',
"The deployment you are trying to access doesn't exist") from e
raise e
def create(self) -> dict:
self.key_helper()
node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']
+ self.config['num_private_agents'])
gcp.BareClusterDeployment.create(
self.gcp_wrapper,
self.config['deployment_name'],
self.config['gce_zone'],
node_count,
self.config['disk_size'],
self.config['disk_type'],
self.config['source_image'],
self.config['machine_type'],
self.config['image_project'],
self.config['ssh_user'],
self.config['ssh_public_key'],
self.config['disable_updates'],
self.config['use_preemptible_vms'],
tags=self.config.get('tags'))
return self.config
def key_helper(self):
""" Generates a public key and a private key and stores them in the config. The public key will be applied to
all the instances in the deployment later on when wait() is called.
"""
if self.config['key_helper']:
private_key, public_key = util.generate_rsa_keypair()
self.config['ssh_private_key'] = private_key.decode()
self.config['ssh_public_key'] = public_key.decode()
def get_cluster_hosts(self) -> [Host]:
return list(self.deployment.hosts)[1:]
def get_bootstrap_host(self) -> Host:
return list(self.deployment.hosts)[0]
def wait(self):
""" Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once
the network is deployed, a firewall for the network and an instance template are deployed. Finally,
once the instance template is deployed, an instance group manager and all its instances are deployed.
"""
self.deployment.wait_for_completion()
def delete(self):
""" Deletes all the resources associated with the deployment (instance template, network, firewall, instance
group manager and all its instances).
"""
self.deployment.delete()
<|fim▁end|> | __init__ |
<|file_name|>gcp.py<|end_file_name|><|fim▁begin|>""" Launcher functionality for the Google Compute Engine (GCE)
"""
import json
import logging
import os
from dcos_launch import onprem, util
from dcos_launch.platforms import gcp
from dcos_test_utils.helpers import Host
from googleapiclient.errors import HttpError
log = logging.getLogger(__name__)
def get_credentials(env=None) -> tuple:
path = None
if env is None:
env = os.environ.copy()
if 'GCE_CREDENTIALS' in env:
json_credentials = env['GCE_CREDENTIALS']
elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:
path = env['GOOGLE_APPLICATION_CREDENTIALS']
json_credentials = util.read_file(path)
else:
raise util.LauncherError(
'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')
return json_credentials, path
class OnPremLauncher(onprem.AbstractOnpremLauncher):
# Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS
def __init__(self, config: dict, env=None):
creds_string, _ = get_credentials(env)
self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))
self.config = config
@property
def <|fim_middle|>(self):
""" Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the
corresponding real deployment (active machines) exists and doesn't contain any errors.
"""
try:
deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],
self.config['gce_zone'])
info = deployment.get_info()
errors = info['operation'].get('error')
if errors:
raise util.LauncherError('DeploymentContainsErrors', str(errors))
return deployment
except HttpError as e:
if e.resp.status == 404:
raise util.LauncherError('DeploymentNotFound',
"The deployment you are trying to access doesn't exist") from e
raise e
def create(self) -> dict:
self.key_helper()
node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']
+ self.config['num_private_agents'])
gcp.BareClusterDeployment.create(
self.gcp_wrapper,
self.config['deployment_name'],
self.config['gce_zone'],
node_count,
self.config['disk_size'],
self.config['disk_type'],
self.config['source_image'],
self.config['machine_type'],
self.config['image_project'],
self.config['ssh_user'],
self.config['ssh_public_key'],
self.config['disable_updates'],
self.config['use_preemptible_vms'],
tags=self.config.get('tags'))
return self.config
def key_helper(self):
""" Generates a public key and a private key and stores them in the config. The public key will be applied to
all the instances in the deployment later on when wait() is called.
"""
if self.config['key_helper']:
private_key, public_key = util.generate_rsa_keypair()
self.config['ssh_private_key'] = private_key.decode()
self.config['ssh_public_key'] = public_key.decode()
def get_cluster_hosts(self) -> [Host]:
return list(self.deployment.hosts)[1:]
def get_bootstrap_host(self) -> Host:
return list(self.deployment.hosts)[0]
def wait(self):
""" Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once
the network is deployed, a firewall for the network and an instance template are deployed. Finally,
once the instance template is deployed, an instance group manager and all its instances are deployed.
"""
self.deployment.wait_for_completion()
def delete(self):
""" Deletes all the resources associated with the deployment (instance template, network, firewall, instance
group manager and all its instances).
"""
self.deployment.delete()
<|fim▁end|> | deployment |
<|file_name|>gcp.py<|end_file_name|><|fim▁begin|>""" Launcher functionality for the Google Compute Engine (GCE)
"""
import json
import logging
import os
from dcos_launch import onprem, util
from dcos_launch.platforms import gcp
from dcos_test_utils.helpers import Host
from googleapiclient.errors import HttpError
log = logging.getLogger(__name__)
def get_credentials(env=None) -> tuple:
path = None
if env is None:
env = os.environ.copy()
if 'GCE_CREDENTIALS' in env:
json_credentials = env['GCE_CREDENTIALS']
elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:
path = env['GOOGLE_APPLICATION_CREDENTIALS']
json_credentials = util.read_file(path)
else:
raise util.LauncherError(
'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')
return json_credentials, path
class OnPremLauncher(onprem.AbstractOnpremLauncher):
# Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS
def __init__(self, config: dict, env=None):
creds_string, _ = get_credentials(env)
self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))
self.config = config
@property
def deployment(self):
""" Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the
corresponding real deployment (active machines) exists and doesn't contain any errors.
"""
try:
deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],
self.config['gce_zone'])
info = deployment.get_info()
errors = info['operation'].get('error')
if errors:
raise util.LauncherError('DeploymentContainsErrors', str(errors))
return deployment
except HttpError as e:
if e.resp.status == 404:
raise util.LauncherError('DeploymentNotFound',
"The deployment you are trying to access doesn't exist") from e
raise e
def <|fim_middle|>(self) -> dict:
self.key_helper()
node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']
+ self.config['num_private_agents'])
gcp.BareClusterDeployment.create(
self.gcp_wrapper,
self.config['deployment_name'],
self.config['gce_zone'],
node_count,
self.config['disk_size'],
self.config['disk_type'],
self.config['source_image'],
self.config['machine_type'],
self.config['image_project'],
self.config['ssh_user'],
self.config['ssh_public_key'],
self.config['disable_updates'],
self.config['use_preemptible_vms'],
tags=self.config.get('tags'))
return self.config
def key_helper(self):
""" Generates a public key and a private key and stores them in the config. The public key will be applied to
all the instances in the deployment later on when wait() is called.
"""
if self.config['key_helper']:
private_key, public_key = util.generate_rsa_keypair()
self.config['ssh_private_key'] = private_key.decode()
self.config['ssh_public_key'] = public_key.decode()
def get_cluster_hosts(self) -> [Host]:
return list(self.deployment.hosts)[1:]
def get_bootstrap_host(self) -> Host:
return list(self.deployment.hosts)[0]
def wait(self):
""" Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once
the network is deployed, a firewall for the network and an instance template are deployed. Finally,
once the instance template is deployed, an instance group manager and all its instances are deployed.
"""
self.deployment.wait_for_completion()
def delete(self):
""" Deletes all the resources associated with the deployment (instance template, network, firewall, instance
group manager and all its instances).
"""
self.deployment.delete()
<|fim▁end|> | create |
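A worked example of the node_count arithmetic in create() above: one node is always added on top of the configured counts for the bootstrap host, which get_bootstrap_host() later returns as hosts[0]. The counts below are illustrative:

config = {'num_masters': 3, 'num_public_agents': 1, 'num_private_agents': 2}
node_count = 1 + (config['num_masters'] + config['num_public_agents']
                  + config['num_private_agents'])
assert node_count == 7  # 1 bootstrap + 3 masters + 1 public + 2 private agents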
<|file_name|>gcp.py<|end_file_name|><|fim▁begin|>""" Launcher functionality for the Google Compute Engine (GCE)
"""
import json
import logging
import os
from dcos_launch import onprem, util
from dcos_launch.platforms import gcp
from dcos_test_utils.helpers import Host
from googleapiclient.errors import HttpError
log = logging.getLogger(__name__)
def get_credentials(env=None) -> tuple:
path = None
if env is None:
env = os.environ.copy()
if 'GCE_CREDENTIALS' in env:
json_credentials = env['GCE_CREDENTIALS']
elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:
path = env['GOOGLE_APPLICATION_CREDENTIALS']
json_credentials = util.read_file(path)
else:
raise util.LauncherError(
'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')
return json_credentials, path
class OnPremLauncher(onprem.AbstractOnpremLauncher):
# Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS
def __init__(self, config: dict, env=None):
creds_string, _ = get_credentials(env)
self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))
self.config = config
@property
def deployment(self):
""" Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the
corresponding real deployment (active machines) exists and doesn't contain any errors.
"""
try:
deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],
self.config['gce_zone'])
info = deployment.get_info()
errors = info['operation'].get('error')
if errors:
raise util.LauncherError('DeploymentContainsErrors', str(errors))
return deployment
except HttpError as e:
if e.resp.status == 404:
raise util.LauncherError('DeploymentNotFound',
"The deployment you are trying to access doesn't exist") from e
raise e
def create(self) -> dict:
self.key_helper()
node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']
+ self.config['num_private_agents'])
gcp.BareClusterDeployment.create(
self.gcp_wrapper,
self.config['deployment_name'],
self.config['gce_zone'],
node_count,
self.config['disk_size'],
self.config['disk_type'],
self.config['source_image'],
self.config['machine_type'],
self.config['image_project'],
self.config['ssh_user'],
self.config['ssh_public_key'],
self.config['disable_updates'],
self.config['use_preemptible_vms'],
tags=self.config.get('tags'))
return self.config
def <|fim_middle|>(self):
""" Generates a public key and a private key and stores them in the config. The public key will be applied to
all the instances in the deployment later on when wait() is called.
"""
if self.config['key_helper']:
private_key, public_key = util.generate_rsa_keypair()
self.config['ssh_private_key'] = private_key.decode()
self.config['ssh_public_key'] = public_key.decode()
def get_cluster_hosts(self) -> [Host]:
return list(self.deployment.hosts)[1:]
def get_bootstrap_host(self) -> Host:
return list(self.deployment.hosts)[0]
def wait(self):
""" Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once
the network is deployed, a firewall for the network and an instance template are deployed. Finally,
once the instance template is deployed, an instance group manager and all its instances are deployed.
"""
self.deployment.wait_for_completion()
def delete(self):
""" Deletes all the resources associated with the deployment (instance template, network, firewall, instance
group manager and all its instances).
"""
self.deployment.delete()
<|fim▁end|> | key_helper |
<|file_name|>gcp.py<|end_file_name|><|fim▁begin|>""" Launcher functionality for the Google Compute Engine (GCE)
"""
import json
import logging
import os
from dcos_launch import onprem, util
from dcos_launch.platforms import gcp
from dcos_test_utils.helpers import Host
from googleapiclient.errors import HttpError
log = logging.getLogger(__name__)
def get_credentials(env=None) -> tuple:
path = None
if env is None:
env = os.environ.copy()
if 'GCE_CREDENTIALS' in env:
json_credentials = env['GCE_CREDENTIALS']
elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:
path = env['GOOGLE_APPLICATION_CREDENTIALS']
json_credentials = util.read_file(path)
else:
raise util.LauncherError(
'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')
return json_credentials, path
class OnPremLauncher(onprem.AbstractOnpremLauncher):
# Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS
def __init__(self, config: dict, env=None):
creds_string, _ = get_credentials(env)
self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))
self.config = config
@property
def deployment(self):
""" Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the
corresponding real deployment (active machines) exists and doesn't contain any errors.
"""
try:
deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],
self.config['gce_zone'])
info = deployment.get_info()
errors = info['operation'].get('error')
if errors:
raise util.LauncherError('DeploymentContainsErrors', str(errors))
return deployment
except HttpError as e:
if e.resp.status == 404:
raise util.LauncherError('DeploymentNotFound',
"The deployment you are trying to access doesn't exist") from e
raise e
def create(self) -> dict:
self.key_helper()
node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']
+ self.config['num_private_agents'])
gcp.BareClusterDeployment.create(
self.gcp_wrapper,
self.config['deployment_name'],
self.config['gce_zone'],
node_count,
self.config['disk_size'],
self.config['disk_type'],
self.config['source_image'],
self.config['machine_type'],
self.config['image_project'],
self.config['ssh_user'],
self.config['ssh_public_key'],
self.config['disable_updates'],
self.config['use_preemptible_vms'],
tags=self.config.get('tags'))
return self.config
def key_helper(self):
""" Generates a public key and a private key and stores them in the config. The public key will be applied to
all the instances in the deployment later on when wait() is called.
"""
if self.config['key_helper']:
private_key, public_key = util.generate_rsa_keypair()
self.config['ssh_private_key'] = private_key.decode()
self.config['ssh_public_key'] = public_key.decode()
def <|fim_middle|>(self) -> [Host]:
return list(self.deployment.hosts)[1:]
def get_bootstrap_host(self) -> Host:
return list(self.deployment.hosts)[0]
def wait(self):
""" Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once
the network is deployed, a firewall for the network and an instance template are deployed. Finally,
once the instance template is deployed, an instance group manager and all its instances are deployed.
"""
self.deployment.wait_for_completion()
def delete(self):
""" Deletes all the resources associated with the deployment (instance template, network, firewall, instance
group manager and all its instances).
"""
self.deployment.delete()
<|fim▁end|> | get_cluster_hosts |
<|file_name|>gcp.py<|end_file_name|><|fim▁begin|>""" Launcher functionality for the Google Compute Engine (GCE)
"""
import json
import logging
import os
from dcos_launch import onprem, util
from dcos_launch.platforms import gcp
from dcos_test_utils.helpers import Host
from googleapiclient.errors import HttpError
log = logging.getLogger(__name__)
def get_credentials(env=None) -> tuple:
path = None
if env is None:
env = os.environ.copy()
if 'GCE_CREDENTIALS' in env:
json_credentials = env['GCE_CREDENTIALS']
elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:
path = env['GOOGLE_APPLICATION_CREDENTIALS']
json_credentials = util.read_file(path)
else:
raise util.LauncherError(
'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')
return json_credentials, path
class OnPremLauncher(onprem.AbstractOnpremLauncher):
# Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS
def __init__(self, config: dict, env=None):
creds_string, _ = get_credentials(env)
self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))
self.config = config
@property
def deployment(self):
""" Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the
corresponding real deployment (active machines) exists and doesn't contain any errors.
"""
try:
deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],
self.config['gce_zone'])
info = deployment.get_info()
errors = info['operation'].get('error')
if errors:
raise util.LauncherError('DeploymentContainsErrors', str(errors))
return deployment
except HttpError as e:
if e.resp.status == 404:
raise util.LauncherError('DeploymentNotFound',
"The deployment you are trying to access doesn't exist") from e
raise e
def create(self) -> dict:
self.key_helper()
node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']
+ self.config['num_private_agents'])
gcp.BareClusterDeployment.create(
self.gcp_wrapper,
self.config['deployment_name'],
self.config['gce_zone'],
node_count,
self.config['disk_size'],
self.config['disk_type'],
self.config['source_image'],
self.config['machine_type'],
self.config['image_project'],
self.config['ssh_user'],
self.config['ssh_public_key'],
self.config['disable_updates'],
self.config['use_preemptible_vms'],
tags=self.config.get('tags'))
return self.config
def key_helper(self):
""" Generates a public key and a private key and stores them in the config. The public key will be applied to
all the instances in the deployment later on when wait() is called.
"""
if self.config['key_helper']:
private_key, public_key = util.generate_rsa_keypair()
self.config['ssh_private_key'] = private_key.decode()
self.config['ssh_public_key'] = public_key.decode()
def get_cluster_hosts(self) -> [Host]:
return list(self.deployment.hosts)[1:]
def <|fim_middle|>(self) -> Host:
return list(self.deployment.hosts)[0]
def wait(self):
""" Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once
the network is deployed, a firewall for the network and an instance template are deployed. Finally,
once the instance template is deployed, an instance group manager and all its instances are deployed.
"""
self.deployment.wait_for_completion()
def delete(self):
""" Deletes all the resources associated with the deployment (instance template, network, firewall, instance
group manager and all its instances).
"""
self.deployment.delete()
<|fim▁end|> | get_bootstrap_host |
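A small sketch of the host-slicing convention behind the two getters above; the deployment is assumed to list the bootstrap machine first, and plain strings stand in for dcos_test_utils Host objects:

hosts = ['bootstrap-0', 'master-0', 'agent-0']  # stand-ins for Host tuples
bootstrap, cluster = hosts[0], hosts[1:]
assert bootstrap == 'bootstrap-0'
assert cluster == ['master-0', 'agent-0']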
<|file_name|>gcp.py<|end_file_name|><|fim▁begin|>""" Launcher functionality for the Google Compute Engine (GCE)
"""
import json
import logging
import os
from dcos_launch import onprem, util
from dcos_launch.platforms import gcp
from dcos_test_utils.helpers import Host
from googleapiclient.errors import HttpError
log = logging.getLogger(__name__)
def get_credentials(env=None) -> tuple:
path = None
if env is None:
env = os.environ.copy()
if 'GCE_CREDENTIALS' in env:
json_credentials = env['GCE_CREDENTIALS']
elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:
path = env['GOOGLE_APPLICATION_CREDENTIALS']
json_credentials = util.read_file(path)
else:
raise util.LauncherError(
'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')
return json_credentials, path
class OnPremLauncher(onprem.AbstractOnpremLauncher):
# Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS
def __init__(self, config: dict, env=None):
creds_string, _ = get_credentials(env)
self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))
self.config = config
@property
def deployment(self):
""" Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the
corresponding real deployment (active machines) exists and doesn't contain any errors.
"""
try:
deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],
self.config['gce_zone'])
info = deployment.get_info()
errors = info['operation'].get('error')
if errors:
raise util.LauncherError('DeploymentContainsErrors', str(errors))
return deployment
except HttpError as e:
if e.resp.status == 404:
raise util.LauncherError('DeploymentNotFound',
"The deployment you are trying to access doesn't exist") from e
raise e
def create(self) -> dict:
self.key_helper()
node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']
+ self.config['num_private_agents'])
gcp.BareClusterDeployment.create(
self.gcp_wrapper,
self.config['deployment_name'],
self.config['gce_zone'],
node_count,
self.config['disk_size'],
self.config['disk_type'],
self.config['source_image'],
self.config['machine_type'],
self.config['image_project'],
self.config['ssh_user'],
self.config['ssh_public_key'],
self.config['disable_updates'],
self.config['use_preemptible_vms'],
tags=self.config.get('tags'))
return self.config
def key_helper(self):
""" Generates a public key and a private key and stores them in the config. The public key will be applied to
all the instances in the deployment later on when wait() is called.
"""
if self.config['key_helper']:
private_key, public_key = util.generate_rsa_keypair()
self.config['ssh_private_key'] = private_key.decode()
self.config['ssh_public_key'] = public_key.decode()
def get_cluster_hosts(self) -> [Host]:
return list(self.deployment.hosts)[1:]
def get_bootstrap_host(self) -> Host:
return list(self.deployment.hosts)[0]
def <|fim_middle|>(self):
""" Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once
the network is deployed, a firewall for the network and an instance template are deployed. Finally,
once the instance template is deployed, an instance group manager and all its instances are deployed.
"""
self.deployment.wait_for_completion()
def delete(self):
""" Deletes all the resources associated with the deployment (instance template, network, firewall, instance
group manager and all its instances).
"""
self.deployment.delete()
<|fim▁end|> | wait |
<|file_name|>gcp.py<|end_file_name|><|fim▁begin|>""" Launcher functionality for the Google Compute Engine (GCE)
"""
import json
import logging
import os
from dcos_launch import onprem, util
from dcos_launch.platforms import gcp
from dcos_test_utils.helpers import Host
from googleapiclient.errors import HttpError
log = logging.getLogger(__name__)
def get_credentials(env=None) -> tuple:
path = None
if env is None:
env = os.environ.copy()
if 'GCE_CREDENTIALS' in env:
json_credentials = env['GCE_CREDENTIALS']
elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:
path = env['GOOGLE_APPLICATION_CREDENTIALS']
json_credentials = util.read_file(path)
else:
raise util.LauncherError(
'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')
return json_credentials, path
class OnPremLauncher(onprem.AbstractOnpremLauncher):
# Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS
def __init__(self, config: dict, env=None):
creds_string, _ = get_credentials(env)
self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))
self.config = config
@property
def deployment(self):
""" Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the
corresponding real deployment (active machines) exists and doesn't contain any errors.
"""
try:
deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],
self.config['gce_zone'])
info = deployment.get_info()
errors = info['operation'].get('error')
if errors:
raise util.LauncherError('DeploymentContainsErrors', str(errors))
return deployment
except HttpError as e:
if e.resp.status == 404:
raise util.LauncherError('DeploymentNotFound',
"The deployment you are trying to access doesn't exist") from e
raise e
def create(self) -> dict:
self.key_helper()
node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']
+ self.config['num_private_agents'])
gcp.BareClusterDeployment.create(
self.gcp_wrapper,
self.config['deployment_name'],
self.config['gce_zone'],
node_count,
self.config['disk_size'],
self.config['disk_type'],
self.config['source_image'],
self.config['machine_type'],
self.config['image_project'],
self.config['ssh_user'],
self.config['ssh_public_key'],
self.config['disable_updates'],
self.config['use_preemptible_vms'],
tags=self.config.get('tags'))
return self.config
def key_helper(self):
""" Generates a public key and a private key and stores them in the config. The public key will be applied to
all the instances in the deployment later on when wait() is called.
"""
if self.config['key_helper']:
private_key, public_key = util.generate_rsa_keypair()
self.config['ssh_private_key'] = private_key.decode()
self.config['ssh_public_key'] = public_key.decode()
def get_cluster_hosts(self) -> [Host]:
return list(self.deployment.hosts)[1:]
def get_bootstrap_host(self) -> Host:
return list(self.deployment.hosts)[0]
def wait(self):
""" Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once
the network is deployed, a firewall for the network and an instance template are deployed. Finally,
once the instance template is deployed, an instance group manager and all its instances are deployed.
"""
self.deployment.wait_for_completion()
def <|fim_middle|>(self):
""" Deletes all the resources associated with the deployment (instance template, network, firewall, instance
group manager and all its instances).
"""
self.deployment.delete()
<|fim▁end|> | delete |
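Putting the pieces together, a hedged end-to-end sketch of the launcher lifecycle; the config keys are exactly the ones read by the methods above, their values here are illustrative, and the environment is assumed to carry GCE_CREDENTIALS:

launcher = OnPremLauncher({
    'deployment_name': 'demo', 'gce_zone': 'us-central1-a',  # illustrative values
    'num_masters': 1, 'num_public_agents': 1, 'num_private_agents': 1,
    'key_helper': True, 'disk_size': 42, 'disk_type': 'pd-ssd',
    'source_image': 'coreos', 'machine_type': 'n1-standard-8',
    'image_project': 'coreos-cloud', 'ssh_user': 'core',
    'disable_updates': False, 'use_preemptible_vms': False,
})
launcher.create()                    # generates SSH keys, starts the deployment
launcher.wait()                      # network -> firewall/template -> instances
bootstrap = launcher.get_bootstrap_host()
agents = launcher.get_cluster_hosts()
launcher.delete()                    # tears down every deployed resource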
<|file_name|>resource_scale_factors.py<|end_file_name|><|fim▁begin|># Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium browser resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""
import os
import struct
class InvalidPNGException(Exception):
pass
class ResourceScaleFactors(object):
"""Verifier of image dimensions for Chromium resources.
This class verifies the image dimensions of resources in the various
resource subdirectories.
Attributes:
paths: An array of tuples giving the folders to check and their
relevant scale factors. For example:
[(100, 'default_100_percent'), (200, 'default_200_percent')]
"""
def __init__(self, input_api, output_api, paths):
""" Initializes ResourceScaleFactors with paths."""
self.input_api = input_api
self.output_api = output_api
self.paths = paths
def RunChecks(self):
"""Verifies the scale factors of resources being added or modified.
Returns:
An array of presubmit errors if any images were detected not
having the correct dimensions.
"""
def ImageSize(filename):
with open(filename, 'rb', buffering=0) as f:
data = f.read(24)
if data[:8] != '\x89PNG\r\n\x1A\n' or data[12:16] != 'IHDR':
raise InvalidPNGException
return struct.unpack('>ii', data[16:24])
# Returns a list of valid scaled image sizes. The valid sizes are the
# floor and ceiling of (base_size * scale_percent / 100). This is equivalent
# to requiring that the actual scaled size is less than one pixel away from
# the exact scaled size.
def ValidSizes(base_size, scale_percent):
return sorted(set([(base_size * scale_percent) / 100,
(base_size * scale_percent + 99) / 100]))
repository_path = self.input_api.os_path.relpath(
self.input_api.PresubmitLocalPath(),
self.input_api.change.RepositoryRoot())
results = []
# Check for affected files in any of the paths specified.
affected_files = self.input_api.AffectedFiles(include_deletes=False)
files = []
for f in affected_files:
for path_spec in self.paths:
path_root = self.input_api.os_path.join(
repository_path, path_spec[1])
if (f.LocalPath().endswith('.png') and
f.LocalPath().startswith(path_root)):
# Only save the relative path from the resource directory.
relative_path = self.input_api.os_path.relpath(f.LocalPath(),
path_root)
if relative_path not in files:
files.append(relative_path)
corrupt_png_error = ('Corrupt PNG in file %s. Note that binaries are not '
'correctly uploaded to the code review tool and must be directly '
'submitted using the dcommit command.')
for f in files:
base_image = self.input_api.os_path.join(self.paths[0][1], f)
if not os.path.exists(base_image):
results.append(self.output_api.PresubmitError(
'Base image %s does not exist' % self.input_api.os_path.join(
repository_path, base_image)))
continue
try:
base_dimensions = ImageSize(base_image)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, base_image)))
continue
# Find all scaled versions of the base image and verify their sizes.
for i in range(1, len(self.paths)):
image_path = self.input_api.os_path.join(self.paths[i][1], f)
if not os.path.exists(image_path):
continue
# Ensure that each image for a particular scale factor is the
# correct scale of the base image.
try:
scaled_dimensions = ImageSize(image_path)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, image_path)))
continue
for dimension_name, base_size, scaled_size in zip(
('width', 'height'), base_dimensions, scaled_dimensions):
valid_sizes = ValidSizes(base_size, self.paths[i][0])
if scaled_size not in valid_sizes:
results.append(self.output_api.PresubmitError(<|fim▁hole|> scaled_size,
' or '.join(map(str, valid_sizes)))))
return results<|fim▁end|> | 'Image %s has %s %d, expected to be %s' % (
self.input_api.os_path.join(repository_path, image_path),
dimension_name, |
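A worked example of the ValidSizes rule used above, rewritten with explicit floor division since the original relies on Python 2 integer division: a 5 px base dimension at 150% may legitimately scale to 7 px or 8 px, because the exact value 7.5 sits between them:

def valid_sizes(base_size, scale_percent):  # same arithmetic, Python 3 syntax
    return sorted(set([(base_size * scale_percent) // 100,
                       (base_size * scale_percent + 99) // 100]))

assert valid_sizes(5, 150) == [7, 8]
assert valid_sizes(10, 200) == [20]  # exact scales allow only one size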
<|file_name|>resource_scale_factors.py<|end_file_name|><|fim▁begin|># Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium browser resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""
import os
import struct
class InvalidPNGException(Exception):
<|fim_middle|>
class ResourceScaleFactors(object):
"""Verifier of image dimensions for Chromium resources.
This class verifies the image dimensions of resources in the various
resource subdirectories.
Attributes:
paths: An array of tuples giving the folders to check and their
relevant scale factors. For example:
[(100, 'default_100_percent'), (200, 'default_200_percent')]
"""
def __init__(self, input_api, output_api, paths):
""" Initializes ResourceScaleFactors with paths."""
self.input_api = input_api
self.output_api = output_api
self.paths = paths
def RunChecks(self):
"""Verifies the scale factors of resources being added or modified.
Returns:
An array of presubmit errors if any images were detected not
having the correct dimensions.
"""
def ImageSize(filename):
with open(filename, 'rb', buffering=0) as f:
data = f.read(24)
if data[:8] != '\x89PNG\r\n\x1A\n' or data[12:16] != 'IHDR':
raise InvalidPNGException
return struct.unpack('>ii', data[16:24])
# Returns a list of valid scaled image sizes. The valid sizes are the
# floor and ceiling of (base_size * scale_percent / 100). This is equivalent
# to requiring that the actual scaled size is less than one pixel away from
# the exact scaled size.
def ValidSizes(base_size, scale_percent):
return sorted(set([(base_size * scale_percent) / 100,
(base_size * scale_percent + 99) / 100]))
repository_path = self.input_api.os_path.relpath(
self.input_api.PresubmitLocalPath(),
self.input_api.change.RepositoryRoot())
results = []
# Check for affected files in any of the paths specified.
affected_files = self.input_api.AffectedFiles(include_deletes=False)
files = []
for f in affected_files:
for path_spec in self.paths:
path_root = self.input_api.os_path.join(
repository_path, path_spec[1])
if (f.LocalPath().endswith('.png') and
f.LocalPath().startswith(path_root)):
# Only save the relative path from the resource directory.
relative_path = self.input_api.os_path.relpath(f.LocalPath(),
path_root)
if relative_path not in files:
files.append(relative_path)
corrupt_png_error = ('Corrupt PNG in file %s. Note that binaries are not '
'correctly uploaded to the code review tool and must be directly '
'submitted using the dcommit command.')
for f in files:
base_image = self.input_api.os_path.join(self.paths[0][1], f)
if not os.path.exists(base_image):
results.append(self.output_api.PresubmitError(
'Base image %s does not exist' % self.input_api.os_path.join(
repository_path, base_image)))
continue
try:
base_dimensions = ImageSize(base_image)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, base_image)))
continue
# Find all scaled versions of the base image and verify their sizes.
for i in range(1, len(self.paths)):
image_path = self.input_api.os_path.join(self.paths[i][1], f)
if not os.path.exists(image_path):
continue
# Ensure that each image for a particular scale factor is the
# correct scale of the base image.
try:
scaled_dimensions = ImageSize(image_path)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, image_path)))
continue
for dimension_name, base_size, scaled_size in zip(
('width', 'height'), base_dimensions, scaled_dimensions):
valid_sizes = ValidSizes(base_size, self.paths[i][0])
if scaled_size not in valid_sizes:
results.append(self.output_api.PresubmitError(
'Image %s has %s %d, expected to be %s' % (
self.input_api.os_path.join(repository_path, image_path),
dimension_name,
scaled_size,
' or '.join(map(str, valid_sizes)))))
return results
<|fim▁end|> | pass |
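A sketch of the 24-byte prefix ImageSize inspects, built by hand with Python 3 bytes (the original script is Python 2 and compares str literals): 8 signature bytes, the 4-byte IHDR chunk length, the literal IHDR tag, then big-endian width and height:

import struct

header = (b'\x89PNG\r\n\x1a\n'             # PNG signature
          + struct.pack('>i', 13)          # IHDR data length is always 13
          + b'IHDR'
          + struct.pack('>ii', 640, 480))  # width, height
assert header[:8] == b'\x89PNG\r\n\x1a\n' and header[12:16] == b'IHDR'
assert struct.unpack('>ii', header[16:24]) == (640, 480)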
<|file_name|>resource_scale_factors.py<|end_file_name|><|fim▁begin|># Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium browser resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""
import os
import struct
class InvalidPNGException(Exception):
pass
class ResourceScaleFactors(object):
<|fim_middle|>
<|fim▁end|> | """Verifier of image dimensions for Chromium resources.
This class verifies the image dimensions of resources in the various
resource subdirectories.
Attributes:
paths: An array of tuples giving the folders to check and their
relevant scale factors. For example:
[(100, 'default_100_percent'), (200, 'default_200_percent')]
"""
def __init__(self, input_api, output_api, paths):
""" Initializes ResourceScaleFactors with paths."""
self.input_api = input_api
self.output_api = output_api
self.paths = paths
def RunChecks(self):
"""Verifies the scale factors of resources being added or modified.
Returns:
An array of presubmit errors if any images were detected not
having the correct dimensions.
"""
def ImageSize(filename):
with open(filename, 'rb', buffering=0) as f:
data = f.read(24)
if data[:8] != '\x89PNG\r\n\x1A\n' or data[12:16] != 'IHDR':
raise InvalidPNGException
return struct.unpack('>ii', data[16:24])
# Returns a list of valid scaled image sizes. The valid sizes are the
# floor and ceiling of (base_size * scale_percent / 100). This is equivalent
# to requiring that the actual scaled size is less than one pixel away from
# the exact scaled size.
def ValidSizes(base_size, scale_percent):
return sorted(set([(base_size * scale_percent) / 100,
(base_size * scale_percent + 99) / 100]))
repository_path = self.input_api.os_path.relpath(
self.input_api.PresubmitLocalPath(),
self.input_api.change.RepositoryRoot())
results = []
# Check for affected files in any of the paths specified.
affected_files = self.input_api.AffectedFiles(include_deletes=False)
files = []
for f in affected_files:
for path_spec in self.paths:
path_root = self.input_api.os_path.join(
repository_path, path_spec[1])
if (f.LocalPath().endswith('.png') and
f.LocalPath().startswith(path_root)):
# Only save the relative path from the resource directory.
relative_path = self.input_api.os_path.relpath(f.LocalPath(),
path_root)
if relative_path not in files:
files.append(relative_path)
corrupt_png_error = ('Corrupt PNG in file %s. Note that binaries are not '
'correctly uploaded to the code review tool and must be directly '
'submitted using the dcommit command.')
for f in files:
base_image = self.input_api.os_path.join(self.paths[0][1], f)
if not os.path.exists(base_image):
results.append(self.output_api.PresubmitError(
'Base image %s does not exist' % self.input_api.os_path.join(
repository_path, base_image)))
continue
try:
base_dimensions = ImageSize(base_image)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, base_image)))
continue
# Find all scaled versions of the base image and verify their sizes.
for i in range(1, len(self.paths)):
image_path = self.input_api.os_path.join(self.paths[i][1], f)
if not os.path.exists(image_path):
continue
# Ensure that each image for a particular scale factor is the
# correct scale of the base image.
try:
scaled_dimensions = ImageSize(image_path)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, image_path)))
continue
for dimension_name, base_size, scaled_size in zip(
('width', 'height'), base_dimensions, scaled_dimensions):
valid_sizes = ValidSizes(base_size, self.paths[i][0])
if scaled_size not in valid_sizes:
results.append(self.output_api.PresubmitError(
'Image %s has %s %d, expected to be %s' % (
self.input_api.os_path.join(repository_path, image_path),
dimension_name,
scaled_size,
' or '.join(map(str, valid_sizes)))))
return results |
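A hedged sketch of wiring the checker into a PRESUBMIT.py file; CheckChangeOnUpload is the standard depot_tools entry point, input_api and output_api are supplied by the presubmit framework, and the directory pairs mirror the example in the class docstring:

def CheckChangeOnUpload(input_api, output_api):
    checker = ResourceScaleFactors(
        input_api, output_api,
        [(100, 'default_100_percent'), (200, 'default_200_percent')])
    return checker.RunChecks()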
<|file_name|>resource_scale_factors.py<|end_file_name|><|fim▁begin|># Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium browser resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""
import os
import struct
class InvalidPNGException(Exception):
pass
class ResourceScaleFactors(object):
"""Verifier of image dimensions for Chromium resources.
This class verifies the image dimensions of resources in the various
resource subdirectories.
Attributes:
paths: An array of tuples giving the folders to check and their
relevant scale factors. For example:
[(100, 'default_100_percent'), (200, 'default_200_percent')]
"""
def __init__(self, input_api, output_api, paths):
<|fim_middle|>
def RunChecks(self):
"""Verifies the scale factors of resources being added or modified.
Returns:
An array of presubmit errors if any images were detected not
having the correct dimensions.
"""
def ImageSize(filename):
with open(filename, 'rb', buffering=0) as f:
data = f.read(24)
if data[:8] != '\x89PNG\r\n\x1A\n' or data[12:16] != 'IHDR':
raise InvalidPNGException
return struct.unpack('>ii', data[16:24])
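# Annotation (not in the original script): the first 24 bytes of a PNG are
# the 8-byte signature \x89PNG\r\n\x1a\n, a 4-byte IHDR chunk length, the
# 4-byte 'IHDR' type tag, and then the big-endian 4-byte width and height,
# which is why the dimensions sit at byte offsets 16-24.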
# Returns a list of valid scaled image sizes. The valid sizes are the
# floor and ceiling of (base_size * scale_percent / 100). This is equivalent
# to requiring that the actual scaled size is less than one pixel away from
# the exact scaled size.
def ValidSizes(base_size, scale_percent):
return sorted(set([(base_size * scale_percent) / 100,
(base_size * scale_percent + 99) / 100]))
repository_path = self.input_api.os_path.relpath(
self.input_api.PresubmitLocalPath(),
self.input_api.change.RepositoryRoot())
results = []
# Check for affected files in any of the paths specified.
affected_files = self.input_api.AffectedFiles(include_deletes=False)
files = []
for f in affected_files:
for path_spec in self.paths:
path_root = self.input_api.os_path.join(
repository_path, path_spec[1])
if (f.LocalPath().endswith('.png') and
f.LocalPath().startswith(path_root)):
# Only save the relative path from the resource directory.
relative_path = self.input_api.os_path.relpath(f.LocalPath(),
path_root)
if relative_path not in files:
files.append(relative_path)
corrupt_png_error = ('Corrupt PNG in file %s. Note that binaries are not '
'correctly uploaded to the code review tool and must be directly '
'submitted using the dcommit command.')
for f in files:
base_image = self.input_api.os_path.join(self.paths[0][1], f)
if not os.path.exists(base_image):
results.append(self.output_api.PresubmitError(
'Base image %s does not exist' % self.input_api.os_path.join(
repository_path, base_image)))
continue
try:
base_dimensions = ImageSize(base_image)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, base_image)))
continue
# Find all scaled versions of the base image and verify their sizes.
for i in range(1, len(self.paths)):
image_path = self.input_api.os_path.join(self.paths[i][1], f)
if not os.path.exists(image_path):
continue
# Ensure that each image for a particular scale factor is the
# correct scale of the base image.
try:
scaled_dimensions = ImageSize(image_path)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, image_path)))
continue
for dimension_name, base_size, scaled_size in zip(
('width', 'height'), base_dimensions, scaled_dimensions):
valid_sizes = ValidSizes(base_size, self.paths[i][0])
if scaled_size not in valid_sizes:
results.append(self.output_api.PresubmitError(
'Image %s has %s %d, expected to be %s' % (
self.input_api.os_path.join(repository_path, image_path),
dimension_name,
scaled_size,
' or '.join(map(str, valid_sizes)))))
return results
<|fim▁end|> | """ Initializes ResourceScaleFactors with paths."""
self.input_api = input_api
self.output_api = output_api
self.paths = paths |
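# Annotation (not in the original script): RunChecks treats paths[0] as the
# base-scale directory and validates every later entry against it, so the
# 100% folder should always come first in the list.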
<|file_name|>resource_scale_factors.py<|end_file_name|><|fim▁begin|># Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium browser resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""
import os
import struct
class InvalidPNGException(Exception):
pass
class ResourceScaleFactors(object):
"""Verifier of image dimensions for Chromium resources.
This class verifies the image dimensions of resources in the various
resource subdirectories.
Attributes:
paths: An array of tuples giving the folders to check and their
relevant scale factors. For example:
[(100, 'default_100_percent'), (200, 'default_200_percent')]
"""
def __init__(self, input_api, output_api, paths):
""" Initializes ResourceScaleFactors with paths."""
self.input_api = input_api
self.output_api = output_api
self.paths = paths
def RunChecks(self):
<|fim_middle|>
<|fim▁end|> | """Verifies the scale factors of resources being added or modified.
Returns:
An array of presubmit errors if any images were detected not
having the correct dimensions.
"""
def ImageSize(filename):
with open(filename, 'rb', buffering=0) as f:
data = f.read(24)
if data[:8] != '\x89PNG\r\n\x1A\n' or data[12:16] != 'IHDR':
raise InvalidPNGException
return struct.unpack('>ii', data[16:24])
# Returns a list of valid scaled image sizes. The valid sizes are the
# floor and ceiling of (base_size * scale_percent / 100). This is equivalent
# to requiring that the actual scaled size is less than one pixel away from
# the exact scaled size.
def ValidSizes(base_size, scale_percent):
return sorted(set([(base_size * scale_percent) / 100,
(base_size * scale_percent + 99) / 100]))
repository_path = self.input_api.os_path.relpath(
self.input_api.PresubmitLocalPath(),
self.input_api.change.RepositoryRoot())
results = []
# Check for affected files in any of the paths specified.
affected_files = self.input_api.AffectedFiles(include_deletes=False)
files = []
for f in affected_files:
for path_spec in self.paths:
path_root = self.input_api.os_path.join(
repository_path, path_spec[1])
if (f.LocalPath().endswith('.png') and
f.LocalPath().startswith(path_root)):
# Only save the relative path from the resource directory.
relative_path = self.input_api.os_path.relpath(f.LocalPath(),
path_root)
if relative_path not in files:
files.append(relative_path)
corrupt_png_error = ('Corrupt PNG in file %s. Note that binaries are not '
'correctly uploaded to the code review tool and must be directly '
'submitted using the dcommit command.')
for f in files:
base_image = self.input_api.os_path.join(self.paths[0][1], f)
if not os.path.exists(base_image):
results.append(self.output_api.PresubmitError(
'Base image %s does not exist' % self.input_api.os_path.join(
repository_path, base_image)))
continue
try:
base_dimensions = ImageSize(base_image)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, base_image)))
continue
# Find all scaled versions of the base image and verify their sizes.
for i in range(1, len(self.paths)):
image_path = self.input_api.os_path.join(self.paths[i][1], f)
if not os.path.exists(image_path):
continue
# Ensure that each image for a particular scale factor is the
# correct scale of the base image.
try:
scaled_dimensions = ImageSize(image_path)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, image_path)))
continue
for dimension_name, base_size, scaled_size in zip(
('width', 'height'), base_dimensions, scaled_dimensions):
valid_sizes = ValidSizes(base_size, self.paths[i][0])
if scaled_size not in valid_sizes:
results.append(self.output_api.PresubmitError(
'Image %s has %s %d, expected to be %s' % (
self.input_api.os_path.join(repository_path, image_path),
dimension_name,
scaled_size,
' or '.join(map(str, valid_sizes)))))
return results |
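# Annotation (illustrative sketch, not part of this file): a PRESUBMIT.py in
# a resource directory would typically wire the check up roughly as below;
# the scale-factor paths here are assumptions, not taken from this script:
#
#   def CheckChangeOnUpload(input_api, output_api):
#     return ResourceScaleFactors(
#         input_api, output_api,
#         [(100, 'default_100_percent'), (200, 'default_200_percent')]
#     ).RunChecks()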
<|file_name|>resource_scale_factors.py<|end_file_name|><|fim▁begin|># Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium browser resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""
import os
import struct
class InvalidPNGException(Exception):
pass
class ResourceScaleFactors(object):
"""Verifier of image dimensions for Chromium resources.
This class verifies the image dimensions of resources in the various
resource subdirectories.
Attributes:
paths: An array of tuples giving the folders to check and their
relevant scale factors. For example:
[(100, 'default_100_percent'), (200, 'default_200_percent')]
"""
def __init__(self, input_api, output_api, paths):
""" Initializes ResourceScaleFactors with paths."""
self.input_api = input_api
self.output_api = output_api
self.paths = paths
def RunChecks(self):
"""Verifies the scale factors of resources being added or modified.
Returns:
An array of presubmit errors if any images were detected not
having the correct dimensions.
"""
def ImageSize(filename):
<|fim_middle|>
# Returns a list of valid scaled image sizes. The valid sizes are the
# floor and ceiling of (base_size * scale_percent / 100). This is equivalent
# to requiring that the actual scaled size is less than one pixel away from
# the exact scaled size.
def ValidSizes(base_size, scale_percent):
return sorted(set([(base_size * scale_percent) / 100,
(base_size * scale_percent + 99) / 100]))
repository_path = self.input_api.os_path.relpath(
self.input_api.PresubmitLocalPath(),
self.input_api.change.RepositoryRoot())
results = []
# Check for affected files in any of the paths specified.
affected_files = self.input_api.AffectedFiles(include_deletes=False)
files = []
for f in affected_files:
for path_spec in self.paths:
path_root = self.input_api.os_path.join(
repository_path, path_spec[1])
if (f.LocalPath().endswith('.png') and
f.LocalPath().startswith(path_root)):
# Only save the relative path from the resource directory.
relative_path = self.input_api.os_path.relpath(f.LocalPath(),
path_root)
if relative_path not in files:
files.append(relative_path)
corrupt_png_error = ('Corrupt PNG in file %s. Note that binaries are not '
'correctly uploaded to the code review tool and must be directly '
'submitted using the dcommit command.')
for f in files:
base_image = self.input_api.os_path.join(self.paths[0][1], f)
if not os.path.exists(base_image):
results.append(self.output_api.PresubmitError(
'Base image %s does not exist' % self.input_api.os_path.join(
repository_path, base_image)))
continue
try:
base_dimensions = ImageSize(base_image)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, base_image)))
continue
# Find all scaled versions of the base image and verify their sizes.
for i in range(1, len(self.paths)):
image_path = self.input_api.os_path.join(self.paths[i][1], f)
if not os.path.exists(image_path):
continue
# Ensure that each image for a particular scale factor is the
# correct scale of the base image.
try:
scaled_dimensions = ImageSize(image_path)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, image_path)))
continue
for dimension_name, base_size, scaled_size in zip(
('width', 'height'), base_dimensions, scaled_dimensions):
valid_sizes = ValidSizes(base_size, self.paths[i][0])
if scaled_size not in valid_sizes:
results.append(self.output_api.PresubmitError(
'Image %s has %s %d, expected to be %s' % (
self.input_api.os_path.join(repository_path, image_path),
dimension_name,
scaled_size,
' or '.join(map(str, valid_sizes)))))
return results
<|fim▁end|> | with open(filename, 'rb', buffering=0) as f:
data = f.read(24)
if data[:8] != '\x89PNG\r\n\x1A\n' or data[12:16] != 'IHDR':
raise InvalidPNGException
return struct.unpack('>ii', data[16:24]) |
<|file_name|>resource_scale_factors.py<|end_file_name|><|fim▁begin|># Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium browser resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""
import os
import struct
class InvalidPNGException(Exception):
pass
class ResourceScaleFactors(object):
"""Verifier of image dimensions for Chromium resources.
This class verifies the image dimensions of resources in the various
resource subdirectories.
Attributes:
paths: An array of tuples giving the folders to check and their
relevant scale factors. For example:
[(100, 'default_100_percent'), (200, 'default_200_percent')]
"""
def __init__(self, input_api, output_api, paths):
""" Initializes ResourceScaleFactors with paths."""
self.input_api = input_api
self.output_api = output_api
self.paths = paths
def RunChecks(self):
"""Verifies the scale factors of resources being added or modified.
Returns:
An array of presubmit errors if any images were detected not
having the correct dimensions.
"""
def ImageSize(filename):
with open(filename, 'rb', buffering=0) as f:
data = f.read(24)
if data[:8] != '\x89PNG\r\n\x1A\n' or data[12:16] != 'IHDR':
raise InvalidPNGException
return struct.unpack('>ii', data[16:24])
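# Annotation (not in the original script): PNG stores width and height as
# big-endian unsigned 32-bit values, but the spec caps both at 2**31 - 1,
# so decoding them with the signed format '>ii' yields identical results
# for any valid file.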
# Returns a list of valid scaled image sizes. The valid sizes are the
# floor and ceiling of (base_size * scale_percent / 100). This is equivalent
# to requiring that the actual scaled size is less than one pixel away from
# the exact scaled size.
def ValidSizes(base_size, scale_percent):
<|fim_middle|>
repository_path = self.input_api.os_path.relpath(
self.input_api.PresubmitLocalPath(),
self.input_api.change.RepositoryRoot())
results = []
# Check for affected files in any of the paths specified.
affected_files = self.input_api.AffectedFiles(include_deletes=False)
files = []
for f in affected_files:
for path_spec in self.paths:
path_root = self.input_api.os_path.join(
repository_path, path_spec[1])
if (f.LocalPath().endswith('.png') and
f.LocalPath().startswith(path_root)):
# Only save the relative path from the resource directory.
relative_path = self.input_api.os_path.relpath(f.LocalPath(),
path_root)
if relative_path not in files:
files.append(relative_path)
corrupt_png_error = ('Corrupt PNG in file %s. Note that binaries are not '
'correctly uploaded to the code review tool and must be directly '
'submitted using the dcommit command.')
for f in files:
base_image = self.input_api.os_path.join(self.paths[0][1], f)
if not os.path.exists(base_image):
results.append(self.output_api.PresubmitError(
'Base image %s does not exist' % self.input_api.os_path.join(
repository_path, base_image)))
continue
try:
base_dimensions = ImageSize(base_image)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, base_image)))
continue
# Find all scaled versions of the base image and verify their sizes.
for i in range(1, len(self.paths)):
image_path = self.input_api.os_path.join(self.paths[i][1], f)
if not os.path.exists(image_path):
continue
# Ensure that each image for a particular scale factor is the
# correct scale of the base image.
try:
scaled_dimensions = ImageSize(image_path)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, image_path)))
continue
for dimension_name, base_size, scaled_size in zip(
('width', 'height'), base_dimensions, scaled_dimensions):
valid_sizes = ValidSizes(base_size, self.paths[i][0])
if scaled_size not in valid_sizes:
results.append(self.output_api.PresubmitError(
'Image %s has %s %d, expected to be %s' % (
self.input_api.os_path.join(repository_path, image_path),
dimension_name,
scaled_size,
' or '.join(map(str, valid_sizes)))))
return results
<|fim▁end|> | return sorted(set([(base_size * scale_percent) / 100,
(base_size * scale_percent + 99) / 100])) |
<|file_name|>resource_scale_factors.py<|end_file_name|><|fim▁begin|># Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium browser resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""
import os
import struct
class InvalidPNGException(Exception):
pass
class ResourceScaleFactors(object):
"""Verifier of image dimensions for Chromium resources.
This class verifies the image dimensions of resources in the various
resource subdirectories.
Attributes:
paths: An array of tuples giving the folders to check and their
relevant scale factors. For example:
[(100, 'default_100_percent'), (200, 'default_200_percent')]
"""
def __init__(self, input_api, output_api, paths):
""" Initializes ResourceScaleFactors with paths."""
self.input_api = input_api
self.output_api = output_api
self.paths = paths
def RunChecks(self):
"""Verifies the scale factors of resources being added or modified.
Returns:
An array of presubmit errors if any images were detected not
having the correct dimensions.
"""
def ImageSize(filename):
with open(filename, 'rb', buffering=0) as f:
data = f.read(24)
if data[:8] != '\x89PNG\r\n\x1A\n' or data[12:16] != 'IHDR':
<|fim_middle|>
return struct.unpack('>ii', data[16:24])
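# Annotation (porting note, not in the original script): this file targets
# Python 2. Under Python 3 the header comparisons would need bytes literals
# (b'\x89PNG\r\n\x1A\n' and b'IHDR') and ValidSizes below would need the
# floor division operator //.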
# Returns a list of valid scaled image sizes. The valid sizes are the
# floor and ceiling of (base_size * scale_percent / 100). This is equivalent
# to requiring that the actual scaled size is less than one pixel away from
# the exact scaled size.
def ValidSizes(base_size, scale_percent):
return sorted(set([(base_size * scale_percent) / 100,
(base_size * scale_percent + 99) / 100]))
repository_path = self.input_api.os_path.relpath(
self.input_api.PresubmitLocalPath(),
self.input_api.change.RepositoryRoot())
results = []
# Check for affected files in any of the paths specified.
affected_files = self.input_api.AffectedFiles(include_deletes=False)
files = []
for f in affected_files:
for path_spec in self.paths:
path_root = self.input_api.os_path.join(
repository_path, path_spec[1])
if (f.LocalPath().endswith('.png') and
f.LocalPath().startswith(path_root)):
# Only save the relative path from the resource directory.
relative_path = self.input_api.os_path.relpath(f.LocalPath(),
path_root)
if relative_path not in files:
files.append(relative_path)
corrupt_png_error = ('Corrupt PNG in file %s. Note that binaries are not '
'correctly uploaded to the code review tool and must be directly '
'submitted using the dcommit command.')
for f in files:
base_image = self.input_api.os_path.join(self.paths[0][1], f)
if not os.path.exists(base_image):
results.append(self.output_api.PresubmitError(
'Base image %s does not exist' % self.input_api.os_path.join(
repository_path, base_image)))
continue
try:
base_dimensions = ImageSize(base_image)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, base_image)))
continue
# Find all scaled versions of the base image and verify their sizes.
for i in range(1, len(self.paths)):
image_path = self.input_api.os_path.join(self.paths[i][1], f)
if not os.path.exists(image_path):
continue
# Ensure that each image for a particular scale factor is the
# correct scale of the base image.
try:
scaled_dimensions = ImageSize(image_path)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, image_path)))
continue
for dimension_name, base_size, scaled_size in zip(
('width', 'height'), base_dimensions, scaled_dimensions):
valid_sizes = ValidSizes(base_size, self.paths[i][0])
if scaled_size not in valid_sizes:
results.append(self.output_api.PresubmitError(
'Image %s has %s %d, expected to be %s' % (
self.input_api.os_path.join(repository_path, image_path),
dimension_name,
scaled_size,
' or '.join(map(str, valid_sizes)))))
return results
<|fim▁end|> | raise InvalidPNGException |
<|file_name|>resource_scale_factors.py<|end_file_name|><|fim▁begin|># Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium browser resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""
import os
import struct
class InvalidPNGException(Exception):
pass
class ResourceScaleFactors(object):
"""Verifier of image dimensions for Chromium resources.
This class verifies the image dimensions of resources in the various
resource subdirectories.
Attributes:
paths: An array of tuples giving the folders to check and their
relevant scale factors. For example:
[(100, 'default_100_percent'), (200, 'default_200_percent')]
"""
def __init__(self, input_api, output_api, paths):
""" Initializes ResourceScaleFactors with paths."""
self.input_api = input_api
self.output_api = output_api
self.paths = paths
def RunChecks(self):
"""Verifies the scale factors of resources being added or modified.
Returns:
An array of presubmit errors if any images were detected not
having the correct dimensions.
"""
def ImageSize(filename):
with open(filename, 'rb', buffering=0) as f:
data = f.read(24)
if data[:8] != '\x89PNG\r\n\x1A\n' or data[12:16] != 'IHDR':
raise InvalidPNGException
return struct.unpack('>ii', data[16:24])
# Returns a list of valid scaled image sizes. The valid sizes are the
# floor and ceiling of (base_size * scale_percent / 100). This is equivalent
# to requiring that the actual scaled size is less than one pixel away from
# the exact scaled size.
def ValidSizes(base_size, scale_percent):
return sorted(set([(base_size * scale_percent) / 100,
(base_size * scale_percent + 99) / 100]))
repository_path = self.input_api.os_path.relpath(
self.input_api.PresubmitLocalPath(),
self.input_api.change.RepositoryRoot())
results = []
# Check for affected files in any of the paths specified.
affected_files = self.input_api.AffectedFiles(include_deletes=False)
files = []
for f in affected_files:
for path_spec in self.paths:
path_root = self.input_api.os_path.join(
repository_path, path_spec[1])
if (f.LocalPath().endswith('.png') and
f.LocalPath().startswith(path_root)):
# Only save the relative path from the resource directory.
<|fim_middle|>
corrupt_png_error = ('Corrupt PNG in file %s. Note that binaries are not '
'correctly uploaded to the code review tool and must be directly '
'submitted using the dcommit command.')
for f in files:
base_image = self.input_api.os_path.join(self.paths[0][1], f)
if not os.path.exists(base_image):
results.append(self.output_api.PresubmitError(
'Base image %s does not exist' % self.input_api.os_path.join(
repository_path, base_image)))
continue
try:
base_dimensions = ImageSize(base_image)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, base_image)))
continue
# Find all scaled versions of the base image and verify their sizes.
for i in range(1, len(self.paths)):
image_path = self.input_api.os_path.join(self.paths[i][1], f)
if not os.path.exists(image_path):
continue
# Ensure that each image for a particular scale factor is the
# correct scale of the base image.
try:
scaled_dimensions = ImageSize(image_path)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, image_path)))
continue
for dimension_name, base_size, scaled_size in zip(
('width', 'height'), base_dimensions, scaled_dimensions):
valid_sizes = ValidSizes(base_size, self.paths[i][0])
if scaled_size not in valid_sizes:
results.append(self.output_api.PresubmitError(
'Image %s has %s %d, expected to be %s' % (
self.input_api.os_path.join(repository_path, image_path),
dimension_name,
scaled_size,
' or '.join(map(str, valid_sizes)))))
return results
<|fim▁end|> | relative_path = self.input_api.os_path.relpath(f.LocalPath(),
path_root)
if relative_path not in files:
files.append(relative_path) |
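# Annotation (worked example, not in the original script): if a change
# touches default_100_percent/common/foo.png and
# default_200_percent/common/foo.png, both reduce to the same relative
# path, so files ends up holding 'common/foo.png' exactly once.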
<|file_name|>resource_scale_factors.py<|end_file_name|><|fim▁begin|># Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium browser resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""
import os
import struct
class InvalidPNGException(Exception):
pass
class ResourceScaleFactors(object):
"""Verifier of image dimensions for Chromium resources.
This class verifies the image dimensions of resources in the various
resource subdirectories.
Attributes:
paths: An array of tuples giving the folders to check and their
relevant scale factors. For example:
[(100, 'default_100_percent'), (200, 'default_200_percent')]
"""
def __init__(self, input_api, output_api, paths):
""" Initializes ResourceScaleFactors with paths."""
self.input_api = input_api
self.output_api = output_api
self.paths = paths
def RunChecks(self):
"""Verifies the scale factors of resources being added or modified.
Returns:
An array of presubmit errors if any images were detected not
having the correct dimensions.
"""
def ImageSize(filename):
with open(filename, 'rb', buffering=0) as f:
data = f.read(24)
if data[:8] != '\x89PNG\r\n\x1A\n' or data[12:16] != 'IHDR':
raise InvalidPNGException
return struct.unpack('>ii', data[16:24])
# Returns a list of valid scaled image sizes. The valid sizes are the
# floor and ceiling of (base_size * scale_percent / 100). This is equivalent
# to requiring that the actual scaled size is less than one pixel away from
# the exact scaled size.
def ValidSizes(base_size, scale_percent):
return sorted(set([(base_size * scale_percent) / 100,
(base_size * scale_percent + 99) / 100]))
repository_path = self.input_api.os_path.relpath(
self.input_api.PresubmitLocalPath(),
self.input_api.change.RepositoryRoot())
results = []
# Check for affected files in any of the paths specified.
affected_files = self.input_api.AffectedFiles(include_deletes=False)
files = []
for f in affected_files:
for path_spec in self.paths:
path_root = self.input_api.os_path.join(
repository_path, path_spec[1])
if (f.LocalPath().endswith('.png') and
f.LocalPath().startswith(path_root)):
# Only save the relative path from the resource directory.
relative_path = self.input_api.os_path.relpath(f.LocalPath(),
path_root)
if relative_path not in files:
<|fim_middle|>
corrupt_png_error = ('Corrupt PNG in file %s. Note that binaries are not '
'correctly uploaded to the code review tool and must be directly '
'submitted using the dcommit command.')
for f in files:
base_image = self.input_api.os_path.join(self.paths[0][1], f)
if not os.path.exists(base_image):
results.append(self.output_api.PresubmitError(
'Base image %s does not exist' % self.input_api.os_path.join(
repository_path, base_image)))
continue
try:
base_dimensions = ImageSize(base_image)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, base_image)))
continue
# Find all scaled versions of the base image and verify their sizes.
for i in range(1, len(self.paths)):
image_path = self.input_api.os_path.join(self.paths[i][1], f)
if not os.path.exists(image_path):
continue
# Ensure that each image for a particular scale factor is the
# correct scale of the base image.
try:
scaled_dimensions = ImageSize(image_path)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, image_path)))
continue
for dimension_name, base_size, scaled_size in zip(
('width', 'height'), base_dimensions, scaled_dimensions):
valid_sizes = ValidSizes(base_size, self.paths[i][0])
if scaled_size not in valid_sizes:
results.append(self.output_api.PresubmitError(
'Image %s has %s %d, expected to be %s' % (
self.input_api.os_path.join(repository_path, image_path),
dimension_name,
scaled_size,
' or '.join(map(str, valid_sizes)))))
return results
<|fim▁end|> | files.append(relative_path) |
<|file_name|>resource_scale_factors.py<|end_file_name|><|fim▁begin|># Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium browser resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""
import os
import struct
class InvalidPNGException(Exception):
pass
class ResourceScaleFactors(object):
"""Verifier of image dimensions for Chromium resources.
This class verifies the image dimensions of resources in the various
resource subdirectories.
Attributes:
paths: An array of tuples giving the folders to check and their
relevant scale factors. For example:
[(100, 'default_100_percent'), (200, 'default_200_percent')]
"""
def __init__(self, input_api, output_api, paths):
""" Initializes ResourceScaleFactors with paths."""
self.input_api = input_api
self.output_api = output_api
self.paths = paths
def RunChecks(self):
"""Verifies the scale factors of resources being added or modified.
Returns:
An array of presubmit errors if any images were detected not
having the correct dimensions.
"""
def ImageSize(filename):
with open(filename, 'rb', buffering=0) as f:
data = f.read(24)
if data[:8] != '\x89PNG\r\n\x1A\n' or data[12:16] != 'IHDR':
raise InvalidPNGException
return struct.unpack('>ii', data[16:24])
# Returns a list of valid scaled image sizes. The valid sizes are the
# floor and ceiling of (base_size * scale_percent / 100). This is equivalent
# to requiring that the actual scaled size is less than one pixel away from
# the exact scaled size.
def ValidSizes(base_size, scale_percent):
return sorted(set([(base_size * scale_percent) / 100,
(base_size * scale_percent + 99) / 100]))
repository_path = self.input_api.os_path.relpath(
self.input_api.PresubmitLocalPath(),
self.input_api.change.RepositoryRoot())
results = []
# Check for affected files in any of the paths specified.
affected_files = self.input_api.AffectedFiles(include_deletes=False)
files = []
for f in affected_files:
for path_spec in self.paths:
path_root = self.input_api.os_path.join(
repository_path, path_spec[1])
if (f.LocalPath().endswith('.png') and
f.LocalPath().startswith(path_root)):
# Only save the relative path from the resource directory.
relative_path = self.input_api.os_path.relpath(f.LocalPath(),
path_root)
if relative_path not in files:
files.append(relative_path)
corrupt_png_error = ('Corrupt PNG in file %s. Note that binaries are not '
'correctly uploaded to the code review tool and must be directly '
'submitted using the dcommit command.')
for f in files:
base_image = self.input_api.os_path.join(self.paths[0][1], f)
if not os.path.exists(base_image):
<|fim_middle|>
try:
base_dimensions = ImageSize(base_image)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, base_image)))
continue
# Find all scaled versions of the base image and verify their sizes.
for i in range(1, len(self.paths)):
image_path = self.input_api.os_path.join(self.paths[i][1], f)
if not os.path.exists(image_path):
continue
# Ensure that each image for a particular scale factor is the
# correct scale of the base image.
try:
scaled_dimensions = ImageSize(image_path)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, image_path)))
continue
for dimension_name, base_size, scaled_size in zip(
('width', 'height'), base_dimensions, scaled_dimensions):
valid_sizes = ValidSizes(base_size, self.paths[i][0])
if scaled_size not in valid_sizes:
results.append(self.output_api.PresubmitError(
'Image %s has %s %d, expected to be %s' % (
self.input_api.os_path.join(repository_path, image_path),
dimension_name,
scaled_size,
' or '.join(map(str, valid_sizes)))))
return results
<|fim▁end|> | results.append(self.output_api.PresubmitError(
'Base image %s does not exist' % self.input_api.os_path.join(
repository_path, base_image)))
continue |
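# Annotation (not in the original script): presubmit checks report problems
# by returning result objects instead of raising; every PresubmitError
# collected in results is later shown to the change author by depot_tools,
# which blocks the upload until the images are fixed.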
<|file_name|>resource_scale_factors.py<|end_file_name|><|fim▁begin|># Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium browser resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""
import os
import struct
class InvalidPNGException(Exception):
pass
class ResourceScaleFactors(object):
"""Verifier of image dimensions for Chromium resources.
This class verifies the image dimensions of resources in the various
resource subdirectories.
Attributes:
paths: An array of tuples giving the folders to check and their
relevant scale factors. For example:
[(100, 'default_100_percent'), (200, 'default_200_percent')]
"""
def __init__(self, input_api, output_api, paths):
""" Initializes ResourceScaleFactors with paths."""
self.input_api = input_api
self.output_api = output_api
self.paths = paths
def RunChecks(self):
"""Verifies the scale factors of resources being added or modified.
Returns:
An array of presubmit errors if any images were detected not
having the correct dimensions.
"""
def ImageSize(filename):
with open(filename, 'rb', buffering=0) as f:
data = f.read(24)
if data[:8] != '\x89PNG\r\n\x1A\n' or data[12:16] != 'IHDR':
raise InvalidPNGException
return struct.unpack('>ii', data[16:24])
# Returns a list of valid scaled image sizes. The valid sizes are the
# floor and ceiling of (base_size * scale_percent / 100). This is equivalent
# to requiring that the actual scaled size is less than one pixel away from
# the exact scaled size.
def ValidSizes(base_size, scale_percent):
return sorted(set([(base_size * scale_percent) / 100,
(base_size * scale_percent + 99) / 100]))
repository_path = self.input_api.os_path.relpath(
self.input_api.PresubmitLocalPath(),
self.input_api.change.RepositoryRoot())
results = []
# Check for affected files in any of the paths specified.
affected_files = self.input_api.AffectedFiles(include_deletes=False)
files = []
for f in affected_files:
for path_spec in self.paths:
path_root = self.input_api.os_path.join(
repository_path, path_spec[1])
if (f.LocalPath().endswith('.png') and
f.LocalPath().startswith(path_root)):
# Only save the relative path from the resource directory.
relative_path = self.input_api.os_path.relpath(f.LocalPath(),
path_root)
if relative_path not in files:
files.append(relative_path)
corrupt_png_error = ('Corrupt PNG in file %s. Note that binaries are not '
'correctly uploaded to the code review tool and must be directly '
'submitted using the dcommit command.')
for f in files:
base_image = self.input_api.os_path.join(self.paths[0][1], f)
if not os.path.exists(base_image):
results.append(self.output_api.PresubmitError(
'Base image %s does not exist' % self.input_api.os_path.join(
repository_path, base_image)))
continue
try:
base_dimensions = ImageSize(base_image)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, base_image)))
continue
# Find all scaled versions of the base image and verify their sizes.
for i in range(1, len(self.paths)):
image_path = self.input_api.os_path.join(self.paths[i][1], f)
if not os.path.exists(image_path):
<|fim_middle|>
# Ensure that each image for a particular scale factor is the
# correct scale of the base image.
try:
scaled_dimensions = ImageSize(image_path)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, image_path)))
continue
for dimension_name, base_size, scaled_size in zip(
('width', 'height'), base_dimensions, scaled_dimensions):
valid_sizes = ValidSizes(base_size, self.paths[i][0])
if scaled_size not in valid_sizes:
results.append(self.output_api.PresubmitError(
'Image %s has %s %d, expected to be %s' % (
self.input_api.os_path.join(repository_path, image_path),
dimension_name,
scaled_size,
' or '.join(map(str, valid_sizes)))))
return results
<|fim▁end|> | continue |
<|file_name|>resource_scale_factors.py<|end_file_name|><|fim▁begin|># Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium browser resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""
import os
import struct
class InvalidPNGException(Exception):
pass
class ResourceScaleFactors(object):
"""Verifier of image dimensions for Chromium resources.
This class verifies the image dimensions of resources in the various
resource subdirectories.
Attributes:
paths: An array of tuples giving the folders to check and their
relevant scale factors. For example:
[(100, 'default_100_percent'), (200, 'default_200_percent')]
"""
def __init__(self, input_api, output_api, paths):
""" Initializes ResourceScaleFactors with paths."""
self.input_api = input_api
self.output_api = output_api
self.paths = paths
def RunChecks(self):
"""Verifies the scale factors of resources being added or modified.
Returns:
An array of presubmit errors if any images were detected not
having the correct dimensions.
"""
def ImageSize(filename):
with open(filename, 'rb', buffering=0) as f:
data = f.read(24)
if data[:8] != '\x89PNG\r\n\x1A\n' or data[12:16] != 'IHDR':
raise InvalidPNGException
return struct.unpack('>ii', data[16:24])
# Returns a list of valid scaled image sizes. The valid sizes are the
# floor and ceiling of (base_size * scale_percent / 100). This is equivalent
# to requiring that the actual scaled size is less than one pixel away from
# the exact scaled size.
def ValidSizes(base_size, scale_percent):
return sorted(set([(base_size * scale_percent) / 100,
(base_size * scale_percent + 99) / 100]))
repository_path = self.input_api.os_path.relpath(
self.input_api.PresubmitLocalPath(),
self.input_api.change.RepositoryRoot())
results = []
# Check for affected files in any of the paths specified.
affected_files = self.input_api.AffectedFiles(include_deletes=False)
files = []
for f in affected_files:
for path_spec in self.paths:
path_root = self.input_api.os_path.join(
repository_path, path_spec[1])
if (f.LocalPath().endswith('.png') and
f.LocalPath().startswith(path_root)):
# Only save the relative path from the resource directory.
relative_path = self.input_api.os_path.relpath(f.LocalPath(),
path_root)
if relative_path not in files:
files.append(relative_path)
corrupt_png_error = ('Corrupt PNG in file %s. Note that binaries are not '
'correctly uploaded to the code review tool and must be directly '
'submitted using the dcommit command.')
for f in files:
base_image = self.input_api.os_path.join(self.paths[0][1], f)
if not os.path.exists(base_image):
results.append(self.output_api.PresubmitError(
'Base image %s does not exist' % self.input_api.os_path.join(
repository_path, base_image)))
continue
try:
base_dimensions = ImageSize(base_image)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, base_image)))
continue
# Find all scaled versions of the base image and verify their sizes.
for i in range(1, len(self.paths)):
image_path = self.input_api.os_path.join(self.paths[i][1], f)
if not os.path.exists(image_path):
continue
# Ensure that each image for a particular scale factor is the
# correct scale of the base image.
try:
scaled_dimensions = ImageSize(image_path)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, image_path)))
continue
for dimension_name, base_size, scaled_size in zip(
('width', 'height'), base_dimensions, scaled_dimensions):
valid_sizes = ValidSizes(base_size, self.paths[i][0])
if scaled_size not in valid_sizes:
<|fim_middle|>
return results
<|fim▁end|> | results.append(self.output_api.PresubmitError(
'Image %s has %s %d, expected to be %s' % (
self.input_api.os_path.join(repository_path, image_path),
dimension_name,
scaled_size,
' or '.join(map(str, valid_sizes))))) |
<|file_name|>resource_scale_factors.py<|end_file_name|><|fim▁begin|># Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium browser resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""
import os
import struct
class InvalidPNGException(Exception):
pass
class ResourceScaleFactors(object):
"""Verifier of image dimensions for Chromium resources.
This class verifies the image dimensions of resources in the various
resource subdirectories.
Attributes:
paths: An array of tuples giving the folders to check and their
relevant scale factors. For example:
[(100, 'default_100_percent'), (200, 'default_200_percent')]
"""
def <|fim_middle|>(self, input_api, output_api, paths):
""" Initializes ResourceScaleFactors with paths."""
self.input_api = input_api
self.output_api = output_api
self.paths = paths
def RunChecks(self):
"""Verifies the scale factors of resources being added or modified.
Returns:
An array of presubmit errors if any images were detected not
having the correct dimensions.
"""
def ImageSize(filename):
with open(filename, 'rb', buffering=0) as f:
data = f.read(24)
if data[:8] != '\x89PNG\r\n\x1A\n' or data[12:16] != 'IHDR':
raise InvalidPNGException
return struct.unpack('>ii', data[16:24])
# Returns a list of valid scaled image sizes. The valid sizes are the
# floor and ceiling of (base_size * scale_percent / 100). This is equivalent
# to requiring that the actual scaled size is less than one pixel away from
# the exact scaled size.
def ValidSizes(base_size, scale_percent):
return sorted(set([(base_size * scale_percent) / 100,
(base_size * scale_percent + 99) / 100]))
repository_path = self.input_api.os_path.relpath(
self.input_api.PresubmitLocalPath(),
self.input_api.change.RepositoryRoot())
results = []
# Check for affected files in any of the paths specified.
affected_files = self.input_api.AffectedFiles(include_deletes=False)
files = []
for f in affected_files:
for path_spec in self.paths:
path_root = self.input_api.os_path.join(
repository_path, path_spec[1])
if (f.LocalPath().endswith('.png') and
f.LocalPath().startswith(path_root)):
# Only save the relative path from the resource directory.
relative_path = self.input_api.os_path.relpath(f.LocalPath(),
path_root)
if relative_path not in files:
files.append(relative_path)
corrupt_png_error = ('Corrupt PNG in file %s. Note that binaries are not '
'correctly uploaded to the code review tool and must be directly '
'submitted using the dcommit command.')
for f in files:
base_image = self.input_api.os_path.join(self.paths[0][1], f)
if not os.path.exists(base_image):
results.append(self.output_api.PresubmitError(
'Base image %s does not exist' % self.input_api.os_path.join(
repository_path, base_image)))
continue
try:
base_dimensions = ImageSize(base_image)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, base_image)))
continue
# Find all scaled versions of the base image and verify their sizes.
for i in range(1, len(self.paths)):
image_path = self.input_api.os_path.join(self.paths[i][1], f)
if not os.path.exists(image_path):
continue
# Ensure that each image for a particular scale factor is the
# correct scale of the base image.
try:
scaled_dimensions = ImageSize(image_path)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, image_path)))
continue
for dimension_name, base_size, scaled_size in zip(
('width', 'height'), base_dimensions, scaled_dimensions):
valid_sizes = ValidSizes(base_size, self.paths[i][0])
if scaled_size not in valid_sizes:
results.append(self.output_api.PresubmitError(
'Image %s has %s %d, expected to be %s' % (
self.input_api.os_path.join(repository_path, image_path),
dimension_name,
scaled_size,
' or '.join(map(str, valid_sizes)))))
return results
<|fim▁end|> | __init__ |
<|file_name|>resource_scale_factors.py<|end_file_name|><|fim▁begin|># Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium browser resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""
import os
import struct
class InvalidPNGException(Exception):
pass
class ResourceScaleFactors(object):
"""Verifier of image dimensions for Chromium resources.
This class verifies the image dimensions of resources in the various
resource subdirectories.
Attributes:
paths: An array of tuples giving the folders to check and their
relevant scale factors. For example:
[(100, 'default_100_percent'), (200, 'default_200_percent')]
"""
def __init__(self, input_api, output_api, paths):
""" Initializes ResourceScaleFactors with paths."""
self.input_api = input_api
self.output_api = output_api
self.paths = paths
def <|fim_middle|>(self):
"""Verifies the scale factors of resources being added or modified.
Returns:
An array of presubmit errors if any images were detected not
having the correct dimensions.
"""
def ImageSize(filename):
with open(filename, 'rb', buffering=0) as f:
data = f.read(24)
if data[:8] != '\x89PNG\r\n\x1A\n' or data[12:16] != 'IHDR':
raise InvalidPNGException
return struct.unpack('>ii', data[16:24])
# Returns a list of valid scaled image sizes. The valid sizes are the
# floor and ceiling of (base_size * scale_percent / 100). This is equivalent
# to requiring that the actual scaled size is less than one pixel away from
# the exact scaled size.
def ValidSizes(base_size, scale_percent):
return sorted(set([(base_size * scale_percent) / 100,
(base_size * scale_percent + 99) / 100]))
repository_path = self.input_api.os_path.relpath(
self.input_api.PresubmitLocalPath(),
self.input_api.change.RepositoryRoot())
results = []
# Check for affected files in any of the paths specified.
affected_files = self.input_api.AffectedFiles(include_deletes=False)
files = []
for f in affected_files:
for path_spec in self.paths:
path_root = self.input_api.os_path.join(
repository_path, path_spec[1])
if (f.LocalPath().endswith('.png') and
f.LocalPath().startswith(path_root)):
# Only save the relative path from the resource directory.
relative_path = self.input_api.os_path.relpath(f.LocalPath(),
path_root)
if relative_path not in files:
files.append(relative_path)
corrupt_png_error = ('Corrupt PNG in file %s. Note that binaries are not '
'correctly uploaded to the code review tool and must be directly '
'submitted using the dcommit command.')
for f in files:
base_image = self.input_api.os_path.join(self.paths[0][1], f)
if not os.path.exists(base_image):
results.append(self.output_api.PresubmitError(
'Base image %s does not exist' % self.input_api.os_path.join(
repository_path, base_image)))
continue
try:
base_dimensions = ImageSize(base_image)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, base_image)))
continue
# Find all scaled versions of the base image and verify their sizes.
for i in range(1, len(self.paths)):
image_path = self.input_api.os_path.join(self.paths[i][1], f)
if not os.path.exists(image_path):
continue
# Ensure that each image for a particular scale factor is the
# correct scale of the base image.
try:
scaled_dimensions = ImageSize(image_path)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, image_path)))
continue
for dimension_name, base_size, scaled_size in zip(
('width', 'height'), base_dimensions, scaled_dimensions):
valid_sizes = ValidSizes(base_size, self.paths[i][0])
if scaled_size not in valid_sizes:
results.append(self.output_api.PresubmitError(
'Image %s has %s %d, expected to be %s' % (
self.input_api.os_path.join(repository_path, image_path),
dimension_name,
scaled_size,
' or '.join(map(str, valid_sizes)))))
return results
<|fim▁end|> | RunChecks |
<|file_name|>resource_scale_factors.py<|end_file_name|><|fim▁begin|># Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium browser resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""
import os
import struct
class InvalidPNGException(Exception):
pass
class ResourceScaleFactors(object):
"""Verifier of image dimensions for Chromium resources.
This class verifies the image dimensions of resources in the various
resource subdirectories.
Attributes:
paths: An array of tuples giving the folders to check and their
relevant scale factors. For example:
[(100, 'default_100_percent'), (200, 'default_200_percent')]
"""
def __init__(self, input_api, output_api, paths):
""" Initializes ResourceScaleFactors with paths."""
self.input_api = input_api
self.output_api = output_api
self.paths = paths
def RunChecks(self):
"""Verifies the scale factors of resources being added or modified.
Returns:
An array of presubmit errors if any images were detected not
having the correct dimensions.
"""
def <|fim_middle|>(filename):
with open(filename, 'rb', buffering=0) as f:
data = f.read(24)
if data[:8] != '\x89PNG\r\n\x1A\n' or data[12:16] != 'IHDR':
raise InvalidPNGException
return struct.unpack('>ii', data[16:24])
# Returns a list of valid scaled image sizes. The valid sizes are the
# floor and ceiling of (base_size * scale_percent / 100). This is equivalent
# to requiring that the actual scaled size is less than one pixel away from
# the exact scaled size.
def ValidSizes(base_size, scale_percent):
return sorted(set([(base_size * scale_percent) / 100,
(base_size * scale_percent + 99) / 100]))
repository_path = self.input_api.os_path.relpath(
self.input_api.PresubmitLocalPath(),
self.input_api.change.RepositoryRoot())
results = []
# Check for affected files in any of the paths specified.
affected_files = self.input_api.AffectedFiles(include_deletes=False)
files = []
for f in affected_files:
for path_spec in self.paths:
path_root = self.input_api.os_path.join(
repository_path, path_spec[1])
if (f.LocalPath().endswith('.png') and
f.LocalPath().startswith(path_root)):
# Only save the relative path from the resource directory.
relative_path = self.input_api.os_path.relpath(f.LocalPath(),
path_root)
if relative_path not in files:
files.append(relative_path)
corrupt_png_error = ('Corrupt PNG in file %s. Note that binaries are not '
'correctly uploaded to the code review tool and must be directly '
'submitted using the dcommit command.')
for f in files:
base_image = self.input_api.os_path.join(self.paths[0][1], f)
if not os.path.exists(base_image):
results.append(self.output_api.PresubmitError(
'Base image %s does not exist' % self.input_api.os_path.join(
repository_path, base_image)))
continue
try:
base_dimensions = ImageSize(base_image)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, base_image)))
continue
# Find all scaled versions of the base image and verify their sizes.
for i in range(1, len(self.paths)):
image_path = self.input_api.os_path.join(self.paths[i][1], f)
if not os.path.exists(image_path):
continue
# Ensure that each image for a particular scale factor is the
# correct scale of the base image.
try:
scaled_dimensions = ImageSize(image_path)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, image_path)))
continue
for dimension_name, base_size, scaled_size in zip(
('width', 'height'), base_dimensions, scaled_dimensions):
valid_sizes = ValidSizes(base_size, self.paths[i][0])
if scaled_size not in valid_sizes:
results.append(self.output_api.PresubmitError(
'Image %s has %s %d, expected to be %s' % (
self.input_api.os_path.join(repository_path, image_path),
dimension_name,
scaled_size,
' or '.join(map(str, valid_sizes)))))
return results
<|fim▁end|> | ImageSize |
<|file_name|>resource_scale_factors.py<|end_file_name|><|fim▁begin|># Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium browser resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""
import os
import struct
class InvalidPNGException(Exception):
pass
class ResourceScaleFactors(object):
"""Verifier of image dimensions for Chromium resources.
This class verifies the image dimensions of resources in the various
resource subdirectories.
Attributes:
paths: An array of tuples giving the folders to check and their
relevant scale factors. For example:
[(100, 'default_100_percent'), (200, 'default_200_percent')]
"""
def __init__(self, input_api, output_api, paths):
""" Initializes ResourceScaleFactors with paths."""
self.input_api = input_api
self.output_api = output_api
self.paths = paths
def RunChecks(self):
"""Verifies the scale factors of resources being added or modified.
Returns:
An array of presubmit errors if any images were detected not
having the correct dimensions.
"""
def ImageSize(filename):
with open(filename, 'rb', buffering=0) as f:
data = f.read(24)
if data[:8] != '\x89PNG\r\n\x1A\n' or data[12:16] != 'IHDR':
raise InvalidPNGException
return struct.unpack('>ii', data[16:24])
# Returns a list of valid scaled image sizes. The valid sizes are the
# floor and ceiling of (base_size * scale_percent / 100). This is equivalent
# to requiring that the actual scaled size is less than one pixel away from
# the exact scaled size.
def <|fim_middle|>(base_size, scale_percent):
return sorted(set([(base_size * scale_percent) / 100,
(base_size * scale_percent + 99) / 100]))
repository_path = self.input_api.os_path.relpath(
self.input_api.PresubmitLocalPath(),
self.input_api.change.RepositoryRoot())
results = []
# Check for affected files in any of the paths specified.
affected_files = self.input_api.AffectedFiles(include_deletes=False)
files = []
for f in affected_files:
for path_spec in self.paths:
path_root = self.input_api.os_path.join(
repository_path, path_spec[1])
if (f.LocalPath().endswith('.png') and
f.LocalPath().startswith(path_root)):
# Only save the relative path from the resource directory.
relative_path = self.input_api.os_path.relpath(f.LocalPath(),
path_root)
if relative_path not in files:
files.append(relative_path)
corrupt_png_error = ('Corrupt PNG in file %s. Note that binaries are not '
'correctly uploaded to the code review tool and must be directly '
'submitted using the dcommit command.')
for f in files:
base_image = self.input_api.os_path.join(self.paths[0][1], f)
if not os.path.exists(base_image):
results.append(self.output_api.PresubmitError(
'Base image %s does not exist' % self.input_api.os_path.join(
repository_path, base_image)))
continue
try:
base_dimensions = ImageSize(base_image)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, base_image)))
continue
# Find all scaled versions of the base image and verify their sizes.
for i in range(1, len(self.paths)):
image_path = self.input_api.os_path.join(self.paths[i][1], f)
if not os.path.exists(image_path):
continue
# Ensure that each image for a particular scale factor is the
# correct scale of the base image.
try:
scaled_dimensions = ImageSize(image_path)
except InvalidPNGException:
results.append(self.output_api.PresubmitError(corrupt_png_error %
self.input_api.os_path.join(repository_path, image_path)))
continue
for dimension_name, base_size, scaled_size in zip(
('width', 'height'), base_dimensions, scaled_dimensions):
valid_sizes = ValidSizes(base_size, self.paths[i][0])
if scaled_size not in valid_sizes:
results.append(self.output_api.PresubmitError(
'Image %s has %s %d, expected to be %s' % (
self.input_api.os_path.join(repository_path, image_path),
dimension_name,
scaled_size,
' or '.join(map(str, valid_sizes)))))
return results
<|fim▁end|> | ValidSizes |
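The ImageSize helper above reads only the first 24 bytes of a PNG — the 8-byte signature, a 4-byte chunk length, the 4-byte 'IHDR' tag, then big-endian width and height — and ValidSizes accepts the floor and ceiling of base_size * scale_percent / 100. A quick self-check of both, restated in Python 3 (the presubmit script itself is Python 2, where read() returns str and / is integer division):

import struct

# Python 3 restatement of the row's 24-byte PNG header parse.
def image_size(data):
    if data[:8] != b'\x89PNG\r\n\x1a\n' or data[12:16] != b'IHDR':
        raise ValueError('not a PNG')
    return struct.unpack('>ii', data[16:24])

# Floor and ceiling of base_size * scale_percent / 100, as in ValidSizes.
def valid_sizes(base_size, scale_percent):
    return sorted({(base_size * scale_percent) // 100,
                   (base_size * scale_percent + 99) // 100})

header = (b'\x89PNG\r\n\x1a\n' + struct.pack('>i', 13) + b'IHDR'
          + struct.pack('>ii', 640, 480))
assert image_size(header) == (640, 480)
assert valid_sizes(7, 150) == [10, 11]   # 10.5 px may round either way
assert valid_sizes(8, 200) == [16]       # exact scale: a single valid size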
<|file_name|>test_relu.py<|end_file_name|><|fim▁begin|><|fim▁hole|># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration relu tests"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, dtype, a_min, a_max):
assert a_min >= np.iinfo(dtype).min and a_max <= np.iinfo(dtype).max
a = relay.var("a", shape=shape, dtype=dtype)
relu = relay.clip(a, a_min=a_min, a_max=a_max)
return relu
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
def test_relu(dtype):
trials = [
((1, 4, 4, 4), 65, 178, "uint8"),
((1, 8, 4, 2), 1, 254, "uint8"),
((1, 16), 12, 76, "uint8"),
((1, 4, 4, 4), 65, 125, "int8"),
((1, 8, 4, 2), -100, 100, "int8"),
((1, 16), -120, -20, "int8"),
]
np.random.seed(0)
for shape, a_min, a_max, trial_dtype in trials:
if trial_dtype == dtype:
inputs = {
"a": tvm.nd.array(
np.random.randint(
low=np.iinfo(dtype).min,
high=np.iinfo(dtype).max + 1,
size=shape,
dtype=dtype,
)
),
}
outputs = []
for npu in [False, True]:
model = _get_model(inputs["a"].shape, dtype, a_min, a_max)
mod = tei.make_module(model, {})
outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))
tei.verify(outputs, dtype, 1)
@requires_ethosn
def test_relu_failure():
trials = [
((1, 4, 4, 4, 4), "uint8", 65, 78, "dimensions=5, dimensions must be <= 4"),
((1, 8, 4, 2), "int16", 1, 254, "dtype='int16', dtype must be either uint8, int8 or int32"),
((1, 8, 4, 2), "uint8", 254, 1, "Relu has lower bound > upper bound"),
((2, 2, 2, 2), "uint8", 1, 63, "batch size=2, batch size must = 1; "),
]
for shape, dtype, a_min, a_max, err_msg in trials:
model = _get_model(shape, dtype, a_min, a_max)
mod = tei.make_ethosn_partition(model)
tei.test_error(mod, {}, err_msg)<|fim▁end|> | |
<|file_name|>test_relu.py<|end_file_name|><|fim▁begin|># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration relu tests"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, dtype, a_min, a_max):
<|fim_middle|>
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
def test_relu(dtype):
trials = [
((1, 4, 4, 4), 65, 178, "uint8"),
((1, 8, 4, 2), 1, 254, "uint8"),
((1, 16), 12, 76, "uint8"),
((1, 4, 4, 4), 65, 125, "int8"),
((1, 8, 4, 2), -100, 100, "int8"),
((1, 16), -120, -20, "int8"),
]
np.random.seed(0)
for shape, a_min, a_max, trial_dtype in trials:
if trial_dtype == dtype:
inputs = {
"a": tvm.nd.array(
np.random.randint(
low=np.iinfo(dtype).min,
high=np.iinfo(dtype).max + 1,
size=shape,
dtype=dtype,
)
),
}
outputs = []
for npu in [False, True]:
model = _get_model(inputs["a"].shape, dtype, a_min, a_max)
mod = tei.make_module(model, {})
outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))
tei.verify(outputs, dtype, 1)
@requires_ethosn
def test_relu_failure():
trials = [
((1, 4, 4, 4, 4), "uint8", 65, 78, "dimensions=5, dimensions must be <= 4"),
((1, 8, 4, 2), "int16", 1, 254, "dtype='int16', dtype must be either uint8, int8 or int32"),
((1, 8, 4, 2), "uint8", 254, 1, "Relu has lower bound > upper bound"),
((2, 2, 2, 2), "uint8", 1, 63, "batch size=2, batch size must = 1; "),
]
for shape, dtype, a_min, a_max, err_msg in trials:
model = _get_model(shape, dtype, a_min, a_max)
mod = tei.make_ethosn_partition(model)
tei.test_error(mod, {}, err_msg)
<|fim▁end|> | assert a_min >= np.iinfo(dtype).min and a_max <= np.iinfo(dtype).max
a = relay.var("a", shape=shape, dtype=dtype)
relu = relay.clip(a, a_min=a_min, a_max=a_max)
return relu |
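The spliced-in _get_model builds a relay.clip node, i.e. a relu with explicit quantized bounds. As a plain-numpy reference for what the trials below exercise (an illustration of the operator's semantics, not of the Ethos-N lowering):

import numpy as np

# Reference behaviour of relay.clip(a, a_min=-100, a_max=100) on int8 data,
# mirroring one of the int8 trials in these tests.
a = np.array([-128, -101, -100, 0, 99, 100, 127], dtype=np.int8)
out = np.clip(a, -100, 100)
assert out.tolist() == [-100, -100, -100, 0, 99, 100, 100]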
<|file_name|>test_relu.py<|end_file_name|><|fim▁begin|># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration relu tests"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, dtype, a_min, a_max):
assert a_min >= np.iinfo(dtype).min and a_max <= np.iinfo(dtype).max
a = relay.var("a", shape=shape, dtype=dtype)
relu = relay.clip(a, a_min=a_min, a_max=a_max)
return relu
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
def test_relu(dtype):
<|fim_middle|>
@requires_ethosn
def test_relu_failure():
trials = [
((1, 4, 4, 4, 4), "uint8", 65, 78, "dimensions=5, dimensions must be <= 4"),
((1, 8, 4, 2), "int16", 1, 254, "dtype='int16', dtype must be either uint8, int8 or int32"),
((1, 8, 4, 2), "uint8", 254, 1, "Relu has lower bound > upper bound"),
((2, 2, 2, 2), "uint8", 1, 63, "batch size=2, batch size must = 1; "),
]
for shape, dtype, a_min, a_max, err_msg in trials:
model = _get_model(shape, dtype, a_min, a_max)
mod = tei.make_ethosn_partition(model)
tei.test_error(mod, {}, err_msg)
<|fim▁end|> | trials = [
((1, 4, 4, 4), 65, 178, "uint8"),
((1, 8, 4, 2), 1, 254, "uint8"),
((1, 16), 12, 76, "uint8"),
((1, 4, 4, 4), 65, 125, "int8"),
((1, 8, 4, 2), -100, 100, "int8"),
((1, 16), -120, -20, "int8"),
]
np.random.seed(0)
for shape, a_min, a_max, trial_dtype in trials:
if trial_dtype == dtype:
inputs = {
"a": tvm.nd.array(
np.random.randint(
low=np.iinfo(dtype).min,
high=np.iinfo(dtype).max + 1,
size=shape,
dtype=dtype,
)
),
}
outputs = []
for npu in [False, True]:
model = _get_model(inputs["a"].shape, dtype, a_min, a_max)
mod = tei.make_module(model, {})
outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))
tei.verify(outputs, dtype, 1) |
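The input generator in the completion draws from the full range of the quantized dtype; np.random.randint's high bound is exclusive, which is why the tests pass np.iinfo(dtype).max + 1. A standalone check of those bounds:

import numpy as np

for dtype in ("uint8", "int8"):
    info = np.iinfo(dtype)
    # high is exclusive, so max + 1 makes info.max reachable.
    x = np.random.randint(low=info.min, high=info.max + 1, size=1000, dtype=dtype)
    assert x.min() >= info.min and x.max() <= info.max

assert (np.iinfo("uint8").min, np.iinfo("uint8").max) == (0, 255)
assert (np.iinfo("int8").min, np.iinfo("int8").max) == (-128, 127)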
<|file_name|>test_relu.py<|end_file_name|><|fim▁begin|># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration relu tests"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, dtype, a_min, a_max):
assert a_min >= np.iinfo(dtype).min and a_max <= np.iinfo(dtype).max
a = relay.var("a", shape=shape, dtype=dtype)
relu = relay.clip(a, a_min=a_min, a_max=a_max)
return relu
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
def test_relu(dtype):
trials = [
((1, 4, 4, 4), 65, 178, "uint8"),
((1, 8, 4, 2), 1, 254, "uint8"),
((1, 16), 12, 76, "uint8"),
((1, 4, 4, 4), 65, 125, "int8"),
((1, 8, 4, 2), -100, 100, "int8"),
((1, 16), -120, -20, "int8"),
]
np.random.seed(0)
for shape, a_min, a_max, trial_dtype in trials:
if trial_dtype == dtype:
inputs = {
"a": tvm.nd.array(
np.random.randint(
low=np.iinfo(dtype).min,
high=np.iinfo(dtype).max + 1,
size=shape,
dtype=dtype,
)
),
}
outputs = []
for npu in [False, True]:
model = _get_model(inputs["a"].shape, dtype, a_min, a_max)
mod = tei.make_module(model, {})
outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))
tei.verify(outputs, dtype, 1)
@requires_ethosn
def test_relu_failure():
<|fim_middle|>
<|fim▁end|> | trials = [
((1, 4, 4, 4, 4), "uint8", 65, 78, "dimensions=5, dimensions must be <= 4"),
((1, 8, 4, 2), "int16", 1, 254, "dtype='int16', dtype must be either uint8, int8 or int32"),
((1, 8, 4, 2), "uint8", 254, 1, "Relu has lower bound > upper bound"),
((2, 2, 2, 2), "uint8", 1, 63, "batch size=2, batch size must = 1; "),
]
for shape, dtype, a_min, a_max, err_msg in trials:
model = _get_model(shape, dtype, a_min, a_max)
mod = tei.make_ethosn_partition(model)
tei.test_error(mod, {}, err_msg) |
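test_relu_failure drives a table of (shape, dtype, bounds, expected message) through tei.test_error, whose exact API is project infrastructure not shown here. A generic sketch of the same table-driven pattern in plain pytest — the helper and its names are assumptions, not the project's code; re.escape keeps literal fragments from being read as regexes:

import re
import pytest

def check_failure_cases(build, cases):
    # cases: iterable of (args, expected_error_fragment)
    for args, fragment in cases:
        with pytest.raises(Exception, match=re.escape(fragment)):
            build(*args)

def _bad_clip(shape, a_min, a_max):
    # Stand-in for a backend that rejects inverted bounds.
    if a_min > a_max:
        raise ValueError("Relu has lower bound > upper bound")

check_failure_cases(_bad_clip,
                    [(((1, 8, 4, 2), 254, 1), "Relu has lower bound > upper bound")])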
<|file_name|>test_relu.py<|end_file_name|><|fim▁begin|># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration relu tests"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, dtype, a_min, a_max):
assert a_min >= np.iinfo(dtype).min and a_max <= np.iinfo(dtype).max
a = relay.var("a", shape=shape, dtype=dtype)
relu = relay.clip(a, a_min=a_min, a_max=a_max)
return relu
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
def test_relu(dtype):
trials = [
((1, 4, 4, 4), 65, 178, "uint8"),
((1, 8, 4, 2), 1, 254, "uint8"),
((1, 16), 12, 76, "uint8"),
((1, 4, 4, 4), 65, 125, "int8"),
((1, 8, 4, 2), -100, 100, "int8"),
((1, 16), -120, -20, "int8"),
]
np.random.seed(0)
for shape, a_min, a_max, trial_dtype in trials:
if trial_dtype == dtype:
<|fim_middle|>
@requires_ethosn
def test_relu_failure():
trials = [
((1, 4, 4, 4, 4), "uint8", 65, 78, "dimensions=5, dimensions must be <= 4"),
((1, 8, 4, 2), "int16", 1, 254, "dtype='int16', dtype must be either uint8, int8 or int32"),
((1, 8, 4, 2), "uint8", 254, 1, "Relu has lower bound > upper bound"),
((2, 2, 2, 2), "uint8", 1, 63, "batch size=2, batch size must = 1; "),
]
for shape, dtype, a_min, a_max, err_msg in trials:
model = _get_model(shape, dtype, a_min, a_max)
mod = tei.make_ethosn_partition(model)
tei.test_error(mod, {}, err_msg)
<|fim▁end|> | inputs = {
"a": tvm.nd.array(
np.random.randint(
low=np.iinfo(dtype).min,
high=np.iinfo(dtype).max + 1,
size=shape,
dtype=dtype,
)
),
}
outputs = []
for npu in [False, True]:
model = _get_model(inputs["a"].shape, dtype, a_min, a_max)
mod = tei.make_module(model, {})
outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))
tei.verify(outputs, dtype, 1) |
<|file_name|>test_relu.py<|end_file_name|><|fim▁begin|># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration relu tests"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def <|fim_middle|>(shape, dtype, a_min, a_max):
assert a_min >= np.iinfo(dtype).min and a_max <= np.iinfo(dtype).max
a = relay.var("a", shape=shape, dtype=dtype)
relu = relay.clip(a, a_min=a_min, a_max=a_max)
return relu
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
def test_relu(dtype):
trials = [
((1, 4, 4, 4), 65, 178, "uint8"),
((1, 8, 4, 2), 1, 254, "uint8"),
((1, 16), 12, 76, "uint8"),
((1, 4, 4, 4), 65, 125, "int8"),
((1, 8, 4, 2), -100, 100, "int8"),
((1, 16), -120, -20, "int8"),
]
np.random.seed(0)
for shape, a_min, a_max, trial_dtype in trials:
if trial_dtype == dtype:
inputs = {
"a": tvm.nd.array(
np.random.randint(
low=np.iinfo(dtype).min,
high=np.iinfo(dtype).max + 1,
size=shape,
dtype=dtype,
)
),
}
outputs = []
for npu in [False, True]:
model = _get_model(inputs["a"].shape, dtype, a_min, a_max)
mod = tei.make_module(model, {})
outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))
tei.verify(outputs, dtype, 1)
@requires_ethosn
def test_relu_failure():
trials = [
((1, 4, 4, 4, 4), "uint8", 65, 78, "dimensions=5, dimensions must be <= 4"),
((1, 8, 4, 2), "int16", 1, 254, "dtype='int16', dtype must be either uint8, int8 or int32"),
((1, 8, 4, 2), "uint8", 254, 1, "Relu has lower bound > upper bound"),
((2, 2, 2, 2), "uint8", 1, 63, "batch size=2, batch size must = 1; "),
]
for shape, dtype, a_min, a_max, err_msg in trials:
model = _get_model(shape, dtype, a_min, a_max)
mod = tei.make_ethosn_partition(model)
tei.test_error(mod, {}, err_msg)
<|fim▁end|> | _get_model |
<|file_name|>test_relu.py<|end_file_name|><|fim▁begin|># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration relu tests"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, dtype, a_min, a_max):
assert a_min >= np.iinfo(dtype).min and a_max <= np.iinfo(dtype).max
a = relay.var("a", shape=shape, dtype=dtype)
relu = relay.clip(a, a_min=a_min, a_max=a_max)
return relu
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
def <|fim_middle|>(dtype):
trials = [
((1, 4, 4, 4), 65, 178, "uint8"),
((1, 8, 4, 2), 1, 254, "uint8"),
((1, 16), 12, 76, "uint8"),
((1, 4, 4, 4), 65, 125, "int8"),
((1, 8, 4, 2), -100, 100, "int8"),
((1, 16), -120, -20, "int8"),
]
np.random.seed(0)
for shape, a_min, a_max, trial_dtype in trials:
if trial_dtype == dtype:
inputs = {
"a": tvm.nd.array(
np.random.randint(
low=np.iinfo(dtype).min,
high=np.iinfo(dtype).max + 1,
size=shape,
dtype=dtype,
)
),
}
outputs = []
for npu in [False, True]:
model = _get_model(inputs["a"].shape, dtype, a_min, a_max)
mod = tei.make_module(model, {})
outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))
tei.verify(outputs, dtype, 1)
@requires_ethosn
def test_relu_failure():
trials = [
((1, 4, 4, 4, 4), "uint8", 65, 78, "dimensions=5, dimensions must be <= 4"),
((1, 8, 4, 2), "int16", 1, 254, "dtype='int16', dtype must be either uint8, int8 or int32"),
((1, 8, 4, 2), "uint8", 254, 1, "Relu has lower bound > upper bound"),
((2, 2, 2, 2), "uint8", 1, 63, "batch size=2, batch size must = 1; "),
]
for shape, dtype, a_min, a_max, err_msg in trials:
model = _get_model(shape, dtype, a_min, a_max)
mod = tei.make_ethosn_partition(model)
tei.test_error(mod, {}, err_msg)
<|fim▁end|> | test_relu |
<|file_name|>test_relu.py<|end_file_name|><|fim▁begin|># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration relu tests"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, dtype, a_min, a_max):
assert a_min >= np.iinfo(dtype).min and a_max <= np.iinfo(dtype).max
a = relay.var("a", shape=shape, dtype=dtype)
relu = relay.clip(a, a_min=a_min, a_max=a_max)
return relu
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
def test_relu(dtype):
trials = [
((1, 4, 4, 4), 65, 178, "uint8"),
((1, 8, 4, 2), 1, 254, "uint8"),
((1, 16), 12, 76, "uint8"),
((1, 4, 4, 4), 65, 125, "int8"),
((1, 8, 4, 2), -100, 100, "int8"),
((1, 16), -120, -20, "int8"),
]
np.random.seed(0)
for shape, a_min, a_max, trial_dtype in trials:
if trial_dtype == dtype:
inputs = {
"a": tvm.nd.array(
np.random.randint(
low=np.iinfo(dtype).min,
high=np.iinfo(dtype).max + 1,
size=shape,
dtype=dtype,
)
),
}
outputs = []
for npu in [False, True]:
model = _get_model(inputs["a"].shape, dtype, a_min, a_max)
mod = tei.make_module(model, {})
outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))
tei.verify(outputs, dtype, 1)
@requires_ethosn
def <|fim_middle|>():
trials = [
((1, 4, 4, 4, 4), "uint8", 65, 78, "dimensions=5, dimensions must be <= 4"),
((1, 8, 4, 2), "int16", 1, 254, "dtype='int16', dtype must be either uint8, int8 or int32"),
((1, 8, 4, 2), "uint8", 254, 1, "Relu has lower bound > upper bound"),
((2, 2, 2, 2), "uint8", 1, 63, "batch size=2, batch size must = 1; "),
]
for shape, dtype, a_min, a_max, err_msg in trials:
model = _get_model(shape, dtype, a_min, a_max)
mod = tei.make_ethosn_partition(model)
tei.test_error(mod, {}, err_msg)
<|fim▁end|> | test_relu_failure |
<|file_name|>012_divisable_tri_nums.py<|end_file_name|><|fim▁begin|>## Close<|fim▁hole|><|fim▁end|> | ### What is the value of the first triangle number to have over five hundred divisors?
print max([len(m) for m in map(lambda k: [n for n in range(1,(k+1)) if k%n == 0], [sum(range(n)) for n in range(1,1000)])]) |
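For reference, sum(range(n)) is the triangle number T(n-1) = n(n-1)/2, and the one-liner above builds each triangle number's full divisor list by trial division before printing the largest divisor count among the first 999 triangle numbers — a maximum count, not the first triangle number with over five hundred divisors that the comment asks for. A sketch answering the stated question directly, counting divisors in O(sqrt(n)) per candidate:

def num_divisors(n):
    # Count divisors by pairing d with n // d up to sqrt(n).
    count, d = 0, 1
    while d * d <= n:
        if n % d == 0:
            count += 1 if d * d == n else 2
        d += 1
    return count

def first_triangle_with_over(n_divisors):
    i, tri = 1, 1
    while num_divisors(tri) <= n_divisors:
        i += 1
        tri += i            # T_i = T_{i-1} + i
    return tri

print(first_triangle_with_over(500))   # 76576500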
<|file_name|>twist.py<|end_file_name|><|fim▁begin|>__author__ = "Harish Narayanan"
__copyright__ = "Copyright (C) 2009 Simula Research Laboratory and %s" % __author__
__license__ = "GNU GPL Version 3 or any later version"
from cbc.twist import *
from sys import argv
""" DEMO - Twisting of a hyperelastic cube """
class Twist(StaticHyperelasticity):
""" Definition of the hyperelastic problem """
def mesh(self):
n = 8
return UnitCubeMesh(n, n, n)
# Setting up dirichlet conditions and boundaries
def dirichlet_values(self):
clamp = Expression(("0.0", "0.0", "0.0"))
twist = Expression(("0.0",
"y0 + (x[1] - y0) * cos(theta) - (x[2] - z0) * sin(theta) - x[1]",
"z0 + (x[1] - y0) * sin(theta) + (x[2] - z0) * cos(theta) - x[2]"),
y0=0.5, z0=0.5, theta=pi/6)
return [clamp, twist]
def dirichlet_boundaries(self):
left = "x[0] == 0.0"
right = "x[0] == 1.0"
return [left, right]
# List of material models
def material_model(self):
# Material parameters can either be numbers or spatially
# varying fields. For example,
mu = 3.8461
lmbda = Expression("x[0]*5.8 + (1 - x[0])*5.7")
C10 = 0.171; C01 = 4.89e-3; C20 = -2.4e-4; C30 = 5.e-4
delka = 1.0/sqrt(2.0)
M = Constant((0.0,1.0,0.0))
k1 = 1e2; k2 = 1e1
materials = []
materials.append(MooneyRivlin({'C1':mu/2, 'C2':mu/2, 'bulk':lmbda}))
materials.append(StVenantKirchhoff({'mu':mu, 'bulk':lmbda}))
materials.append(neoHookean({'half_nkT':mu, 'bulk':lmbda}))
materials.append(Isihara({'C10':C10,'C01':C01,'C20':C20,'bulk':lmbda}))
materials.append(Biderman({'C10':C10,'C01':C01,'C20':C20,'C30':C30,'bulk':lmbda}))
materials.append(AnisoTest({'mu1':mu,'mu2':2*mu,'M':M,'bulk':lmbda}))
materials.append(GasserHolzapfelOgden({'mu':mu,'k1':k1,'k2':k2,'M':M,'bulk':lmbda}))
materials.append(Ogden({'alpha1':1.3,'alpha2':5.0,'alpha3':-2.0,\
'mu1':6.3e5,'mu2':0.012e5,'mu3':-0.1e5}))
try:
index = int(argv[1])
except:<|fim▁hole|> index = 2
print str(materials[index])
return materials[index]
def name_method(self, method):
self.method = method
def __str__(self):
return "A hyperelastic cube twisted by 30 degrees solved by " + self.method
# Setup the problem
twist = Twist()
twist.name_method("DISPLACEMENT BASED FORMULATION")
# Solve the problem
print twist
twist.solve()<|fim▁end|> | |
<|file_name|>twist.py<|end_file_name|><|fim▁begin|>__author__ = "Harish Narayanan"
__copyright__ = "Copyright (C) 2009 Simula Research Laboratory and %s" % __author__
__license__ = "GNU GPL Version 3 or any later version"
from cbc.twist import *
from sys import argv
""" DEMO - Twisting of a hyperelastic cube """
class Twist(StaticHyperelasticity):
<|fim_middle|>
# Setup the problem
twist = Twist()
twist.name_method("DISPLACEMENT BASED FORMULATION")
# Solve the problem
print twist
twist.solve()
<|fim▁end|> | """ Definition of the hyperelastic problem """
def mesh(self):
n = 8
return UnitCubeMesh(n, n, n)
# Setting up dirichlet conditions and boundaries
def dirichlet_values(self):
clamp = Expression(("0.0", "0.0", "0.0"))
twist = Expression(("0.0",
"y0 + (x[1] - y0) * cos(theta) - (x[2] - z0) * sin(theta) - x[1]",
"z0 + (x[1] - y0) * sin(theta) + (x[2] - z0) * cos(theta) - x[2]"),
y0=0.5, z0=0.5, theta=pi/6)
return [clamp, twist]
def dirichlet_boundaries(self):
left = "x[0] == 0.0"
right = "x[0] == 1.0"
return [left, right]
# List of material models
def material_model(self):
# Material parameters can either be numbers or spatially
# varying fields. For example,
mu = 3.8461
lmbda = Expression("x[0]*5.8 + (1 - x[0])*5.7")
C10 = 0.171; C01 = 4.89e-3; C20 = -2.4e-4; C30 = 5.e-4
delka = 1.0/sqrt(2.0)
M = Constant((0.0,1.0,0.0))
k1 = 1e2; k2 = 1e1
materials = []
materials.append(MooneyRivlin({'C1':mu/2, 'C2':mu/2, 'bulk':lmbda}))
materials.append(StVenantKirchhoff({'mu':mu, 'bulk':lmbda}))
materials.append(neoHookean({'half_nkT':mu, 'bulk':lmbda}))
materials.append(Isihara({'C10':C10,'C01':C01,'C20':C20,'bulk':lmbda}))
materials.append(Biderman({'C10':C10,'C01':C01,'C20':C20,'C30':C30,'bulk':lmbda}))
materials.append(AnisoTest({'mu1':mu,'mu2':2*mu,'M':M,'bulk':lmbda}))
materials.append(GasserHolzapfelOgden({'mu':mu,'k1':k1,'k2':k2,'M':M,'bulk':lmbda}))
materials.append(Ogden({'alpha1':1.3,'alpha2':5.0,'alpha3':-2.0,\
'mu1':6.3e5,'mu2':0.012e5,'mu3':-0.1e5}))
try:
index = int(argv[1])
except:
index = 2
print str(materials[index])
return materials[index]
def name_method(self, method):
self.method = method
def __str__(self):
return "A hyperelastic cube twisted by 30 degrees solved by " + self.method |
<|file_name|>twist.py<|end_file_name|><|fim▁begin|>__author__ = "Harish Narayanan"
__copyright__ = "Copyright (C) 2009 Simula Research Laboratory and %s" % __author__
__license__ = "GNU GPL Version 3 or any later version"
from cbc.twist import *
from sys import argv
""" DEMO - Twisting of a hyperelastic cube """
class Twist(StaticHyperelasticity):
""" Definition of the hyperelastic problem """
def mesh(self):
<|fim_middle|>
# Setting up dirichlet conditions and boundaries
def dirichlet_values(self):
clamp = Expression(("0.0", "0.0", "0.0"))
twist = Expression(("0.0",
"y0 + (x[1] - y0) * cos(theta) - (x[2] - z0) * sin(theta) - x[1]",
"z0 + (x[1] - y0) * sin(theta) + (x[2] - z0) * cos(theta) - x[2]"),
y0=0.5, z0=0.5, theta=pi/6)
return [clamp, twist]
def dirichlet_boundaries(self):
left = "x[0] == 0.0"
right = "x[0] == 1.0"
return [left, right]
# List of material models
def material_model(self):
# Material parameters can either be numbers or spatially
# varying fields. For example,
mu = 3.8461
lmbda = Expression("x[0]*5.8 + (1 - x[0])*5.7")
C10 = 0.171; C01 = 4.89e-3; C20 = -2.4e-4; C30 = 5.e-4
delka = 1.0/sqrt(2.0)
M = Constant((0.0,1.0,0.0))
k1 = 1e2; k2 = 1e1
materials = []
materials.append(MooneyRivlin({'C1':mu/2, 'C2':mu/2, 'bulk':lmbda}))
materials.append(StVenantKirchhoff({'mu':mu, 'bulk':lmbda}))
materials.append(neoHookean({'half_nkT':mu, 'bulk':lmbda}))
materials.append(Isihara({'C10':C10,'C01':C01,'C20':C20,'bulk':lmbda}))
materials.append(Biderman({'C10':C10,'C01':C01,'C20':C20,'C30':C30,'bulk':lmbda}))
materials.append(AnisoTest({'mu1':mu,'mu2':2*mu,'M':M,'bulk':lmbda}))
materials.append(GasserHolzapfelOgden({'mu':mu,'k1':k1,'k2':k2,'M':M,'bulk':lmbda}))
materials.append(Ogden({'alpha1':1.3,'alpha2':5.0,'alpha3':-2.0,\
'mu1':6.3e5,'mu2':0.012e5,'mu3':-0.1e5}))
try:
index = int(argv[1])
except:
index = 2
print str(materials[index])
return materials[index]
def name_method(self, method):
self.method = method
def __str__(self):
return "A hyperelastic cube twisted by 30 degrees solved by " + self.method
# Setup the problem
twist = Twist()
twist.name_method("DISPLACEMENT BASED FORMULATION")
# Solve the problem
print twist
twist.solve()
<|fim▁end|> | n = 8
return UnitCubeMesh(n, n, n) |
<|file_name|>twist.py<|end_file_name|><|fim▁begin|>__author__ = "Harish Narayanan"
__copyright__ = "Copyright (C) 2009 Simula Research Laboratory and %s" % __author__
__license__ = "GNU GPL Version 3 or any later version"
from cbc.twist import *
from sys import argv
""" DEMO - Twisting of a hyperelastic cube """
class Twist(StaticHyperelasticity):
""" Definition of the hyperelastic problem """
def mesh(self):
n = 8
return UnitCubeMesh(n, n, n)
# Setting up dirichlet conditions and boundaries
def dirichlet_values(self):
<|fim_middle|>
def dirichlet_boundaries(self):
left = "x[0] == 0.0"
right = "x[0] == 1.0"
return [left, right]
# List of material models
def material_model(self):
# Material parameters can either be numbers or spatially
# varying fields. For example,
mu = 3.8461
lmbda = Expression("x[0]*5.8 + (1 - x[0])*5.7")
C10 = 0.171; C01 = 4.89e-3; C20 = -2.4e-4; C30 = 5.e-4
delka = 1.0/sqrt(2.0)
M = Constant((0.0,1.0,0.0))
k1 = 1e2; k2 = 1e1
materials = []
materials.append(MooneyRivlin({'C1':mu/2, 'C2':mu/2, 'bulk':lmbda}))
materials.append(StVenantKirchhoff({'mu':mu, 'bulk':lmbda}))
materials.append(neoHookean({'half_nkT':mu, 'bulk':lmbda}))
materials.append(Isihara({'C10':C10,'C01':C01,'C20':C20,'bulk':lmbda}))
materials.append(Biderman({'C10':C10,'C01':C01,'C20':C20,'C30':C30,'bulk':lmbda}))
materials.append(AnisoTest({'mu1':mu,'mu2':2*mu,'M':M,'bulk':lmbda}))
materials.append(GasserHolzapfelOgden({'mu':mu,'k1':k1,'k2':k2,'M':M,'bulk':lmbda}))
materials.append(Ogden({'alpha1':1.3,'alpha2':5.0,'alpha3':-2.0,\
'mu1':6.3e5,'mu2':0.012e5,'mu3':-0.1e5}))
try:
index = int(argv[1])
except:
index = 2
print str(materials[index])
return materials[index]
def name_method(self, method):
self.method = method
def __str__(self):
return "A hyperelastic cube twisted by 30 degrees solved by " + self.method
# Setup the problem
twist = Twist()
twist.name_method("DISPLACEMENT BASED FORMULATION")
# Solve the problem
print twist
twist.solve()
<|fim▁end|> | clamp = Expression(("0.0", "0.0", "0.0"))
twist = Expression(("0.0",
"y0 + (x[1] - y0) * cos(theta) - (x[2] - z0) * sin(theta) - x[1]",
"z0 + (x[1] - y0) * sin(theta) + (x[2] - z0) * cos(theta) - x[2]"),
y0=0.5, z0=0.5, theta=pi/6)
return [clamp, twist] |
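The twist Expression encodes, per component, the displacement field of a rotation by theta about the x-axis through (y0, z0): the rotated point is (y0 + (x1 - y0)cos(theta) - (x2 - z0)sin(theta), z0 + (x1 - y0)sin(theta) + (x2 - z0)cos(theta)), minus the original coordinates. A standalone numerical check of that identity (independent of FEniCS; the function name is illustrative):

from math import cos, sin, pi, isclose

def twist_displacement(x, y0=0.5, z0=0.5, theta=pi / 6):
    # Same formulas as the Expression strings: rotate (x[1], x[2]) about
    # (y0, z0) by theta and return the displacement vector.
    dy = y0 + (x[1] - y0) * cos(theta) - (x[2] - z0) * sin(theta) - x[1]
    dz = z0 + (x[1] - y0) * sin(theta) + (x[2] - z0) * cos(theta) - x[2]
    return (0.0, dy, dz)

# The rotation centre itself does not move:
assert twist_displacement((1.0, 0.5, 0.5)) == (0.0, 0.0, 0.0)
# A point at unit distance from the centre stays at unit distance:
_, dy, dz = twist_displacement((1.0, 1.5, 0.5))
assert isclose((1.5 + dy - 0.5) ** 2 + (0.5 + dz - 0.5) ** 2, 1.0)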
<|file_name|>twist.py<|end_file_name|><|fim▁begin|>__author__ = "Harish Narayanan"
__copyright__ = "Copyright (C) 2009 Simula Research Laboratory and %s" % __author__
__license__ = "GNU GPL Version 3 or any later version"
from cbc.twist import *
from sys import argv
""" DEMO - Twisting of a hyperelastic cube """
class Twist(StaticHyperelasticity):
""" Definition of the hyperelastic problem """
def mesh(self):
n = 8
return UnitCubeMesh(n, n, n)
# Setting up dirichlet conditions and boundaries
def dirichlet_values(self):
clamp = Expression(("0.0", "0.0", "0.0"))
twist = Expression(("0.0",
"y0 + (x[1] - y0) * cos(theta) - (x[2] - z0) * sin(theta) - x[1]",
"z0 + (x[1] - y0) * sin(theta) + (x[2] - z0) * cos(theta) - x[2]"),
y0=0.5, z0=0.5, theta=pi/6)
return [clamp, twist]
def dirichlet_boundaries(self):
<|fim_middle|>
# List of material models
def material_model(self):
# Material parameters can either be numbers or spatially
# varying fields. For example,
mu = 3.8461
lmbda = Expression("x[0]*5.8 + (1 - x[0])*5.7")
C10 = 0.171; C01 = 4.89e-3; C20 = -2.4e-4; C30 = 5.e-4
delka = 1.0/sqrt(2.0)
M = Constant((0.0,1.0,0.0))
k1 = 1e2; k2 = 1e1
materials = []
materials.append(MooneyRivlin({'C1':mu/2, 'C2':mu/2, 'bulk':lmbda}))
materials.append(StVenantKirchhoff({'mu':mu, 'bulk':lmbda}))
materials.append(neoHookean({'half_nkT':mu, 'bulk':lmbda}))
materials.append(Isihara({'C10':C10,'C01':C01,'C20':C20,'bulk':lmbda}))
materials.append(Biderman({'C10':C10,'C01':C01,'C20':C20,'C30':C30,'bulk':lmbda}))
materials.append(AnisoTest({'mu1':mu,'mu2':2*mu,'M':M,'bulk':lmbda}))
materials.append(GasserHolzapfelOgden({'mu':mu,'k1':k1,'k2':k2,'M':M,'bulk':lmbda}))
materials.append(Ogden({'alpha1':1.3,'alpha2':5.0,'alpha3':-2.0,\
'mu1':6.3e5,'mu2':0.012e5,'mu3':-0.1e5}))
try:
index = int(argv[1])
except:
index = 2
print str(materials[index])
return materials[index]
def name_method(self, method):
self.method = method
def __str__(self):
return "A hyperelastic cube twisted by 30 degrees solved by " + self.method
# Setup the problem
twist = Twist()
twist.name_method("DISPLACEMENT BASED FORMULATION")
# Solve the problem
print twist
twist.solve()
<|fim▁end|> | left = "x[0] == 0.0"
right = "x[0] == 1.0"
return [left, right] |
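The demo picks a material model by int(argv[1]) and falls back to index 2 (neoHookean) on any failure via a bare except:, which also swallows SystemExit and KeyboardInterrupt. A slightly safer variant of that selection — names and bounds here are illustrative, not from the demo:

import sys

def pick_material_index(argv, default=2, n_materials=8):
    # Catch only the failures argv parsing can actually raise.
    try:
        index = int(argv[1])
    except (IndexError, ValueError):
        return default
    return index if 0 <= index < n_materials else default

assert pick_material_index([]) == 2              # no argument -> default
assert pick_material_index(["prog", "5"]) == 5
assert pick_material_index(["prog", "nope"]) == 2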