ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M) |
---|---|---|
py | b407dd7c4940a9d22efc0f6fb43352c2d631a0e6 | from piecrust.processing.base import CopyFileProcessor, SimpleFileProcessor
from piecrust.processing.tree import ProcessingTreeBuilder, ProcessingTreeNode
class MockProcessor(SimpleFileProcessor):
def __init__(self):
super(MockProcessor, self).__init__({'mock': 'out'})
self.processed = []
def _doProcess(self, in_path, out_path):
self.processed.append((in_path, out_path))
mock_processors = [MockProcessor(), CopyFileProcessor()]
IDX_MOCK = 0
IDX_COPY = 1
def test_mock_node():
node = ProcessingTreeNode('/foo.mock', list(mock_processors))
assert node.getProcessor() == mock_processors[IDX_MOCK]
def test_copy_node():
node = ProcessingTreeNode('/foo.other', list(mock_processors))
assert node.getProcessor() == mock_processors[IDX_COPY]
def test_build_simple_tree():
builder = ProcessingTreeBuilder(mock_processors)
root = builder.build('/foo.mock')
assert root is not None
assert root.getProcessor() == mock_processors[IDX_MOCK]
assert not root.is_leaf
assert len(root.outputs) == 1
out = root.outputs[0]
assert out.getProcessor() == mock_processors[IDX_COPY]
|
py | b407de051935a7087733120c082e147e63c9346b | # DEPRECATED
from deprecated.atl_model import *
import itertools
import time
# import resource
__author__ = 'blackbat'
class SimpleVotingModel:
number_of_candidates = 0
number_of_voters = 0
model = None
states = []
states_dictionary = {}
epistemic_states_dictionary = {}
voter_epistemic_states_dictionary = {}
def __init__(self, number_of_candidates, number_of_voters):
self.number_of_candidates = number_of_candidates
self.number_of_voters = number_of_voters
def generate_asynchronous_voting(self):
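        # The hard-coded sizes below are 15 ** number_of_voters (15, 225, 3375,
        # 50625, 759375); beyond five voters a flat cap of 1,000,000 states is used.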
if self.number_of_voters == 1:
self.model = ATLModel(self.number_of_voters + 1, 15)
elif self.number_of_voters == 2:
self.model = ATLModel(self.number_of_voters + 1, 225)
elif self.number_of_voters == 3:
self.model = ATLModel(self.number_of_voters + 1, 3375)
elif self.number_of_voters == 4:
self.model = ATLModel(self.number_of_voters + 1, 50625)
elif self.number_of_voters == 5:
self.model = ATLModel(self.number_of_voters + 1, 759375)
else:
self.model = ATLModel(self.number_of_voters + 1, 1000000)
self.add_actions()
beginning_array = []
for _ in range(0, self.number_of_voters):
beginning_array.append('')
beginning_array_minus_one = []
for _ in range(0, self.number_of_voters):
beginning_array_minus_one.append(-1)
first_state = {'voted': beginning_array_minus_one[:], 'voters_action': beginning_array[:],
'coercer_actions': beginning_array[:], 'finish': beginning_array_minus_one[:]}
state_number = 0
# self.print_create_for_state(0, first_state)
self.states.append(first_state)
state_string = ' '.join(str(first_state[e]) for e in first_state)
self.states_dictionary[state_string] = state_number
state_number += 1
current_state_number = -1
for state in self.states:
current_state_number += 1
voting_product_array = []
coercer_possible_actions = ['wait']
for voter_number in range(0, self.number_of_voters):
if state['voted'][voter_number] == -1:
voting_product_array.append(list(range(0, self.number_of_candidates)))
voting_product_array[voter_number].append('wait')
elif state['voters_action'][voter_number] == '':
voting_product_array.append(['give', 'ng', 'wait'])
elif state['coercer_actions'][voter_number] == '':
coercer_possible_actions.append('np' + str(voter_number + 1))
coercer_possible_actions.append('pun' + str(voter_number + 1))
voting_product_array.append(['wait'])
else:
voting_product_array.append(['wait'])
voting_product_array.append(coercer_possible_actions)
for possibility in itertools.product(*voting_product_array):
action = {}
new_state = {}
new_state['voted'] = state['voted'][:]
new_state['voters_action'] = state['voters_action'][:]
new_state['coercer_actions'] = state['coercer_actions'][:]
new_state['finish'] = state['finish'][:]
voter_not_voted = False
for voter_number in range(0, self.number_of_voters):
action[voter_number + 1] = possibility[voter_number]
voter_action_string = str(possibility[voter_number])
if voter_action_string[0] == 'g' or voter_action_string[0] == 'n':
new_state['voters_action'][voter_number] = voter_action_string
voter_not_voted = True
elif voter_action_string[0] != 'w':
new_state['voted'][voter_number] = possibility[voter_number]
# else:
# voter_not_voted = True
if state['voted'][0] == -1 and new_state['voted'][1] != -1:
continue
coercer_acted = False
action[0] = possibility[self.number_of_voters]
if action[0][0:3] == 'pun':
pun_voter_number = int(action[0][3:])
new_state['coercer_actions'][pun_voter_number - 1] = 'pun'
new_state['finish'][pun_voter_number - 1] = 1
coercer_acted = True
elif action[0][0:2] == 'np':
np_voter_number = int(action[0][2:])
new_state['coercer_actions'][np_voter_number - 1] = 'np'
new_state['finish'][np_voter_number - 1] = 1
coercer_acted = True
# print(new_state['voters_action'])
if not ((action[1] == 'give' or action[1] == 'ng') and (action[2] == 'give' or action[2] == 'ng')):
if (state['voted'][0] == -1 or state['voted'][1] == -1) and (coercer_acted or action[1] == 'give' or action[1] == 'ng' or action[2] == 'give' or action[2] == 'ng'):
continue
new_state_str = ' '.join(str(new_state[e]) for e in new_state)
if new_state_str not in self.states_dictionary:
self.states_dictionary[new_state_str] = state_number
new_state_number = state_number
self.states.append(new_state)
state_number += 1
# self.print_create_for_state(new_state_number, new_state)
else:
new_state_number = self.states_dictionary[new_state_str]
# self.print_create_transition(current_state_number, new_state_number, action)
self.model.add_transition(current_state_number, new_state_number, action)
self.add_epistemic_state(state, current_state_number)
self.add_voter_epistemic_state(state, current_state_number)
self.prepare_epistemic_class()
self.model.states = self.states
def print_create_for_state(self, state_number, state):
print("CREATE (S" + str(state_number) + ":State {voted: " + str(state['voted']) + ", voters_action: " + str(
state['voters_action']) + ", coercer_actions: " + str(state['coercer_actions']) + ", finish: " + str(
state['finish']) + "})")
def print_create_transition(self, from_state_number, to_state_number, actions):
create_str = "CREATE (S" + str(from_state_number) + ")-[:ACTION {"
for i in range(0, len(actions)):
create_str += "A" + str(i) + ":['" + str(actions[i]) + "'], "
create_str = create_str.rstrip(" ,")
create_str += "}]->(S" + str(to_state_number) + ")"
print(create_str)
def generate_simultaneously_voting(self):
self.model = ATLModel(self.number_of_voters + 1, 1000)
self.add_actions()
beginning_array = []
for _ in range(0, self.number_of_voters):
beginning_array.append(-1)
# first_state = {'voted': beginning_array_minus_one[:], 'voters_action': beginning_array[:],
# 'coercer_actions': beginning_array[:], 'finish': beginning_array_minus_one[:]}
first_state = {'voted': beginning_array[:], 'voters_action': beginning_array[:],
'coercer_actions': beginning_array[:], 'finish': beginning_array[:]}
state_number = 0
self.states.append(first_state)
state_string = ' '.join(str(first_state[e]) for e in first_state)
self.states_dictionary[state_string] = state_number
state_number += 1
current_state_number = -1
voting_product_array = []
decision_product_array = []
for _ in range(0, self.number_of_voters):
voting_product_array.append(range(0, self.number_of_candidates))
decision_product_array.append(['give', 'ng'])
for state in self.states:
current_state_number += 1
# is_finish_state = False
if state['voted'][0] == -1:
for voting_product in itertools.product(*voting_product_array):
new_state = {}
new_state['voted'] = state['voted'][:]
new_state['voters_action'] = state['voters_action'][:]
new_state['coercer_actions'] = state['coercer_actions'][:]
new_state['finish'] = state['finish'][:]
action = {0: 'wait'}
for voter_number in range(0, self.number_of_voters):
new_state['voted'][voter_number] = voting_product[voter_number]
action[voter_number + 1] = voting_product[voter_number]
new_state_str = ' '.join(str(new_state[e]) for e in new_state)
if new_state_str not in self.states_dictionary:
self.states_dictionary[new_state_str] = state_number
new_state_number = state_number
self.states.append(new_state)
state_number += 1
else:
new_state_number = self.states_dictionary[new_state_str]
self.model.add_transition(current_state_number, new_state_number, action)
elif state['voters_action'][0] == -1:
for decision_product in itertools.product(*decision_product_array):
new_state = {}
new_state['voted'] = state['voted'][:]
new_state['voters_action'] = state['voters_action'][:]
new_state['coercer_actions'] = state['coercer_actions'][:]
new_state['finish'] = state['finish'][:]
action = {0: 'wait'}
for voter_number in range(0, self.number_of_voters):
new_state['voters_action'][voter_number] = decision_product[voter_number]
action[voter_number + 1] = decision_product[voter_number]
new_state_str = ' '.join(str(new_state[e]) for e in new_state)
if new_state_str not in self.states_dictionary:
self.states_dictionary[new_state_str] = state_number
new_state_number = state_number
self.states.append(new_state)
state_number += 1
else:
new_state_number = self.states_dictionary[new_state_str]
self.model.add_transition(current_state_number, new_state_number, action)
else:
action = {}
for voter_number in range(1, self.number_of_voters + 1):
action[voter_number] = 'wait'
# is_finish_state = True
for voter_number in range(1, self.number_of_voters + 1):
if state['coercer_actions'][voter_number - 1] == -1:
# is_finish_state = False
new_state = {}
new_state['voted'] = state['voted'][:]
new_state['voters_action'] = state['voters_action'][:]
new_state['coercer_actions'] = state['coercer_actions'][:]
new_state['coercer_actions'][voter_number - 1] = 'pun'
new_state['finish'] = state['finish'][:]
new_state['finish'][voter_number - 1] = 1
action[0] = 'pun' + str(voter_number)
new_state_str = ' '.join(str(new_state[e]) for e in new_state)
if new_state_str not in self.states_dictionary:
self.states_dictionary[new_state_str] = state_number
new_state_number = state_number
self.states.append(new_state)
state_number += 1
else:
new_state_number = self.states_dictionary[new_state_str]
self.model.add_transition(current_state_number, new_state_number, action)
new_state2 = {}
new_state2['voted'] = state['voted'][:]
new_state2['voters_action'] = state['voters_action'][:]
new_state2['coercer_actions'] = state['coercer_actions'][:]
new_state2['coercer_actions'][voter_number - 1] = 'np'
new_state2['finish'] = state['finish'][:]
new_state2['finish'][voter_number - 1] = 1
action[0] = 'np' + str(voter_number)
new_state_str = ' '.join(str(new_state2[e]) for e in new_state2)
if new_state_str not in self.states_dictionary:
self.states_dictionary[new_state_str] = state_number
new_state_number = state_number
self.states.append(new_state2)
state_number += 1
else:
new_state_number = self.states_dictionary[new_state_str]
self.model.add_transition(current_state_number, new_state_number, action)
# state['finish'] = is_finish_state
self.add_epistemic_state(state, current_state_number)
self.prepare_epistemic_class()
self.model.states = self.states
def add_actions(self):
self.model.add_action(0, 'wait')
for voter_number in range(1, self.number_of_voters + 1):
self.model.add_action(0, 'np' + str(voter_number))
self.model.add_action(0, 'pun' + str(voter_number))
self.model.add_action(voter_number, 'give')
self.model.add_action(voter_number, 'ng')
self.model.add_action(voter_number, 'wait')
for candidate_number in range(0, self.number_of_candidates):
self.model.add_action(voter_number, candidate_number)
def add_epistemic_state(self, new_state, new_state_number):
epistemic_state = {}
epistemic_state['coercer_actions'] = new_state['coercer_actions'][:]
epistemic_state['voted'] = new_state['voted'][:]
epistemic_state['voters_action'] = new_state['voters_action'][:]
epistemic_state['finish'] = new_state['finish'][:]
for voter_number in range(0, self.number_of_voters):
if new_state['voters_action'][voter_number] == -1 and new_state['voted'][voter_number] != -1:
epistemic_state['voted'][voter_number] = -2
elif new_state['voters_action'][voter_number] == 'ng':
epistemic_state['voted'][voter_number] = -1
epistemic_state_str = ' '.join(str(epistemic_state[e]) for e in epistemic_state)
if epistemic_state_str not in self.epistemic_states_dictionary:
self.epistemic_states_dictionary[epistemic_state_str] = {new_state_number}
else:
self.epistemic_states_dictionary[epistemic_state_str].add(new_state_number)
def add_voter_epistemic_state(self, new_state, new_state_number):
epistemic_state = {}
epistemic_state['coercer_actions'] = new_state['coercer_actions'][:]
epistemic_state['voted'] = new_state['voted'][:]
epistemic_state['voters_action'] = new_state['voters_action'][:]
epistemic_state['finish'] = new_state['finish'][:]
for voter_number in range(1, self.number_of_voters):
epistemic_state['voters_action'][voter_number] = -1
epistemic_state['voted'][voter_number] = -1
epistemic_state['coercer_actions'][voter_number] = -1
# epistemic_state['finish'][voter_number] = -1
epistemic_state_str = ' '.join(str(epistemic_state[e]) for e in epistemic_state)
if epistemic_state_str not in self.voter_epistemic_states_dictionary:
self.voter_epistemic_states_dictionary[epistemic_state_str] = {new_state_number}
else:
self.voter_epistemic_states_dictionary[epistemic_state_str].add(new_state_number)
def prepare_epistemic_class(self):
for _, epistemic_class in self.epistemic_states_dictionary.items():
self.model.add_epistemic_class(0, epistemic_class)
for _, epistemic_class in self.voter_epistemic_states_dictionary.items():
self.model.add_epistemic_class(1, epistemic_class)
# for state_number in range(0, len(self.states)):
# for voter_number in range(1, self.number_of_voters + 1):
# self.model.add_epistemic_class(voter_number, {state_number})
def print_states(self):
for state in self.states:
print(state)
def print_number_of_epistemic_classes(self):
print('Number of epistemic classes:', len(self.epistemic_states_dictionary))
def print_number_of_states(self):
print('Number of states:', len(self.states))
def is_number(self, s):
try:
float(s)
return True
except ValueError:
return False
def generate_tex(self):
current_state = 0
path = "\t\path (0,0) node[initstate] (q0) {$q_{0}$}\n"
transitions = ""
level = 0
visited = []
for i in range(0, len(self.states)):
visited.append(0)
visited[0] = 1
current_level_states = []
transitions += "\t\path[->,font=\scriptsize] (q0)"
for transition in self.model.transitions[current_state]:
not_wait = 0
for i in range(1, self.number_of_voters + 1):
if transition['actions'][i] != 'wait':
not_wait += 1
if transition['actions'][0] != 'wait':
not_wait += 1
if visited[transition['nextState']] == 0 and not_wait == 1:
action = "("
for i in range(1, self.number_of_voters+1):
if transition['actions'][i] != 'wait':
action += 'vote' + str(transition['actions'][i] + 1) + ','
else:
action += "-,"
action += '-)'
transitions += "\n"
transitions += "\t\tedge\n"
transitions += "\t\t\tnode[midway,sloped]{\onlabel{$"+action+"$}} (q" + str(transition['nextState']) + ")"
current_level_states.append(transition['nextState'])
visited[transition['nextState']] = 1
transitions += ";\n"
state_num = 1
level_dictionary = {}
on_this_level = {}
for state in self.states:
level_number = 0
for i in range(0, len(state['voted'])):
voted = state['voted'][i]
if voted != -1:
level_number += 1
for i in range(0, len(state['voters_action'])):
action = state['voters_action'][i]
if action != '':
level_number += 1
for i in range(0, len(state['coercer_actions'])):
action = state['coercer_actions'][i]
if action != '':
level_number += 1
for i in range(0, len(state['finish'])):
action = state['finish'][i]
if action != -1:
level_number += 1
level = -5 * level_number
if level not in level_dictionary:
level_dictionary[level] = 0
else:
level_dictionary[level] += 1
while len(current_level_states) > 0:
level -= 5
left = -1 * len(current_level_states)
new_states = []
for state in current_level_states:
# print(self.states[state])
level_number = 0
for i in range(0, len(self.states[state]['voted'])):
voted = self.states[state]['voted'][i]
if voted != -1:
level_number += 1
for i in range(0, len(self.states[state]['voters_action'])):
action = self.states[state]['voters_action'][i]
if action != '':
level_number += 1
for i in range(0, len(self.states[state]['coercer_actions'])):
action = self.states[state]['coercer_actions'][i]
if action != '':
level_number += 1
for i in range(0, len(self.states[state]['finish'])):
action = self.states[state]['finish'][i]
if action != -1:
level_number += 1
level = -5 * level_number
if level not in on_this_level:
on_this_level[level] = 0
else:
on_this_level[level] += 1
left = level_dictionary[level] * -2 + on_this_level[level] * 4
path += "\t\t(" + str(left) + "," + str(level) + ") node[state] (q" + str(state) + ") {$q_{" + str(
state) + "}$}\n"
val = -0.5
for i in range(0, len(self.states[state]['voted'])):
voted = self.states[state]['voted'][i]
if voted != -1:
path += "\t\t\t+(-0.15," + str(val) + ") node[left] {$\prop{vote_{" + str(i + 1) + "," + str(
voted + 1) + "}}$}\n"
val -= 0.25
for i in range(0, len(self.states[state]['voters_action'])):
action = self.states[state]['voters_action'][i]
if action != '':
path += "\t\t\t+(-0.15," + str(val) + ") node[left] {$\prop{" + action + "_{" + str(i + 1) + "}}$}\n"
val -= 0.25
for i in range(0, len(self.states[state]['coercer_actions'])):
action = self.states[state]['coercer_actions'][i]
if action != '':
path += "\t\t\t+(-0.15," + str(val) + ") node[left] {$\prop{" + action + "_{" + str(i + 1) + "}}$}\n"
val -= 0.25
for i in range(0, len(self.states[state]['finish'])):
action = self.states[state]['finish'][i]
if action != -1:
path += "\t\t\t+(-0.15," + str(val) + ") node[left] {$\prop{finish_{" + str(i + 1) + "}}$}\n"
val -= 0.25
state_num += 1
left += 2
transitions += "\t\path[->,font=\scriptsize] (q"+str(state)+")"
for transition in self.model.transitions[state]:
not_wait = 0
for i in range(1, self.number_of_voters + 1):
if transition['actions'][i] != 'wait':
not_wait += 1
if transition['actions'][0] != 'wait':
not_wait += 1
if state == 99:
print(transition)
if transition['nextState'] != state and not_wait == 1:
action = "("
# print(transition)
for i in range(1, self.number_of_voters + 1):
if transition['actions'][i] != 'wait':
if self.is_number(transition['actions'][i]):
action += 'vote' + str(transition['actions'][i] + 1) + ','
else:
action += 'vote' + transition['actions'][i] + ','
else:
action += "-,"
if transition['actions'][0] != 'wait':
action += transition['actions'][0] + ')'
else:
action += '-)'
transitions += "\n"
transitions += "\t\tedge\n"
transitions += "\t\t\tnode[midway,sloped]{\onlabel{$" + action + "$}} (q" + str(
transition['nextState']) + ")"
if visited[transition['nextState']] == 0:
new_states.append(transition['nextState'])
visited[transition['nextState']] = 1
transitions += ";\n"
current_level_states = new_states[:]
path += ";\n"
f = open("simple_voting_1_voter.txt", "w")
f.write(path)
f.write(transitions)
f.close()
simple_voting_model = SimpleVotingModel(2, 2)
print('Started generating model')
start = time.perf_counter()  # time.clock() was removed in Python 3.8
simple_voting_model.generate_asynchronous_voting()
end = time.perf_counter()
print('Generated model in', end - start, 's')
simple_voting_model.generate_tex()
simple_voting_model.print_number_of_states()
simple_voting_model.print_number_of_epistemic_classes()
# simple_voting_model.print_states()
# simple_voting_model.model.walk(0)
voter_number = 0
# print()
# print("<<c>>F(~pun_i -> vote_{i,1})")
# winning_states = []
# i = -1
# for state in simple_voting_model.states:
# i += 1
#
# if not (state['coercer_actions'][voter_number] != 'pun' and state['voted'][voter_number] != 1):
# winning_states.append(i)
#
# start = time.clock()
# result = simple_voting_model.model.minimum_formula_one_agent_multiple_states(0, winning_states)
# end = time.clock()
#
# print("Time:", end - start, "s")
# print("Number of good states ", len(result))
# print("Formula result:", list(result)[0] == 0)
# for state_number in result:
# print(state_number, simple_voting_model.states[state_number])
# print()
# print("<<v_i>>G(~pun_i & ~vote_{i,1})")
# winning_states = []
# i = -1
# for state in simple_voting_model.states:
# i += 1
#
# if state['coercer_actions'][voter_number] != 'pun' and state['voted'][voter_number] != 1:
# winning_states.append(i)
#
# start = time.clock()
# result = simple_voting_model.model.maximum_formula_one_agent_multiple_states(1, winning_states)
# end = time.clock()
#
# print("Time:", end - start, "s")
# print("Number of good states ", len(result))
# print("Formula result:", list(result)[0] == 0)
# for state_number in result:
# print(state_number, simple_voting_model.states[state_number])
# print()
# print("<<c>>G( (finish_i & ~pun_i) -> vote_{i,1} )")
# winning_states = []
# i = -1
# for state in simple_voting_model.states:
# i += 1
#
# if not (state['finish'][voter_number] == 1 and state['coercer_actions'][voter_number] != 'pun' and state['voted'][
# voter_number] != 1):
# winning_states.append(i)
#
# start = time.clock()
# result = simple_voting_model.model.maximum_formula_one_agent_multiple_states(0, winning_states)
# end = time.clock()
#
# print("Time:", end - start, "s")
# print("Number of good states ", len(result))
# print("Formula result:", list(result)[0] == 0)
# for state_number in result:
# print(state_number, simple_voting_model.states[state_number])
print()
print("<<v_i>>F( finish_i & ~pun_i & ~vote_{i,1} )")
winning_states = []
i = -1
for state in simple_voting_model.states:
i += 1
if state['finish'][voter_number] == 1 and state['coercer_actions'][voter_number] != 'pun' and state['voted'][
voter_number] != 1:
winning_states.append(i)
start = time.perf_counter()
result = simple_voting_model.model.minimum_formula_one_agent_multiple_states(1, winning_states)
end = time.perf_counter()
print("Time:", end - start, "s")
print("Number of good states ", len(result))
print("Formula result:", list(result)[0] == 0)
# for state_number in result:
# print(state_number, simple_voting_model.states[state_number])
# print()
# print("Perfect <<c>>G( (finish_i & ~pun_i) -> vote_{i,1} )")
# winning_states = []
# i = -1
# for state in simple_voting_model.states:
# i += 1
#
# if not (state['finish'][voter_number] == 1 and state['coercer_actions'][voter_number] != 'pun' and state['voted'][
# voter_number] != 1):
# winning_states.append(i)
#
# start = time.clock()
# result = simple_voting_model.model.maximum_formula_one_agent_multiple_states_perfect_information(0, winning_states)
# end = time.clock()
#
# print("Time:", end - start, "s")
# print("Number of good states ", len(result))
# print("Formula result:", list(result)[0] == 0)
# for state_number in result:
# print(state_number, simple_voting_model.states[state_number])
print()
print("Perfect <<v_i>>F( finish_i & ~pun_i & ~vote_{i,1} )")
winning_states = []
i = -1
for state in simple_voting_model.states:
i += 1
if state['finish'][voter_number] == 1 and state['coercer_actions'][voter_number] != 'pun' and state['voted'][
voter_number] != 1:
winning_states.append(i)
start = time.perf_counter()
result = simple_voting_model.model.minimum_formula_one_agent_multiple_states_perfect_information(1, winning_states)
end = time.perf_counter()
print("Time:", end - start, "s")
print("Number of good states ", len(result))
print("Formula result:", list(result)[0] == 0)
|
py | b407df981305ce52fd4a9bf69b09e6f17bf46fcd | from dataclasses import dataclass
import numpy as np
import pandas as pd
from power_perceiver.consts import BatchKey
from power_perceiver.load_prepared_batches.data_sources.prepared_data_source import NumpyBatch
from power_perceiver.utils import datetime64_to_float, stack_np_examples_into_batch
@dataclass
class AlignGSPTo5Min:
"""Aligns GSP data to 5 min data.
The GSP data isn't interpolated. Instead, for each 5_min_timestep, we take the GSP data at
5_min_timestep.ceil("30T"). If that GSP timestep does not exist then NaNs will be used.
"""
batch_key_for_5_min_datetimes: BatchKey = BatchKey.hrvsatellite_time_utc
def __call__(self, np_batch: NumpyBatch) -> NumpyBatch:
# Loop through each example and find the index into the GSP time dimension
# of the GSP timestep corresponding to each 5 minute timestep:
gsp_5_min_for_all_examples: list[NumpyBatch] = []
n_examples = np_batch[BatchKey.gsp].shape[0]
for example_i in range(n_examples):
# Find the corresponding GSP 30 minute timestep for each 5 minute satellite timestep.
# We do this by taking the `ceil("30T")` of each 5 minute satellite timestep.
# Most of the code below is just converting to Pandas and back
# so we can use `pd.DatetimeIndex.ceil` on each datetime:
time_5_min = np_batch[self.batch_key_for_5_min_datetimes][example_i]
time_5_min_dt_index = pd.to_datetime(time_5_min, unit="s")
time_30_min_every_5_min_dt_index = time_5_min_dt_index.ceil("30T")
time_30_min_every_5_min = datetime64_to_float(time_30_min_every_5_min_dt_index.values)
# Now, find the index into the original 30-minute GSP data for each 5-min timestep:
gsp_30_min_time = np_batch[BatchKey.gsp_time_utc][example_i]
idx_into_gsp = np.searchsorted(gsp_30_min_time, time_30_min_every_5_min)
gsp_5_min_example: NumpyBatch = {}
for batch_key in (BatchKey.gsp, BatchKey.gsp_time_utc):
new_batch_key_name = batch_key.name.replace("gsp", "gsp_5_min")
new_batch_key = BatchKey[new_batch_key_name]
gsp_5_min_example[new_batch_key] = np_batch[batch_key][example_i, idx_into_gsp]
gsp_5_min_for_all_examples.append(gsp_5_min_example)
# Stack the individual examples back into a batch of examples:
new_np_batch = stack_np_examples_into_batch(gsp_5_min_for_all_examples)
np_batch.update(new_np_batch)
# Copy over the t0_idx scalar:
batch_key_name_for_5_min_t0_idx = self.batch_key_for_5_min_datetimes.name.replace(
"time_utc", "t0_idx"
)
batch_key_for_5_min_t0_idx = BatchKey[batch_key_name_for_5_min_t0_idx]
np_batch[BatchKey.gsp_5_min_t0_idx] = np_batch[batch_key_for_5_min_t0_idx]
return np_batch
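
# --- Illustrative sketch (not part of the original module) ---
# A standalone demonstration of the ceil("30T") alignment used above, with plain
# pandas/numpy only; the timestamps below are made up for the example.
if __name__ == "__main__":
    time_5_min = pd.date_range("2022-01-01 12:00", periods=6, freq="5T")
    gsp_30_min = pd.date_range("2022-01-01 12:00", periods=3, freq="30T")
    ceiled = time_5_min.ceil("30T")
    idx_into_gsp = np.searchsorted(gsp_30_min.values, ceiled.values)
    # 12:00 maps to GSP index 0; 12:05-12:25 all ceil to 12:30 (index 1).
    print(idx_into_gsp.tolist())  # [0, 1, 1, 1, 1, 1]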
|
py | b407dfb86e99ed815f1ccc2255fa61b1a73bd4dd | from typing import List, Tuple, Type, TYPE_CHECKING, Union
from ignite.distributed.comp_models.base import _SerialModel
from ignite.distributed.comp_models.horovod import has_hvd_support
from ignite.distributed.comp_models.native import has_native_dist_support
from ignite.distributed.comp_models.xla import has_xla_support
if TYPE_CHECKING:
from ignite.distributed.comp_models.horovod import _HorovodDistModel
from ignite.distributed.comp_models.native import _NativeDistModel
from ignite.distributed.comp_models.xla import _XlaDistModel
def setup_available_computation_models() -> Tuple[
Type[Union[_SerialModel, "_NativeDistModel", "_XlaDistModel", "_HorovodDistModel"]], ...
]:
models = [
_SerialModel,
] # type: List[Type[Union[_SerialModel, "_NativeDistModel", "_XlaDistModel", "_HorovodDistModel"]]]
if has_native_dist_support:
from ignite.distributed.comp_models.native import _NativeDistModel
models.append(_NativeDistModel)
if has_xla_support:
from ignite.distributed.comp_models.xla import _XlaDistModel
models.append(_XlaDistModel)
if has_hvd_support:
from ignite.distributed.comp_models.horovod import _HorovodDistModel
models.append(_HorovodDistModel)
return tuple(models)
registered_computation_models = setup_available_computation_models()
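
if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): print which
    # computation models were registered in this environment.
    for _model in registered_computation_models:
        print(_model.__name__)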
|
py | b407dfbc49317f1867a18c19b9ad9eacdadd44f2 | # Generated by Django 3.2.3 on 2021-07-07 20:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Resident',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, null=True)),
('apartment_unit', models.CharField(max_length=50, null=True)),
],
),
]
|
py | b407e1186e23b935ff3a24a822afb5eaae04bc3a | import os
import pdb
import re
import string
import torch
import torch.nn.functional as F
import transformers
from torch.utils.data import Dataset
from transformers import AutoTokenizer
from config import SetupParameters
class WikiNER(Dataset):
"""
Class for the wiki NER dataset
self.data = list of sentences.
self.targets = list of NER labels per sentence.
"""
def __init__(self, file_path):
"""
Args:
file_path: the path of the file for the wikiNER dataset
"""
self.data = [] #list of sentences
self.targets = [] #list of NER targets for each sentence
raw_data , raw_targets = self.__read_data(file_path)
self.__parse_sentences(raw_data, raw_targets)
self.__convert_to_BIO()
def __read_data(self, file_path):
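        # Expected wikiNER layout: blank lines separate articles; every other line
        # holds whitespace-separated tokens of the form word|<middle field>|NER-label,
        # of which only the first and last fields are used below.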
data = []
targets = []
article_end = True
just_started = True
with open(file_path, 'r', encoding="utf-8") as file:
curr_words = []
curr_labels = []
for line in file:
if line == '\n':
article_end = True
continue
if article_end:
                # when the scan has just started there are no values inside curr_words and curr_labels yet
if not just_started:
if len(curr_words) != len(curr_labels):
raise ValueError('[ERROR] words-labels mismatch')
data.append(curr_words)
targets.append(curr_labels)
just_started = False
curr_words = []
curr_labels = []
article_end = False
for token in line.split():
triplet = token.split(r'|')
if len(triplet) != 3:
pdb.set_trace()
raise ValueError('[ERROR] Unknown file format')
curr_words.append(triplet[0]) #word
curr_labels.append(triplet[-1]) #NER label
#add the last article
if line != '\n':
if len(curr_words) != len(curr_labels):
raise ValueError('[ERROR] words-labels mismatch')
data.append(curr_words)
targets.append(curr_labels)
return data, targets
def __parse_sentences(self, source_data, source_targets):
"""
Load the data from the source with the right format
"""
curr_tags = []
data = source_data
targets = source_targets
for count, (article, tags) in enumerate(zip(source_data, source_targets)):
curr_tags = []
curr_sentence = ''
for word, tag in zip(article, tags):
if word in ['.']:
self.data.append(curr_sentence + '.')
curr_tags.append(tag)
self.targets.append(curr_tags)
curr_sentence = ''
curr_tags = []
continue
#if word in string.punctuation and word not in [',', '\'', '(', ')']:
#continue
curr_sentence += word + ' '
curr_tags.append(tag)
if len(curr_sentence.split()) != len(curr_tags):
raise ValueError("Sentence and target lengths do not match")
#if SetupParameters.DATA_LIMIT != -1 and count >= SetupParameters.DATA_LIMIT:
#break
def __len__(self):
return len(self.data)
def __getitem__(self, index):
if index < 0:
raise ValueError('[ERROR] fetching negative entry in the dataset')
return self.data[index], self.targets[index]
def get_max_sentence_len(self, data):
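        # NOTE: relies on a `self.tokenizer` attribute (e.g. a HuggingFace tokenizer)
        # being attached to the instance by the caller; __init__ above does not set one.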
curr_max = 0
for item in data:
item_tok_len = len(self.tokenizer.tokenize(item))
if item_tok_len > curr_max:
curr_max = item_tok_len
return curr_max
def __convert_to_BIO(self):
"""
This method converts the wikiNER dataset to BIO notation
"""
for article_num, tags in enumerate(self.targets):
prev_tag = 'O'
for tag_num, curr_tag in enumerate(tags):
if curr_tag != 'O':
if prev_tag == 'O' or prev_tag[1:] != curr_tag[1:]:
#here put B
self.targets[article_num][tag_num] = 'B' + curr_tag[1:]
prev_tag = curr_tag
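
# --- Illustrative sketch (not part of the original module) ---
# The BIO conversion above rewrites the first tag of each entity span from "I-X"
# to "B-X". A minimal standalone rendition of that rule, with made-up tags, just to
# make the transformation concrete:
if __name__ == "__main__":
    tags = ['I-PER', 'I-PER', 'I-LOC', 'O']
    prev = 'O'
    for i, tag in enumerate(tags):
        if tag != 'O' and (prev == 'O' or prev[1:] != tag[1:]):
            tags[i] = 'B' + tag[1:]
        prev = tag
    print(tags)  # ['B-PER', 'I-PER', 'B-LOC', 'O']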
|
py | b407e341f3def50a41dec2595d9cff5de8f25e59 | import setuptools
from distutils.extension import Extension
from Cython.Build import cythonize
extensions = [
Extension('chia_diff.cython_util',
['chia_diff/cython_util.pyx']),
]
NAME = 'chia_diff'
VERSION = '0.0.1'
setuptools.setup(
name=NAME,
version=VERSION,
author="Henry Zhang",
author_email="[email protected]",
description="A package for measuring difference of ChIA-PET data.",
long_description=open('README.md').read(),
long_description_content_type="text/markdown",
url="https://github.com/c0ver/chia_diff",
packages=setuptools.find_packages(),
install_requires=['numpy>=1.17.0',
'scipy>=1.3.1',
'prettytable>=0.7.2',
'pybedgraph>=0.5.40',
'matplotlib>=3.1.1'],
ext_modules=cythonize(extensions, language_level=3),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
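
# Build note (not part of the original file): with Cython installed, the extension
# can typically be compiled in place with `python setup.py build_ext --inplace`,
# or the package installed for development with `pip install -e .`.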
|
py | b407e3c6a685e163ece3d0dc5c8030a9147816f0 | #!/usr/bin/env python
# Copyright: This document has been placed in the public domain.
"""
Taylor diagram (Taylor, 2001) test implementation.
http://www-pcmdi.llnl.gov/about/staff/Taylor/CV/Taylor_diagram_primer.htm
"""
__version__ = "Time-stamp: <2012-02-17 20:59:35 ycopin>"
__author__ = "Yannick Copin <[email protected]>"
import numpy as NP
import matplotlib.pyplot as PLT
class TaylorDiagram(object):
"""Taylor diagram: plot model standard deviation and correlation
to reference (data) sample in a single-quadrant polar plot, with
r=stddev and theta=arccos(correlation).
"""
def __init__(self, refstd, fig=None, rect=111, label='_'):
"""Set up Taylor diagram axes, i.e. single quadrant polar
plot, using mpl_toolkits.axisartist.floating_axes. refstd is
the reference standard deviation to be compared to.
"""
from matplotlib.projections import PolarAxes
import mpl_toolkits.axisartist.floating_axes as FA
import mpl_toolkits.axisartist.grid_finder as GF
self.refstd = refstd # Reference standard deviation
tr = PolarAxes.PolarTransform()
# Correlation labels
rlocs = NP.concatenate((NP.arange(10)/10.,[0.95,0.99]))
tlocs = NP.arccos(rlocs) # Conversion to polar angles
gl1 = GF.FixedLocator(tlocs) # Positions
tf1 = GF.DictFormatter(dict(zip(tlocs, map(str,rlocs))))
# Standard deviation axis extent
self.smin = 0
self.smax = 1.5*self.refstd
ghelper = FA.GridHelperCurveLinear(tr,
extremes=(0,NP.pi/2, # 1st quadrant
self.smin,self.smax),
grid_locator1=gl1,
tick_formatter1=tf1,
)
if fig is None:
fig = PLT.figure()
ax = FA.FloatingSubplot(fig, rect, grid_helper=ghelper)
fig.add_subplot(ax)
# Adjust axes
ax.axis["top"].set_axis_direction("bottom") # "Angle axis"
ax.axis["top"].toggle(ticklabels=True, label=True)
ax.axis["top"].major_ticklabels.set_axis_direction("top")
ax.axis["top"].label.set_axis_direction("top")
ax.axis["top"].label.set_text("Correlation")
ax.axis["left"].set_axis_direction("bottom") # "X axis"
ax.axis["left"].label.set_text("Standard deviation")
ax.axis["right"].set_axis_direction("top") # "Y axis"
ax.axis["right"].toggle(ticklabels=True)
ax.axis["right"].major_ticklabels.set_axis_direction("left")
ax.axis["bottom"].set_visible(False) # Useless
# Contours along standard deviations
ax.grid(False)
self._ax = ax # Graphical axes
self.ax = ax.get_aux_axes(tr) # Polar coordinates
# Add reference point and stddev contour
        print("Reference std:", self.refstd)
l, = self.ax.plot([0], self.refstd, 'k*',
ls='', ms=10, label=label)
t = NP.linspace(0, NP.pi/2)
r = NP.zeros_like(t) + self.refstd
self.ax.plot(t,r, 'k--', label='_')
        # Collect sample points for later use (e.g. legend)
self.samplePoints = [l]
def add_sample(self, stddev, corrcoef, *args, **kwargs):
"""Add sample (stddev,corrcoeff) to the Taylor diagram. args
and kwargs are directly propagated to the Figure.plot
command."""
l, = self.ax.plot(NP.arccos(corrcoef), stddev,
*args, **kwargs) # (theta,radius)
self.samplePoints.append(l)
return l
def add_contours(self, levels=5, **kwargs):
"""Add constant centered RMS difference contours."""
rs,ts = NP.meshgrid(NP.linspace(self.smin,self.smax),
NP.linspace(0,NP.pi/2))
# Compute centered RMS difference
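        # (law of cosines on the Taylor-diagram geometry: E'^2 = sigma_ref^2 + sigma^2
        #  - 2*sigma_ref*sigma*cos(theta), with theta = arccos(correlation))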
rms = NP.sqrt(self.refstd**2 + rs**2 - 2*self.refstd*rs*NP.cos(ts))
contours = self.ax.contour(ts, rs, rms, levels, **kwargs)
return contours
if __name__=='__main__':
# Reference dataset
x = NP.linspace(0,4*NP.pi,100)
data = NP.sin(x)
refstd = data.std(ddof=1) # Reference standard deviation
# Models
m1 = data + 0.2*NP.random.randn(len(x)) # Model 1
m2 = 0.8*data + .1*NP.random.randn(len(x)) # Model 2
m3 = NP.sin(x-NP.pi/10) # Model 3
# Compute stddev and correlation coefficient of models
samples = NP.array([ [m.std(ddof=1), NP.corrcoef(data, m)[0,1]]
for m in (m1,m2,m3)])
fig = PLT.figure(figsize=(10,4))
ax1 = fig.add_subplot(1,2,1, xlabel='X', ylabel='Y')
# Taylor diagram
dia = TaylorDiagram(refstd, fig=fig, rect=122, label="Reference")
colors = PLT.matplotlib.cm.jet(NP.linspace(0,1,len(samples)))
ax1.plot(x,data,'ko', label='Data')
for i,m in enumerate([m1,m2,m3]):
ax1.plot(x,m, c=colors[i], label='Model %d' % (i+1))
ax1.legend(numpoints=1, prop=dict(size='small'), loc='best')
# Add samples to Taylor diagram
for i,(stddev,corrcoef) in enumerate(samples):
dia.add_sample(stddev, corrcoef, marker='s', ls='', c=colors[i],
label="Model %d" % (i+1))
# Add RMS contours, and label them
contours = dia.add_contours(colors='0.5')
PLT.clabel(contours, inline=1, fontsize=10)
# Add a figure legend
fig.legend(dia.samplePoints,
[ p.get_label() for p in dia.samplePoints ],
numpoints=1, prop=dict(size='small'), loc='upper right')
PLT.show()
|
py | b407e3cda08fc0c93717a3e930595dd6631c18ad | from dateutil import parser as time_parser
from .base_scannable import ScannableParserBase
from ..record import Registrar, Contact, Nameserver
from ..scanner.base_shared3 import BaseShared3Scanner
from ..utils import array_wrapper
class BaseShared3Parser(ScannableParserBase):
_scanner = BaseShared3Scanner
@property
def disclaimer(self):
return self.node("field:disclaimer")
@property
def domain(self):
if self.node("domain name"):
return self.node("domain name").lower()
@property
def status(self):
if self.available:
return "available"
else:
return "registered"
@property
def available(self):
return bool(self.node('status:available'))
@property
def registered(self):
return not bool(self.node('status:available'))
@property
def created_on(self):
if self.node("created date"):
return time_parser.parse(self.node("created date"))
@property
def updated_on(self):
if self.node("updated date"):
return time_parser.parse(self.node("updated date"))
@property
def expires_on(self):
if self.node("expiration date"):
return time_parser.parse(self.node("expiration date"))
@property
def registrar(self):
value = self.node('registrar')
if value:
return Registrar(None, value, None, self.node('url'))
@property
def registrant_contacts(self):
return array_wrapper(self._build_contact("owner", Contact.TYPE_REGISTRANT))
@property
def admin_contacts(self):
return array_wrapper(self._build_contact('admin', Contact.TYPE_ADMINISTRATIVE))
@property
def technical_contacts(self):
return array_wrapper(self._build_contact('tech', Contact.TYPE_TECHNICAL))
@property
def nameservers(self):
return [Nameserver(name = name) for name in filter(None, array_wrapper(self.node("nameserver")))]
def _build_contact(self, element, type_):
if self.node("%s-contact" % element):
return Contact(**{
'type' : type_,
'id' : self.node("%s-contact" % element),
'name' : self.node("%s-name" % element),
'organization' : self.node("%s-organization" % element),
'address' : self.node("%s-street" % element),
'city' : self.node("%s-city" % element),
'zip' : self.node("%s-zip" % element),
'state' : None,
'country_code' : self.node("%s-country" % element),
'phone' : self.node("%s-phone" % element),
'fax' : self.node("%s-fax" % element),
'email' : self.node("%s-email" % element)
})
|
py | b407e47ca9337eec527096a095fccadf28b0a02d | from pytorch_lightning.plugins.precision.apex_amp import ApexMixedPrecisionPlugin # noqa: F401
from pytorch_lightning.plugins.precision.deepspeed import DeepSpeedPrecisionPlugin # noqa: F401
from pytorch_lightning.plugins.precision.double import DoublePrecisionPlugin # noqa: F401
from pytorch_lightning.plugins.precision.fully_sharded_native_amp import ( # noqa: F401
FullyShardedNativeMixedPrecisionPlugin,
)
from pytorch_lightning.plugins.precision.ipu import IPUPrecisionPlugin # noqa: F401
from pytorch_lightning.plugins.precision.mixed import MixedPrecisionPlugin # noqa: F401
from pytorch_lightning.plugins.precision.native_amp import NativeMixedPrecisionPlugin # noqa: F401
from pytorch_lightning.plugins.precision.precision_plugin import PrecisionPlugin # noqa: F401
from pytorch_lightning.plugins.precision.sharded_native_amp import ShardedNativeMixedPrecisionPlugin # noqa: F401
from pytorch_lightning.plugins.precision.tpu import TPUPrecisionPlugin # noqa: F401
from pytorch_lightning.plugins.precision.tpu_bf16 import TPUBf16PrecisionPlugin # noqa: F401
|
py | b407e4d05b2068dd6fcd9e6bbb3ab221e87d5366 | from sys import argv
from contextlib import redirect_stdout
from random import randint
import numpy as np
# generate two RxC matrices and their multiplication
# $ python c_array_gen.py 16 16 > data.txt
RS = 8
CS = 8
fm_np = np.random.randint(-128,127, size=(RS, CS))
sm_np = np.random.randint(-128,127, size=(RS, CS))
verify_np = np.zeros((RS,CS))
def create_c_array(RS,CS,rnd_matrix,nm):
print(f'static const signed char DATA_{nm}[{RS}][{CS}] = ',end='')
for row in range(0,RS):
if(row == 0):
print('{ {',end='')
else:
print(' {',end='')
for column in range(0,CS-1):
r_int = rnd_matrix[row,column]
print("{0:>6}".format(r_int) + ',',end='')
r_int = rnd_matrix[row,column+1]
print("{0:>6}".format(r_int) ,end='}')
if(row == RS-1):
print("};")
else:
print(',')
create_c_array(RS,CS,fm_np,0)
print('',end='\n')
create_c_array(RS,CS,sm_np,1)
print('',end='\n')
verify_np = np.matmul(fm_np, sm_np)
create_c_array(RS,CS,verify_np,2)
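
# Note (not part of the original script): `argv` is imported above but unused; the
# dimensions are fixed by RS/CS. If command-line sizing were wanted, something like
#   if len(argv) == 3: RS, CS = int(argv[1]), int(argv[2])
# before generating the matrices would match the usage shown in the header comment.
# Also note the product matrix can exceed the signed char range for 8x8 inputs,
# so DATA_2 may need a wider C type.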
|
py | b407e5cdef9e0eed56a57db12e67ca219adabfae | import pickle as pkl
import os
import tarfile
from urllib.request import urlretrieve
import numpy as np
import skimage
import skimage.io
import skimage.transform
import torchvision
def _compose_image(digit, background):
"""Difference-blend a digit and a random patch from a background image."""
w, h, _ = background.shape
dw, dh, _ = digit.shape
x = np.random.randint(0, w - dw)
y = np.random.randint(0, h - dh)
bg = background[x:x+dw, y:y+dh]
return np.abs(bg - digit).astype(np.uint8)
def _mnist_to_img(x):
"""Binarize MNIST digit and convert to RGB."""
x = (x > 0).astype(np.float32)
d = x.reshape([28, 28, 1]) * 255
return np.concatenate([d, d, d], 2)
def _create_mnistm(X, rand, background_data):
"""
    Given an array of MNIST digits, blend random background patches to
build the MNIST-M dataset as described in
http://jmlr.org/papers/volume17/15-239/15-239.pdf
"""
X_ = np.zeros([X.shape[0], 28, 28, 3], np.uint8)
for i in range(X.shape[0]):
if i % 10000 == 0:
print('Processing example', i)
bg_img = rand.choice(background_data)
d = _mnist_to_img(X[i])
d = _compose_image(d, bg_img)
X_[i] = d
return X_
def create_mnistm():
if os.path.exists('mnistm_data.pkl'):
return
if not os.path.exists("BSR_bsds500.tgz"):
urlretrieve("http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/BSR/BSR_bsds500.tgz", "BSR_bsds500.tgz")
    print('Creating MNIST-M... That may take a minute')
BST_PATH = 'BSR_bsds500.tgz'
rand = np.random.RandomState(42)
f = tarfile.open(BST_PATH)
train_files = []
for name in f.getnames():
if name.startswith('BSR/BSDS500/data/images/train/'):
train_files.append(name)
print('Loading BSR training images')
background_data = []
for name in train_files:
try:
fp = f.extractfile(name)
bg_img = skimage.io.imread(fp)
background_data.append(bg_img)
except:
continue
mnist_train = torchvision.datasets.MNIST('.', train=True, download=True)
mnist_test = torchvision.datasets.MNIST('.', train=False, download=True)
print('Building train set...')
train = _create_mnistm(mnist_train.data.numpy(), rand, background_data)
print('Building test set...')
test = _create_mnistm(mnist_test.data.numpy(), rand, background_data)
# Save dataset as pickle
with open('mnistm_data.pkl', 'wb+') as f:
pkl.dump({ 'x_train': train, 'x_test': test, "y_train": mnist_train.targets.numpy(), "y_test": mnist_test.targets.numpy()}, f, pkl.HIGHEST_PROTOCOL)
print("Done!")
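
if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module): build the pickle
    # (downloads BSDS500 and MNIST on first use), then read it back; the key names
    # match the pkl.dump call above.
    create_mnistm()
    with open('mnistm_data.pkl', 'rb') as f:
        mnistm = pkl.load(f)
    print(mnistm['x_train'].shape, mnistm['x_test'].shape)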
|
py | b407e5e5c6771ecefbcbfb016db544ee78ff2a86 | # Generated by Django 2.0.1 on 2019-01-11 07:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app01', '0019_auto_20190111_1505'),
]
operations = [
migrations.CreateModel(
name='CustomerCss',
fields=[
('Jsid', models.AutoField(primary_key=True, serialize=False)),
('title', models.CharField(max_length=100)),
('imgUrl', models.CharField(max_length=100)),
('details', models.CharField(max_length=200)),
('fee', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='CustomerHtml',
fields=[
('Jsid', models.AutoField(primary_key=True, serialize=False)),
('title', models.CharField(max_length=100)),
('imgUrl', models.CharField(max_length=100)),
('details', models.CharField(max_length=200)),
('fee', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='CustomerJavaScript',
fields=[
('Jsid', models.AutoField(primary_key=True, serialize=False)),
('title', models.CharField(max_length=100)),
('imgUrl', models.CharField(max_length=100)),
('details', models.CharField(max_length=200)),
('fee', models.IntegerField(default=0)),
],
),
]
|
py | b407e67cd55100406747e52379c39aa08b1113dc | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import visdom
import torch
import torch.nn as nn
import torch.optim as optim
from utils.helpers import loggerConfig
CONFIGS = [
# agent_type, env_type, game, circuit_type
[ "empty", "repeat-copy", "", "none" ], # 0
[ "sl", "copy", "", "ntm" ], # 1
[ "sl", "repeat-copy", "", "dnc" ] # 2
]
class Params(object): # NOTE: shared across all modules
def __init__(self):
self.verbose = 0 # 0(warning) | 1(info) | 2(debug)
# training signature
self.machine = "daim" # "machine_id"
self.timestamp = "17080800" # "yymmdd##"
# training configuration
self.mode = 1 # 1(train) | 2(test model_file)
self.config = 1
self.seed = 1
self.render = False # whether render the window from the original envs or not
self.visualize = True # whether do online plotting and stuff or not
self.save_best = False # save model w/ highest reward if True, otherwise always save the latest model
self.agent_type, self.env_type, self.game, self.circuit_type = CONFIGS[self.config]
self.use_cuda = torch.cuda.is_available()
self.dtype = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
# prefix for model/log/visdom
self.refs = self.machine + "_" + self.timestamp # NOTE: using this as env for visdom
self.root_dir = os.getcwd()
# model files
# NOTE: will save the current model to model_name
self.model_name = self.root_dir + "/models/" + self.refs + ".pth"
# NOTE: will load pretrained model_file if not None
self.model_file = None#self.root_dir + "/models/{TODO:FILL_IN_PRETAINED_MODEL_FILE}.pth"
if self.mode == 2:
self.model_file = self.model_name # NOTE: so only need to change self.mode to 2 to test the current training
assert self.model_file is not None, "Pre-Trained model is None, Testing aborted!!!"
self.refs = self.refs + "_test" # NOTE: using this as env for visdom for testing, to avoid accidentally redraw on the training plots
# logging configs
self.log_name = self.root_dir + "/logs/" + self.refs + ".log"
self.logger = loggerConfig(self.log_name, self.verbose)
self.logger.warning("<===================================>")
if self.visualize:
self.vis = visdom.Visdom()
self.logger.warning("bash$: python -m visdom.server") # activate visdom server on bash
self.logger.warning("http://localhost:8097/env/" + self.refs) # open this address on browser
class EnvParams(Params): # settings for network architecture
def __init__(self):
super(EnvParams, self).__init__()
self.batch_size = None
if self.env_type == "copy":
self.len_word = 8
self.min_num_words = 5
self.max_num_words = 10
elif self.env_type == "repeat-copy":
self.len_word = 4
self.min_num_words = 1
self.max_num_words = 2
self.min_repeats = 1
self.max_repeats = 2
self.max_repeats_norm = 10.
class ControllerParams(Params):
def __init__(self):
super(ControllerParams, self).__init__()
self.batch_size = None
self.input_dim = None # set after env
self.read_vec_dim = None # num_read_heads x mem_wid
self.output_dim = None # set after env
self.hidden_dim = None #
self.mem_hei = None # set after memory
self.mem_wid = None # set after memory
class HeadParams(Params):
def __init__(self):
super(HeadParams, self).__init__()
self.num_heads = None
self.batch_size = None
self.hidden_dim = None
self.mem_hei = None
self.mem_wid = None
self.num_allowed_shifts = 3
class WriteHeadParams(HeadParams):
def __init__(self):
super(WriteHeadParams, self).__init__()
class ReadHeadParams(HeadParams):
def __init__(self):
super(ReadHeadParams, self).__init__()
if self.circuit_type == "dnc":
self.num_read_modes = None
class MemoryParams(Params):
def __init__(self):
super(MemoryParams, self).__init__()
self.batch_size = None
self.mem_hei = None
self.mem_wid = None
class AccessorParams(Params):
def __init__(self):
super(AccessorParams, self).__init__()
self.batch_size = None
self.hidden_dim = None
self.num_write_heads = None
self.num_read_heads = None
self.mem_hei = None
self.mem_wid = None
self.clip_value = None
self.write_head_params = WriteHeadParams()
self.read_head_params = ReadHeadParams()
self.memory_params = MemoryParams()
class CircuitParams(Params):# settings for network architecture
def __init__(self):
super(CircuitParams, self).__init__()
self.batch_size = None
self.input_dim = None # set after env
self.read_vec_dim = None # num_read_heads x mem_wid
self.output_dim = None # set after env
if self.circuit_type == "ntm":
self.hidden_dim = 100
self.num_write_heads = 1
self.num_read_heads = 1
self.mem_hei = 128
self.mem_wid = 20
            self.clip_value = 20. # clips controller and circuit output values to within ±clip_value
elif self.circuit_type == "dnc":
self.hidden_dim = 64
self.num_write_heads = 1
self.num_read_heads = 4
self.mem_hei = 16
self.mem_wid = 16
            self.clip_value = 20. # clips controller and circuit output values to within ±clip_value
self.controller_params = ControllerParams()
self.accessor_params = AccessorParams()
class AgentParams(Params): # hyperparameters for drl agents
def __init__(self):
super(AgentParams, self).__init__()
if self.agent_type == "sl":
if self.circuit_type == "ntm":
self.criteria = nn.BCELoss()
self.optim = optim.RMSprop
self.steps = 100000 # max #iterations
self.batch_size = 16
self.early_stop = None # max #steps per episode
self.clip_grad = 50.
self.lr = 1e-4
self.optim_eps = 1e-10 # NOTE: we use this setting to be equivalent w/ the default settings in tensorflow
self.optim_alpha = 0.9 # NOTE: only for rmsprop, alpha is the decay in tensorflow, whose default is 0.9
self.eval_freq = 500
self.eval_steps = 50
self.prog_freq = self.eval_freq
self.test_nepisodes = 5
elif self.circuit_type == "dnc":
self.criteria = nn.BCELoss()
self.optim = optim.RMSprop
self.steps = 100000 # max #iterations
self.batch_size = 16
self.early_stop = None # max #steps per episode
self.clip_grad = 50.
self.lr = 1e-4
self.optim_eps = 1e-10 # NOTE: we use this setting to be equivalent w/ the default settings in tensorflow
self.optim_alpha = 0.9 # NOTE: only for rmsprop, alpha is the decay in tensorflow, whose default is 0.9
self.eval_freq = 500
self.eval_steps = 50
self.prog_freq = self.eval_freq
self.test_nepisodes = 5
elif self.agent_type == "empty":
self.criteria = nn.BCELoss()
self.optim = optim.RMSprop
self.steps = 100000 # max #iterations
self.batch_size = 16
self.early_stop = None # max #steps per episode
self.clip_grad = 50.
self.lr = 1e-4
self.optim_eps = 1e-10 # NOTE: we use this setting to be equivalent w/ the default settings in tensorflow
self.optim_alpha = 0.9 # NOTE: only for rmsprop, alpha is the decay in tensorflow, whose default is 0.9
self.eval_freq = 500
self.eval_steps = 50
self.prog_freq = self.eval_freq
self.test_nepisodes = 5
self.env_params = EnvParams()
self.circuit_params = CircuitParams()
class Options(Params):
agent_params = AgentParams()
|
py | b407e6a66353b5fd37cc749d870d8e14471cff66 | # @copyright@
# Copyright (c) 2006 - 2018 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
import random
from stack.api import Call, ReturnCode
def test_box():
"""
Test list/add/remove/set box.
"""
# Search for a unused box name and use it for
# the following tests.
done = False
while not done:
box = 'default-%s' % str(random.randint(0, 100))
result = Call('list box', [ box ], stderr=False)
if ReturnCode() and not result:
done = True
assert box
# add box
result = Call('add box', [ box ])
assert ReturnCode() == 0 and result == []
# lookup current box for host
result = Call('list host', [ 'localhost' ])
assert ReturnCode() == 0 and len(result) == 1
prevBox = result[0]['box']
# set box for this host
result = Call('set host box', [ 'localhost', 'box=%s' % box ])
assert ReturnCode() == 0
# verify box was set
result = Call('list host', [ 'localhost' ])
assert ReturnCode() == 0 and len(result) == 1
assert result[0]['box'] == box
# restore prev setting
result = Call('set host box', [ 'localhost', 'box=%s' % prevBox ])
assert ReturnCode() == 0
# remove box
result = Call('remove box', [ box ])
assert ReturnCode() == 0
# try to remove default
# "remove box" should protect against this
result = Call('remove box', [ 'default' ], stderr=False)
assert ReturnCode() == 255
# remove multiple boxes
# Add the first box back
result = Call('add box', [ box ])
assert ReturnCode() == 0 and result == []
# get a second box name
done = False
while not done:
second_box = 'default-%s' % str(random.randint(0, 100))
result = Call('list box', [ second_box ], stderr=False)
if ReturnCode() and not result:
done = True
assert second_box
result = Call('add box', [ second_box ])
assert ReturnCode() == 0 and result == []
# remove multiple boxes
result = Call('remove box', [ box, second_box ])
assert ReturnCode() == 0
|
py | b407e779ca4a3bc20e35fc16e2918b438a356fc9 | """Classes for loading, saving, evaluating, and operating on trajectories.
* For piecewise-linear interpolation in cartesian space, use :class:`~klampt.model.trajectory.Trajectory`.
* For piecewise-linear interpolation on a robot, use :class:`~klampt.model.trajectory.RobotTrajectory`.
* For Hermite interpolation in cartesian space, use :class:`~klampt.model.trajectory.HermiteTrajectory`.
"""
import bisect
from ..math import so3,se3,vectorops
from ..math import spline
from ..math.geodesic import *
import warnings
from ..robotsim import RobotModel,RobotModelLink
from .subrobot import SubRobotModel
from typing import Optional,Union,Sequence,List,Tuple,Callable
from .typing import Vector3,Vector,Rotation,RigidTransform
MetricType = Callable[[Vector,Vector],float]
class Trajectory:
"""A basic piecewise-linear trajectory class, which can be overloaded
to provide different functionality. A plain Trajectory interpolates
in Cartesian space.
(To interpolate for a robot, use RobotTrajectory. To perform
Hermite interpolation, use HermiteTrajectory)
Attributes:
times (list of floats): a list of times at which the milestones are met.
milestones (list of Configs): a list of milestones that are interpolated.
"""
def __init__(self,
times: Optional[List[float]] = None,
milestones: Optional[List[Vector]] = None
):
"""Args:
times (list of floats, optional): if provided, initializes the
self.times attribute. If milestones is provided, a uniform
timing is set. Otherwise self.times is empty.
milestones (list of Configs, optional): if provided, initializes
the self.milestones attribute. Otherwise milestones is empty.
Does not perform error checking. The caller must be sure that
the lists have the same size, the times are non-decreasing, and the configs
are equally-sized (you can call checkValid() for this).
"""
if milestones is None:
milestones = []
if times is None:
times = list(range(len(milestones)))
self.times = times
self.milestones = milestones
def load(self, fn: str) -> None:
"""Reads from a whitespace-separated file in the format::
t1 [q1]
t2 [q2]
...
where each [qi] is a Klamp't formatted length-n configuration, written
in the form ``n qi1 ... qin``.
"""
fin = open(fn, 'r')
self.times = []
self.milestones = []
for line in fin.readlines():
timedMilestone = [float(i) for i in line.strip().split()]
self.times.append(timedMilestone[0])
self.milestones.append(timedMilestone[2:])
fin.close()
def save(self, fn: str) -> None:
"""Writes to a whitespace-separated file"""
fout = open(fn, 'w')
for t,x in zip(self.times,self.milestones):
fout.write('%f\t%d '%(t,len(x)))
fout.write(' '.join([str(xi) for xi in x]))
fout.write('\n')
fout.close()
def startTime(self) -> float:
"""Returns the initial time."""
try: return self.times[0]
except IndexError: return 0.0
def endTime(self) -> float:
"""Returns the final time."""
try: return self.times[-1]
except IndexError: return 0.0
def duration(self) -> float:
"""Returns the duration of the trajectory."""
return self.endTime()-self.startTime()
def checkValid(self) -> None:
"""Checks whether this is a valid trajectory, raises a
ValueError if not."""
if len(self.times) != len(self.milestones):
raise ValueError("Times and milestones are not the same length")
if len(self.times)==0:
raise ValueError("Trajectory is empty")
for (tprev,t) in zip(self.times[:-1],self.times[1:]):
if tprev > t:
raise ValueError("Timing is not sorted")
n = len(self.milestones[0])
for q in self.milestones:
if len(q) != n:
raise ValueError("Invalid milestone size")
return
def getSegment(self, t: float, endBehavior: str = 'halt') -> Tuple[int,float]:
"""Returns the index and interpolation parameter for the
segment at time t.
Running time is O(log n) time where n is the number of segments.
Args:
t (float): The time at which to evaluate the segment
endBehavior (str): If 'loop' then the trajectory loops forever.
Returns:
(index,param) giving the segment index and interpolation
parameter. index < 0 indicates that the time is before the first
milestone and/or there is only 1 milestone.
"""
if len(self.times)==0:
raise ValueError("Empty trajectory")
if len(self.times)==1:
return (-1,0)
if t > self.times[-1]:
if endBehavior == 'loop':
try:
t = t % self.times[-1]
except ZeroDivisionError:
t = 0
else:
return (len(self.milestones)-1,0)
if t >= self.times[-1]:
return (len(self.milestones)-1,0)
if t <= self.times[0]:
return (-1,0)
i = bisect.bisect_right(self.times,t)
p=i-1
assert i > 0 and i < len(self.times),"Invalid time index "+str(t)+" in "+str(self.times)
u=(t-self.times[p])/(self.times[i]-self.times[p])
if i==0:
if endBehavior == 'loop':
t = t + self.times[-1]
p = -2
u=(t-self.times[p])/(self.times[-1]-self.times[p])
else:
return (-1,0)
assert u >= 0 and u <= 1
return (p,u)
def eval(self, t: float, endBehavior: str = 'halt') -> Vector:
"""Evaluates the trajectory using piecewise linear
interpolation.
Args:
t (float): The time at which to evaluate the segment
endBehavior (str): If 'loop' then the trajectory loops forever.
Returns:
The configuration at time t
"""
return self.eval_state(t,endBehavior)
def deriv(self, t: float, endBehavior: str = 'halt') -> Vector:
"""Evaluates the trajectory velocity using piecewise linear
interpolation.
Args:
t (float): The time at which to evaluate the segment
endBehavior (str): If 'loop' then the trajectory loops forever.
Returns:
The velocity (derivative) at time t
"""
return self.deriv_state(t,endBehavior)
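    # A minimal usage sketch (illustrative values, not taken from the library's tests):
    #   traj = Trajectory(times=[0.0, 1.0, 2.0], milestones=[[0.0], [1.0], [0.5]])
    #   traj.eval(0.5)    # -> [0.5]; linear interpolation on the first segment
    #   traj.deriv(1.5)   # -> [-0.5]; constant slope of the second segment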
def waypoint(self, state: Vector) -> Vector:
"""Returns the primary configuration corresponding to the given state.
This is usually the same as ``state`` but for some trajectories,
specifically Hermite curves, the state and configuration are not
identically the same.
"""
return state
def eval_state(self, t: float, endBehavior: str = 'halt') -> Vector:
"""Internal eval, used on the underlying state representation"""
i,u = self.getSegment(t,endBehavior)
if i<0: return self.milestones[0]
elif i+1>=len(self.milestones): return self.milestones[-1]
#linear interpolate between milestones[i] and milestones[i+1]
return self.interpolate_state(self.milestones[i],self.milestones[i+1],u,self.times[i+1]-self.times[i])
def deriv_state(self, t: float, endBehavior: str = 'halt') -> Vector:
"""Internal deriv, used on the underlying state representation"""
i,u = self.getSegment(t,endBehavior)
if i<0: return [0.0]*len(self.milestones[0])
elif i+1>=len(self.milestones): return [0.0]*len(self.milestones[-1])
return self.difference_state(self.milestones[i+1],self.milestones[i],u,self.times[i+1]-self.times[i])
def interpolate_state(self, a: Vector, b: Vector, u: float, dt: float) -> Vector:
"""Can override this to implement non-cartesian spaces.
Interpolates along the geodesic from a to b. dt is the
duration of the segment from a to b"""
return vectorops.interpolate(a,b,u)
def difference_state(self, a: Vector, b: Vector, u: float, dt: float) -> Vector:
"""Subclasses can override this to implement non-Cartesian
spaces. Returns the time derivative along the geodesic from b to
a, with time domain [0,dt]. In cartesian spaces, this is (a-b)/dt.
Args:
a (vector): the end point of the segment
b (vector): the start point of the segment.
u (float): the evaluation point of the derivative along the
segment, with 0 indicating b and 1 indicating a
dt (float): the duration of the segment from b to a.
"""
return vectorops.mul(vectorops.sub(a,b),1.0/dt)
def concat(self,
suffix: 'Trajectory',
relative: bool = False,
jumpPolicy: str = 'strict'
) -> 'Trajectory':
"""Returns a new trajectory with another trajectory
concatenated onto self.
Args:
suffix (Trajectory): the suffix trajectory
relative (bool): If True, then the suffix's time domain is shifted
so that self.times[-1] is added on before concatenation.
jumpPolicy (str): If the suffix starts exactly at the existing trajectory's
end time, then jumpPolicy is checked. Can be:
- 'strict': the suffix's first milestone has to be equal to the
existing trajectory's last milestone. Otherwise an exception
is raised.
- 'blend': the existing trajectory's last milestone is
discarded.
- 'jump': a discontinuity is added to the trajectory.
"""
if self.__class__ is not suffix.__class__:
raise ValueError("Can only concatenate like Trajectory classes: %s != %s"%(self.__class__.__name__,suffix.__class__.__name__))
if not relative or len(self.times)==0:
offset = 0
else:
offset = self.times[-1]
if len(self.times)!=0:
if suffix.times[0]+offset < self.times[-1]:
raise ValueError("Invalid concatenation, suffix startTime precedes endTime")
if suffix.times[0]+offset == self.times[-1]:
#keyframe exactly equal; skip the first milestone
#check equality with last milestone
if jumpPolicy=='strict' and suffix.milestones[0] != self.milestones[-1]:
print("Suffix start:",suffix.milestones[0])
print("Self end:",self.milestones[-1])
raise ValueError("Concatenation would cause a jump in configuration")
if jumpPolicy=='strict' or (jumpPolicy=='blend' and suffix.milestones[0] != self.milestones[-1]):
#discard last milestone of self
times = self.times[:-1] + [t+offset for t in suffix.times]
milestones = self.milestones[:-1] + suffix.milestones
return self.constructor()(times,milestones)
times = self.times + [t+offset for t in suffix.times]
milestones = self.milestones + suffix.milestones
return self.constructor()(times,milestones)
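    # Concatenation sketch (illustrative values):
    #   a = Trajectory([0.0, 1.0], [[0.0], [1.0]])
    #   b = Trajectory([0.0, 1.0], [[1.0], [2.0]])
    #   c = a.concat(b, relative=True)   # c.times == [0.0, 1.0, 2.0]; the strict join succeeds since a ends where b starts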
def insert(self, time: float) -> int:
"""Inserts a milestone and keyframe at the given time. Returns the index of the new
milestone, or if a milestone already exists, then it returns that milestone index.
If the path is empty, the milestone is set to an empty list [].
"""
if len(self.times) == 0:
self.times = [time]
self.milestones = [[]]
return 0
if time <= self.times[0]:
if time < self.times[0]:
self.times.insert(0,time)
self.milestones.insert(0,self.milestones[0][:])
return 0
elif time >= self.times[-1]:
if time > self.times[-1]:
self.times.append(time)
self.milestones.append(self.milestones[-1][:])
return len(self.times)-1
else:
i,u = self.getSegment(time)
assert i >= 0,"getSegment returned -1? something must be wrong with the times"
if u == 0:
return i
elif u == 1:
return i+1
else:
q = self.interpolate_state(self.milestones[i],self.milestones[i+1],u,self.times[i+1]-self.times[i])
                self.times.insert(i+1,time)
                self.milestones.insert(i+1,q)
                return i+1
def split(self, time: float) -> Tuple['Trajectory','Trajectory']:
"""Returns a pair of trajectories obtained from splitting this
one at the given time"""
if time <= self.times[0]:
#split before start of trajectory
return self.constructor()([time],[self.milestones[0]]),self.constructor()([time]+self.times,[self.milestones[0]]+self.milestones)
elif time >= self.times[-1]:
#split after end of trajectory
return self.constructor()(self.times+[time],self.milestones+[self.milestones[-1]]),self.constructor()([time],[self.milestones[-1]])
i,u = self.getSegment(time)
assert i >= 0,"getSegment returned -1? something must be wrong with the times"
#split in middle of trajectory
splitpt = self.interpolate_state(self.milestones[i],self.milestones[i+1],u,self.times[i+1]-self.times[i])
front = self.constructor()(self.times[:i+1],self.milestones[:i+1])
back = self.constructor()(self.times[i+1:],self.milestones[i+1:])
if u > 0:
front.times.append(time)
front.milestones.append(splitpt)
if u < 1:
back.times = [time] + back.times
back.milestones = [splitpt] + back.milestones
return (front,back)
def before(self, time: float) -> 'Trajectory':
"""Returns the part of the trajectory before the given time"""
return self.split(time)[0]
def after(self, time: float) -> 'Trajectory':
"""Returns the part of the trajectory after the given time"""
return self.split(time)[1]
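    # Sketch of the editing operations (illustrative; `traj` as in the earlier eval sketch):
    #   k = traj.insert(0.5)             # interpolated milestone added at t=0.5; k is its index
    #   front, back = traj.split(1.0)    # front covers [0.0,1.0], back covers [1.0,2.0]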
def splice(self,
suffix: 'Trajectory',
               time: Optional[float] = None,
relative: bool = False,
jumpPolicy: str = 'strict'
) -> 'Trajectory':
"""Returns a path such that the suffix is spliced in at some time
Args:
suffix (Trajectory): the trajectory to splice in
time (float, optional): determines when the splice occurs.
The suffix is spliced in at the suffix's start time if time=None,
or the given time if specified.
jumpPolicy (str): if 'strict', then it is required that
suffix(t0)=path(t0) where t0 is the absolute start time
of the suffix.
"""
offset = 0
if time is None:
time = suffix.times[0]
if relative and len(self.times) > 0:
offset = self.times[-1]
time = time+offset
before = self.before(time)
return before.concat(suffix,relative,jumpPolicy)
def constructor(self) -> Callable[[List,List],'Trajectory']:
"""Returns a "standard" constructor for the split / concat
routines. The result should be a function that takes two
arguments: a list of times and a list of milestones."""
return Trajectory
def length(self, metric: Optional[MetricType] = None) -> float:
"""Returns the arc-length of the trajectory, according to the given
metric.
If metric = None, uses the "natural" metric for this trajectory,
which is usually Euclidean. Otherwise it is a function f(a,b)
from configurations to nonnegative numbers.
"""
if metric is None:
metric = vectorops.distance
return sum(metric(a,b) for a,b in zip(self.milestones[:-1],self.milestones[1:]))
def discretize_state(self, dt: float) -> 'Trajectory':
"""Returns a copy of this but with uniformly defined milestones at
resolution dt. Start and goal are maintained exactly"""
assert dt > 0,"dt must be positive"
t = self.times[0]
new_milestones = [self.milestones[0][:]]
new_times = [self.times[0]]
#TODO: (T/dt) log n time, can be done in (T/dt) time
while t+dt < self.times[-1]:
t += dt
new_times.append(t)
new_milestones.append(self.eval_state(t))
if abs(t-self.times[-1]) > 1e-6:
new_times.append(self.times[-1])
new_milestones.append(self.milestones[-1][:])
else:
new_times[-1] = self.times[-1]
new_milestones[-1] = self.milestones[-1][:]
return self.constructor()(new_times,new_milestones)
def discretize(self, dt: float) -> 'Trajectory':
"""Returns a trajectory, uniformly discretized at resolution dt, and
        with state-space the same as its configuration space. Similar to
        discretize_state(), but if the state space is of higher dimension (e.g.,
Hermite trajectories) this projects to a piecewise linear trajectory.
"""
return self.discretize_state(dt)
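    # Discretization sketch (illustrative):
    #   fine = traj.discretize(0.1)   # new Trajectory with milestones every 0.1s, plus the exact final time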
def remesh(self, newtimes: List[float], tol: float=1e-6) -> Tuple['Trajectory',List[int]]:
"""Returns a path that has milestones at the times given in newtimes, as well
as the current milestone times. Return value is (path,newtimeidx) where
path is the remeshed path, and newtimeidx is a list of time indices for which
path.times[newtimeidx[i]] = newtimes[i].
newtimes is an iterable over floats. It does not need to be sorted.
tol is a parameter specifying how closely the returned path must interpolate
the original path. Old milestones will be dropped if they are not needed to follow
the path within this tolerance.
The end behavior is assumed to be 'halt'.
"""
sorter = [(t,-1-i) for (i,t) in enumerate(self.times)] + [(t,i) for (i,t) in enumerate(newtimes)]
sorter = sorted(sorter)
res = self.constructor()(None,None)
res.times.append(sorter[0][0])
res.milestones.append(self.milestones[0])
#maybe a constant first section
resindices = []
i = 0
while sorter[i][0] < self.startTime():
if sorter[i][1] >= 0:
resindices.append(0)
i += 1
if i != 0:
res.times.append(self.startTime())
res.milestones.append(self.milestones[0])
firstold = 0
lastold = 0
while i < len(sorter):
#check if we should add this
t,idx = sorter[i]
i+=1
if idx >= 0: #new time
if t == res.times[-1]:
resindices.append(len(res.times)-1)
continue
#it's a new mesh point, add it and check whether previous old milestones should be added
if self.times[lastold] == t:
#matched the last old mesh point, no need to call eval_state()
newx = self.milestones[lastold]
else:
newx = self.eval_state(t)
res.times.append(t)
res.milestones.append(newx)
for j in range(firstold,lastold):
if self.times[j] == t:
continue
x = res.eval_state(self.times[j])
if vectorops.norm(self.difference_state(x,self.milestones[j],1.0,1.0)) > tol:
#add it
res.times[-1] = self.times[j]
res.milestones[-1] = self.milestones[j]
res.times.append(t)
res.milestones.append(newx)
resindices.append(len(res.times)-1)
firstold = lastold+1
else:
#mark the range of old milestones to add
lastold = -idx-1
for j in range(firstold,lastold):
res.times.append(self.times[j])
res.milestones.append(self.milestones[j])
#sanity check
for i in range(len(res.times)-1):
assert res.times[i] < res.times[i+1]
for i,idx in enumerate(resindices):
assert newtimes[i] == res.times[idx],"Resindices mismatch? {} should index {} to {}".format(resindices,newtimes,res.times)
return (res,resindices)
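    # Remeshing sketch (illustrative): add milestones at requested times while keeping the originals that matter.
    #   newtraj, idx = traj.remesh([0.25, 0.75])
    #   # newtraj.times[idx[0]] == 0.25 and newtraj.times[idx[1]] == 0.75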
def extractDofs(self,dofs:List[int]) -> 'Trajectory':
"""Returns a trajectory just over the given DOFs.
Args:
dofs (list of int): the indices to extract.
Returns:
A copy of this trajectory but only over the given DOFs.
"""
if len(self.times)==0:
            return self.constructor()()
n = len(self.milestones[0])
for d in dofs:
if abs(d) >= n:
raise ValueError("Invalid dof")
        return self.constructor()([t for t in self.times],[[m[j] for j in dofs] for m in self.milestones])
def stackDofs(self, trajs: List['Trajectory'], strict: bool = True) -> None:
"""Stacks the degrees of freedom of multiple trajectories together.
The result is contained in self.
All evaluations are assumed to take place with the 'halt' endBehavior.
Args:
trajs (list or tuple of Trajectory): the trajectories to stack
strict (bool, optional): if True, will warn if the classes of the
trajectories do not match self.
"""
if not isinstance(trajs,(list,tuple)):
raise ValueError("Trajectory.stackDofs takes in a list of trajectories as input")
warned = not strict
for traj in trajs:
if traj.__class__ != self.__class__:
if not warned:
warnings.warn("Trajectory.stackDofs is merging trajectories of different classes?")
warned = True
alltimes = set()
for traj in trajs:
for t in traj.times:
alltimes.add(t)
self.times = sorted(alltimes)
stacktrajs = [traj.remesh(self.times) for traj in trajs]
for traj in stacktrajs:
assert len(traj.milestones) == len(self.times)
self.milestones = []
for i,t in enumerate(self.times):
self.milestones.append(sum([list(traj.milestones[i]) for traj in stacktrajs],[]))
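    # Stacking sketch (illustrative): combine two 1-D trajectories into one 2-D trajectory.
    #   xy = Trajectory([0.0, 1.0], [[0.0], [1.0]])
    #   z = Trajectory([0.0, 2.0], [[5.0], [6.0]])
    #   stacked = Trajectory()
    #   stacked.stackDofs([xy, z])   # stacked.milestones are 2-D; stacked.times is the union {0.0, 1.0, 2.0}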
class RobotTrajectory(Trajectory):
"""A trajectory that performs interpolation according to the robot's
interpolation scheme."""
def __init__(self,
robot: Union[RobotModel,SubRobotModel],
times: Optional[List[float]] = None,
milestones: Optional[List[Vector]] = None
):
"""
Args:
robot (RobotModel or SubRobotModel): the robot whose configuration
should follow this trajectory.
times (list of floats, optional): if provided, initializes the
self.times attribute. If milestones is provided, a uniform
timing is set. Otherwise self.times is empty.
milestones (list of Configs, optional): if provided, initializes
the self.milestones attribute. Otherwise milestones is empty.
"""
if not isinstance(robot,(RobotModel,SubRobotModel)):
raise ValueError("RobotTrajectory must be provided with a RobotModel or SubRobotModel as first argument")
Trajectory.__init__(self,times,milestones)
self.robot = robot
def interpolate_state(self,a,b,u,dt):
return self.robot.interpolate(a,b,u)
def difference_state(self,a,b,u,dt):
assert len(a) == self.robot.numLinks(),"Invalid config "+str(a)+" should have length "+str(self.robot.numLinks())
assert len(b) == self.robot.numLinks(),"Invalid config "+str(b)+" should have length "+str(self.robot.numLinks())
#TODO: evaluate at u units from b to a
return vectorops.mul(self.robot.interpolateDeriv(b,a),1.0/dt)
def constructor(self):
return lambda times=None,milestones=None: RobotTrajectory(self.robot,times,milestones)
def getLinkTrajectory(self,
link: Union[int,str,RobotModelLink],
discretization: Optional[List[float]] = None
) -> 'SE3Trajectory':
"""Returns the SE3Trajectory corresponding to the link's pose along the robot's
trajectory. If discretization = None, only the milestones are extracted.
Otherwise, the piecewise linear approximation at dt = discretization is used.
"""
if discretization != None:
return self.discretize(discretization).getLinkTrajectory(link)
if isinstance(link,(int,str)):
link = self.robot.link(link)
Rmilestones = []
for m in self.milestones:
self.robot.setConfig(m)
Rmilestones.append(link.getTransform())
return SE3Trajectory(self.times[:],Rmilestones)
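    # Sketch (assumes `robot` is a RobotModel loaded elsewhere; "tool" and `qgoal` are placeholders):
    #   rtraj = RobotTrajectory(robot, [0.0, 1.0], [robot.getConfig(), qgoal])
    #   ee_traj = rtraj.getLinkTrajectory("tool", discretization=0.05)   # SE3Trajectory of that link's pose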
def length(self,metric=None):
if metric is None:
return Trajectory.length(self,self.robot.distance)
else:
return Trajectory.length(self,metric)
def checkValid(self):
Trajectory.checkValid(self)
for m in self.milestones:
if len(m) != self.robot.numLinks():
raise ValueError("Invalid length of milestone: {} != {}".format(len(m),self.robot.numLinks()))
def extractDofs(self, dofs: List[Union[int,str]]) -> 'RobotTrajectory':
"""Returns a RobotTrajectory just over the given DOFs.
Args:
dofs (list of int or str): the indices to extract
Returns:
A copy of this trajectory but over a SubRobotModel.
"""
from .subrobot import SubRobotModel
subrob = SubRobotModel(self.robot,dofs)
if len(self.times)==0:
return RobotTrajectory(subrob)
return RobotTrajectory(subrob,[t for t in self.times],[[m[j] for j in subrob._links] for m in self.milestones])
def stackDofs(self,trajs):
Trajectory.stackDofs(self,trajs,strict=False)
        if len(self.milestones) > 0 and len(self.milestones[0]) != self.robot.numLinks():
warnings.warn("RobotTrajectory.stackDofs: the result doesn't match the robot's #DOF")
class GeodesicTrajectory(Trajectory):
"""A trajectory that performs interpolation on a GeodesicSpace.
See :mod:`klampt.math.geodesic` for more information."""
def __init__(self,
geodesic: GeodesicSpace,
times: Optional[List[float]] = None,
milestones: Optional[List[Vector]] = None
):
self.geodesic = geodesic
Trajectory.__init__(self,times,milestones)
def interpolate_state(self,a,b,u,dt):
return self.geodesic.interpolate(a,b,u)
def difference_state(self,a,b,u,dt):
x = self.interpolate_state(b,a,u,dt)
return vectorops.mul(vectorops.sub(self.geodesic.difference(a,x),self.geodesic.difference(b,x)),1.0/dt)
def constructor(self):
return lambda times,milestones:GeodesicTrajectory(self.geodesic,times,milestones)
def length(self,metric=None):
if metric is None:
return Trajectory.length(self,self.geodesic.distance)
else:
return Trajectory.length(self,metric)
def checkValid(self):
Trajectory.checkValid(self)
try:
d = self.geodesic.extrinsicDimension()
for m in self.milestones:
if len(m) != d:
raise ValueError("Milestone length doesn't match geodesic space's dimension: {} != {}".format(len(m),d))
except NotImplementedError:
pass
def extractDofs(self,dofs):
"""Invalid for GeodesicTrajectory."""
raise ValueError("Cannot extract DOFs from a GeodesicTrajectory")
def stackDofs(self,trajs):
Trajectory.stackDofs(self,trajs,strict=False)
try:
self.checkValid()
except ValueError:
warnings.warn("GeodesicTrajectory.stackDofs: the result doesn't match the geodesic's dimension")
class SO3Trajectory(GeodesicTrajectory):
"""A trajectory that performs interpolation in SO3. Each milestone
is a 9-D :mod:`klampt.math.so3` element."""
def __init__(self, times: Optional[List[float]] = None, milestones: Optional[List[Vector]] = None):
GeodesicTrajectory.__init__(self,SO3Space(),times,milestones)
def deriv_angvel(self, t: float,endBehavior: str = 'halt') -> Vector3:
"""Returns the derivative at t, in angular velocity form"""
cw = GeodesicTrajectory.deriv(self,t,endBehavior)
return so3.deskew(cw)
def preTransform(self,R: Rotation) -> None:
"""Premultiplies every rotation in here by the so3 element
R. In other words, if R rotates a local frame F to frame F',
this method converts this SO3Trajectory from coordinates in F
to coordinates in F'"""
for i,m in enumerate(self.milestones):
self.milestones[i] = so3.mul(R,m)
def postTransform(self,R: Rotation) -> None:
"""Postmultiplies every rotation in here by the se3 element
R. In other words, if R rotates a local frame F to frame F',
this method converts this SO3Trajectory from describing how F'
rotates to how F rotates."""
for i,m in enumerate(self.milestones):
self.milestones[i] = so3.mul(m,R)
def getPointTrajectory(self, localPt: Vector3) -> Trajectory:
"""Returns a Trajectory describing the movement of the point localPt
attached to this rotating frame. """
return Trajectory(self.times,[so3.apply(m,localPt) for m in self.milestones])
def checkValid(self):
Trajectory.checkValid(self)
for m in self.milestones:
if len(m) != 9:
raise ValueError("Invalid length of milestone: {} != 9".format(len(m)))
def constructor(self):
return SO3Trajectory
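    # Sketch (illustrative): interpolate between two orientations.
    #   import math
    #   R0 = so3.identity()
    #   R1 = so3.rotation([0, 0, 1], math.pi/2)
    #   rot_traj = SO3Trajectory([0.0, 1.0], [R0, R1])
    #   rot_traj.eval(0.5)   # rotation about z by roughly 45 degrees (geodesic interpolation)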
class SE3Trajectory(GeodesicTrajectory):
"""A trajectory that performs interpolation in SE3. Each milestone (state)
is a 12-D flattened :mod:`klampt.math.se3` element (i.e., the concatenation of
R + t for an (R,t) pair)."""
def __init__(self,
times: Optional[List[float]] = None,
milestones: Optional[Union[List[Vector],List[RigidTransform]]] = None
):
"""Constructor can take either a list of SE3 elements or
12-element vectors."""
if milestones is not None and len(milestones) > 0 and len(milestones[0])==2:
GeodesicTrajectory.__init__(self,SE3Space(),times,[m[0]+m[1] for m in milestones])
else:
GeodesicTrajectory.__init__(self,SE3Space(),times,milestones)
def to_se3(self, state: Vector) -> RigidTransform:
"""Converts a state parameter vector to a klampt.se3 element"""
return (state[:9],state[9:])
def waypoint(self, state: Vector) -> RigidTransform:
return self.to_se3(state)
def from_se3(self, T: RigidTransform) -> Vector:
"""Converts a klampt.se3 element to a state parameter vector"""
return list(T[0]) + list(T[1])
def eval(self, t: float, endBehavior: str = 'halt') -> RigidTransform:
"""Returns an SE3 element"""
res = self.eval_state(t,endBehavior)
return self.to_se3(res)
def deriv(self, t: float, endBehavior: str = 'halt') -> RigidTransform:
"""Returns the derivative as the derivatives of an SE3
element"""
res = self.deriv_state(t,endBehavior)
return self.to_se3(res)
def deriv_screw(self, t:float, endBehavior: str = 'halt') -> Tuple[Vector3,Vector3]:
"""Returns the derivative at t, in screw form, that is, a 6D
(angular velocity,velocity) vector."""
dT = self.deriv(t,endBehavior)
return so3.deskew(dT[0])+dT[1]
def preTransform(self, T: RigidTransform) -> None:
"""Premultiplies every transform in self by the se3 element
T. In other words, if T transforms a local frame F to frame F',
this method converts this SE3Trajectory from coordinates in F
to coordinates in F'"""
for i,m in enumerate(self.milestones):
Tm = self.to_se3(m)
self.milestones[i] = self.from_se3(se3.mul(T,Tm))
def postTransform(self, T: RigidTransform) -> None:
"""Postmultiplies every transform in self by the se3 element
T. In other words, if T transforms a local frame F to frame F',
this method converts this SE3Trajectory from describing how F'
moves to how F moves."""
for i,m in enumerate(self.milestones):
Tm = self.to_se3(m)
self.milestones[i] = self.from_se3(se3.mul(Tm,T))
def getRotationTrajectory(self) -> SO3Trajectory:
"""Returns an SO3Trajectory describing the rotation
trajectory."""
return SO3Trajectory(self.times,[m[:9] for m in self.milestones])
def getPositionTrajectory(self, localPt: Optional[Vector3] = None) -> Trajectory:
"""Returns a Trajectory describing the movement of the given
local point localPt (or the origin, if none is provided)."""
if localPt is None:
return Trajectory(self.times,[m[9:] for m in self.milestones])
else:
return Trajectory(self.times,[se3.apply(self.to_se3(m),localPt) for m in self.milestones])
def checkValid(self):
Trajectory.checkValid(self)
for m in self.milestones:
            if len(m) != 12:
raise ValueError("Invalid length of milestone: {} != 12".format(len(m)))
def extractDofs(self, dofs: List[int]) -> Trajectory:
if list(dofs) == list(range(9)):
traj = Trajectory.extractDofs(self,dofs)
            return SO3Trajectory(traj.times,traj.milestones)
elif all(d >= 9 for d in dofs):
return Trajectory.extractDofs(self,dofs)
else:
raise ValueError("Cannot extract DOFs from a SE3Trajectory")
def constructor(self):
return SE3Trajectory
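    # Sketch (illustrative): milestones may be given as se3 pairs (R,t) or as flat 12-vectors.
    #   T0 = se3.identity()
    #   T1 = (so3.identity(), [1.0, 0.0, 0.0])
    #   straj = SE3Trajectory([0.0, 1.0], [T0, T1])
    #   straj.eval(0.5)   # -> (R, t) with t near [0.5, 0.0, 0.0]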
class HermiteTrajectory(Trajectory):
"""A trajectory that performs cubic interpolation between prescribed
segment endpoints and velocities.
The milestones (states) are given in phase space (x,dx).
``eval(t)`` returns the primary configuration x, and ``deriv(t)``
    returns the velocity dx. To get acceleration, use ``eval_accel(t)``. To get
the state space (x,dx), use ``eval_state(t)``.
Args:
times (list of float, optional): the knot points
milestones (list of lists, optional): the milestones met at the knot
points.
dmilestones (list of lists, optional): the velocities (derivatives
w.r.t time) at each knot point.
Possible constructor options are:
- HermiteTrajectory(): empty trajectory
- HermiteTrajectory(times,milestones): milestones contains
2N-D lists consisting of the concatenation of a point and its outgoing
velocity.
- HermiteTrajectory(times,milestones,dmilestones):
milestones and dmilestones each contain N-D lists defining the points and
outgoing velocities.
Note: the curve is assumed to be smooth. To make a non-smooth curve,
duplicate the knot point and milestone, but set a different velocity
at the copy.
"""
def __init__(self,
times: Optional[List[float]] = None,
milestones: Optional[List[Vector]] = None,
dmilestones: Optional[List[Vector]] = None
):
if dmilestones is None:
Trajectory.__init__(self,times,milestones)
else:
assert milestones != None
#interpret as config/velocity
self.times = times
self.milestones = [q+dq for (q,dq) in zip(milestones,dmilestones)]
def makeSpline(self,
waypointTrajectory: Trajectory,
preventOvershoot: bool = True,
loop: bool = False
) -> None:
"""Computes natural velocities for a standard configuration-
space Trajectory to make it smoother."""
if loop and waypointTrajectory.milestones[-1] != waypointTrajectory.milestones[0]:
raise ValueError("Asking for a loop trajectory but the endpoints don't match up")
velocities = []
t = waypointTrajectory
d = len(t.milestones[0])
if len(t.milestones)==1:
velocities.append([0]*d)
elif len(t.milestones)==2:
if loop:
v = [0]*d
else:
s = (1.0/(t.times[1]-t.times[0]) if (t.times[1]-t.times[0]) != 0 else 0)
v = vectorops.mul(vectorops.sub(t.milestones[1],t.milestones[0]),s)
velocities.append(v)
velocities.append(v)
else:
third = 1.0/3.0
N = len(waypointTrajectory.milestones)
if loop:
timeiter = zip([-2]+list(range(N-1)),range(0,N),list(range(1,N))+[1])
else:
timeiter = zip(range(0,N-2),range(1,N-1),range(2,N))
for p,i,n in timeiter:
if p < 0:
dtp = t.times[-1] - t.times[-2]
else:
dtp = t.times[i] - t.times[p]
if n <= i:
dtn = t.times[1]-t.times[0]
else:
dtn = t.times[n]-t.times[i]
assert dtp >= 0 and dtn >= 0
s = (1.0/(dtp+dtn) if (dtp+dtn) != 0 else 0)
v = vectorops.mul(vectorops.sub(t.milestones[n],t.milestones[p]),s)
if preventOvershoot:
for j,(x,a,b) in enumerate(zip(t.milestones[i],t.milestones[p],t.milestones[n])):
if x <= min(a,b):
v[j] = 0.0
elif x >= max(a,b):
v[j] = 0.0
elif v[j] < 0 and x - v[j]*third*dtp >= a:
v[j] = 3.0/dtp*(x-a)
elif v[j] > 0 and x - v[j]*third*dtp <= a:
v[j] = 3.0/dtp*(x-a)
elif v[j] < 0 and x + v[j]*third*dtn < b:
v[j] = 3.0/dtn*(b-x)
elif v[j] > 0 and x + v[j]*third*dtn > b:
v[j] = 3.0/dtn*(b-x)
velocities.append(v)
if not loop:
#start velocity as quadratic
x2 = vectorops.madd(t.milestones[1],velocities[0],-third*(t.times[1]-t.times[0]))
x1 = vectorops.madd(x2,vectorops.sub(t.milestones[1],t.milestones[0]),-third)
v0 = vectorops.mul(vectorops.sub(x1,t.milestones[0]),3.0/(t.times[1]-t.times[0]))
#terminal velocity as quadratic
xn_2 = vectorops.madd(t.milestones[-2],velocities[-1],third*(t.times[-1]-t.times[-2]))
xn_1 = vectorops.madd(xn_2,vectorops.sub(t.milestones[-1],t.milestones[-2]),third)
vn = vectorops.mul(vectorops.sub(t.milestones[-1],xn_1),3.0/(t.times[-1]-t.times[-2]))
velocities = [v0]+velocities+[vn]
self.__init__(waypointTrajectory.times[:],waypointTrajectory.milestones,velocities)
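    # Spline-smoothing sketch (illustrative):
    #   waypoints = Trajectory([0.0, 1.0, 2.0], [[0.0], [1.0], [0.0]])
    #   htraj = HermiteTrajectory()
    #   htraj.makeSpline(waypoints)
    #   htraj.eval(0.5), htraj.deriv(0.5)   # smooth position and velocity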
def makeBezier(self, times: List[float], controlPoints: List[Vector]) -> None:
"""Sets up this spline to perform Bezier interpolation of the given
control points, with segment 0 a Bezier curve on cps[0:3], segment 1 a
Bezier curve on cps[3:6], etc.
"""
nsegs = len(times)-1
if nsegs*3+1 != len(controlPoints):
raise ValueError("To perform Bezier interpolation, need # of controlPoints to be 3*Nsegs+1")
newtimes = []
milestones = []
outgoingVelocities = []
for i in range(0,len(times)-1):
a,b,c,d = controlPoints[i*3:i*3+4]
dt = times[i+1]-times[i]
if dt <= 0: raise ValueError("Times must be strictly monotonically increasing")
lieDeriv0 = vectorops.mul(vectorops.sub(b,a),3/dt)
lieDeriv1 = vectorops.mul(vectorops.sub(c,d),-3/dt)
if i > 0:
if vectorops.distance(lieDeriv0,outgoingVelocities[-1]) > 1e-4:
#need to double up knot point
newtimes.append(newtimes[-1])
milestones.append(milestones[-1])
outgoingVelocities.append(lieDeriv0)
else:
newtimes.append(times[i])
milestones.append(a)
outgoingVelocities.append(lieDeriv0)
newtimes.append(times[i+1])
milestones.append(d)
outgoingVelocities.append(lieDeriv1)
self.__init__(newtimes,milestones,outgoingVelocities)
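    # Bezier sketch (illustrative): one segment needs 4 control points (3*Nsegs+1 total).
    #   htraj = HermiteTrajectory()
    #   htraj.makeBezier([0.0, 1.0], [[0.0], [0.2], [0.8], [1.0]])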
def makeMinTimeSpline(self,
milestones: List[Vector],
velocities: Optional[List[Vector]] = None,
xmin: Optional[Vector] = None,
xmax: Optional[Vector] = None,
vmax: Optional[Vector] = None,
amax: Optional[Vector] = None
) -> None:
"""Creates a spline that interpolates between the given milestones with
bounded velocities, accelerations, and positions.
If velocities==None, this requires the spline to move in a straight
configuration-space path between the given milestones. This option is
helpful for postprocessing the results for kinematic motion planning,
for example.
"""
from ..plan import motionplanning
if vmax is None and amax is None:
raise ValueError("Either vmax or amax must be provided")
if len(milestones) == 0 or len(milestones[0]) == 0:
raise ValueError("Milestones need to be provided and at least 1-d")
n = len(milestones[0])
for m in milestones[1:]:
if len(m) != n:
raise ValueError("Invalid size of milestone")
if velocities is not None:
if len(velocities) != len(milestones):
raise ValueError("Velocities need to have the same size as milestones")
for v in velocities:
if len(v) != n:
raise ValueError("Invalid size of velocity milestone")
inf = float('inf')
if xmin is None:
xmin = [-inf]*n
else:
if len(xmin) != n:
raise ValueError("Invalid size of lower bound")
if xmax is None:
xmax = [inf]*n
else:
if len(xmax) != n:
raise ValueError("Invalid size of upper bound")
if vmax is None:
vmax = [inf]*n
else:
if len(vmax) != n:
raise ValueError("Invalid size of velocity bound")
if amax is None:
#do a piecewise linear interpolation, ignore x bounds
raise NotImplementedError("TODO: amax = None case")
else:
if len(amax) != n:
raise ValueError("Invalid size of acceleration bound")
zeros = [0]*n
newtimes = [0]
newmilestones = [milestones[0]]
newvelocities = [velocities[0] if velocities is not None else zeros]
for i in range(len(milestones)-1):
m0 = milestones[i]
m1 = milestones[i+1]
if velocities is None:
ts,xs,vs = motionplanning.interpolate_nd_min_time_linear(m0,m1,vmax,amax)
else:
v0 = velocities[i]
v1 = velocities[i+1]
ts,xs,vs = motionplanning.interpolate_nd_min_time(m0,v0,m1,v1,xmin,xmax,vmax,amax)
ts,xs,vs = motionplanning.combine_nd_cubic(ts,xs,vs)
newtimes += [newtimes[-1] + t for t in ts[1:]]
newmilestones += xs[1:]
newvelocities += vs[1:]
self.__init__(newtimes,newmilestones,newvelocities)
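    # Minimum-time sketch (illustrative; requires klampt.plan.motionplanning):
    #   htraj = HermiteTrajectory()
    #   htraj.makeMinTimeSpline([[0.0], [1.0]], vmax=[1.0], amax=[2.0])
    #   htraj.endTime()   # duration chosen to respect the velocity/acceleration bounds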
def waypoint(self,state):
return state[:len(state)//2]
def eval_state(self,t,endBehavior='halt'):
"""Returns the (configuration,velocity) state at time t."""
return Trajectory.eval_state(self,t,endBehavior)
def eval(self,t,endBehavior='halt'):
"""Returns just the configuration component of the result"""
res = Trajectory.eval_state(self,t,endBehavior)
return res[:len(res)//2]
def deriv(self,t,endBehavior='halt'):
"""Returns just the velocity component of the result"""
res = Trajectory.eval_state(self,t,endBehavior)
return res[len(res)//2:]
def eval_accel(self,t,endBehavior='halt') -> Vector:
"""Returns just the acceleration component of the derivative"""
res = Trajectory.deriv_state(self,t,endBehavior)
return res[len(res)//2:]
def interpolate_state(self,a,b,u,dt):
assert len(a)==len(b)
x1,v1 = a[:len(a)//2],vectorops.mul(a[len(a)//2:],dt)
x2,v2 = b[:len(b)//2],vectorops.mul(b[len(b)//2:],dt)
x = spline.hermite_eval(x1,v1,x2,v2,u)
dx = vectorops.mul(spline.hermite_deriv(x1,v1,x2,v2,u),1.0/dt)
return x+dx
def difference_state(self,a,b,u,dt):
assert len(a)==len(b)
x1,v1 = a[:len(a)//2],vectorops.mul(a[len(a)//2:],dt)
x2,v2 = b[:len(b)//2],vectorops.mul(b[len(b)//2:],dt)
dx = vectorops.mul(spline.hermite_deriv(x1,v1,x2,v2,u,order=1),1.0/dt)
ddx = vectorops.mul(spline.hermite_deriv(x1,v1,x2,v2,u,order=2),1.0/pow(dt,2))
return dx+ddx
def discretize(self,dt):
"""Creates a discretized piecewise linear Trajectory in config space
that approximates this curve with resolution dt.
"""
res = self.discretize_state(dt)
n = len(res.milestones[0])//2
return Trajectory(res.times,[m[:n] for m in res.milestones])
def length(self) -> float:
"""Returns an upper bound on length given by the Bezier property.
Faster than calculating the true length. To retrieve an approximation
of true length, use self.discretize(dt).length().
"""
n = len(self.milestones[0])//2
third = 1.0/3.0
def distance(x,y):
cp0 = x[:n]
cp1 = vectorops.madd(cp0,x[n:],third)
cp3 = y[:n]
cp2 = vectorops.madd(cp3,y[n:],-third)
return third*vectorops.norm(x[n:]) + vectorops.distance(cp1,cp2) + third*vectorops.norm(y[n:])
return Trajectory.length(self,distance)
def checkValid(self):
Trajectory.checkValid(self)
for m in self.milestones:
if len(m)%2 != 0:
raise ValueError("Milestone length isn't even?: {} != {}".format(len(m)))
def extractDofs(self,dofs) -> 'HermiteTrajectory':
"""Returns a trajectory just over the given DOFs.
Args:
dofs (list of int): the (primary) indices to extract. Each entry
must be < len(milestones[0])/2.
Returns:
A copy of this trajectory but only over the given DOFs.
"""
if len(self.times)==0:
            return self.constructor()()
n = len(self.milestones[0])//2
for d in dofs:
if abs(d) >= n:
raise ValueError("Invalid dof")
        return self.constructor()([t for t in self.times],[[m[j] for j in dofs] + [m[n+j] for j in dofs] for m in self.milestones])
def stackDofs(self,trajs,strict=True) -> None:
"""Stacks the degrees of freedom of multiple trajectories together.
The result is contained in self.
All evaluations are assumed to take place with the 'halt' endBehavior.
Args:
trajs (list or tuple of HermiteTrajectory): the trajectories to
stack
strict (bool, optional): ignored. Will always warn for invalid
classes.
"""
if not isinstance(trajs,(list,tuple)):
raise ValueError("HermiteTrajectory.stackDofs takes in a list of trajectories as input")
for traj in trajs:
if not isinstance(traj,HermiteTrajectory):
raise ValueError("Can't stack non-HermiteTrajectory objects into a HermiteTrajectory")
alltimes = set()
for traj in trajs:
for t in traj.times:
alltimes.add(t)
self.times = sorted(alltimes)
stacktrajs = [traj.remesh(self.times) for traj in trajs]
for traj in stacktrajs:
assert len(traj.milestones) == len(self.times)
self.milestones = []
for i,t in enumerate(self.times):
q = []
v = []
for traj in stacktrajs:
n = len(traj.milestones[i])//2
q += list(traj.milestones[i][:n])
v += list(traj.milestones[i][n:])
self.milestones.append(q + v)
def constructor(self):
return HermiteTrajectory
class GeodesicHermiteTrajectory(Trajectory):
"""A trajectory that performs Hermite interpolation on a GeodesicSpace
using the DeCastlejau algorithm.
The milestones are a concatenation of the segment start point and the
outgoing Lie derivatives w.r.t. t. The incoming Lie derivative at the
segment end point is assumed to be the negative of the outgoing Lie
derivative.
Args:
geodesic (GeodesicSpace): the underlying space
times (list of floats, optional): the knot points defining each segment
milestones (list of lists, optional): the points at the ends of each
segment
outgoingLieDerivatives (list of lists, optional): the Lie derivatives
(velocities) at the ends of each segment.
Possible constructor options are:
- GeodesicHermiteTrajectory(geodesic): empty trajectory
- GeodesicHermiteTrajectory(geodesic,times,milestones): milestones contains
2N-D lists consisting of the concatenation of a point and its outgoing
Lie derivative.
- GeodesicHermiteTrajectory(geodesic,times,milestones,lieDerivatives):
milestones and lieDerivatives contain N-D lists defining the points and
outgoing Lie derivatives.
Note: the curve is assumed to be smooth. To make a non-smooth curve,
duplicate the knot point and milestone, but set a different Lie derivative
at the copy.
"""
def __init__(self,
geodesic: GeodesicSpace,
times: Optional[List[float]] = None,
milestones: Optional[List[Vector]] = None,
outgoingLieDerivatives: Optional[List[Vector]] = None
):
self.geodesic = geodesic
if outgoingLieDerivatives is not None:
assert milestones is not None
milestones = [list(a)+list(b) for (a,b) in zip(milestones,outgoingLieDerivatives)]
if milestones is not None:
            assert all(len(m)==geodesic.extrinsicDimension()*2 for m in milestones),"Milestones must be a concatenation of the point and its outgoing Lie derivative"
Trajectory.__init__(self,times,milestones)
self._skip_deriv = False
def makeSpline(self, waypointTrajectory: Trajectory, loop: bool=False) -> None:
"""Creates a spline from a set of waypoints, with smooth interpolation
between waypoints."""
if loop and waypointTrajectory.milestones[-1] != waypointTrajectory.milestones[0]:
print(waypointTrajectory.milestones[-1],"!=",waypointTrajectory.milestones[0])
raise ValueError("Asking for a loop trajectory but the endpoints don't match up")
velocities = []
t = waypointTrajectory
d = len(t.milestones[0])
third = 1.0/3.0
if len(t.milestones)==1:
velocities.append([0]*d)
elif len(t.milestones)==2:
if loop:
v = [0.0]*d
velocities = [v,v]
else:
s = (1.0/(t.times[1]-t.times[0]) if (t.times[1]-t.times[0]) != 0 else 0)
v = vectorops.mul(self.geodesic.difference(t.milestones[1],t.milestones[0]),s)
velocities.append(v)
v2 = vectorops.mul(self.geodesic.difference(t.milestones[0],t.milestones[1]),-s)
velocities.append(v2)
else:
N = len(waypointTrajectory.milestones)
if loop:
timeiter = zip([-2]+list(range(N-1)),range(0,N),list(range(1,N))+[1])
else:
timeiter = zip(range(0,N-2),range(1,N-1),range(2,N))
for p,i,n in timeiter:
if p < 0: dtp = t.times[-1] - t.times[-2]
else: dtp = t.times[i] - t.times[p]
if n <= i: dtn = t.times[1]-t.times[0]
else: dtn = t.times[n]-t.times[i]
assert dtp >= 0 and dtn >= 0
s2 = (1.0/dtn if dtn != 0 else 0)
v2 = vectorops.mul(self.geodesic.difference(t.milestones[n],t.milestones[i]),s2)
s1 = (1.0/dtp if dtp != 0 else 0)
v1 = vectorops.mul(self.geodesic.difference(t.milestones[p],t.milestones[i]),-s1)
v = vectorops.mul(vectorops.add(v1,v2),0.5)
velocities.append(v)
if not loop:
#start velocity as linear
v0 = vectorops.mul(self.geodesic.difference(t.milestones[1],t.milestones[0]),1.0/(t.times[1]-t.times[0]))
            #terminal velocity as linear
vn = vectorops.mul(self.geodesic.difference(t.milestones[-2],t.milestones[-1]),-1.0/(t.times[-1]-t.times[-2]))
velocities = [v0]+velocities+[vn]
else:
assert len(velocities) == N
GeodesicHermiteTrajectory.__init__(self,self.geodesic,waypointTrajectory.times[:],waypointTrajectory.milestones,velocities)
def makeBezier(self, times: Vector, controlPoints:List[Vector]) -> None:
"""Sets up this spline to perform Bezier interpolation of the given
control points, with segment 0 a Bezier curve on cps[0:3], segment 1 a
Bezier curve on cps[3:6], etc.
"""
nsegs = len(times)-1
if nsegs*3+1 != len(controlPoints):
raise ValueError("To perform Bezier interpolation, need # of controlPoints to be 3*Nsegs+1")
newtimes = []
milestones = []
outgoingLieDerivatives = []
for i in range(0,len(times)-1):
a,b,c,d = controlPoints[i*3:i*3+4]
dt = times[i+1]-times[i]
if dt <= 0: raise ValueError("Times must be strictly monotonically increasing")
lieDeriv0 = vectorops.mul(self.geodesic.difference(b,a),3/dt)
lieDeriv1 = vectorops.mul(self.geodesic.difference(c,d),-3/dt)
if i > 0:
if vectorops.distance(lieDeriv0,outgoingLieDerivatives[-1]) > 1e-4:
#need to double up knot point
newtimes.append(newtimes[-1])
milestones.append(milestones[-1])
outgoingLieDerivatives.append(lieDeriv0)
else:
newtimes.append(times[i])
milestones.append(a)
outgoingLieDerivatives.append(lieDeriv0)
newtimes.append(times[i+1])
milestones.append(d)
outgoingLieDerivatives.append(lieDeriv1)
GeodesicHermiteTrajectory.__init__(self,self.geodesic,newtimes,milestones,outgoingLieDerivatives)
def waypoint(self,state):
return state[:len(state)//2]
def interpolate_state(self,a,b,u,dt):
n = self.geodesic.extrinsicDimension()
assert len(a) == n*2
assert len(b) == n*2
c0 = a[:n]
v0 = a[n:]
c3 = b[:n]
v3 = b[n:]
third = 1.0/3.0
c1 = self.geodesic.integrate(c0,vectorops.mul(v0,third*dt))
c2 = self.geodesic.integrate(c3,vectorops.mul(v3,-third*dt))
d0 = self.geodesic.interpolate(c0,c1,u)
d1 = self.geodesic.interpolate(c1,c2,u)
d2 = self.geodesic.interpolate(c2,c3,u)
e0 = self.geodesic.interpolate(d0,d1,u)
e1 = self.geodesic.interpolate(d1,d2,u)
f = self.geodesic.interpolate(e0,e1,u)
if self._skip_deriv:
v = [0.0]*n
else:
#since it's difficult to do the derivatives analytically, do finite differences instead
eps = 1e-6
u2 = u + eps
d0 = self.geodesic.interpolate(c0,c1,u2)
d1 = self.geodesic.interpolate(c1,c2,u2)
d2 = self.geodesic.interpolate(c2,c3,u2)
e0 = self.geodesic.interpolate(d0,d1,u2)
e1 = self.geodesic.interpolate(d1,d2,u2)
f2 = self.geodesic.interpolate(e0,e1,u2)
v = vectorops.mul(self.geodesic.difference(f2,f),1.0/eps)
return f + v
def difference_state(self,a,b,u,dt):
raise NotImplementedError("Can't do derivatives of Bezier geodesic yet")
def eval_state(self,t,endBehavior='halt'):
"""Returns the (configuration,velocity) state at time t."""
return Trajectory.eval_state(self,t,endBehavior)
def eval(self,t,endBehavior='halt'):
"""Evaluates the configuration at time t"""
self._skip_deriv = True
res = Trajectory.eval_state(self,t,endBehavior)
self._skip_deriv = False
return res[:len(res)//2]
def deriv(self,t,endBehavior='halt'):
"""Returns the velocity at time t"""
res = Trajectory.eval_state(self,t,endBehavior)
return res[len(res)//2:]
    def length(self,metric=None):
        """Returns an upper bound on length given by the Bezier property.
        Faster than calculating the true length. To retrieve an approximation
        of true length, use self.discretize(dt).length().
        If metric is None, the geodesic's distance function is used.
        """
        if metric is None:
            metric = self.geodesic.distance
        n = self.geodesic.extrinsicDimension()
        third = 1.0/3.0
        l = 0
        for i,(a,b) in enumerate(zip(self.milestones[:-1],self.milestones[1:])):
            dt = self.times[i+1]-self.times[i]
            c0 = a[:n]
            v0 = vectorops.mul(a[n:],dt)
            c3 = b[:n]
            v3 = vectorops.mul(b[n:],dt)
            c1 = self.geodesic.integrate(c0,vectorops.mul(v0,third))
            c2 = self.geodesic.integrate(c3,vectorops.mul(v3,-third))
            l += metric(c0,c1)
            l += metric(c1,c2)
            l += metric(c2,c3)
        return l
def discretize(self,dt):
"""Creates a discretized piecewise-geodesic (GeodesicTrajectory)
approximation of this curve in config space, with resolution dt.
"""
self._skip_deriv = True
res = self.discretize_state(dt)
self._skip_deriv = False
n = self.geodesic.extrinsicDimension()
return GeodesicTrajectory(self.geodesic,res.times,[m[:n] for m in res.milestones])
def checkValid(self):
Trajectory.checkValid(self)
n = self.geodesic.extrinsicDimension()
for m in self.milestones:
if len(m) != n*2:
raise ValueError("Invalid length of milestone: {} != {}*2".format(len(m),n))
def extractDofs(self,dofs):
"""Invalid for GeodesicHermiteTrajectory."""
raise ValueError("Cannot extract DOFs from a GeodesicHermiteTrajectory")
def stackDofs(self,trajs,strict=True):
raise ValueError("Cannot stack DOFs for a GeodesicHermiteTrajectory")
def constructor(self):
return lambda times,milestones:GeodesicHermiteTrajectory(self.geodesic,times,milestones)
class SO3HermiteTrajectory(GeodesicHermiteTrajectory):
"""A trajectory that performs Hermite interpolation in SO3. Each milestone
is 18-D, consisting of a 9-D :mod:`klampt.math.so3` element and its
subsequent Lie derivative.
Args:
times (list of float): knot points.
milestones (list of 9-D lists): list of waypoint orientations.
outgoingLieDerivatives (list of 9-D lists): list of Lie derivatives,
i.e. cross product matrix (:func:`~klampt.math.so3.cross_product`)
for each angular velocity.
"""
def __init__(self,
times: Optional[List[float]] = None,
milestones: Optional[List[Vector]] = None,
outgoingLieDerivatives: Optional[List[Vector]] = None
):
GeodesicHermiteTrajectory.__init__(self,SO3Space(),times,milestones,outgoingLieDerivatives)
def preTransform(self, R: Rotation) -> None:
"""Premultiplies every rotation in here by the so3 element
R. In other words, if R rotates a local frame F to frame F',
this method converts this SO3HermiteTrajectory from coordinates in F
to coordinates in F'"""
for i,m in enumerate(self.milestones):
assert len(m) == 18
mq = m[:9]
mv = m[9:]
self.milestones[i] = so3.mul(R,mq) + so3.mul(R,mv)
def deriv_angvel(self, t: float, endBehavior: str = 'halt') -> Vector3:
"""Returns the derivative at t, in angular velocity form"""
        dR = GeodesicHermiteTrajectory.deriv(self,t,endBehavior)
return so3.deskew(dR)
def postTransform(self, R:Rotation) -> None:
"""Postmultiplies every rotation in here by the se3 element
R. In other words, if R rotates a local frame F to frame F',
this method converts this SO3HermiteTrajectory from describing how F'
rotates to how F rotates."""
for i,m in enumerate(self.milestones):
assert len(m) == 18
mq = m[:9]
mv = m[9:]
self.milestones[i] = so3.mul(mq,R) + so3.mul(mv,R)
def discretize(self,dt):
self._skip_deriv = True
res = self.discretize_state(dt)
self._skip_deriv = False
n = 9
return SO3Trajectory(res.times,[m[:n] for m in res.milestones])
def constructor(self):
return SO3HermiteTrajectory
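    # Sketch (illustrative): smooth orientation interpolation through waypoints.
    #   R0 = so3.identity(); R1 = so3.rotation([0, 0, 1], 1.0); R2 = so3.rotation([0, 1, 0], 0.5)
    #   rot_spline = SO3HermiteTrajectory()
    #   rot_spline.makeSpline(SO3Trajectory([0.0, 1.0, 2.0], [R0, R1, R2]))
    #   rot_spline.eval(0.5)          # 9-D so3 element
    #   rot_spline.deriv_angvel(0.5)  # 3-D angular velocity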
class SE3HermiteTrajectory(GeodesicHermiteTrajectory):
"""A trajectory that performs Bezier interpolation in SE3. Each milestone
is 24-D, consisting of a 12-D flattened :mod:`klampt.math.se3` element and
its subsequent Lie derivative.
"""
def __init__(self,times=None,milestones=None,outgoingLieDerivatives=None):
if milestones is not None and len(milestones) > 0 and len(milestones[0])==2:
milestones = [R+t for (R,t) in milestones]
if outgoingLieDerivatives is not None and len(outgoingLieDerivatives) > 0 and len(outgoingLieDerivatives[0])==2:
outgoingLieDerivatives = [R+t for (R,t) in outgoingLieDerivatives]
GeodesicHermiteTrajectory.__init__(self,SE3Space(),times,milestones,outgoingLieDerivatives)
def to_se3(self, state: Vector) -> RigidTransform:
"""Converts a state parameter vector to a klampt.se3 element"""
return (state[:9],state[9:12])
def from_se3(self, T: RigidTransform) -> Vector:
"""Converts a klampt.se3 element to a state parameter vector"""
return list(T[0]) + list(T[1])
def waypoint(self,state):
return self.to_se3(state)
def eval(self, t: float, endBehavior: str = 'halt') -> RigidTransform:
"""Returns an SE3 element"""
res = GeodesicHermiteTrajectory.eval(self,t,endBehavior)
return self.to_se3(res)
def deriv(self, t: float, endBehavior: str = 'halt') -> RigidTransform:
"""Returns the derivative as the derivatives of an SE3
element"""
res = GeodesicHermiteTrajectory.deriv(self,t,endBehavior)
return self.to_se3(res[:12])
def deriv_screw(self, t: float, endBehavior: str = 'halt') -> Vector:
"""Returns the derivative at t, in screw vector form, that is, a 6D
vector (angular velocity, velocity)."""
dT = self.deriv(t,endBehavior)
return so3.deskew(dT[0])+dT[1]
def preTransform(self, T: RigidTransform) -> None:
"""Premultiplies every transform in here by the se3 element T. In other
words, if T transforms a local frame F to frame F', this method
converts this SE3HermiteTrajectory from coordinates in F to coordinates
in F'"""
for i,m in enumerate(self.milestones):
assert len(m) == 24
mq = self.to_se3(m[:12])
mv = self.to_se3(m[12:])
self.milestones[i] = self.from_se3(se3.mul(T,mq)) + self.from_se3((so3.mul(T[0],mv[0]),so3.apply(T[0],mv[1])))
def postTransform(self,T: RigidTransform) -> None:
"""Postmultiplies every transform in here by the se3 element
R. In other words, if R rotates a local frame F to frame F',
this method converts this SO3Trajectory from describing how F'
rotates to how F rotates."""
for i,m in enumerate(self.milestones):
assert len(m) == 24
mq = self.to_se3(m[:12])
mv = self.to_se3(m[12:])
self.milestones[i] = self.from_se3(se3.mul(mq,T)) + self.from_se3((so3.mul(mv[0],T[0]),so3.apply(so3.inv(T[0]),mv[1])))
def discretize(self,dt):
self._skip_deriv = True
res = self.discretize_state(dt)
self._skip_deriv = False
n = 12
return SE3Trajectory(res.times,[m[:n] for m in res.milestones])
def constructor(self):
return SE3HermiteTrajectory
try:
from typing import Literal
_VELOCITIES_OPTIONS = Literal['auto','trapezoidal','constant','triangular','parabolic','cosine','minimum-jerk','optimal']
_TIMING_OPTIONS = Literal['limited','uniform','path','L2','Linf','robot','sqrt-L2','sqrt-Linf','sqrt-robot']
_SMOOTHING_OPTIONS = Literal['linear','cubic','spline','ramp']
_SMOOTHING_OPTIONS2 = Literal['spline','pause']
except ImportError:
_VELOCITIES_OPTIONS = str
_TIMING_OPTIONS = str
_SMOOTHING_OPTIONS = str
_SMOOTHING_OPTIONS2 = str
def path_to_trajectory(
path: Union[Sequence[Vector],Trajectory,RobotTrajectory],
velocities: _VELOCITIES_OPTIONS = 'auto',
timing: Union[_TIMING_OPTIONS,List[float],MetricType]= 'limited',
smoothing: str='spline',
stoptol: Optional[float] = None,
vmax: Union[str,float,Vector] = 'auto',
amax: Union[str,float,Vector] = 'auto',
        speed: Union[float,str] = 1.0,
dt: float = 0.01,
startvel: float = 0.0,
endvel: float = 0.0,
verbose: int = 0
) -> Trajectory:
"""Converts an untimed path to a timed trajectory.
The resulting trajectory passes through each of the milestones **without
stopping**, except for "stop" milestones. Stop milestones by default are
only the first and last milestone, but if ``stoptol`` is given, then the
trajectory will be stopped if the curvature of the path exceeds this value.
The first step is to assign each segment a 'distance' d[i] suggesting how
much time it would take to traverse that much spatial distance. This
distance assignment method is controlled by the ``timing`` parameter.
The second step is to smooth the spline, if smoothing='spline' is given
(default).
The third step creates the trajectory, assigning the times and velocity
profile as specified by the ``velocities`` parameter. ``velocities``
dictates how the overall velocity profile behaves from beginning to end,
and basically, each profile gradually speeds up and slows down. The
path length L = :math:`\sum_{i=1}^n d[i]` determines the duration T of
the trajectory, as follows:
- For constant velocity profiles, T=L.
- For trapezoidal, triangular, parabolic, and cosine, T = sqrt(L).
- For minimum-jerk, T = L^(1/3).
The fourth step is to time scale the result to respect limits velocity /
acceleration limits, if timing=='limited' or speed=='limited'.
The fifth step is to time scale the result by speed.
.. note::
There are only some meaningful combinations of arguments:
        - velocities='auto'; timing='limited': generates a timed spline using a
heuristic and then revises it to respect velocity and acceleration limits.
The limited timing heuristic works best when the milestones are widely
spaced.
Be sure to specify vmax and amax if you don't have a RobotTrajectory.
- velocities='auto', 'trapezoidal', 'triangular', 'parabolic', 'cosine', or
'minimum-jerk';
timing='L2', 'Linf', 'robot', 'sqrt-L2', 'sqrt-Linf', or 'sqrt-robot':
an entirely heuristic approach.
The sqrt values lead to somewhat better tangents for smoothed splines with
nonuniform distances between milestones.
In these cases, vmax and amax are ignored.
- If path uses non-Euclidean interpolation, then smoothing=None should be
provided. Smoothing is not yet supported for non-Euclidean spaces (e.g.,
robots with special joints, SO3, SE3).
Args:
path: a list of milestones, a trajectory, or a RobotTrajectory. In the
latter cases, if durations = 'path' then the durations are extracted
from the trajectory's timing.
velocities (str, optional): the manner in which velocities are assigned
along the path. Can be:
- 'auto' (default): if timing is 'limited', this is equivalent to
'constant'. Otherwise, this is equivalent to 'trapezoidal'.
- 'trapezoidal': a trapezoidal velocity profile with max
acceleration and velocity. If timing is 'limited', the velocity
max is determined by vmax. Otherwise, the ramp
up proceeds for 1/4 of the time, stays constant 1/2 of the time,
and then ramps down for 1/4 of the time.
- 'constant': the path is executed at fixed constant velocity
- 'triangular': velocities are ramped up for 1/2 of the duration
and then ramped back down.
- 'parabolic': a parabolic curve (output is a Hermite spline)
- 'cosine': velocities follow (1-cosine)/2
- 'minimum-jerk': minimum jerk velocities
- 'optimal': uses time scaling optimization. NOT IMPLEMENTED YET
timing (optional): affects how path timing between milestones is
normalized. Valid options are:
- 'limited' (default): uses the vmax, amax variables along with
the velocity profile to dynamically determine the duration
assigned to each segment.
- 'uniform': base timing between milestones is uniform
(1/(\|path\|*speed))
- 'path': only valid if path is a Trajectory object. Uses the
timing in path.times as the base timing.
- 'L2': base timing is set proportional to L2 distance between
milestones
- 'Linf': base timing is set proportional to L-infinity distance
between milestones
- 'robot': base timing is set proportional to robot's distance
function between milestones
- 'sqrt-L2', 'sqrt-Linf', or 'sqrt-robot': base timing is set
proportional to the square root of the L2, Linf, or robot
distance between milestones
- a list or tuple: the base timing is given in this list
- callable function f(a,b): sets the normalization to the function
f(a,b).
smoothing (str, optional): if 'spline', the geometric path is first
smoothed before assigning times. Otherwise, the geometric path
is interpreted as a piecewise linear path.
stoptol (float, optional): determines how start/stop segments are
determined. If None, the trajectory only pauses at the start and
end of the path. If 0, it pauses at every milestone. Otherwise,
it pauses if the curvature at the milestone exceeds stoptol.
vmax (optional): only meaningful if timing=='limited'. Can be:
- 'auto' (default): either 1 or the robot's joint velocity limits
if a RobotTrajectory is provided
- a positive number: the L2 norm of the derivative of the result
trajectory is limited to this value
- a list of positive floats: the element-wise derivative of the
result trajectory is limited to this value
amax (optional): only meaningful if timing=='limited'. Can be:
- 'auto' (default): either 4 or the robot's joint acceleration
limits if a RobotTrajectory is provided
- a positive number: the L2 norm of the acceleration of the result
trajectory is limited to this value
- a list of positive floats: the element-wise acceleration of the
result trajectory is limited to this value.
speed (float or str, optional): if a float, this is a speed multiplier
applied to the resulting trajectory. This can also be 'limited',
which applies the velocity and acceleration limits.
dt (float, optional): the resolution of the resulting trajectory.
Default 0.01.
startvel (float, optional): the starting velocity of the path, given as
a multiplier of path[1]-path[0]. Must be nonnegative.
Note: might not be respected for some velocity profiles.
.. warning::
NOT IMPLEMENTED YET
endvel (float, optional): the ending velocity of the path, given as a
multiplier of path[-1]-path[-2]. Must be nonnegative.
Note: might not be respected for some velocity profiles.
.. warning::
NOT IMPLEMENTED YET
verbose (int, optional): if > 0, some debug printouts will be given.
Returns:
A finely-discretized, timed trajectory that is C1 continuous
and respects the limits defined in the arguments.
"""
assert dt > 0.0,"dt has to be positive"
if vmax == 'auto' and (timing == 'limited' or speed == 'limited'):
if isinstance(path,RobotTrajectory):
vmax = path.robot.getVelocityLimits()
else:
vmax = 1.0
if amax == 'auto' and (timing == 'limited' or speed == 'limited'):
if isinstance(path,RobotTrajectory):
amax = path.robot.getAccelerationLimits()
else:
amax = 4.0
if isinstance(speed,(int,float)) and speed != 1.0:
if not (speed > 0):
raise ValueError("Invalid value for speed, must be positive")
dt *= speed
startvel /= speed
endvel /= speed
milestones = path
if isinstance(path,Trajectory):
milestones = path.milestones
_durations = None
if isinstance(timing,(list,tuple)):
_durations = timing
elif callable(timing):
_durations = [timing(a,b) for a,b in zip(milestones[:-1],milestones[1:])]
else:
if isinstance(path,Trajectory):
if timing == 'path':
_durations = [(b-a) for a,b in zip(path.times[:-1],path.times[1:])]
if _durations is None:
if timing == 'limited':
if hasattr(vmax,'__iter__'):
if not all(v >= 0 for v in vmax):
raise ValueError("Invalid value for vmax, must be positive")
else:
if not vmax >= 0:
raise ValueError("Invalid value for vmax, must be positive")
if hasattr(amax,'__iter__'):
if not all(v >= 0 for v in amax):
raise ValueError("Invalid value for amax, must be positive")
else:
if not amax >= 0:
raise ValueError("Invalid value for amax, must be positive")
_durations = [0.0]*(len(milestones)-1)
for i in range(len(milestones)-1):
q,n = milestones[i],milestones[i+1]
if i == 0: p = q
else: p = milestones[i-1]
if i+2 == len(milestones): nn = n
else: nn = milestones[i+2]
if isinstance(path,Trajectory):
v = vectorops.mul(path.difference_state(p,n,0.5,1.0),0.5)
a1 = vectorops.sub(path.difference_state(q,n,0.,1.),path.difference_state(p,q,1.,1.))
a2 = vectorops.sub(path.difference_state(n,nn,0.,1.),path.difference_state(q,n,1.,1.))
else:
v = vectorops.mul(vectorops.sub(n,p),0.5)
a1 = vectorops.madd(vectorops.add(p,n),q,-2.0)
a2 = vectorops.madd(vectorops.add(q,nn),n,-2.0)
if hasattr(vmax,'__iter__'):
for j,(x,lim) in enumerate(zip(v,vmax)):
if abs(x) > lim*_durations[i]:
_durations[i] = abs(x)/lim
#print("Segment",i,"limited on axis",j,"path velocity",x,"limit",lim)
else:
_durations[i] = vectorops.norm(v)/vmax
if hasattr(amax,'__iter__'):
if i > 0:
for j,(x,lim) in enumerate(zip(a1,amax)):
if abs(x) > lim*_durations[i]**2:
_durations[i] = math.sqrt(abs(x)/lim)
#print("Segment",i,"limited on axis",j,"path accel",x,"limit",lim)
if i+2 < len(milestones):
for j,(x,lim) in enumerate(zip(a2,amax)):
if abs(x) > lim*_durations[i]**2:
_durations[i] = math.sqrt(abs(x)/lim)
#print("Segment",i,"limited on axis",j,"outgoing path accel",x,"limit",lim)
else:
if i > 0:
n = vectorops.norm(a1)
if n > amax*_durations[i]**2:
_durations[i] = math.sqrt(n/amax)
if i+2 < len(milestones):
n = vectorops.norm(a2)
if n > amax*_durations[i]**2:
_durations[i] = math.sqrt(n/amax)
else:
durationfuncs = dict()
durationfuncs['L2'] = vectorops.distance
durationfuncs['Linf'] = lambda a,b:max(abs(u-v) for (u,v) in zip(a,b))
durationfuncs['sqrt-L2'] = lambda a,b:math.sqrt(vectorops.distance(a,b))
durationfuncs['sqrt-Linf'] = lambda a,b:math.sqrt(max(abs(u-v) for (u,v) in zip(a,b)))
if hasattr(path,'robot'):
durationfuncs['robot'] = path.robot.distance
durationfuncs['sqrt-robot'] = lambda a,b:math.sqrt(path.robot.distance(a,b))
assert timing in durationfuncs,"Invalid duration function specified, valid values are: "+", ".join(list(durationfuncs.keys()))
timing = durationfuncs[timing]
_durations = [timing(a,b) for a,b in zip(milestones[:-1],milestones[1:])]
assert _durations is not None,"Hmm... didn't assign durations properly?"
if verbose >= 1:
print("path_to_trajectory(): Segment durations are",_durations)
#by this time we have all milestones and durations
if stoptol is not None:
splits = [0]
#split the trajectory then reassemble it
for i in range(1,len(milestones)-1):
prev = milestones[i-1]
q = milestones[i]
next = milestones[i+1]
acc = vectorops.madd(vectorops.add(prev,next),q,-2.0)
if vectorops.norm(acc) > stoptol*(_durations[i]*_durations[i-1]):
splits.append(i)
splits.append(len(milestones)-1)
if len(splits) > 2:
if verbose >= 1:
print("path_to_trajectory(): Splitting path into",len(splits)-1,"segments, starting and stopping between")
res = None
for i in range(len(splits)-1):
a,b = splits[i],splits[i+1]
segmentspeed = (1.0 if isinstance(speed,(int,float)) else speed)
traj = path_to_trajectory(milestones[a:b+1],velocities,timing,smoothing,
None,vmax,amax,
segmentspeed,dt)
if res is None:
res = traj
else:
if res.milestones[-1] != traj.milestones[0]: #may have hermite spline interpolation problems
res.times.append(res.times[-1])
res.milestones.append(traj.milestones[0])
res = res.concat(traj,relative=True)
if isinstance(speed,(int,float)) and speed != 1.0:
res.times = vectorops.mul(res.times,1.0/speed)
return res
#canonical case:
#milestones and _durations are lists
#start and stop at beginning / end
#speed = 1 or 'limited'
normalizedPath = Trajectory()
if isinstance(path,RobotTrajectory):
normalizedPath = RobotTrajectory(path.robot)
normalizedPath.milestones = milestones
normalizedPath.times = [0]
totaldistance = 0
for d in _durations:
totaldistance += d
normalizedPath.times.append(totaldistance)
if startvel != 0.0 or endvel != 0.0:
print("path_to_trajectory(): WARNING: respecting nonzero start/end velocity not implemented yet")
if smoothing == 'spline':
hpath = HermiteTrajectory()
hpath.makeSpline(normalizedPath)
normalizedPath = hpath
#print("path_to_trajectory(): Total distance",totaldistance)
if totaldistance == 0.0:
return normalizedPath
finalduration = totaldistance
evmax = 1
eamax = 0
if velocities == 'auto':
if timing == 'limited':
velocities = 'constant'
else:
velocities = 'trapezoidal'
if velocities == 'constant':
easing = lambda t: t
evmax = 1.0
eamax = 0.0
elif velocities == 'trapezoidal' or velocities == 'triangular':
easing = lambda t: 2*t**2 if t < 0.5 else 1.0-(2*(1.0-t)**2)
evmax = 2.0
eamax = 2.0
if velocities == 'trapezoidal' and timing != 'limited':
#ramp up c t^2 until 0.25
#velocity 2 c t, ending velocity c/2, ending point c/16
#continue for 0.5, ending point c/16 + c/4
#ramp down for distance c/16, total distance c/8 + c/4 = 1 => c = 8/3
easing = lambda t: 8.0/3.0*t**2 if t < 0.25 else (1.0-(8.0/3.0*(1.0-t)**2) if t > 0.75 else 1.0/6.0 + 4.0/3.0*(t-0.25))
finalduration = math.sqrt(totaldistance)
elif velocities == 'cosine':
easing = lambda t: 0.5*(1.0-math.cos(t*math.pi))
evmax = math.pi*0.5 #pi/2 sin (t*pi)
eamax = math.pi**2*0.5 #pi**2/2 cos(t*pi)
finalduration = math.sqrt(totaldistance)
elif velocities == 'parabolic':
easing = lambda t: -2*t**3 + 3*t**2
        evmax = 1.5  #-6t^2 + 6t
eamax = 6 #-12t + 6
finalduration = math.sqrt(totaldistance)
elif velocities == 'minimum-jerk':
easing = lambda t: 10.0*t**3 - 15.0*t**4 + 6.0*t**5
evmax = 15*0.25 #30t^2 - 60t*3 + 30t^4 => 1/4*(30 - 30 + 30/4)= 30/8
t = 1.0 + math.sqrt(1.0/3.0)
eamax = 30*t - 45*t**2 + 15*t**3 #60t - 180t*2 + 120t^3 => max at 1/6 - t + t^2 = 0 => t = (1 +/- sqrt(1 - 4/6))/2 = 1/2 +/- 1/2 sqrt(1/3)
#30(1 + sqrt(1/3)) - 45(1 + sqrt(1/3))^2 + 15(1 + sqrt(1/3))^3
finalduration = math.pow(totaldistance,1.0/3.0)
else:
raise NotImplementedError("Can't do velocity profile "+velocities+" yet")
if timing == 'limited':
#print("Easing velocity max",evmax,"acceleration max",eamax)
#print("Velocity and acceleration-limited segment distances",_durations)
#print("Total distance traveled",totaldistance)
finalduration = totaldistance*evmax
#y(t) = p(L*e(t/T))
#y'(t) = p'(L*e(t)/T)*e'(t) L/T
#y''(t) = p''(e(t))*e'(t)^2(L/T)^2 + p'(e(t))*e''(t) (L/T)^2
#assume |p'(u)| <= vmax, |p''(u)| <= amax
#set T so that |p'(u)| e'(t) L/T <= |p'(u)| evmax L/T <= vmax evmax L/T <= vmax
#set T so that |p''(u)| evmax^2 (L/T)^2 + |p'(u)|*e''(t) (L/T)^2 <= (amax evmax^2 + vmax eamax) (L/T)^2 <= amax
#T >= L sqrt(evmax^2 + vmax/amax eamax)
if finalduration < totaldistance*math.sqrt(evmax**2 + eamax):
finalduration = totaldistance*math.sqrt(evmax**2 + eamax)
if verbose >= 1:
print("path_to_trajectory(): Setting first guess of path duration to",finalduration)
res = normalizedPath.constructor()()
if finalduration == 0:
if verbose >= 1:
print("path_to_trajectory(): there is no movement in the path, returning a 0-duration path")
res.times = [0.0,0.0]
res.milestones = [normalizedPath.milestones[0],normalizedPath.milestones[0]]
return res
N = int(math.ceil(finalduration/dt))
assert N > 0
dt = finalduration / N
res.times=[0.0]*(N+1)
res.milestones = [None]*(N+1)
res.milestones[0] = normalizedPath.milestones[0][:]
dt = finalduration/float(N)
#print(velocities,"easing:")
for i in range(1,N+1):
res.times[i] = float(i)/float(N)*finalduration
u = easing(float(i)/float(N))
#print(float(i)/float(N),"->",u)
res.milestones[i] = normalizedPath.eval_state(u*totaldistance)
if timing == 'limited' or speed == 'limited':
scaling = 0.0
vscaling = 0.0
aLimitingTime = 0
vLimitingTime = 0
for i in range(N):
q,n = res.waypoint(res.milestones[i]),res.waypoint(res.milestones[i+1])
if i == 0: p = q
else: p = res.waypoint(res.milestones[i-1])
if isinstance(path,Trajectory):
v = path.difference_state(p,n,0.5,dt*2.0)
a = vectorops.sub(path.difference_state(q,n,0.,dt),path.difference_state(p,q,1.,dt))
a = vectorops.div(a,dt)
else:
v = vectorops.div(vectorops.sub(n,p),dt*2.0)
a = vectorops.div(vectorops.madd(vectorops.add(p,n),q,-2.0),dt**2)
if not hasattr(vmax,'__iter__'):
n = vectorops.norm(v)
if n > vmax*scaling:
#print("path segment",i,"exceeded scaling",scaling,"by |velocity|",n,' > ',vmax*scaling)
vscaling = n/vmax
vLimitingTime = i
else:
for x,lim in zip(v,vmax):
if abs(x) > lim*vscaling:
#print("path segment",i,"exceeded scaling",scaling,"by velocity",x,' > ',lim*scaling)
#print("Velocity",v)
vscaling = abs(x)/lim
vLimitingTime = i
if i == 0:
continue
if not hasattr(amax,'__iter__'):
n = vectorops.norm(a)
if n > amax*scaling**2:
#print("path segment",i,"exceeded scaling",scaling,"by |acceleration|",n,' > ',amax*scaling**2)
scaling = math.sqrt(n/amax)
aLimitingTime = i
else:
for x,lim in zip(a,amax):
if abs(x) > lim*scaling**2:
#print("path segment",i,"exceeded scaling",scaling,"by acceleration",x,' > ',lim*scaling**2)
#print(p,q,n)
#print("Velocity",v)
#print("Previous velocity",path.difference(p,q,1.,dt))
scaling = math.sqrt(abs(x)/lim)
aLimitingTime = i
if verbose >= 1:
print("path_to_trajectory(): Base traj exceeded velocity limit by factor of",vscaling,"at time",res.times[vLimitingTime]*max(scaling,vscaling))
print("path_to_trajectory(): Base traj exceeded acceleration limit by factor of",scaling,"at time",res.times[aLimitingTime]*max(scaling,vscaling))
if velocities == 'trapezoidal':
#speed up until vscaling is hit
if vscaling < scaling:
if verbose >= 1:
print("path_to_trajectory(): Velocity maximum not hit")
else:
if verbose >= 1:
print("path_to_trajectory(): TODO: fiddle with velocity maximum.")
scaling = max(vscaling,scaling)
res.times = [t*scaling for t in res.times]
else:
scaling = max(vscaling,scaling)
if verbose >= 1:
print("path_to_trajectory(): Velocity / acceleration limiting yields a time expansion of",scaling)
res.times = vectorops.mul(res.times,scaling)
if isinstance(speed,(int,float)) and speed != 1.0:
res.times = vectorops.mul(res.times,1.0/speed)
return res
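#Illustrative sketch (not part of the original module): a minimal call to
#path_to_trajectory using a short list of 2-DOF milestones. The milestone
#values and limits below are placeholders chosen only to exercise the
#documented 'limited' timing and 'trapezoidal' velocity profile.
def _example_path_to_trajectory():
    milestones = [[0.0,0.0],[1.0,0.5],[2.0,0.0]]
    #'limited' timing sizes each segment from vmax/amax; 'spline' smoothing
    #converts the piecewise linear path to a Hermite spline before timing.
    return path_to_trajectory(milestones,velocities='trapezoidal',timing='limited',
                              smoothing='spline',stoptol=None,vmax=1.0,amax=4.0,
                              speed=1.0,dt=0.05)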
def execute_path(
path: List[Vector],
controller: Union['SimRobotController','RobotInterfaceBase'],
speed: float = 1.0,
smoothing: Optional[_SMOOTHING_OPTIONS] = None,
activeDofs: Optional[List[Union[int,str]]] = None
):
"""Sends an untimed trajectory to a controller.
If smoothing = None, the path will be executed as a sequence of go-to
commands, starting and stopping at each milestone. Otherwise, it will
be smoothed somehow and sent to the controller as faithfully as possible.
Args:
path (list of Configs): a list of milestones
controller (SimRobotController or RobotInterfaceBase): the controller
to execute the path.
speed (float, optional): if given, changes the execution speed of the
path. Not valid with smoothing=None or 'ramp'.
smoothing (str, optional): any smoothing applied to the path. Valid
values are:
- None: starts / stops at each milestone, moves in linear joint-space
paths. Trapezoidal velocity profile used. This is most useful for
executing paths coming from a kinematic motion planner.
- 'linear': interpolates milestones linearly with fixed duration.
Constant velocity profile used.
- 'cubic': interpolates milestones with cubic spline with fixed
duration. Parabolic velocity profile used. Starts/stops at each
milestone.
- 'spline': interpolates milestones smoothly with some differenced
velocity.
- 'ramp': starts / stops at each milestone, moves in minimum-time /
minimum-acceleration paths. Trapezoidal velocity profile used.
activeDofs (list, optional): if not None, a list of dofs that are moved
by the trajectory. Each entry may be an integer or a string.
"""
if len(path)==0: return #be tolerant of empty paths?
if speed <= 0: raise ValueError("Speed must be positive")
from ..control.robotinterface import RobotInterfaceBase
from ..robotsim import SimRobotController
if isinstance(controller,SimRobotController):
robot_model = controller.model()
q0 = controller.getCommandedConfig()
elif isinstance(controller,RobotInterfaceBase):
robot_model = controller.klamptModel()
cq0 = controller.commandedPosition()
if cq0[0] is None:
cq0 = controller.sensedPosition()
q0 = controller.configFromKlampt(cq0)
else:
raise ValueError("Invalid type of controller, must be SimRobotController or RobotInterfaceBase")
if activeDofs is not None:
        indices = [robot_model.link(d).getIndex() for d in activeDofs]
liftedMilestones = []
for m in path:
assert(len(m)==len(indices))
q = q0[:]
for i,v in zip(indices,m):
q[i] = v
liftedMilestones.append(q)
return execute_path(liftedMilestones,controller,speed,smoothing)
if smoothing == None:
if isinstance(controller,SimRobotController):
if speed != 1.0: raise ValueError("Can't specify speed with no smoothing")
controller.setMilestone(path[0])
for i in range(1,len(path)):
controller.addMilestoneLinear(path[i])
else:
vmax = robot_model.getVelocityLimits()
amax = robot_model.getAccelerationLimits()
if speed != 1.0:
vmax = vectorops.mul(vmax,speed)
amax = vectorops.mul(amax,speed**2)
htraj = HermiteTrajectory()
if q0 != path[0]:
mpath = [q0] + path
else:
mpath = path
htraj.makeMinTimeSpline(mpath,vmax=vmax,amax=amax)
return execute_trajectory(htraj,controller)
elif smoothing == 'linear':
dt = 1.0/speed
if isinstance(controller,SimRobotController):
controller.setLinear(dt,path[0])
for i in range(1,len(path)):
controller.addLinear(dt,path[i])
else:
traj = Trajectory()
traj.times,traj.milestones = [0],[q0]
for i in range(len(path)):
if i==0 and q0 == path[i]: continue #skip first milestone
traj.times.append(traj.times[-1]+dt)
traj.milestones.append(path[i])
return execute_trajectory(traj,controller)
elif smoothing == 'cubic':
dt = 1.0/speed
if isinstance(controller,SimRobotController):
zero = [0.0]*len(path[0])
controller.setCubic(dt,path[0],zero)
for i in range(1,len(path)):
controller.addCubic(dt,path[i],zero)
else:
zero = [0.0]*controller.numJoints()
times,milestones = [0],[q0]
for i in range(len(path)):
if i==0 and q0 == path[i]: continue #skip first milestone
times.append(times[-1]+dt)
milestones.append(path[i])
htraj = HermiteTrajectory(times,milestones,[zero]*len(milestones))
return execute_trajectory(htraj,controller)
elif smoothing == 'spline':
dt = 1.0/speed
times = [0]
mpath = [q0]
for i in range(len(path)):
if i==0 and path[0]==q0: continue
times.append(times[-1]+dt)
mpath.append(path[i])
hpath = HermiteTrajectory()
hpath.makeSpline(Trajectory(times,mpath))
return execute_trajectory(hpath,controller)
elif smoothing == 'ramp':
if isinstance(controller,SimRobotController):
if speed != 1.0: raise ValueError("Can't specify speed with ramp smoothing")
controller.setMilestone(path[0])
for i in range(1,len(path)):
controller.addMilestone(path[i])
else:
cv0 = controller.commandedVelocity()
if cv0[0] == None:
cv0 = controller.sensedVelocity()
v0 = controller.velocityFromKlampt(cv0)
xmin,xmax = robot_model.getJointLimits()
vmax = robot_model.getVelocityLimits()
amax = robot_model.getAccelerationLimits()
if speed != 1.0:
vmax = vectorops.mul(vmax,speed)
amax = vectorops.mul(amax,speed**2)
zero = [0.0]*len(q0)
if q0 != path[0]:
mpath = [q0] + path
mvels = [v0] + [zero]*len(path)
else:
mpath = path
mvels = [v0] + [zero]*(len(path)-1)
zero = [0.0]*len(q0)
htraj = HermiteTrajectory()
htraj.makeMinTimeSpline(mpath,mvels,xmin=xmin,xmax=xmax,vmax=vmax,amax=amax)
return execute_trajectory(htraj,controller)
else:
raise ValueError("Invalid smoothing method specified")
def execute_trajectory(
trajectory: Trajectory,
controller: Union['SimRobotController','RobotInterfaceBase'],
speed: float = 1.0,
smoothing: Optional[_SMOOTHING_OPTIONS2] = None,
activeDofs: Optional[List[Union[int,str]]] = None
):
"""Sends a timed trajectory to a controller.
Args:
trajectory (Trajectory): a Trajectory, RobotTrajectory, or
HermiteTrajectory instance.
controller (SimRobotController or RobotInterfaceBase): the controller
to execute the trajectory.
speed (float, optional): modulates the speed of the path.
smoothing (str, optional): any smoothing applied to the path. Only
valid for piecewise linear trajectories. Valid values are
* None: no smoothing, just do a piecewise linear trajectory
* 'spline': interpolate tangents to the curve
* 'pause': smoothly speed up and slow down
activeDofs (list, optional): if not None, a list of dofs that are moved
by the trajectory. Each entry may be an integer or a string.
"""
if len(trajectory.times)==0: return #be tolerant of empty paths?
if speed <= 0: raise ValueError("Speed must be positive")
from ..control.robotinterface import RobotInterfaceBase
from ..robotsim import SimRobotController
if isinstance(controller,SimRobotController):
robot_model = controller.model()
q0 = controller.getCommandedConfig()
elif isinstance(controller,RobotInterfaceBase):
robot_model = controller.klamptModel()
cq0 = controller.commandedPosition()
if cq0[0] is None:
cq0 = controller.sensedPosition()
q0 = controller.configToKlampt(cq0)
else:
raise ValueError("Invalid type of controller, must be SimRobotController or RobotInterfaceBase")
if activeDofs is not None:
        indices = [robot_model.link(d).getIndex() for d in activeDofs]
liftedMilestones = []
assert not isinstance(trajectory,HermiteTrajectory),"TODO: hermite trajectory lifting"
for m in trajectory.milestones:
assert(len(m)==len(indices))
q = q0[:]
for i,v in zip(indices,m):
q[i] = v
liftedMilestones.append(q)
tfull = trajectory.constructor()(trajectory.times,liftedMilestones)
return execute_trajectory(tfull,controller,speed,smoothing)
if isinstance(trajectory,HermiteTrajectory):
assert smoothing == None,"Smoothing cannot be applied to hermite trajectories"
ts = trajectory.startTime()
n = len(q0)
if isinstance(controller,SimRobotController):
controller.setMilestone(trajectory.eval(ts),vectorops.mul(trajectory.deriv(ts),speed))
n = len(trajectory.milestones[0])//2
for i in range(1,len(trajectory.times)):
q,v = trajectory.milestones[i][:n],trajectory.milestones[i][n:]
controller.addCubic(q,vectorops.mul(v,speed),(trajectory.times[i]-trajectory.times[i-1])/speed)
else:
cv0 = controller.commandedVelocity()
if cv0[0] is None:
cv0 = controller.sensedVelocity()
times,positions,velocities = [0],[controller.configFromKlampt(q0)],[cv0]
start = 1 if trajectory.times[0]==0 else 0
for i in range(start,len(trajectory.milestones)):
times.append(trajectory.times[i]/speed)
positions.append(controller.configFromKlampt(trajectory.milestones[i][:n]))
velocities.append(controller.velocityFromKlampt(trajectory.milestones[i][n:]))
controller.setPiecewiseCubic(times,positions,velocities)
else:
if smoothing == None:
ts = trajectory.startTime()
if isinstance(controller,SimRobotController):
controller.setMilestone(trajectory.eval(ts))
for i in range(1,len(trajectory.times)):
q = trajectory.milestones[i]
controller.addLinear(q,(trajectory.times[i]-trajectory.times[i-1])/speed)
else:
#TODO: move to start?
times,positions = [0],[controller.configFromKlampt(q0)]
start = 1 if 0==trajectory.times[0] else 0
for i in range(start,len(trajectory.milestones)):
times.append(trajectory.times[i]/speed)
positions.append(controller.configFromKlampt(trajectory.milestones[i]))
controller.setPiecewiseLinear(times,positions)
elif smoothing == 'spline':
t = HermiteTrajectory()
t.makeSpline(trajectory)
return execute_trajectory(t,controller)
elif smoothing == 'pause':
if isinstance(controller,SimRobotController):
ts = trajectory.startTime()
controller.setMilestone(trajectory.eval(ts))
zero = [0.0]*len(trajectory.milestones[0])
for i in range(1,len(trajectory.times)):
q = trajectory.milestones[i]
controller.addCubic(q,zero,(trajectory.times[i]-trajectory.times[i-1])/speed)
else:
#TODO: move to start?
zero = [.0]*len(q0)
t = HermiteTrajectory(trajectory.times,trajectory.milestones,[zero]*len(trajectory.milestones))
return execute_trajectory(t,controller)
else:
raise ValueError("Invalid smoothing method specified")
|
py | b407e81006d0ea86e7e614a95e8ae5b78c6de740 | import inspect
import os
import platform
import socket
import sys
from enum import Enum
from importlib import import_module
from multiprocessing.synchronize import Event as EventType
from pathlib import Path
from typing import Any, Awaitable, Callable, cast, Dict, List, Optional, Tuple, TYPE_CHECKING
from .config import Config
from .typing import ASGI2Framework, ASGI3Framework, ASGIFramework
if TYPE_CHECKING:
from .protocol.events import Request
class Shutdown(Exception):
pass
class MustReloadException(Exception):
pass
class NoAppException(Exception):
pass
class LifespanTimeout(Exception):
def __init__(self, stage: str) -> None:
super().__init__(
f"Timeout whilst awaiting {stage}. Your application may not support the ASGI Lifespan "
f"protocol correctly, alternatively the {stage}_timeout configuration is incorrect."
)
class LifespanFailure(Exception):
def __init__(self, stage: str, message: str) -> None:
super().__init__(f"Lifespan failure in {stage}. '{message}'")
class UnexpectedMessage(Exception):
def __init__(self, state: Enum, message_type: str) -> None:
super().__init__(f"Unexpected message type, {message_type} given the state {state}")
class FrameTooLarge(Exception):
pass
def suppress_body(method: str, status_code: int) -> bool:
return method == "HEAD" or 100 <= status_code < 200 or status_code in {204, 304, 412}
def build_and_validate_headers(headers: List[Tuple[bytes, bytes]]) -> List[Tuple[bytes, bytes]]:
# Validates that the header name and value are bytes
validated_headers: List[Tuple[bytes, bytes]] = []
for name, value in headers:
if name[0] == b":"[0]:
raise ValueError("Pseudo headers are not valid")
validated_headers.append((bytes(name).lower().strip(), bytes(value).strip()))
return validated_headers
def filter_pseudo_headers(headers: List[Tuple[bytes, bytes]]) -> List[Tuple[bytes, bytes]]:
filtered_headers: List[Tuple[bytes, bytes]] = [(b"host", b"")] # Placeholder
for name, value in headers:
if name == b":authority": # h2 & h3 libraries validate this is present
filtered_headers[0] = (b"host", value)
elif name[0] != b":"[0]:
filtered_headers.append((name, value))
return filtered_headers
def load_application(path: str) -> ASGIFramework:
try:
module_name, app_name = path.split(":", 1)
except ValueError:
module_name, app_name = path, "app"
except AttributeError:
raise NoAppException()
module_path = Path(module_name).resolve()
sys.path.insert(0, str(module_path.parent))
if module_path.is_file():
import_name = module_path.with_suffix("").name
else:
import_name = module_path.name
try:
module = import_module(import_name)
except ModuleNotFoundError as error:
if error.name == import_name:
raise NoAppException()
else:
raise
try:
return eval(app_name, vars(module))
except NameError:
raise NoAppException()
async def observe_changes(sleep: Callable[[float], Awaitable[Any]]) -> None:
last_updates: Dict[Path, float] = {}
for module in list(sys.modules.values()):
filename = getattr(module, "__file__", None)
if filename is None:
continue
path = Path(filename)
try:
last_updates[Path(filename)] = path.stat().st_mtime
except (FileNotFoundError, NotADirectoryError):
pass
while True:
await sleep(1)
for index, (path, last_mtime) in enumerate(last_updates.items()):
if index % 10 == 0:
# Yield to the event loop
await sleep(0)
try:
mtime = path.stat().st_mtime
except FileNotFoundError:
# File deleted
raise MustReloadException()
else:
if mtime > last_mtime:
raise MustReloadException()
else:
last_updates[path] = mtime
def restart() -> None:
# Restart this process (only safe for dev/debug)
executable = sys.executable
script_path = Path(sys.argv[0]).resolve()
args = sys.argv[1:]
main_package = sys.modules["__main__"].__package__
if main_package is None:
# Executed by filename
if platform.system() == "Windows":
if not script_path.exists() and script_path.with_suffix(".exe").exists():
# quart run
executable = str(script_path.with_suffix(".exe"))
else:
# python run.py
args.append(str(script_path))
else:
if script_path.is_file() and os.access(script_path, os.X_OK):
# hypercorn run:app --reload
executable = str(script_path)
else:
# python run.py
args.append(str(script_path))
else:
# Executed as a module e.g. python -m run
module = script_path.stem
import_name = main_package
if module != "__main__":
import_name = f"{main_package}.{module}"
args[:0] = ["-m", import_name.lstrip(".")]
os.execv(executable, [executable] + args)
async def raise_shutdown(shutdown_event: Callable[..., Awaitable[None]]) -> None:
await shutdown_event()
raise Shutdown()
async def check_multiprocess_shutdown_event(
shutdown_event: EventType, sleep: Callable[[float], Awaitable[Any]]
) -> None:
while True:
if shutdown_event.is_set():
return
await sleep(0.1)
def write_pid_file(pid_path: str) -> None:
with open(pid_path, "w") as file_:
file_.write(f"{os.getpid()}")
def parse_socket_addr(family: int, address: tuple) -> Optional[Tuple[str, int]]:
if family == socket.AF_INET:
return address # type: ignore
elif family == socket.AF_INET6:
return (address[0], address[1])
else:
return None
def repr_socket_addr(family: int, address: tuple) -> str:
if family == socket.AF_INET:
return f"{address[0]}:{address[1]}"
elif family == socket.AF_INET6:
return f"[{address[0]}]:{address[1]}"
elif family == socket.AF_UNIX:
return f"unix:{address}"
else:
return f"{address}"
async def invoke_asgi(app: ASGIFramework, scope: dict, receive: Callable, send: Callable) -> None:
if _is_asgi_2(app):
scope["asgi"]["version"] = "2.0"
app = cast(ASGI2Framework, app)
asgi_instance = app(scope)
await asgi_instance(receive, send)
else:
scope["asgi"]["version"] = "3.0"
app = cast(ASGI3Framework, app)
await app(scope, receive, send)
def _is_asgi_2(app: ASGIFramework) -> bool:
if inspect.isclass(app):
return True
if hasattr(app, "__call__") and inspect.iscoroutinefunction(app.__call__): # type: ignore
return False
return not inspect.iscoroutinefunction(app)
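# Illustrative sketch (not part of the original module): the two application
# shapes that _is_asgi_2/invoke_asgi distinguish. The names below are
# hypothetical examples, not Hypercorn APIs.
class _ExampleASGI2App:
    # ASGI 2: a callable taking the scope and returning an awaitable instance.
    def __init__(self, scope: dict) -> None:
        self.scope = scope
    async def __call__(self, receive: Callable, send: Callable) -> None:
        await send({"type": "http.response.start", "status": 200, "headers": []})
        await send({"type": "http.response.body", "body": b"OK"})
async def _example_asgi3_app(scope: dict, receive: Callable, send: Callable) -> None:
    # ASGI 3: a single coroutine function taking scope, receive and send.
    await send({"type": "http.response.start", "status": 200, "headers": []})
    await send({"type": "http.response.body", "body": b"OK"})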
def valid_server_name(config: Config, request: "Request") -> bool:
if len(config.server_names) == 0:
return True
host = ""
for name, value in request.headers:
if name.lower() == b"host":
host = value.decode()
return host in config.server_names
|
py | b407e8af25485c2ce51e91b5d53632e6186ae490 | from django.shortcuts import render
from django.contrib.auth.decorators import login_required
@login_required
def user_detail(request):
user = request.user.libraryuser
context = {
'reservations': user.reservations(),
'borrowings': user.borrowings()
}
return render(request, 'library_app/user_detail.html', context)
|
py | b407e8e751337f2d12fcf1abb28266d96f9547bc | #!/usr/bin/env python3
#
# Copyright 2019 Miklos Vajna. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""Compares reference house numbers with OSM ones and shows the diff."""
from typing import List
from typing import TextIO
import sys
import areas
import context
import util
def main(argv: List[str], stdout: TextIO, ctx: context.Context) -> None:
"""Commandline interface."""
relation_name = argv[1]
relations = areas.Relations(ctx)
relation = relations.get_relation(relation_name)
ongoing_streets, _ = relation.get_missing_housenumbers()
for result in ongoing_streets:
# House number, # of only_in_reference items.
range_list = util.get_housenumber_ranges(result[1])
range_strings = [i.get_number() for i in range_list]
range_strings = sorted(range_strings, key=util.split_house_number)
stdout.write("%s\t%s\n" % (result[0].get_osm_name(), len(range_strings)))
# only_in_reference items.
stdout.write(str(range_strings) + "\n")
if __name__ == '__main__':
main(sys.argv, sys.stdout, context.Context(""))
# vim:set shiftwidth=4 softtabstop=4 expandtab:
|
py | b407e8e776e52df2157c3477013d41a50b32c0ef | #!/usr/bin/python
# Copyright (c) Open Connectivity Foundation (OCF), AllJoyn Open Source
# Project (AJOSP) Contributors and others.
#
# SPDX-License-Identifier: Apache-2.0
#
# All rights reserved. This program and the accompanying materials are
# made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Copyright (c) Open Connectivity Foundation and Contributors to AllSeen
# Alliance. All rights reserved.
#
# Permission to use, copy, modify, and/or distribute this software for
# any purpose with or without fee is hereby granted, provided that the
# above copyright notice and this permission notice appear in all
# copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
# AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
# PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
import sys
import os
import getopt
from xml.dom import minidom
from xml.sax.saxutils import escape
if sys.version_info[:3] < (2,4,0):
from sets import Set as set
includeSet = set()
def openFile(name, type):
try:
return open(name, type)
except IOError, e:
errno, errStr = e
print "I/O Operation on %s failed" % name
print "I/O Error(%d): %s" % (errno, errStr)
raise e
def main(argv=None):
"""
make_status --code <code_file> --base <base_dir> [--deps <dep_file>] [--help]
Where:
<code_file> - Output "Java" code
<base_dir> - Root directory for xi:include directives
<dep_file> - Ouput makefile dependency file
"""
global codeOut
global depOut
global isFirst
global fileArgs
global baseDir
codeOut = None
depOut = None
isFirst = True
baseDir = ""
if argv is None:
argv = sys.argv[1:]
try:
opts, fileArgs = getopt.getopt(argv, "h", ["help", "code=", "dep=", "base="])
for o, a in opts:
if o in ("-h", "--help"):
print __doc__
return 0
if o in ("--code"):
codeOut = openFile(a, 'w')
if o in ("--dep"):
depOut = openFile(a, 'w')
if o in ("--base"):
baseDir = a
if None == codeOut:
            raise Exception("Must specify --code")
writeHeaders()
for arg in fileArgs:
ret = parseDocument(arg)
writeFooters()
if None != codeOut:
codeOut.close()
if None != depOut:
depOut.close()
except getopt.error, msg:
print msg
print "for help use --help"
return 1
except Exception, e:
print "ERROR: %s" % e
if None != codeOut:
os.unlink(codeOut.name)
if None != depOut:
os.unlink(depOut.name)
return 1
return 0
def writeHeaders():
global codeOut
global depOut
global fileArgs
if None != depOut:
depOut.write("%s %s %s:" % (depOut.name, codeOut.name))
for arg in fileArgs:
depOut.write(" \\\n %s" % arg)
if None != codeOut:
codeOut.write("""/* This file is auto-generated. Do not modify. */
/*
* Copyright (c) Open Connectivity Foundation (OCF), AllJoyn Open Source
* Project (AJOSP) Contributors and others.
*
* SPDX-License-Identifier: Apache-2.0
*
* All rights reserved. This program and the accompanying materials are
* made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution, and is available at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Copyright (c) Open Connectivity Foundation and Contributors to AllSeen
* Alliance. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
package org.alljoyn.bus;
/**
* Standard function return codes for this package.
*/
public enum Status {
""")
def writeFooters():
global codeOut
global depOut
if None != depOut:
depOut.write("\n")
if None != codeOut:
codeOut.write(""";
/** Error Code */
private int errorCode;
/** Constructor */
private Status(int errorCode) {
this.errorCode = errorCode;
}
/** Static constructor */
private static Status create(int errorCode) {
for (Status s : Status.values()) {
if (s.getErrorCode() == errorCode) {
return s;
}
}
return NONE;
}
/**
* Gets the numeric error code.
*
* @return the numeric error code
*/
public int getErrorCode() { return errorCode; }
}
""")
def parseDocument(fileName):
dom = minidom.parse(fileName)
for child in dom.childNodes:
if child.localName == 'status_block':
parseStatusBlock(child)
elif child.localName == 'include' and child.namespaceURI == 'http://www.w3.org/2001/XInclude':
parseInclude(child)
dom.unlink()
def parseStatusBlock(blockNode):
global codeOut
global isFirst
offset = 0
for node in blockNode.childNodes:
if node.localName == 'offset':
offset = int(node.firstChild.data, 0)
elif node.localName == 'status':
if isFirst:
if None != codeOut:
codeOut.write("\n /** <b><tt>%s</tt></b> %s. */" % (escape(node.getAttribute('value')), escape(node.getAttribute('comment'))))
codeOut.write("\n %s(%s)" % (node.getAttribute('name')[3:], node.getAttribute('value')))
isFirst = False
else:
if None != codeOut:
codeOut.write(",\n /** <b><tt>%s</tt></b> %s. */" % (escape(node.getAttribute('value')), escape(node.getAttribute('comment'))))
codeOut.write("\n %s(%s)" % (node.getAttribute('name')[3:], node.getAttribute('value')))
offset += 1
elif node.localName == 'include' and node.namespaceURI == 'http://www.w3.org/2001/XInclude':
parseInclude(node)
def parseInclude(includeNode):
global baseDir
global includeSet
href = os.path.join(baseDir, includeNode.attributes['href'].nodeValue)
if href not in includeSet:
includeSet.add(href)
if None != depOut:
depOut.write(" \\\n %s" % href)
parseDocument(href)
def JavaStatus(source):
return main(['--base=%s' % os.path.abspath('..'),
'--code=%s.java' % source,
'%s.xml' % source])
if __name__ == "__main__":
sys.exit(main())
|
py | b407e93f5119c28b8de410b02b16794d5ca2bd2b | # Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from ._peak_finder import peak_finder
from .. import pick_types, pick_channels
from ..utils import logger, verbose, _pl, warn, _validate_type
from ..filter import filter_data
from ..epochs import Epochs
@verbose
def find_eog_events(raw, event_id=998, l_freq=1, h_freq=10,
filter_length='10s', ch_name=None, tstart=0,
reject_by_annotation=False, thresh=None, verbose=None):
"""Locate EOG artifacts.
.. note:: To control true-positive and true-negative detection rates, you
may adjust the ``thresh`` parameter.
Parameters
----------
raw : instance of Raw
The raw data.
event_id : int
The index to assign to found events.
l_freq : float
Low cut-off frequency to apply to the EOG channel in Hz.
h_freq : float
High cut-off frequency to apply to the EOG channel in Hz.
filter_length : str | int | None
Number of taps to use for filtering.
%(eog_ch_name)s
tstart : float
Start detection after tstart seconds.
reject_by_annotation : bool
Whether to omit data that is annotated as bad.
thresh : float | None
Threshold to trigger the detection of an EOG event. This controls the
thresholding of the underlying peak-finding algorithm. Larger values
mean that fewer peaks (i.e., fewer EOG events) will be detected.
If ``None``, use the default of ``(max(eog) - min(eog)) / 4``,
with ``eog`` being the filtered EOG signal.
%(verbose)s
Returns
-------
eog_events : array
Events.
See Also
--------
create_eog_epochs
compute_proj_eog
"""
# Getting EOG Channel
eog_inds = _get_eog_channel_index(ch_name, raw)
logger.info('EOG channel index for this subject is: %s' % eog_inds)
# Reject bad segments.
reject_by_annotation = 'omit' if reject_by_annotation else None
eog, times = raw.get_data(picks=eog_inds,
reject_by_annotation=reject_by_annotation,
return_times=True)
times = times * raw.info['sfreq'] + raw.first_samp
eog_events = _find_eog_events(eog, event_id=event_id, l_freq=l_freq,
h_freq=h_freq,
sampling_rate=raw.info['sfreq'],
first_samp=raw.first_samp,
filter_length=filter_length,
tstart=tstart, thresh=thresh,
verbose=verbose)
# Map times to corresponding samples.
eog_events[:, 0] = np.round(times[eog_events[:, 0] -
raw.first_samp]).astype(int)
return eog_events
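# Illustrative sketch (not part of the original module): locating blinks in a
# loaded Raw instance. `raw` is assumed to come from an mne.io reader; a larger
# ``thresh`` makes the peak detector report fewer EOG events.
def _example_find_eog_events(raw):
    eog_events = find_eog_events(raw, l_freq=1, h_freq=10, thresh=None)
    return len(eog_events)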
@verbose
def _find_eog_events(eog, event_id, l_freq, h_freq, sampling_rate, first_samp,
filter_length='10s', tstart=0., thresh=None,
verbose=None):
"""Find EOG events."""
logger.info('Filtering the data to remove DC offset to help '
'distinguish blinks from saccades')
    # filtering to remove dc offset so that we know which are blinks and which are saccades
# hardcode verbose=False to suppress filter param messages (since this
# filter is not under user control)
fmax = np.minimum(45, sampling_rate / 2.0 - 0.75) # protect Nyquist
filteog = np.array([filter_data(
x, sampling_rate, 2, fmax, None, filter_length, 0.5, 0.5,
phase='zero-double', fir_window='hann', fir_design='firwin2',
verbose=False) for x in eog])
temp = np.sqrt(np.sum(filteog ** 2, axis=1))
indexmax = np.argmax(temp)
# easier to detect peaks with filtering.
filteog = filter_data(
eog[indexmax], sampling_rate, l_freq, h_freq, None,
filter_length, 0.5, 0.5, phase='zero-double', fir_window='hann',
fir_design='firwin2')
# detecting eog blinks and generating event file
logger.info('Now detecting blinks and generating corresponding events')
temp = filteog - np.mean(filteog)
n_samples_start = int(sampling_rate * tstart)
if np.abs(np.max(temp)) > np.abs(np.min(temp)):
eog_events, _ = peak_finder(filteog[n_samples_start:],
thresh, extrema=1)
else:
eog_events, _ = peak_finder(filteog[n_samples_start:],
thresh, extrema=-1)
eog_events += n_samples_start
n_events = len(eog_events)
logger.info(f'Number of EOG events detected: {n_events}')
eog_events = np.array([eog_events + first_samp,
np.zeros(n_events, int),
event_id * np.ones(n_events, int)]).T
return eog_events
def _get_eog_channel_index(ch_name, inst):
"""Get EOG channel indices."""
_validate_type(ch_name, types=(None, str, list), item_name='ch_name')
if ch_name is None:
eog_inds = pick_types(inst.info, meg=False, eeg=False, stim=False,
eog=True, ecg=False, emg=False, ref_meg=False,
exclude='bads')
if eog_inds.size == 0:
warn('No EOG channel found. Trying with EEG 061 and EEG 062. '
'This functionality will be removed in version 0.24',
DeprecationWarning)
eog_inds = pick_channels(inst.ch_names,
include=['EEG 061', 'EEG 062'])
if eog_inds.size == 0:
raise ValueError('Could not find any EOG channels.')
ch_names = [inst.ch_names[i] for i in eog_inds]
elif isinstance(ch_name, str):
ch_names = [ch_name]
else: # it's a list
ch_names = ch_name.copy()
# ensure the specified channels are present in the data
if ch_name is not None:
not_found = [ch_name for ch_name in ch_names
if ch_name not in inst.ch_names]
if not_found:
raise ValueError(f'The specified EOG channel{_pl(not_found)} '
f'cannot be found: {", ".join(not_found)}')
eog_inds = pick_channels(inst.ch_names, include=ch_names)
logger.info(f'Using EOG channel{_pl(ch_names)}: {", ".join(ch_names)}')
return eog_inds
@verbose
def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5,
tmax=0.5, l_freq=1, h_freq=10, reject=None, flat=None,
baseline=None, preload=True, reject_by_annotation=True,
thresh=None, decim=1, verbose=None):
"""Conveniently generate epochs around EOG artifact events.
%(create_eog_epochs)s
Parameters
----------
raw : instance of Raw
The raw data.
%(eog_ch_name)s
event_id : int
The index to assign to found events.
%(picks_all)s
tmin : float
Start time before event.
tmax : float
End time after event.
l_freq : float
Low pass frequency to apply to the EOG channel while finding events.
h_freq : float
High pass frequency to apply to the EOG channel while finding events.
reject : dict | None
Rejection parameters based on peak-to-peak amplitude.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
If reject is None then no rejection is done. Example::
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=40e-6, # V (EEG channels)
eog=250e-6 # V (EOG channels)
)
flat : dict | None
Rejection parameters based on flatness of signal.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
are floats that set the minimum acceptable peak-to-peak amplitude.
If flat is None then no rejection is done.
baseline : tuple or list of length 2, or None
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used. If None, no correction is applied.
preload : bool
Preload epochs or not.
%(reject_by_annotation_epochs)s
.. versionadded:: 0.14.0
thresh : float
Threshold to trigger EOG event.
%(decim)s
.. versionadded:: 0.21.0
%(verbose)s
Returns
-------
eog_epochs : instance of Epochs
Data epoched around EOG events.
See Also
--------
find_eog_events
compute_proj_eog
Notes
-----
Filtering is only applied to the EOG channel while finding events.
The resulting ``eog_epochs`` will have no filtering applied (i.e., have
the same filter properties as the input ``raw`` instance).
"""
events = find_eog_events(raw, ch_name=ch_name, event_id=event_id,
l_freq=l_freq, h_freq=h_freq,
reject_by_annotation=reject_by_annotation,
thresh=thresh)
# create epochs around EOG events
eog_epochs = Epochs(raw, events=events, event_id=event_id, tmin=tmin,
tmax=tmax, proj=False, reject=reject, flat=flat,
picks=picks, baseline=baseline, preload=preload,
reject_by_annotation=reject_by_annotation,
decim=decim)
return eog_epochs
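# Illustrative sketch (not part of the original module): building and averaging
# blink epochs from a loaded Raw instance. The rejection threshold below is a
# placeholder value, not an MNE default.
def _example_create_eog_epochs(raw):
    eog_epochs = create_eog_epochs(raw, tmin=-0.5, tmax=0.5,
                                   reject=dict(eeg=100e-6), baseline=(None, 0))
    # Averaging the epochs gives the typical blink waveform for this recording.
    return eog_epochs.average()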
|
py | b407e947273008cd1754261cfdc3c11e5c62a0cc | import tkinter as tk
import stories as st
window = tk.Tk()
window.geometry('900x600')
window.title('Madlibs generator')
tk.Label(window, text='Madlibs Generator \n:D', font='arial 30 bold').pack()
tk.Label(window, text = 'Choose any one story:', font='arial 20 bold').place(x=50, y=125)
tk.Button(window, text = 'The Magic Book', font = 'arial 20', command=st.story1, bg = 'grey').place(x= 50, y = 200 )
tk.Button(window, text= 'The butterfly', font = 'arial 20', command=st.story2, bg='grey').place(x=50, y=300)
tk.Button(window, text= 'Apple and apple', font = 'arial 20', command=st.story3, bg='grey').place(x=50, y=400)
window.mainloop()
|
py | b407ea5fa34889ae527b9d8ba1f51b56a55b3955 | import config
import time
import azure.cognitiveservices.speech as speechsdk
from tkinter import *
import tkinter as tk
timestr = time.strftime("%Y%m%d-%H%M%S")
f = open('captions' + timestr + '.txt', 'a', buffering=1)
appHeight = 150
padding = 20
labelText = NONE
def recognizing(args):
global labelText
labelText.set(args.result.text)
def recognized(args):
global f
if args.result.text.strip() != '':
f.write(args.result.text + "\n")
root = Tk()
labelText = StringVar()
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
root.geometry(str(screen_width) + "x" + str(appHeight) + "+0+" + str(screen_height - appHeight))
root.configure(background='black')
root.update_idletasks()
root.overrideredirect(True)
root.attributes('-alpha', 0.8)
labelWidth = screen_width-(padding * 2)
label = Label(root, textvariable=labelText,
foreground="white", height=appHeight, width=labelWidth,
background='black', font=("Courier", 44),
justify=LEFT, anchor=SW, wraplength=labelWidth)
label.pack(padx=padding, pady=padding)
root.attributes('-topmost', True)
speech_config = speechsdk.SpeechConfig(subscription=config.speech_key, region=config.service_region)
if config.device_uuid == "":
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config)
else:
audio_config = speechsdk.AudioConfig(device_name=config.device_uuid);
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config, audio_config=audio_config)
speech_recognizer.recognizing.connect(recognizing)
speech_recognizer.recognized.connect(recognized)
speech_recognizer.start_continuous_recognition()
root.mainloop()
root.destroy()
|
py | b407ea921989eec447bc1d366976486f8c6fcc21 | from ciperpus_exception import *
from ciperpus_test_exception import *
from ciperpus_test_context import *
from ciperpus_client import ciperpus_client
class ciperpus_test_client:
def __init__(self, client=None):
if client is None:
self.client = ciperpus_client()
else:
self.client = client
def login(self, username, password, expect_error=None, use_button=True):
with ciperpus_test_context(expect_error) as context:
self.client.login(username, password)
def logout(self, expect_error=None):
with ciperpus_test_context(expect_error) as context:
self.client.logout()
def dashboard(self, expect_error=None):
with ciperpus_test_context(expect_error) as context:
self.client.logout()
@property
def url(self):
return self.client.url
def check_url(self, endpoint):
return self.client.check_url(endpoint)
def close(self):
return self.client.close()
def quit(self):
return self.client.quit()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.quit()
test_client_instance = None
def get_test_client():
global test_client_instance
test_client_instance = test_client_instance or ciperpus_test_client()
return test_client_instance |
py | b407ec42088cf16388fc48e81c317ba1df7861dd | """Make your user-facing Celery jobs totally awesomer"""
VERSION = (2, 1, 1)
__version__ = '.'.join(map(str, VERSION[0:3])) + ''.join(VERSION[3:])
__author__ = 'Wes Winham'
__contact__ = '[email protected]'
__homepage__ = 'http://policystat.github.com/jobtastic'
__docformat__ = 'markdown'
__all__ = (
'JobtasticTask',
'__version__',
)
# -eof meta-
from jobtastic.task import JobtasticTask # NOQA
|
py | b407ecd063e9849adcad14ab01f126537847ca58 | ##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## Hello World Web App
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%% import packages
import cherrypy
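# Note: runPCAmodel() used below is assumed to be defined or imported from the
# accompanying monitoring code; it is not part of this snippet.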
#%% FDD tool Web application
class FDDapp(object):
@cherrypy.expose
def getResults(self):
processState = runPCAmodel() # returns 'All good' or 'Issue detected'
return processState
#%% execution settings
cherrypy.config.update({'server.socket_host': '0.0.0.0'})
if __name__ == '__main__':
cherrypy.quickstart(FDDapp()) # when this script is executed, host FDDapp app
|
py | b407ecfb12a4c56f4ea7e7b8b0f12c1155101e24 | # swift.py -- Repo implementation atop OpenStack SWIFT
# Copyright (C) 2013 eNovance SAS <[email protected]>
#
# Author: Fabien Boucher <[email protected]>
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as public by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#
"""Repo implementation atop OpenStack SWIFT."""
# TODO: Refactor to share more code with dulwich/repo.py.
# TODO(fbo): Second attempt to _send() must be notified via real log
# TODO(fbo): More logs for operations
import os
import stat
import zlib
import tempfile
import posixpath
import urllib.parse as urlparse
from io import BytesIO
from configparser import ConfigParser
from geventhttpclient import HTTPClient
from dulwich.greenthreads import (
GreenThreadsMissingObjectFinder,
GreenThreadsObjectStoreIterator,
)
from dulwich.lru_cache import LRUSizeCache
from dulwich.objects import (
Blob,
Commit,
Tree,
Tag,
S_ISGITLINK,
)
from dulwich.object_store import (
PackBasedObjectStore,
PACKDIR,
INFODIR,
)
from dulwich.pack import (
PackData,
Pack,
PackIndexer,
PackStreamCopier,
write_pack_header,
compute_file_sha,
iter_sha1,
write_pack_index_v2,
load_pack_index_file,
read_pack_header,
_compute_object_size,
unpack_object,
write_pack_object,
)
from dulwich.protocol import TCP_GIT_PORT
from dulwich.refs import (
InfoRefsContainer,
read_info_refs,
write_info_refs,
)
from dulwich.repo import (
BaseRepo,
OBJECTDIR,
)
from dulwich.server import (
Backend,
TCPGitServer,
)
import json
import sys
"""
# Configuration file sample
[swift]
# Authentication URL (Keystone or Swift)
auth_url = http://127.0.0.1:5000/v2.0
# Authentication version to use
auth_ver = 2
# The tenant and username separated by a semicolon
username = admin;admin
# The user password
password = pass
# The Object storage region to use (auth v2) (Default RegionOne)
region_name = RegionOne
# The Object storage endpoint URL to use (auth v2) (Default internalURL)
endpoint_type = internalURL
# Concurrency to use for parallel tasks (Default 10)
concurrency = 10
# Size of the HTTP pool (Default 10)
http_pool_length = 10
# Timeout delay for HTTP connections (Default 20)
http_timeout = 20
# Chunk size to read from pack (Bytes) (Default 12228)
chunk_length = 12228
# Cache size (MBytes) (Default 20)
cache_length = 20
"""
class PackInfoObjectStoreIterator(GreenThreadsObjectStoreIterator):
def __len__(self):
while len(self.finder.objects_to_send):
for _ in range(0, len(self.finder.objects_to_send)):
sha = self.finder.next()
self._shas.append(sha)
return len(self._shas)
class PackInfoMissingObjectFinder(GreenThreadsMissingObjectFinder):
def next(self):
while True:
if not self.objects_to_send:
return None
(sha, name, leaf) = self.objects_to_send.pop()
if sha not in self.sha_done:
break
if not leaf:
info = self.object_store.pack_info_get(sha)
if info[0] == Commit.type_num:
self.add_todo([(info[2], "", False)])
elif info[0] == Tree.type_num:
self.add_todo([tuple(i) for i in info[1]])
elif info[0] == Tag.type_num:
self.add_todo([(info[1], None, False)])
if sha in self._tagged:
self.add_todo([(self._tagged[sha], None, True)])
self.sha_done.add(sha)
self.progress("counting objects: %d\r" % len(self.sha_done))
return (sha, name)
def load_conf(path=None, file=None):
"""Load configuration in global var CONF
Args:
path: The path to the configuration file
file: If provided read instead the file like object
"""
conf = ConfigParser()
if file:
try:
conf.read_file(file, path)
except AttributeError:
# read_file only exists in Python3
conf.readfp(file)
return conf
confpath = None
if not path:
try:
confpath = os.environ["DULWICH_SWIFT_CFG"]
except KeyError:
raise Exception("You need to specify a configuration file")
else:
confpath = path
if not os.path.isfile(confpath):
raise Exception("Unable to read configuration file %s" % confpath)
conf.read(confpath)
return conf
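def _example_load_conf():
    """Illustrative sketch (not part of the original module): parse the sample
    configuration shown at the top of this file from an in-memory file object.
    The values are placeholders mirroring that sample."""
    from io import StringIO
    sample = (
        "[swift]\n"
        "auth_url = http://127.0.0.1:5000/v2.0\n"
        "auth_ver = 2\n"
        "username = admin;admin\n"
        "password = pass\n"
    )
    return load_conf(file=StringIO(sample))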
def swift_load_pack_index(scon, filename):
"""Read a pack index file from Swift
Args:
scon: a `SwiftConnector` instance
filename: Path to the index file objectise
Returns: a `PackIndexer` instance
"""
with scon.get_object(filename) as f:
return load_pack_index_file(filename, f)
def pack_info_create(pack_data, pack_index):
pack = Pack.from_objects(pack_data, pack_index)
info = {}
for obj in pack.iterobjects():
# Commit
if obj.type_num == Commit.type_num:
info[obj.id] = (obj.type_num, obj.parents, obj.tree)
# Tree
elif obj.type_num == Tree.type_num:
shas = [
(s, n, not stat.S_ISDIR(m))
for n, m, s in obj.items()
if not S_ISGITLINK(m)
]
info[obj.id] = (obj.type_num, shas)
# Blob
elif obj.type_num == Blob.type_num:
info[obj.id] = None
# Tag
elif obj.type_num == Tag.type_num:
info[obj.id] = (obj.type_num, obj.object[1])
return zlib.compress(json.dumps(info))
def load_pack_info(filename, scon=None, file=None):
if not file:
f = scon.get_object(filename)
else:
f = file
if not f:
return None
try:
return json.loads(zlib.decompress(f.read()))
finally:
f.close()
class SwiftException(Exception):
pass
class SwiftConnector(object):
"""A Connector to swift that manage authentication and errors catching"""
def __init__(self, root, conf):
"""Initialize a SwiftConnector
Args:
root: The swift container that will act as Git bare repository
conf: A ConfigParser Object
"""
self.conf = conf
self.auth_ver = self.conf.get("swift", "auth_ver")
if self.auth_ver not in ["1", "2"]:
raise NotImplementedError("Wrong authentication version use either 1 or 2")
self.auth_url = self.conf.get("swift", "auth_url")
self.user = self.conf.get("swift", "username")
self.password = self.conf.get("swift", "password")
self.concurrency = self.conf.getint("swift", "concurrency") or 10
self.http_timeout = self.conf.getint("swift", "http_timeout") or 20
self.http_pool_length = self.conf.getint("swift", "http_pool_length") or 10
self.region_name = self.conf.get("swift", "region_name") or "RegionOne"
self.endpoint_type = self.conf.get("swift", "endpoint_type") or "internalURL"
self.cache_length = self.conf.getint("swift", "cache_length") or 20
self.chunk_length = self.conf.getint("swift", "chunk_length") or 12228
self.root = root
block_size = 1024 * 12 # 12KB
if self.auth_ver == "1":
self.storage_url, self.token = self.swift_auth_v1()
else:
self.storage_url, self.token = self.swift_auth_v2()
token_header = {"X-Auth-Token": str(self.token)}
self.httpclient = HTTPClient.from_url(
str(self.storage_url),
concurrency=self.http_pool_length,
block_size=block_size,
connection_timeout=self.http_timeout,
network_timeout=self.http_timeout,
headers=token_header,
)
self.base_path = str(
posixpath.join(urlparse.urlparse(self.storage_url).path, self.root)
)
def swift_auth_v1(self):
self.user = self.user.replace(";", ":")
auth_httpclient = HTTPClient.from_url(
self.auth_url,
connection_timeout=self.http_timeout,
network_timeout=self.http_timeout,
)
headers = {"X-Auth-User": self.user, "X-Auth-Key": self.password}
path = urlparse.urlparse(self.auth_url).path
ret = auth_httpclient.request("GET", path, headers=headers)
# Should do something with redirections (301 in my case)
if ret.status_code < 200 or ret.status_code >= 300:
raise SwiftException(
"AUTH v1.0 request failed on "
+ "%s with error code %s (%s)"
% (
str(auth_httpclient.get_base_url()) + path,
ret.status_code,
str(ret.items()),
)
)
storage_url = ret["X-Storage-Url"]
token = ret["X-Auth-Token"]
return storage_url, token
def swift_auth_v2(self):
self.tenant, self.user = self.user.split(";")
auth_dict = {}
auth_dict["auth"] = {
"passwordCredentials": {
"username": self.user,
"password": self.password,
},
"tenantName": self.tenant,
}
auth_json = json.dumps(auth_dict)
headers = {"Content-Type": "application/json"}
auth_httpclient = HTTPClient.from_url(
self.auth_url,
connection_timeout=self.http_timeout,
network_timeout=self.http_timeout,
)
path = urlparse.urlparse(self.auth_url).path
if not path.endswith("tokens"):
path = posixpath.join(path, "tokens")
ret = auth_httpclient.request("POST", path, body=auth_json, headers=headers)
if ret.status_code < 200 or ret.status_code >= 300:
raise SwiftException(
"AUTH v2.0 request failed on "
+ "%s with error code %s (%s)"
% (
str(auth_httpclient.get_base_url()) + path,
ret.status_code,
str(ret.items()),
)
)
auth_ret_json = json.loads(ret.read())
token = auth_ret_json["access"]["token"]["id"]
catalogs = auth_ret_json["access"]["serviceCatalog"]
object_store = [
o_store for o_store in catalogs if o_store["type"] == "object-store"
][0]
endpoints = object_store["endpoints"]
endpoint = [endp for endp in endpoints if endp["region"] == self.region_name][0]
return endpoint[self.endpoint_type], token
def test_root_exists(self):
"""Check that Swift container exist
Returns: True if exist or None it not
"""
ret = self.httpclient.request("HEAD", self.base_path)
if ret.status_code == 404:
return None
if ret.status_code < 200 or ret.status_code > 300:
raise SwiftException(
"HEAD request failed with error code %s" % ret.status_code
)
return True
def create_root(self):
"""Create the Swift container
Raises:
SwiftException: if unable to create
"""
if not self.test_root_exists():
ret = self.httpclient.request("PUT", self.base_path)
if ret.status_code < 200 or ret.status_code > 300:
raise SwiftException(
"PUT request failed with error code %s" % ret.status_code
)
def get_container_objects(self):
"""Retrieve objects list in a container
Returns: A list of dict that describe objects
or None if container does not exist
"""
qs = "?format=json"
path = self.base_path + qs
ret = self.httpclient.request("GET", path)
if ret.status_code == 404:
return None
if ret.status_code < 200 or ret.status_code > 300:
raise SwiftException(
"GET request failed with error code %s" % ret.status_code
)
content = ret.read()
return json.loads(content)
def get_object_stat(self, name):
"""Retrieve object stat
Args:
name: The object name
Returns:
A dict that describe the object or None if object does not exist
"""
path = self.base_path + "/" + name
ret = self.httpclient.request("HEAD", path)
if ret.status_code == 404:
return None
if ret.status_code < 200 or ret.status_code > 300:
raise SwiftException(
"HEAD request failed with error code %s" % ret.status_code
)
resp_headers = {}
for header, value in ret.items():
resp_headers[header.lower()] = value
return resp_headers
def put_object(self, name, content):
"""Put an object
Args:
name: The object name
content: A file object
Raises:
SwiftException: if unable to create
"""
content.seek(0)
data = content.read()
path = self.base_path + "/" + name
headers = {"Content-Length": str(len(data))}
def _send():
ret = self.httpclient.request("PUT", path, body=data, headers=headers)
return ret
try:
# Sometime got Broken Pipe - Dirty workaround
ret = _send()
except Exception:
# Second attempt work
ret = _send()
if ret.status_code < 200 or ret.status_code > 300:
raise SwiftException(
"PUT request failed with error code %s" % ret.status_code
)
def get_object(self, name, range=None):
"""Retrieve an object
Args:
name: The object name
range: A string range like "0-10" to
retrieve specified bytes in object content
Returns:
A file like instance or bytestring if range is specified
"""
headers = {}
if range:
headers["Range"] = "bytes=%s" % range
path = self.base_path + "/" + name
ret = self.httpclient.request("GET", path, headers=headers)
if ret.status_code == 404:
return None
if ret.status_code < 200 or ret.status_code > 300:
raise SwiftException(
"GET request failed with error code %s" % ret.status_code
)
content = ret.read()
if range:
return content
return BytesIO(content)
def del_object(self, name):
"""Delete an object
Args:
name: The object name
Raises:
SwiftException: if unable to delete
"""
path = self.base_path + "/" + name
ret = self.httpclient.request("DELETE", path)
if ret.status_code < 200 or ret.status_code > 300:
raise SwiftException(
"DELETE request failed with error code %s" % ret.status_code
)
def del_root(self):
"""Delete the root container by removing container content
Raises:
SwiftException: if unable to delete
"""
for obj in self.get_container_objects():
self.del_object(obj["name"])
ret = self.httpclient.request("DELETE", self.base_path)
if ret.status_code < 200 or ret.status_code > 300:
raise SwiftException(
"DELETE request failed with error code %s" % ret.status_code
)
class SwiftPackReader(object):
"""A SwiftPackReader that mimic read and sync method
The reader allows to read a specified amount of bytes from
a given offset of a Swift object. A read offset is kept internaly.
The reader will read from Swift a specified amount of data to complete
its internal buffer. chunk_length specifiy the amount of data
to read from Swift.
"""
def __init__(self, scon, filename, pack_length):
"""Initialize a SwiftPackReader
Args:
scon: a `SwiftConnector` instance
filename: the pack filename
pack_length: The size of the pack object
"""
self.scon = scon
self.filename = filename
self.pack_length = pack_length
self.offset = 0
self.base_offset = 0
self.buff = b""
self.buff_length = self.scon.chunk_length
def _read(self, more=False):
if more:
self.buff_length = self.buff_length * 2
offset = self.base_offset
r = min(self.base_offset + self.buff_length, self.pack_length)
ret = self.scon.get_object(self.filename, range="%s-%s" % (offset, r))
self.buff = ret
def read(self, length):
"""Read a specified amount of Bytes form the pack object
Args:
length: amount of bytes to read
Returns:
a bytestring
"""
end = self.offset + length
if self.base_offset + end > self.pack_length:
data = self.buff[self.offset :]
self.offset = end
return data
if end > len(self.buff):
# Need to read more from swift
self._read(more=True)
return self.read(length)
data = self.buff[self.offset : end]
self.offset = end
return data
def seek(self, offset):
"""Seek to a specified offset
Args:
offset: the offset to seek to
"""
self.base_offset = offset
self._read()
self.offset = 0
def read_checksum(self):
"""Read the checksum from the pack
Returns: the checksum bytestring
"""
return self.scon.get_object(self.filename, range="-20")
class SwiftPackData(PackData):
"""The data contained in a packfile.
We use the SwiftPackReader to read bytes from packs stored in Swift
using the Range header feature of Swift.
"""
def __init__(self, scon, filename):
"""Initialize a SwiftPackReader
Args:
scon: a `SwiftConnector` instance
filename: the pack filename
"""
self.scon = scon
self._filename = filename
self._header_size = 12
headers = self.scon.get_object_stat(self._filename)
self.pack_length = int(headers["content-length"])
pack_reader = SwiftPackReader(self.scon, self._filename, self.pack_length)
(version, self._num_objects) = read_pack_header(pack_reader.read)
self._offset_cache = LRUSizeCache(
1024 * 1024 * self.scon.cache_length,
compute_size=_compute_object_size,
)
self.pack = None
def get_object_at(self, offset):
if offset in self._offset_cache:
return self._offset_cache[offset]
assert offset >= self._header_size
pack_reader = SwiftPackReader(self.scon, self._filename, self.pack_length)
pack_reader.seek(offset)
unpacked, _ = unpack_object(pack_reader.read)
return (unpacked.pack_type_num, unpacked._obj())
def get_stored_checksum(self):
pack_reader = SwiftPackReader(self.scon, self._filename, self.pack_length)
return pack_reader.read_checksum()
def close(self):
pass
class SwiftPack(Pack):
"""A Git pack object.
Same implementation as pack.Pack except that _idx_load and
_data_load are bounded to Swift version of load_pack_index and
PackData.
"""
def __init__(self, *args, **kwargs):
self.scon = kwargs["scon"]
del kwargs["scon"]
super(SwiftPack, self).__init__(*args, **kwargs)
self._pack_info_path = self._basename + ".info"
self._pack_info = None
self._pack_info_load = lambda: load_pack_info(self._pack_info_path, self.scon)
self._idx_load = lambda: swift_load_pack_index(self.scon, self._idx_path)
self._data_load = lambda: SwiftPackData(self.scon, self._data_path)
@property
def pack_info(self):
"""The pack data object being used."""
if self._pack_info is None:
self._pack_info = self._pack_info_load()
return self._pack_info
class SwiftObjectStore(PackBasedObjectStore):
"""A Swift Object Store
Allow to manage a bare Git repository from Openstack Swift.
This object store only supports pack files and not loose objects.
"""
def __init__(self, scon):
"""Open a Swift object store.
Args:
scon: A `SwiftConnector` instance
"""
super(SwiftObjectStore, self).__init__()
self.scon = scon
self.root = self.scon.root
self.pack_dir = posixpath.join(OBJECTDIR, PACKDIR)
self._alternates = None
def _update_pack_cache(self):
objects = self.scon.get_container_objects()
pack_files = [
o["name"].replace(".pack", "")
for o in objects
if o["name"].endswith(".pack")
]
ret = []
for basename in pack_files:
pack = SwiftPack(basename, scon=self.scon)
self._pack_cache[basename] = pack
ret.append(pack)
return ret
def _iter_loose_objects(self):
"""Loose objects are not supported by this repository"""
return []
def iter_shas(self, finder):
"""An iterator over pack's ObjectStore.
Returns: a `ObjectStoreIterator` or `GreenThreadsObjectStoreIterator`
instance if gevent is enabled
"""
shas = iter(finder.next, None)
return PackInfoObjectStoreIterator(self, shas, finder, self.scon.concurrency)
def find_missing_objects(self, *args, **kwargs):
kwargs["concurrency"] = self.scon.concurrency
return PackInfoMissingObjectFinder(self, *args, **kwargs)
def pack_info_get(self, sha):
for pack in self.packs:
if sha in pack:
return pack.pack_info[sha]
def _collect_ancestors(self, heads, common=set()):
def _find_parents(commit):
for pack in self.packs:
if commit in pack:
try:
parents = pack.pack_info[commit][1]
except KeyError:
# Seems to have no parents
return []
return parents
bases = set()
commits = set()
queue = []
queue.extend(heads)
while queue:
e = queue.pop(0)
if e in common:
bases.add(e)
elif e not in commits:
commits.add(e)
parents = _find_parents(e)
queue.extend(parents)
return (commits, bases)
def add_pack(self):
"""Add a new pack to this object store.
Returns: Fileobject to write to and a commit function to
call when the pack is finished.
"""
f = BytesIO()
def commit():
f.seek(0)
pack = PackData(file=f, filename="")
entries = pack.sorted_entries()
if len(entries):
basename = posixpath.join(
self.pack_dir,
"pack-%s" % iter_sha1(entry[0] for entry in entries),
)
index = BytesIO()
write_pack_index_v2(index, entries, pack.get_stored_checksum())
self.scon.put_object(basename + ".pack", f)
f.close()
self.scon.put_object(basename + ".idx", index)
index.close()
final_pack = SwiftPack(basename, scon=self.scon)
final_pack.check_length_and_checksum()
self._add_cached_pack(basename, final_pack)
return final_pack
else:
return None
def abort():
pass
return f, commit, abort
def add_object(self, obj):
self.add_objects(
[
(obj, None),
]
)
def _pack_cache_stale(self):
return False
def _get_loose_object(self, sha):
return None
def add_thin_pack(self, read_all, read_some):
"""Read a thin pack
Read it from a stream and complete it in a temporary file.
Then the pack and the corresponding index file are uploaded to Swift.
"""
fd, path = tempfile.mkstemp(prefix="tmp_pack_")
f = os.fdopen(fd, "w+b")
try:
indexer = PackIndexer(f, resolve_ext_ref=self.get_raw)
copier = PackStreamCopier(read_all, read_some, f, delta_iter=indexer)
copier.verify()
return self._complete_thin_pack(f, path, copier, indexer)
finally:
f.close()
os.unlink(path)
def _complete_thin_pack(self, f, path, copier, indexer):
entries = list(indexer)
# Update the header with the new number of objects.
f.seek(0)
write_pack_header(f, len(entries) + len(indexer.ext_refs()))
# Must flush before reading (http://bugs.python.org/issue3207)
f.flush()
# Rescan the rest of the pack, computing the SHA with the new header.
new_sha = compute_file_sha(f, end_ofs=-20)
# Must reposition before writing (http://bugs.python.org/issue3207)
f.seek(0, os.SEEK_CUR)
# Complete the pack.
for ext_sha in indexer.ext_refs():
assert len(ext_sha) == 20
type_num, data = self.get_raw(ext_sha)
offset = f.tell()
crc32 = write_pack_object(f, type_num, data, sha=new_sha)
entries.append((ext_sha, offset, crc32))
pack_sha = new_sha.digest()
f.write(pack_sha)
f.flush()
# Move the pack in.
entries.sort()
pack_base_name = posixpath.join(
self.pack_dir,
"pack-" + os.fsdecode(iter_sha1(e[0] for e in entries)),
)
self.scon.put_object(pack_base_name + ".pack", f)
# Write the index.
filename = pack_base_name + ".idx"
index_file = BytesIO()
write_pack_index_v2(index_file, entries, pack_sha)
self.scon.put_object(filename, index_file)
# Write pack info.
f.seek(0)
pack_data = PackData(filename="", file=f)
index_file.seek(0)
pack_index = load_pack_index_file("", index_file)
serialized_pack_info = pack_info_create(pack_data, pack_index)
f.close()
index_file.close()
pack_info_file = BytesIO(serialized_pack_info)
filename = pack_base_name + ".info"
self.scon.put_object(filename, pack_info_file)
pack_info_file.close()
# Add the pack to the store and return it.
final_pack = SwiftPack(pack_base_name, scon=self.scon)
final_pack.check_length_and_checksum()
self._add_cached_pack(pack_base_name, final_pack)
return final_pack
class SwiftInfoRefsContainer(InfoRefsContainer):
"""Manage references in info/refs object."""
def __init__(self, scon, store):
self.scon = scon
self.filename = "info/refs"
self.store = store
f = self.scon.get_object(self.filename)
if not f:
f = BytesIO(b"")
super(SwiftInfoRefsContainer, self).__init__(f)
def _load_check_ref(self, name, old_ref):
self._check_refname(name)
f = self.scon.get_object(self.filename)
if not f:
return {}
refs = read_info_refs(f)
if old_ref is not None:
if refs[name] != old_ref:
return False
return refs
def _write_refs(self, refs):
f = BytesIO()
f.writelines(write_info_refs(refs, self.store))
self.scon.put_object(self.filename, f)
def set_if_equals(self, name, old_ref, new_ref):
"""Set a refname to new_ref only if it currently equals old_ref."""
if name == "HEAD":
return True
refs = self._load_check_ref(name, old_ref)
if not isinstance(refs, dict):
return False
refs[name] = new_ref
self._write_refs(refs)
self._refs[name] = new_ref
return True
def remove_if_equals(self, name, old_ref):
"""Remove a refname only if it currently equals old_ref."""
if name == "HEAD":
return True
refs = self._load_check_ref(name, old_ref)
if not isinstance(refs, dict):
return False
del refs[name]
self._write_refs(refs)
del self._refs[name]
return True
def allkeys(self):
try:
self._refs["HEAD"] = self._refs["refs/heads/master"]
except KeyError:
pass
return self._refs.keys()
class SwiftRepo(BaseRepo):
def __init__(self, root, conf):
"""Init a Git bare Repository on top of a Swift container.
References are managed in info/refs objects by
`SwiftInfoRefsContainer`. The root attribute is the Swift
container that contain the Git bare repository.
Args:
root: The container which contains the bare repo
conf: A ConfigParser object
"""
self.root = root.lstrip("/")
self.conf = conf
self.scon = SwiftConnector(self.root, self.conf)
objects = self.scon.get_container_objects()
if not objects:
raise Exception("There is not any GIT repo here : %s" % self.root)
objects = [o["name"].split("/")[0] for o in objects]
if OBJECTDIR not in objects:
raise Exception("This repository (%s) is not bare." % self.root)
self.bare = True
self._controldir = self.root
object_store = SwiftObjectStore(self.scon)
refs = SwiftInfoRefsContainer(self.scon, object_store)
BaseRepo.__init__(self, object_store, refs)
def _determine_file_mode(self):
"""Probe the file-system to determine whether permissions can be trusted.
Returns: True if permissions can be trusted, False otherwise.
"""
return False
def _put_named_file(self, filename, contents):
"""Put an object in a Swift container
Args:
filename: the path to the object to put on Swift
contents: the content as bytestring
"""
with BytesIO() as f:
f.write(contents)
self.scon.put_object(filename, f)
@classmethod
def init_bare(cls, scon, conf):
"""Create a new bare repository.
Args:
scon: a `SwiftConnector` instance
conf: a ConfigParser object
Returns:
a `SwiftRepo` instance
"""
scon.create_root()
for obj in [
posixpath.join(OBJECTDIR, PACKDIR),
posixpath.join(INFODIR, "refs"),
]:
scon.put_object(obj, BytesIO(b""))
ret = cls(scon.root, conf)
ret._init_files(True)
return ret
class SwiftSystemBackend(Backend):
def __init__(self, logger, conf):
self.conf = conf
self.logger = logger
def open_repository(self, path):
self.logger.info("opening repository at %s", path)
return SwiftRepo(path, self.conf)
def cmd_daemon(args):
"""Entry point for starting a TCP git server."""
import optparse
parser = optparse.OptionParser()
parser.add_option(
"-l",
"--listen_address",
dest="listen_address",
default="127.0.0.1",
help="Binding IP address.",
)
parser.add_option(
"-p",
"--port",
dest="port",
type=int,
default=TCP_GIT_PORT,
help="Binding TCP port.",
)
parser.add_option(
"-c",
"--swift_config",
dest="swift_config",
default="",
help="Path to the configuration file for Swift backend.",
)
options, args = parser.parse_args(args)
try:
import gevent
import geventhttpclient # noqa: F401
except ImportError:
print(
"gevent and geventhttpclient libraries are mandatory "
" for use the Swift backend."
)
sys.exit(1)
import gevent.monkey
gevent.monkey.patch_socket()
from dulwich import log_utils
logger = log_utils.getLogger(__name__)
conf = load_conf(options.swift_config)
backend = SwiftSystemBackend(logger, conf)
log_utils.default_logging_config()
server = TCPGitServer(backend, options.listen_address, port=options.port)
server.serve_forever()
def cmd_init(args):
import optparse
parser = optparse.OptionParser()
parser.add_option(
"-c",
"--swift_config",
dest="swift_config",
default="",
help="Path to the configuration file for Swift backend.",
)
options, args = parser.parse_args(args)
conf = load_conf(options.swift_config)
if args == []:
parser.error("missing repository name")
repo = args[0]
scon = SwiftConnector(repo, conf)
SwiftRepo.init_bare(scon, conf)
def main(argv=sys.argv):
commands = {
"init": cmd_init,
"daemon": cmd_daemon,
}
if len(sys.argv) < 2:
print("Usage: %s <%s> [OPTIONS...]" % (sys.argv[0], "|".join(commands.keys())))
sys.exit(1)
cmd = sys.argv[1]
if cmd not in commands:
print("No such subcommand: %s" % cmd)
sys.exit(1)
commands[cmd](sys.argv[2:])
if __name__ == "__main__":
main()
|
py | b407ede7abdb8fede1cece5f2bc85215d6620f6c | from django.test import TestCase
from django.contrib.auth import get_user_model
from unittest import mock
from headway.models import Profile
from headway.tasks import get_harvest_id_for_user
class GetHarvestIdForUserTestCase(TestCase):
@classmethod
def setUpTestData(cls):
User = get_user_model()
cls.user = User.objects.create(email="[email protected]")
def test_user_not_exist(self):
task = get_harvest_id_for_user('7e546c57-91f3-4aa6-93d2-078f0e591517')
result = task()
self.assertIsNone(result)
def test_harvest_id_not_found(self):
with mock.patch('headway.tasks.get_user_by_email', return_value=None):
task = get_harvest_id_for_user(self.user.id)
result = task()
self.assertIsNone(result)
def test_harvest_id_found(self):
ret = {"id": 12345}
with mock.patch('headway.tasks.get_user_by_email', return_value=ret):
task = get_harvest_id_for_user(self.user.id)
result = task()
self.user.refresh_from_db()
self.assertIsNotNone(result)
self.assertEqual(result, ret["id"])
self.assertEqual(self.user.profile.harvest_id, str(ret["id"])) |
py | b407ee77daa2ded5a6cbf79813cdb29ec68bb721 | """
This script creates a test that fails when garage.tf.algos.REPS performance is
too low.
"""
import gym
import pytest
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import REPS
from garage.tf.envs import TfEnv
from garage.tf.experiment import LocalTFRunner
from garage.tf.policies import CategoricalMLPPolicy
from tests.fixtures import snapshot_config, TfGraphTestCase
class TestREPS(TfGraphTestCase):
@pytest.mark.large
def test_reps_cartpole(self):
"""Test REPS with gym Cartpole environment."""
with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
env = TfEnv(gym.make('CartPole-v0'))
policy = CategoricalMLPPolicy(env_spec=env.spec,
hidden_sizes=[32, 32])
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = REPS(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99)
runner.setup(algo, env)
last_avg_ret = runner.train(n_epochs=10, batch_size=4000)
assert last_avg_ret > 5
env.close()
|
py | b407eede15f62ae2f6aa8f191e1a13eb4c451d8f | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Stress(AutotoolsPackage):
"""stress is a deliberately simple workload generator for POSIX systems.
It imposes a configurable amount of CPU, memory, I/O, and disk stress on
the system. It is written in C, and is free software licensed under the
GPLv2."""
# Moved from original homepage
# homepage = "https://people.seas.harvard.edu/~apw/stress/"
homepage = "https://github.com/javiroman/system-stress"
url = "https://github.com/javiroman/system-stress/archive/v1.0.4.tar.gz"
version('1.0.4', sha256='b03dbb9664d7f8dcb3eadc918c2e8eb822f5a3ba47d9bd51246540bac281bd75')
|
py | b407ef1bdd692b56ecae95b7752e9b7f30965fd0 | class Result:
END_MARKER = 'end'
def __init__(self, result, action, modifiers):
if Result.END_MARKER in [result, action, modifiers]:
self.result = ''
self.action = ''
self.modifiers = ''
else:
self.result = result
self.action = action
self.modifiers = modifiers
|
py | b407efb8741c2a23b9b3dd85176c85862583d299 | # -*- coding: utf-8 -*- #
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The gcloud resources list command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.util import resource_search
from googlecloudsdk.calliope import base
class List(base.ListCommand):
r"""List Google Cloud resources accessible from your account.
*{command}* lists all indexed Google Cloud resources that you have access to.
Filter expressions apply to the specific resource type selected. Currently,
only a limited subset of Cloud resource types are supported.
## EXAMPLES
List all compute instances URIs with names starting with `test` or `prod`:
$ gcloud alpha resources list --uri \
--filter="@type:compute.instances name:(test prod)"
Print the number of resources with any part containing the substring `foobar`:
$ gcloud alpha resources list --filter="foobar" --uri | wc -l
The previous command uses `--uri` to count because each output line is the URI
for one resource. Otherwise the resource descriptions could be multiple lines
per resource.
"""
@staticmethod
def Args(parser):
base.FILTER_FLAG.RemoveFromParser(parser)
parser.add_argument(
'--filter',
help=('A filter expression that is rewritten into a '
'CloudResourceSearch query expression. It is applied to the '
'resource specific data in the search results.'
'\n\n'
'By default all indexed resources are listed. Use '
'`@type`:_COLLECTION_ to select resources for _COLLECTION_. It '
'is an error to specify a _COLLECTION_ not indexed by the API. '
'The supported collections are:\n * {collections}\n'
'Collections named `resources.`_RESOURCE-TYPE_ may be used for '
'debugging, where _RESOURCE-TYPE_ is defined by the '
'CloudResourceSearch API.'
'\n\n'
'See `$ gcloud topic filters` for filter expression details.'
.format(collections='\n * '.join(sorted(
resource_search.RESOURCE_TYPES.keys())))),
)
base.PAGE_SIZE_FLAG.SetDefault(parser, resource_search.PAGE_SIZE)
def Run(self, args):
query = args.filter
args.filter = None
return resource_search.List(limit=args.limit,
page_size=args.page_size,
query=query,
sort_by=args.sort_by,
uri=args.uri)
|
py | b407f0b5d40f4ad8fba4b7658942a861ccd64f3b | #!/usr/bin/env python3
from flask import *
import markdown
app = Flask(__name__)
things_i_like = open('things_i_like.list', 'r').readlines()
projects = open('projects.list', 'r').readlines()
@app.route('/')
def start_page():
return render_template('index.html', things_i_like=things_i_like, projects=projects)
@app.route('/<number>')
def hello_name(number):
return "{0} in binary is {1}".format(number, bin(number))
|
py | b407f399e8225fa1a941ca94122f836485b92a71 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetVirtualNetworkPeeringResult',
'AwaitableGetVirtualNetworkPeeringResult',
'get_virtual_network_peering',
]
@pulumi.output_type
class GetVirtualNetworkPeeringResult:
"""
Peerings in a virtual network resource.
"""
def __init__(__self__, allow_forwarded_traffic=None, allow_gateway_transit=None, allow_virtual_network_access=None, etag=None, name=None, peering_state=None, provisioning_state=None, remote_virtual_network=None, use_remote_gateways=None):
if allow_forwarded_traffic and not isinstance(allow_forwarded_traffic, bool):
raise TypeError("Expected argument 'allow_forwarded_traffic' to be a bool")
pulumi.set(__self__, "allow_forwarded_traffic", allow_forwarded_traffic)
if allow_gateway_transit and not isinstance(allow_gateway_transit, bool):
raise TypeError("Expected argument 'allow_gateway_transit' to be a bool")
pulumi.set(__self__, "allow_gateway_transit", allow_gateway_transit)
if allow_virtual_network_access and not isinstance(allow_virtual_network_access, bool):
raise TypeError("Expected argument 'allow_virtual_network_access' to be a bool")
pulumi.set(__self__, "allow_virtual_network_access", allow_virtual_network_access)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if peering_state and not isinstance(peering_state, str):
raise TypeError("Expected argument 'peering_state' to be a str")
pulumi.set(__self__, "peering_state", peering_state)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if remote_virtual_network and not isinstance(remote_virtual_network, dict):
raise TypeError("Expected argument 'remote_virtual_network' to be a dict")
pulumi.set(__self__, "remote_virtual_network", remote_virtual_network)
if use_remote_gateways and not isinstance(use_remote_gateways, bool):
raise TypeError("Expected argument 'use_remote_gateways' to be a bool")
pulumi.set(__self__, "use_remote_gateways", use_remote_gateways)
@property
@pulumi.getter(name="allowForwardedTraffic")
def allow_forwarded_traffic(self) -> Optional[bool]:
"""
Whether the forwarded traffic from the VMs in the remote virtual network will be allowed/disallowed.
"""
return pulumi.get(self, "allow_forwarded_traffic")
@property
@pulumi.getter(name="allowGatewayTransit")
def allow_gateway_transit(self) -> Optional[bool]:
"""
If gateway links can be used in remote virtual networking to link to this virtual network.
"""
return pulumi.get(self, "allow_gateway_transit")
@property
@pulumi.getter(name="allowVirtualNetworkAccess")
def allow_virtual_network_access(self) -> Optional[bool]:
"""
Whether the VMs in the linked virtual network space would be able to access all the VMs in local Virtual network space.
"""
return pulumi.get(self, "allow_virtual_network_access")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="peeringState")
def peering_state(self) -> Optional[str]:
"""
The status of the virtual network peering. Possible values are 'Initiated', 'Connected', and 'Disconnected'.
"""
return pulumi.get(self, "peering_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
The provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="remoteVirtualNetwork")
def remote_virtual_network(self) -> Optional['outputs.SubResourceResponse']:
"""
The reference of the remote virtual network.
"""
return pulumi.get(self, "remote_virtual_network")
@property
@pulumi.getter(name="useRemoteGateways")
def use_remote_gateways(self) -> Optional[bool]:
"""
If remote gateways can be used on this virtual network. If the flag is set to true, and allowGatewayTransit on remote peering is also true, virtual network will use gateways of remote virtual network for transit. Only one peering can have this flag set to true. This flag cannot be set if virtual network already has a gateway.
"""
return pulumi.get(self, "use_remote_gateways")
class AwaitableGetVirtualNetworkPeeringResult(GetVirtualNetworkPeeringResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVirtualNetworkPeeringResult(
allow_forwarded_traffic=self.allow_forwarded_traffic,
allow_gateway_transit=self.allow_gateway_transit,
allow_virtual_network_access=self.allow_virtual_network_access,
etag=self.etag,
name=self.name,
peering_state=self.peering_state,
provisioning_state=self.provisioning_state,
remote_virtual_network=self.remote_virtual_network,
use_remote_gateways=self.use_remote_gateways)
def get_virtual_network_peering(resource_group_name: Optional[str] = None,
virtual_network_name: Optional[str] = None,
virtual_network_peering_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkPeeringResult:
"""
Use this data source to access information about an existing resource.
:param str resource_group_name: The name of the resource group.
:param str virtual_network_name: The name of the virtual network.
:param str virtual_network_peering_name: The name of the virtual network peering.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['virtualNetworkName'] = virtual_network_name
__args__['virtualNetworkPeeringName'] = virtual_network_peering_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20161201:getVirtualNetworkPeering', __args__, opts=opts, typ=GetVirtualNetworkPeeringResult).value
return AwaitableGetVirtualNetworkPeeringResult(
allow_forwarded_traffic=__ret__.allow_forwarded_traffic,
allow_gateway_transit=__ret__.allow_gateway_transit,
allow_virtual_network_access=__ret__.allow_virtual_network_access,
etag=__ret__.etag,
name=__ret__.name,
peering_state=__ret__.peering_state,
provisioning_state=__ret__.provisioning_state,
remote_virtual_network=__ret__.remote_virtual_network,
use_remote_gateways=__ret__.use_remote_gateways)
|
py | b407f48e83585c5340f2cb7ba706a9baaa6df359 | import torch
import numpy
from criterion import disagree_regularization
# sample_prob = torch.FloatTensor(6).uniform_(0, 1)
# sample_mask = sample_prob < 0.2
# a = sample_mask.nonzero()
# print(sample_mask)
# b = numpy.array([[[5,6,4,1],
# [7,8,3,2],
# [1,2,1,3]]])
# c = numpy.array([[[5],[6],[7]]])
# x = torch.tensor([[3,3],[3,3]])
# y = b*c #x.dot(x)
# z = torch.mul(b,c) #x.mul(x)
# print(y)
# print(z)
# e = c[-1,:]
# d = b*c[-1,:, None]
# print(d)
# all_masks = numpy.array([[0,1,
# 0]])
# (finished,) = numpy.where(all_masks[-1] == 0)
# print(finished)
# m = set()
# m.add(torch.tensor([1,4,3]))
# m.add(torch.tensor([2,4,3]))
# m.add(torch.tensor([1,5,3]))
# a = [1,4,3]
# a = torch.tensor(a)
# if a in m:
# print("yes")
x = torch.FloatTensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
print(disagree_regularization(x))
|
py | b407f49572ddd42d41fd326ee916ee85481c212b | a,b=int(input()),int(input())
print(a+b,a-b,a*b,sep="\n")
|
py | b407f4bddfa1d5af492fbd8f6ba574f37808a4d0 | import logging
def get_module_logger(mod_name):
logger = logging.getLogger(mod_name)
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s [%(name)-12s] %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
|
py | b407f4df6fa2eaf41dedd9ca4c1e5cec97c71fb4 | #!/usr/bin/python
# Copyright (c) 2007-2008 Mozilla Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import httplib
import os
import sys
import re
import urlparse
import string
import gzip
import StringIO
extPat = re.compile(r'^.*\.([A-Za-z]+)$')
extDict = {
"html" : "text/html",
"htm" : "text/html",
"xhtml" : "application/xhtml+xml",
"xht" : "application/xhtml+xml",
"xml" : "application/xml",
}
argv = sys.argv[1:]
forceXml = 0
forceHtml = 0
gnu = 0
errorsOnly = 0
encoding = None
fileName = None
contentType = None
inputHandle = None
service = 'http://html5.validator.nu/'
for arg in argv:
if '--help' == arg:
print '-h : force text/html'
print '-x : force application/xhtml+xml'
print '-g : GNU output'
print '-e : errors only (no info or warnings)'
print '--encoding=foo : declare encoding foo'
print '--service=url : the address of the HTML5 validator'
print 'One file argument allowed. Leave out to read from stdin.'
sys.exit(0)
elif arg.startswith("--encoding="):
encoding = arg[11:]
elif arg.startswith("--service="):
service = arg[10:]
elif arg.startswith("--"):
sys.stderr.write('Unknown argument %s.\n' % arg)
sys.exit(2)
elif arg.startswith("-"):
for c in arg[1:]:
if 'x' == c:
forceXml = 1
elif 'h' == c:
forceHtml = 1
elif 'g' == c:
gnu = 1
elif 'e' == c:
errorsOnly = 1
else:
sys.stderr.write('Unknown argument %s.\n' % arg)
sys.exit(3)
else:
if fileName:
sys.stderr.write('Cannot have more than one input file.\n')
sys.exit(1)
fileName = arg
if forceXml and forceHtml:
sys.stderr.write('Cannot force HTML and XHTML at the same time.\n')
sys.exit(2)
if forceXml:
contentType = 'application/xhtml+xml'
elif forceHtml:
contentType = 'text/html'
elif fileName:
m = extPat.match(fileName)
if m:
ext = m.group(1)
ext = ext.translate(string.maketrans(string.ascii_uppercase, string.ascii_lowercase))
if extDict.has_key(ext):
contentType = extDict[ext]
else:
sys.stderr.write('Unable to guess Content-Type from file name. Please force the type.\n')
sys.exit(3)
else:
sys.stderr.write('Could not extract a filename extension. Please force the type.\n')
sys.exit(6)
else:
sys.stderr.write('Need to force HTML or XHTML when reading from stdin.\n')
sys.exit(4)
if encoding:
contentType = '%s; charset=%s' % (contentType, encoding)
if fileName:
inputHandle = open(fileName, "rb")
else:
inputHandle = sys.stdin
data = inputHandle.read()
buf = StringIO.StringIO()
gzipper = gzip.GzipFile(fileobj=buf, mode='wb')
gzipper.write(data)
gzipper.close()
gzippeddata = buf.getvalue()
buf.close()
connection = None
response = None
status = 302
redirectCount = 0
url = service
if gnu:
url = url + '?out=gnu'
else:
url = url + '?out=text'
if errorsOnly:
url = url + '&level=error'
while (status == 302 or status == 301 or status == 307) and redirectCount < 10:
if redirectCount > 0:
url = response.getheader('Location')
parsed = urlparse.urlsplit(url)
if parsed[0] != 'http':
sys.stderr.write('URI scheme %s not supported.\n' % parsed[0])
sys.exit(7)
if redirectCount > 0:
connection.close() # previous connection
print 'Redirecting to %s' % url
print 'Please press enter to continue or type "stop" followed by enter to stop.'
if raw_input() != "":
sys.exit(0)
connection = httplib.HTTPConnection(parsed[1])
connection.connect()
connection.putrequest("POST", "%s?%s" % (parsed[2], parsed[3]), skip_accept_encoding=1)
connection.putheader("User-Agent", 'html5check.py/2008-02-12')
connection.putheader("Accept-Encoding", 'gzip')
connection.putheader("Content-Type", contentType)
connection.putheader("Content-Encoding", 'gzip')
connection.putheader("Content-Length", len(gzippeddata))
connection.endheaders()
connection.send(gzippeddata)
response = connection.getresponse()
status = response.status
redirectCount += 1
if status != 200:
sys.stderr.write('%s %s\n' % (status, response.reason))
sys.exit(5)
if response.getheader('Content-Encoding', 'identity').lower() == 'gzip':
response = gzip.GzipFile(fileobj=StringIO.StringIO(response.read()))
if fileName and gnu:
quotedName = '"%s"' % fileName.replace('"', '\\042')
for line in response:
sys.stdout.write(quotedName)
sys.stdout.write(line)
else:
sys.stdout.write(response.read())
connection.close()
|
py | b407f537dfc280d636fc4ae394023094c2f1f63a | class conta: #Classe conta corrente
def __init__(self,numero,saldo):#Geradora
self.numero = numero
self.saldo =0
def saque(self,saldo): #Modulo para realizar saques
saldo = saldo
a = float(input('Insira o valor a ser sacado: '))
if a>saldo:
ValueError
print('Impossivel realizar esse saque, tente novamente.')
return saldo
else:
u = saldo - a
print('O valor sacado foi de: ',a,'Em sua conta restam: ',u)
return u
def deposito(self,saldo):#Modulo que realiza deposito
saldo = saldo
a = float(input('Insira o valor a ser depositado: '))
saldo = saldo +a
print(saldo)
return saldo
a = conta(numero='3213123', saldo=0)  # account used for testing
def menu():  # ATM menu
print('''
1 Sacar
2 Depositar
3 encerrar
4 mostrar saldo
''')
w = str(input('Insira sua opção: '))
b = a.saldo
while w != '3':
if w =='1':
b = a.saque(b)
if w =='2':
b = a.deposito(b)
print('Obrigado pelo deposito!')
if w =='4':
print(b)
w = str(input('Insira sua opção: '))
print('Obrigado por usar nossos serviços')
def main():#função onde o programa roda
menu()
main()
|
py | b407f6bb32e8fc9c689efb59bc89530c36fd4e3d | import os
import numpy as np
import multiprocessing as mp
import uuid
from datetime import datetime
import src.envs.lunar_lander.utils as utils
from src.teacher.lunar_lander.teacher_env import create_single_switch_env, \
create_teacher_env, evaluate_single_switch_policy, SingleSwitchPolicy
from src.envs.lunar_lander.interventions import LanderOrthogonalIntervention
from src.teacher.lunar_lander.analysis import plot_results, get_data_experiment_type
def evaluate_single_student(policy, base_dir=None, video=False,
teacher_env_kwargs={}):
if base_dir is None:
base_dir = os.path.join(os.path.abspath('.'), 'logs')
exp_id = datetime.now().strftime('%d_%m_%y__%H_%M_%S') + str(uuid.uuid4())
if teacher_env_kwargs['original']:
name = 'original'
else:
name = policy.name
logdir = os.path.join(base_dir, name, exp_id)
os.makedirs(logdir, exist_ok=True)
teacher_env, student_final_env_f = create_teacher_env(**teacher_env_kwargs)
r, succ, crash, oom, to, tog, actions, failures = \
evaluate_single_switch_policy(policy,
teacher_env,
student_final_env_f(),
timesteps=int(1e5))
np.savez(os.path.join(logdir, 'results.npz'), r=r, succ=succ,
crash=crash, oom=oom, to=to, tog=tog, actions=actions,
failures=failures)
if video:
env = utils.MonitorVideoIntervention(
LanderOrthogonalIntervention(None, None, timeout=500),
dirname=logdir, skipframe=10)
obs = env.reset()
for i in range(2000):
a, _ = teacher_env.student.predict(obs)
obs, r, g, done, info = env.step(a)
if done:
obs = env.reset()
env.close()
def evaluate_parallel(policy_list, base_dir=None, teacher_env_kwargs={}):
processes = []
if base_dir is None:
base_dir = os.path.join(os.path.abspath('.'), 'logs')
for pi in policy_list:
p = mp.Process(target=evaluate_single_student,
args=[pi, base_dir],
kwargs={'teacher_env_kwargs': teacher_env_kwargs})
p.start()
processes.append(p)
for p in processes:
p.join()
# Need to load all the data and get the mean reward to pass back
name = policy_list[0].name
r, succ, crash, oom, to, tog, actions, failures = get_data_experiment_type(
base_dir, name, return_mean=True)
return np.mean(r)
|
py | b407f96457edacb450b16cf7cd0d7bed466e7592 | import os
import torch
import torch.nn as nn
from torch.utils.data.dataset import Dataset
from PIL import Image
import fnmatch
import cv2
import sys
import numpy as np
#torch.set_printoptions(precision=10)
class _bn_relu_conv(nn.Module):
def __init__(self, in_filters, nb_filters, fw, fh, subsample=1):
super(_bn_relu_conv, self).__init__()
self.model = nn.Sequential(
nn.BatchNorm2d(in_filters, eps=1e-3),
nn.LeakyReLU(0.2),
nn.Conv2d(in_filters, nb_filters, (fw, fh), stride=subsample, padding=(fw//2, fh//2), padding_mode='zeros')
)
def forward(self, x):
return self.model(x)
# the following are for debugs
print("****", np.max(x.cpu().numpy()), np.min(x.cpu().numpy()), np.mean(x.cpu().numpy()), np.std(x.cpu().numpy()), x.shape)
for i,layer in enumerate(self.model):
if i != 2:
x = layer(x)
else:
x = layer(x)
#x = nn.functional.pad(x, (1, 1, 1, 1), mode='constant', value=0)
print("____", np.max(x.cpu().numpy()), np.min(x.cpu().numpy()), np.mean(x.cpu().numpy()), np.std(x.cpu().numpy()), x.shape)
print(x[0])
return x
class _u_bn_relu_conv(nn.Module):
def __init__(self, in_filters, nb_filters, fw, fh, subsample=1):
super(_u_bn_relu_conv, self).__init__()
self.model = nn.Sequential(
nn.BatchNorm2d(in_filters, eps=1e-3),
nn.LeakyReLU(0.2),
nn.Conv2d(in_filters, nb_filters, (fw, fh), stride=subsample, padding=(fw//2, fh//2)),
nn.Upsample(scale_factor=2, mode='nearest')
)
def forward(self, x):
return self.model(x)
class _shortcut(nn.Module):
def __init__(self, in_filters, nb_filters, subsample=1):
super(_shortcut, self).__init__()
self.process = False
self.model = None
if in_filters != nb_filters or subsample != 1:
self.process = True
self.model = nn.Sequential(
nn.Conv2d(in_filters, nb_filters, (1, 1), stride=subsample)
)
def forward(self, x, y):
#print(x.size(), y.size(), self.process)
if self.process:
y0 = self.model(x)
#print("merge+", torch.max(y0+y), torch.min(y0+y),torch.mean(y0+y), torch.std(y0+y), y0.shape)
return y0 + y
else:
#print("merge", torch.max(x+y), torch.min(x+y),torch.mean(x+y), torch.std(x+y), y.shape)
return x + y
class _u_shortcut(nn.Module):
def __init__(self, in_filters, nb_filters, subsample):
super(_u_shortcut, self).__init__()
self.process = False
self.model = None
if in_filters != nb_filters:
self.process = True
self.model = nn.Sequential(
nn.Conv2d(in_filters, nb_filters, (1, 1), stride=subsample, padding_mode='zeros'),
nn.Upsample(scale_factor=2, mode='nearest')
)
def forward(self, x, y):
if self.process:
return self.model(x) + y
else:
return x + y
class basic_block(nn.Module):
def __init__(self, in_filters, nb_filters, init_subsample=1):
super(basic_block, self).__init__()
self.conv1 = _bn_relu_conv(in_filters, nb_filters, 3, 3, subsample=init_subsample)
self.residual = _bn_relu_conv(nb_filters, nb_filters, 3, 3)
self.shortcut = _shortcut(in_filters, nb_filters, subsample=init_subsample)
def forward(self, x):
x1 = self.conv1(x)
x2 = self.residual(x1)
return self.shortcut(x, x2)
class _u_basic_block(nn.Module):
def __init__(self, in_filters, nb_filters, init_subsample=1):
super(_u_basic_block, self).__init__()
self.conv1 = _u_bn_relu_conv(in_filters, nb_filters, 3, 3, subsample=init_subsample)
self.residual = _bn_relu_conv(nb_filters, nb_filters, 3, 3)
self.shortcut = _u_shortcut(in_filters, nb_filters, subsample=init_subsample)
def forward(self, x):
y = self.residual(self.conv1(x))
return self.shortcut(x, y)
class _residual_block(nn.Module):
def __init__(self, in_filters, nb_filters, repetitions, is_first_layer=False):
super(_residual_block, self).__init__()
layers = []
for i in range(repetitions):
init_subsample = 1
if i == repetitions - 1 and not is_first_layer:
init_subsample = 2
if i == 0:
l = basic_block(in_filters=in_filters, nb_filters=nb_filters, init_subsample=init_subsample)
else:
l = basic_block(in_filters=nb_filters, nb_filters=nb_filters, init_subsample=init_subsample)
layers.append(l)
self.model = nn.Sequential(*layers)
def forward(self, x):
return self.model(x)
class _upsampling_residual_block(nn.Module):
def __init__(self, in_filters, nb_filters, repetitions):
super(_upsampling_residual_block, self).__init__()
layers = []
for i in range(repetitions):
l = None
if i == 0:
l = _u_basic_block(in_filters=in_filters, nb_filters=nb_filters)#(input)
else:
l = basic_block(in_filters=nb_filters, nb_filters=nb_filters)#(input)
layers.append(l)
self.model = nn.Sequential(*layers)
def forward(self, x):
return self.model(x)
class res_skip(nn.Module):
def __init__(self):
super(res_skip, self).__init__()
self.block0 = _residual_block(in_filters=1, nb_filters=24, repetitions=2, is_first_layer=True)#(input)
self.block1 = _residual_block(in_filters=24, nb_filters=48, repetitions=3)#(block0)
self.block2 = _residual_block(in_filters=48, nb_filters=96, repetitions=5)#(block1)
self.block3 = _residual_block(in_filters=96, nb_filters=192, repetitions=7)#(block2)
self.block4 = _residual_block(in_filters=192, nb_filters=384, repetitions=12)#(block3)
self.block5 = _upsampling_residual_block(in_filters=384, nb_filters=192, repetitions=7)#(block4)
self.res1 = _shortcut(in_filters=192, nb_filters=192)#(block3, block5, subsample=(1,1))
self.block6 = _upsampling_residual_block(in_filters=192, nb_filters=96, repetitions=5)#(res1)
self.res2 = _shortcut(in_filters=96, nb_filters=96)#(block2, block6, subsample=(1,1))
self.block7 = _upsampling_residual_block(in_filters=96, nb_filters=48, repetitions=3)#(res2)
self.res3 = _shortcut(in_filters=48, nb_filters=48)#(block1, block7, subsample=(1,1))
self.block8 = _upsampling_residual_block(in_filters=48, nb_filters=24, repetitions=2)#(res3)
self.res4 = _shortcut(in_filters=24, nb_filters=24)#(block0,block8, subsample=(1,1))
self.block9 = _residual_block(in_filters=24, nb_filters=16, repetitions=2, is_first_layer=True)#(res4)
self.conv15 = _bn_relu_conv(in_filters=16, nb_filters=1, fh=1, fw=1, subsample=1)#(block7)
def forward(self, x):
x0 = self.block0(x)
x1 = self.block1(x0)
x2 = self.block2(x1)
x3 = self.block3(x2)
x4 = self.block4(x3)
x5 = self.block5(x4)
res1 = self.res1(x3, x5)
x6 = self.block6(res1)
res2 = self.res2(x2, x6)
x7 = self.block7(res2)
res3 = self.res3(x1, x7)
x8 = self.block8(res3)
res4 = self.res4(x0, x8)
x9 = self.block9(res4)
y = self.conv15(x9)
return y
class MyDataset(Dataset):
def __init__(self, image_paths, transform=None):
self.image_paths = image_paths
self.transform = transform
def get_class_label(self, image_name):
# your method here
head, tail = os.path.split(image_name)
#print(tail)
return tail
def __getitem__(self, index):
image_path = self.image_paths[index]
x = Image.open(image_path)
y = self.get_class_label(image_path.split('/')[-1])
if self.transform is not None:
x = self.transform(x)
return x, y
def __len__(self):
return len(self.image_paths)
def loadImages(folder):
imgs = []
matches = []
for root, dirnames, filenames in os.walk(folder):
for filename in fnmatch.filter(filenames, '*'):
matches.append(os.path.join(root, filename))
return matches
if __name__ == "__main__":
model = res_skip()
model.load_state_dict(torch.load('erika.pth'))
is_cuda = torch.cuda.is_available()
if is_cuda:
model.cuda()
else:
model.cpu()
model.eval()
filelists = loadImages(sys.argv[1])
with torch.no_grad():
for imname in filelists:
src = cv2.imread(imname,cv2.IMREAD_GRAYSCALE)
rows = int(np.ceil(src.shape[0]/16))*16
cols = int(np.ceil(src.shape[1]/16))*16
# manually construct a batch. You can change it based on your usecases.
patch = np.ones((1,1,rows,cols),dtype="float32")
patch[0,0,0:src.shape[0],0:src.shape[1]] = src
if is_cuda:
tensor = torch.from_numpy(patch).cuda()
else:
tensor = torch.from_numpy(patch).cpu()
y = model(tensor)
print(imname, torch.max(y), torch.min(y))
yc = y.cpu().numpy()[0,0,:,:]
yc[yc>255] = 255
yc[yc<0] = 0
head, tail = os.path.split(imname)
cv2.imwrite(sys.argv[2]+"/"+tail.replace(".jpg",".png"),yc[0:src.shape[0],0:src.shape[1]])
|
py | b407f96d2391f8b07c4074ae1b287f5729e98b9e | ## @package muji
# Module caffe2.python.muji
"""muji.py does multi-gpu training for caffe2 with no need to change the c++
side code. Everything is defined on the computation graph level.
We support the following use cases:
- 2 gpus, where peer access is enabled between them.
- 4 gpus, where peer access are enabled between all of them.
- 4 gpus, where peer access are enabled in two groups,
between {1, 2} and {3, 4}
- 8 gpus, where peer access are enabled in two groups,
between {1, 2, 3, 4} and {5, 6, 7, 8}.
If above cases are not satisfied, a fallback function which does not rely on
peer access will be called.
"""
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import workspace
def OnGPU(gpu_id):
"""A utility function that returns a device option protobuf of the
specified gpu id.
"""
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.CUDA
device_option.cuda_gpu_id = gpu_id
return device_option
def OnCPU():
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.CPU
return device_option
def Allreduce(net, blobs, reduced_affix="_reduced", gpu_indices=None):
"""The general Allreduce interface that reroutes the function calls.
CPUs and AMD GPUs are not supported because
GetCudaPeerAccessPattern is called to get gpu peer access pattern.
"""
if gpu_indices is None:
gpu_indices = list(range(len(blobs)))
if len(gpu_indices) != len(blobs):
raise RuntimeError(
"gpu_indices length and blobs length mismatch: %d vs %d" %
(len(gpu_indices), len(blobs))
)
pattern = workspace.GetCudaPeerAccessPattern()
if len(blobs) == 2 and pattern.shape[0] >= 2 and np.all(pattern[:2, :2]):
return Allreduce2(net, blobs, reduced_affix, gpu_indices)
elif len(blobs) == 4 and pattern.shape[0] >= 4 and np.all(pattern[:4, :4]):
return Allreduce4(net, blobs, reduced_affix, gpu_indices)
elif len(blobs) == 4 and pattern.shape[0] >= 4 and np.all(pattern[:2, :2]) and np.all(pattern[2:4, 2:4]):
return Allreduce4Group2(net, blobs, reduced_affix, gpu_indices)
elif len(blobs) == 8 and pattern.shape[0] >= 8 and np.all(pattern[:8, :8]):
return Allreduce8(net, blobs, reduced_affix, gpu_indices)
else:
return AllreduceFallback(net, blobs, reduced_affix, gpu_indices)
def Allreduce2(net, blobs, reduced_affix, gpu_indices):
"""Allreduce for 2 gpus.
Algorithm: 0r <- 0 + 1, 1r <- 0r, where r means "reduced"
"""
a, b = blobs
gpu_a, gpu_b = gpu_indices
a_reduced = net.Add([a, b], a + reduced_affix, device_option=OnGPU(gpu_a))
b_reduced = a_reduced.Copy(
[],
b + reduced_affix,
device_option=OnGPU(gpu_b)
)
return a_reduced, b_reduced
def Allreduce4(net, blobs, reduced_affix, gpu_indices):
"""Allreduce for 4 gpus.
Algorithm: 2 level reduction.
0r <- 0 + 1, 2r <- 2 + 3
0r <- 0r + 2r
2r <- 0r,
1r <- 0r, 3r <- 2r
"""
a, b, c, d = blobs
gpu_a, gpu_b, gpu_c, gpu_d = gpu_indices
# a_reduced <- a+b, c_reduced <- c + d
a_reduced = net.Add(
[a, b],
str(a) + reduced_affix,
device_option=OnGPU(gpu_a)
)
c_reduced = net.Add(
[c, d],
str(c) + reduced_affix,
device_option=OnGPU(gpu_c)
)
# a_reduced <- a_reduced + c_reduced
a_reduced = a_reduced.Add(c_reduced, a_reduced, device_option=OnGPU(gpu_a))
# broadcast a_reduced to c_reduced
c_reduced = a_reduced.Copy([], c_reduced, device_option=OnGPU(gpu_c))
# broadcast to b and d
b_reduced = a_reduced.Copy(
[],
str(b) + reduced_affix,
device_option=OnGPU(gpu_b)
)
d_reduced = c_reduced.Copy(
[],
str(d) + reduced_affix,
device_option=OnGPU(gpu_d)
)
return a_reduced, b_reduced, c_reduced, d_reduced
def Allreduce4Group2(net, blobs, reduced_affix, gpu_indices):
"""Allreduce for 4 gpus where peer access are enabled in {0,1} and {2,3}
Algorithm: 2 level reduction.
0r <- 0 + 1, 2r <- 2 + 3
0r <- 0r + 2r
2r <- 0r,
1r <- 0r, 3r <- 2r
"""
a, b, c, d = blobs
gpu_a, gpu_b, gpu_c, gpu_d = gpu_indices
# a_reduced <- a+b, c_reduced <- c + d
a_reduced = net.Add(
[a, b],
str(a) + reduced_affix,
device_option=OnGPU(gpu_a)
)
c_reduced = net.Add(
[c, d],
str(c) + reduced_affix,
device_option=OnGPU(gpu_c)
)
# copy from c_reduce(gpu_c) to c_reduce_copy(gpu_a)
c_reduced_copy = c_reduced.Copy(
[],
str(c_reduced) + '_copy',
device_option=OnGPU(gpu_a)
)
# a_reduced <- a_reduced + c_reduced_copy
a_reduced = a_reduced.Add(c_reduced_copy, a_reduced, device_option=OnGPU(gpu_a))
# broadcast a_reduced to c_reduced
c_reduced = a_reduced.Copy([], c_reduced, device_option=OnGPU(gpu_c))
# broadcast to b and d
b_reduced = a_reduced.Copy(
[],
str(b) + reduced_affix,
device_option=OnGPU(gpu_b)
)
d_reduced = c_reduced.Copy(
[],
str(d) + reduced_affix,
device_option=OnGPU(gpu_d)
)
return a_reduced, b_reduced, c_reduced, d_reduced
def Allreduce8(net, blobs, reduced_affix, gpu_indices):
"""Allreduce for 8 gpus.
Algorithm: 3 level reduction.
0r <- 0 + 1, 2r <- 2 + 3, 4r <- 4 + 5, 6r <- 6 + 7
0r <- 0r + 2r, 4r <- 4r + 6r
0r <- 0r + 4r
4r <- 0r
2r <- 0r, 6r <- 4r
1r <- 0r, 3r <- 2r, 5r <- 4r, 7r <- 6r
"""
reduced = [None] * 8
# Reduction level 1
for i in [0, 2, 4, 6]:
reduced[i] = net.Add(
[blobs[i], blobs[i + 1]],
blobs[i] + reduced_affix,
device_option=OnGPU(gpu_indices[i])
)
# Reduction level 2
for i in [0, 4]:
reduced[i] = net.Add(
[reduced[i], reduced[i + 2]],
str(blobs[i]) + reduced_affix,
device_option=OnGPU(gpu_indices[i])
)
# Reduction level 3: this involves a copy.
reduced_4_copy = reduced[4].Copy(
[],
str(reduced[4]) + '_copy',
device_option=OnGPU(gpu_indices[0])
)
reduced[0] = reduced[0].Add(
reduced_4_copy,
reduced[0],
device_option=OnGPU(gpu_indices[0])
)
# Broadcast level 1
reduced[4] = reduced[0].Copy(
[],
reduced[4],
device_option=OnGPU(gpu_indices[4])
)
# Broadcast level 2
for i in [2, 6]:
reduced[i] = reduced[i - 2].Copy(
[],
reduced[i],
device_option=OnGPU(gpu_indices[i])
)
# Broadcast level 3
for i in [1, 3, 5, 7]:
reduced[i] = reduced[i - 1].Copy(
[],
blobs[i] + reduced_affix,
device_option=OnGPU(gpu_indices[i])
)
return reduced
def AllreduceFallback(net, blobs, reduced_affix, gpu_indices):
"""A fallback option for Allreduce with no assumption on p2p.
Algorithm: a flat operation on gpu 0
0r <- 0
0r <- 0r + i for i in gpu_indices[1:]
ir <- 0r for i in gpu_indices[1:]
"""
reduced = [None] * len(gpu_indices)
if reduced_affix != '':
# copy first
reduced[0] = net.Copy(
blobs[0],
blobs[0] + reduced_affix,
device_option=OnGPU(gpu_indices[0])
)
else:
reduced[0] = blobs[0]
# do temp copy and add
temp_name = reduced[0] + '_temp_copy'
for i in range(1, len(gpu_indices)):
temp = net.Copy(
blobs[i],
temp_name,
device_option=OnGPU(gpu_indices[0])
)
reduced[0] = net.Add(
[temp, reduced[0]],
reduced[0],
device_option=OnGPU(gpu_indices[0])
)
# Broadcast to everyone else
for i in range(1, len(gpu_indices)):
reduced[i] = net.Copy(
reduced[0],
blobs[i] + reduced_affix,
device_option=OnGPU(gpu_indices[i])
)
return reduced
|
py | b407f971125e15429df9fe5495b16a574b40dc54 | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn import model_selection
print(check_output(["ls", "../input"]).decode("utf8"))
from sklearn.linear_model import LinearRegression
# Any results you write to the current directory are saved as output.
# **Loading dataset into pandas dataframe**
# In[ ]:
shanghai = pd.read_csv('../input/shanghaiData.csv')
# Checking for datatypes of columns
# In[ ]:
print(shanghai.dtypes)
# **Splitting dataset into Training data and Test data**
# In[ ]:
X = shanghai[['alumni','award','hici','ns','pub','pcp']]
y = shanghai.total_score
X = X.fillna(method='ffill')
y = y.fillna(method='ffill')
X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.25)
# * **Applying Linear Regression and fitting data on a straight line.**
# * Predicting the test data.
# In[ ]:
reg = LinearRegression()
reg.fit(X_train,y_train)
y_pred = reg.predict(X_test)
# **Printing the score and other variables on the straight line**
# In[ ]:
print('Scoring =',reg.score(X_train,y_train))
print('Coefficient :',reg.coef_)
print('Intercept :',reg.intercept_)
# **Printing scores of metrics**
# In[ ]:
print('Root Mean Square Error =',np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('Mean Absolute Error =',metrics.mean_absolute_error(y_test, y_pred))
print('Mean Square Error =',metrics.mean_squared_error(y_test, y_pred))
print('R^2 =',metrics.r2_score(y_test, y_pred))
# **Printing scores of metrics using K fold cross validation**
# In[ ]:
kfold = model_selection.KFold(n_splits=10, random_state=7)
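# Note (added): scikit-learn >= 0.24 rejects random_state when shuffle is left
# at its default of False; a hedged equivalent there would be
#     kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=7)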
scoring = 'neg_mean_absolute_error'
results = cross_val_score(reg, X, y, cv=kfold, scoring=scoring)
print("Mean Absolute Error: %.3f (%.3f)" % (results.mean(), results.std()))
# In[ ]:
scoring = 'neg_mean_squared_error'
results = model_selection.cross_val_score(reg, X, y, cv=kfold, scoring=scoring)
print("Mean Squared Error: %.3f (%.3f)" % (results.mean(), results.std()))
# In[ ]:
scoring = 'r2'
results = model_selection.cross_val_score(reg, X, y, cv=kfold, scoring=scoring)
print("R^2: %.3f (%.3f)" % (results.mean(), results.std()))
|
py | b407f98eda91f8ff13054f3ad195ab12dc89b2f8 | import numpy as np
import random
import warnings
import os
import time
from glob import glob
from skimage import color, io
from PIL import Image
import cv2
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
import torch
from torchvision.transforms import ColorJitter, functional, Compose
import torch.nn.functional as F
def get_middlebury_images():
root = "datasets/Middlebury/MiddEval3"
with open(os.path.join(root, "official_train.txt"), 'r') as f:
lines = f.read().splitlines()
return sorted([os.path.join(root, 'trainingQ', f'{name}/im0.png') for name in lines])
def get_eth3d_images():
return sorted(glob('datasets/ETH3D/two_view_training/*/im0.png'))
def get_kitti_images():
return sorted(glob('datasets/KITTI/training/image_2/*_10.png'))
def transfer_color(image, style_mean, style_stddev):
reference_image_lab = color.rgb2lab(image)
reference_stddev = np.std(reference_image_lab, axis=(0,1), keepdims=True)# + 1
reference_mean = np.mean(reference_image_lab, axis=(0,1), keepdims=True)
reference_image_lab = reference_image_lab - reference_mean
lamb = style_stddev/reference_stddev
style_image_lab = lamb * reference_image_lab
output_image_lab = style_image_lab + style_mean
l, a, b = np.split(output_image_lab, 3, axis=2)
l = l.clip(0, 100)
output_image_lab = np.concatenate((l,a,b), axis=2)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
output_image_rgb = color.lab2rgb(output_image_lab) * 255
return output_image_rgb
class AdjustGamma(object):
def __init__(self, gamma_min, gamma_max, gain_min=1.0, gain_max=1.0):
self.gamma_min, self.gamma_max, self.gain_min, self.gain_max = gamma_min, gamma_max, gain_min, gain_max
def __call__(self, sample):
gain = random.uniform(self.gain_min, self.gain_max)
gamma = random.uniform(self.gamma_min, self.gamma_max)
return functional.adjust_gamma(sample, gamma, gain)
def __repr__(self):
return f"Adjust Gamma {self.gamma_min}, ({self.gamma_max}) and Gain ({self.gain_min}, {self.gain_max})"
class FlowAugmentor:
def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5, do_flip=True, yjitter=False, saturation_range=[0.6,1.4], gamma=[1,1,1,1]):
# spatial augmentation params
self.crop_size = crop_size
self.min_scale = min_scale
self.max_scale = max_scale
self.spatial_aug_prob = 1.0
self.stretch_prob = 0.8
self.max_stretch = 0.2
# flip augmentation params
self.yjitter = yjitter
self.do_flip = do_flip
self.h_flip_prob = 0.5
self.v_flip_prob = 0.1
# photometric augmentation params
self.photo_aug = Compose([ColorJitter(brightness=0.4, contrast=0.4, saturation=saturation_range, hue=0.5/3.14), AdjustGamma(*gamma)])
self.asymmetric_color_aug_prob = 0.2
self.eraser_aug_prob = 0.5
def color_transform(self, img1, img2):
""" Photometric augmentation """
# asymmetric
if np.random.rand() < self.asymmetric_color_aug_prob:
img1 = np.array(self.photo_aug(Image.fromarray(img1)), dtype=np.uint8)
img2 = np.array(self.photo_aug(Image.fromarray(img2)), dtype=np.uint8)
# symmetric
else:
image_stack = np.concatenate([img1, img2], axis=0)
image_stack = np.array(self.photo_aug(Image.fromarray(image_stack)), dtype=np.uint8)
img1, img2 = np.split(image_stack, 2, axis=0)
return img1, img2
def eraser_transform(self, img1, img2, bounds=[50, 100]):
""" Occlusion augmentation """
ht, wd = img1.shape[:2]
if np.random.rand() < self.eraser_aug_prob:
mean_color = np.mean(img2.reshape(-1, 3), axis=0)
for _ in range(np.random.randint(1, 3)):
x0 = np.random.randint(0, wd)
y0 = np.random.randint(0, ht)
dx = np.random.randint(bounds[0], bounds[1])
dy = np.random.randint(bounds[0], bounds[1])
img2[y0:y0+dy, x0:x0+dx, :] = mean_color
return img1, img2
def spatial_transform(self, img1, img2, flow):
# randomly sample scale
ht, wd = img1.shape[:2]
min_scale = np.maximum(
(self.crop_size[0] + 8) / float(ht),
(self.crop_size[1] + 8) / float(wd))
scale = 2 ** np.random.uniform(self.min_scale, self.max_scale)
scale_x = scale
scale_y = scale
if np.random.rand() < self.stretch_prob:
scale_x *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch)
scale_y *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch)
scale_x = np.clip(scale_x, min_scale, None)
scale_y = np.clip(scale_y, min_scale, None)
if np.random.rand() < self.spatial_aug_prob:
# rescale the images
img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
flow = cv2.resize(flow, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
flow = flow * [scale_x, scale_y]
if self.do_flip:
if np.random.rand() < self.h_flip_prob and self.do_flip == 'hf': # h-flip
img1 = img1[:, ::-1]
img2 = img2[:, ::-1]
flow = flow[:, ::-1] * [-1.0, 1.0]
if np.random.rand() < self.h_flip_prob and self.do_flip == 'h': # h-flip for stereo
tmp = img1[:, ::-1]
img1 = img2[:, ::-1]
img2 = tmp
if np.random.rand() < self.v_flip_prob and self.do_flip == 'v': # v-flip
img1 = img1[::-1, :]
img2 = img2[::-1, :]
flow = flow[::-1, :] * [1.0, -1.0]
if self.yjitter:
y0 = np.random.randint(2, img1.shape[0] - self.crop_size[0] - 2)
x0 = np.random.randint(2, img1.shape[1] - self.crop_size[1] - 2)
y1 = y0 + np.random.randint(-2, 2 + 1)
img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
img2 = img2[y1:y1+self.crop_size[0], x0:x0+self.crop_size[1]]
flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
else:
y0 = np.random.randint(0, img1.shape[0] - self.crop_size[0])
x0 = np.random.randint(0, img1.shape[1] - self.crop_size[1])
img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
return img1, img2, flow
def __call__(self, img1, img2, flow):
img1, img2 = self.color_transform(img1, img2)
img1, img2 = self.eraser_transform(img1, img2)
img1, img2, flow = self.spatial_transform(img1, img2, flow)
img1 = np.ascontiguousarray(img1)
img2 = np.ascontiguousarray(img2)
flow = np.ascontiguousarray(flow)
return img1, img2, flow
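# Hedged usage sketch (not part of the original file): the augmentor is called
# directly on raw uint8 image pairs plus a dense HxWx2 flow map, e.g.
#
#     aug = FlowAugmentor(crop_size=(320, 448), do_flip='h')
#     img1_a, img2_a, flow_a = aug(img1, img2, flow)
#
# where the inputs are larger than crop_size and the outputs come back cropped
# to exactly crop_size.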
class SparseFlowAugmentor:
def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5, do_flip=False, yjitter=False, saturation_range=[0.7,1.3], gamma=[1,1,1,1]):
# spatial augmentation params
self.crop_size = crop_size
self.min_scale = min_scale
self.max_scale = max_scale
self.spatial_aug_prob = 0.8
self.stretch_prob = 0.8
self.max_stretch = 0.2
# flip augmentation params
self.do_flip = do_flip
self.h_flip_prob = 0.5
self.v_flip_prob = 0.1
# photometric augmentation params
self.photo_aug = Compose([ColorJitter(brightness=0.3, contrast=0.3, saturation=saturation_range, hue=0.3/3.14), AdjustGamma(*gamma)])
self.asymmetric_color_aug_prob = 0.2
self.eraser_aug_prob = 0.5
def color_transform(self, img1, img2):
image_stack = np.concatenate([img1, img2], axis=0)
image_stack = np.array(self.photo_aug(Image.fromarray(image_stack)), dtype=np.uint8)
img1, img2 = np.split(image_stack, 2, axis=0)
return img1, img2
def eraser_transform(self, img1, img2):
ht, wd = img1.shape[:2]
if np.random.rand() < self.eraser_aug_prob:
mean_color = np.mean(img2.reshape(-1, 3), axis=0)
for _ in range(np.random.randint(1, 3)):
x0 = np.random.randint(0, wd)
y0 = np.random.randint(0, ht)
dx = np.random.randint(50, 100)
dy = np.random.randint(50, 100)
img2[y0:y0+dy, x0:x0+dx, :] = mean_color
return img1, img2
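    # Added note: the next helper rescales a *sparse* flow field. Instead of
    # interpolating (which would blend valid and invalid pixels), it scales the
    # coordinates and values of the valid flow vectors and splats them into an
    # empty map at the new resolution, rebuilding the validity mask alongside.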
def resize_sparse_flow_map(self, flow, valid, fx=1.0, fy=1.0):
ht, wd = flow.shape[:2]
coords = np.meshgrid(np.arange(wd), np.arange(ht))
coords = np.stack(coords, axis=-1)
coords = coords.reshape(-1, 2).astype(np.float32)
flow = flow.reshape(-1, 2).astype(np.float32)
valid = valid.reshape(-1).astype(np.float32)
coords0 = coords[valid>=1]
flow0 = flow[valid>=1]
ht1 = int(round(ht * fy))
wd1 = int(round(wd * fx))
coords1 = coords0 * [fx, fy]
flow1 = flow0 * [fx, fy]
xx = np.round(coords1[:,0]).astype(np.int32)
yy = np.round(coords1[:,1]).astype(np.int32)
v = (xx > 0) & (xx < wd1) & (yy > 0) & (yy < ht1)
xx = xx[v]
yy = yy[v]
flow1 = flow1[v]
flow_img = np.zeros([ht1, wd1, 2], dtype=np.float32)
valid_img = np.zeros([ht1, wd1], dtype=np.int32)
flow_img[yy, xx] = flow1
valid_img[yy, xx] = 1
return flow_img, valid_img
def spatial_transform(self, img1, img2, flow, valid):
# randomly sample scale
ht, wd = img1.shape[:2]
min_scale = np.maximum(
(self.crop_size[0] + 1) / float(ht),
(self.crop_size[1] + 1) / float(wd))
scale = 2 ** np.random.uniform(self.min_scale, self.max_scale)
scale_x = np.clip(scale, min_scale, None)
scale_y = np.clip(scale, min_scale, None)
if np.random.rand() < self.spatial_aug_prob:
# rescale the images
img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
flow, valid = self.resize_sparse_flow_map(flow, valid, fx=scale_x, fy=scale_y)
if self.do_flip:
if np.random.rand() < self.h_flip_prob and self.do_flip == 'hf': # h-flip
img1 = img1[:, ::-1]
img2 = img2[:, ::-1]
flow = flow[:, ::-1] * [-1.0, 1.0]
if np.random.rand() < self.h_flip_prob and self.do_flip == 'h': # h-flip for stereo
tmp = img1[:, ::-1]
img1 = img2[:, ::-1]
img2 = tmp
if np.random.rand() < self.v_flip_prob and self.do_flip == 'v': # v-flip
img1 = img1[::-1, :]
img2 = img2[::-1, :]
flow = flow[::-1, :] * [1.0, -1.0]
margin_y = 20
margin_x = 50
y0 = np.random.randint(0, img1.shape[0] - self.crop_size[0] + margin_y)
x0 = np.random.randint(-margin_x, img1.shape[1] - self.crop_size[1] + margin_x)
y0 = np.clip(y0, 0, img1.shape[0] - self.crop_size[0])
x0 = np.clip(x0, 0, img1.shape[1] - self.crop_size[1])
img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
valid = valid[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
return img1, img2, flow, valid
def __call__(self, img1, img2, flow, valid):
img1, img2 = self.color_transform(img1, img2)
img1, img2 = self.eraser_transform(img1, img2)
img1, img2, flow, valid = self.spatial_transform(img1, img2, flow, valid)
img1 = np.ascontiguousarray(img1)
img2 = np.ascontiguousarray(img2)
flow = np.ascontiguousarray(flow)
valid = np.ascontiguousarray(valid)
return img1, img2, flow, valid
|
py | b407f9d78e0ae9bea0f85f6a8a653e742bbef698 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
from __future__ import print_function
import tensorflow as tf
import edgeml.utils as utils
import numpy as np
import os
import sys
class BonsaiTrainer:
def __init__(self, bonsaiObj, lW, lT, lV, lZ, sW, sT, sV, sZ,
learningRate, X, Y, useMCHLoss=False, outFile=None, regLoss='huber'):
'''
bonsaiObj - Initialised Bonsai Object and Graph
lW, lT, lV and lZ are regularisers to Bonsai Params
sW, sT, sV and sZ are sparsity factors to Bonsai Params
        learningRate - learning rate for the optimizer
X is the Data Placeholder - Dims [_, dataDimension]
Y - Label placeholder for loss computation
useMCHLoss - For choice between HingeLoss vs CrossEntropy
useMCHLoss - True - MultiClass - multiClassHingeLoss
useMCHLoss - False - MultiClass - crossEntropyLoss
'''
self.bonsaiObj = bonsaiObj
self.regressionLoss = regLoss
self.lW = lW
self.lV = lV
self.lT = lT
self.lZ = lZ
self.sW = sW
self.sV = sV
self.sT = sT
self.sZ = sZ
self.Y = Y
self.X = X
self.useMCHLoss = useMCHLoss
if outFile is not None:
print("Outfile : ", outFile)
self.outFile = open(outFile, 'w')
else:
self.outFile = sys.stdout
self.learningRate = learningRate
self.assertInit()
self.sigmaI = tf.placeholder(tf.float32, name='sigmaI')
self.score, self.X_ = self.bonsaiObj(self.X, self.sigmaI)
self.loss, self.marginLoss, self.regLoss = self.lossGraph()
self.trainStep = self.trainGraph()
'''
self.accuracy -> 'MAE' for Regression.
self.accuracy -> 'Accuracy' for Classification.
'''
self.accuracy = self.accuracyGraph()
self.prediction = self.bonsaiObj.getPrediction()
if self.sW > 0.99 and self.sV > 0.99 and self.sZ > 0.99 and self.sT > 0.99:
self.isDenseTraining = True
else:
self.isDenseTraining = False
self.hardThrsd()
self.sparseTraining()
def lossGraph(self):
'''
Loss Graph for given Bonsai Obj
'''
self.regLoss = 0.5 * (self.lZ * tf.square(tf.norm(self.bonsaiObj.Z)) +
self.lW * tf.square(tf.norm(self.bonsaiObj.W)) +
self.lV * tf.square(tf.norm(self.bonsaiObj.V)) +
self.lT * tf.square(tf.norm(self.bonsaiObj.T)))
# Loss functions for classification.
if (self.bonsaiObj.isRegression == False):
if (self.bonsaiObj.numClasses > 2):
if self.useMCHLoss is True:
self.batch_th = tf.placeholder(tf.int64, name='batch_th')
self.marginLoss = utils.multiClassHingeLoss(
tf.transpose(self.score), self.Y,
self.batch_th)
else:
self.marginLoss = utils.crossEntropyLoss(
tf.transpose(self.score), self.Y)
self.loss = self.marginLoss + self.regLoss
else:
self.marginLoss = tf.reduce_mean(tf.nn.relu(
1.0 - (2 * self.Y - 1) * tf.transpose(self.score)))
self.loss = self.marginLoss + self.regLoss
# Loss functions for regression.
elif (self.bonsaiObj.isRegression == True):
if(self.regressionLoss == 'huber'):
# Use of Huber Loss , because it is more robust to outliers.
self.marginLoss = tf.losses.huber_loss(self.Y, tf.transpose(self.score))
self.loss = self.marginLoss + self.regLoss
elif (self.regressionLoss == 'l2'):
# L2 loss function.
self.marginLoss = tf.nn.l2_loss(self.Y - tf.transpose(self.score))
self.loss = self.marginLoss + self.regLoss
return self.loss, self.marginLoss, self.regLoss
def trainGraph(self):
'''
Train Graph for the loss generated by Bonsai
'''
self.bonsaiObj.TrainStep = tf.train.AdamOptimizer(
self.learningRate).minimize(self.loss)
return self.bonsaiObj.TrainStep
def accuracyGraph(self):
'''
Accuracy Graph to evaluate accuracy when needed
'''
if(self.bonsaiObj.isRegression == False):
if (self.bonsaiObj.numClasses > 2):
correctPrediction = tf.equal(
tf.argmax(tf.transpose(self.score), 1), tf.argmax(self.Y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correctPrediction, tf.float32))
else:
y_ = self.Y * 2 - 1
correctPrediction = tf.multiply(tf.transpose(self.score), y_)
correctPrediction = tf.nn.relu(correctPrediction)
correctPrediction = tf.ceil(tf.tanh(correctPrediction))
self.accuracy = tf.reduce_mean(tf.cast(correctPrediction, tf.float32))
elif (self.bonsaiObj.isRegression == True):
# Accuracy for regression , in terms of mean absolute error.
self.accuracy = utils.mean_absolute_error(tf.reshape(
self.score, [-1, 1]), tf.reshape(self.Y, [-1, 1]))
return self.accuracy
def hardThrsd(self):
'''
Set up for hard Thresholding Functionality
'''
self.__Wth = tf.placeholder(tf.float32, name='Wth')
self.__Vth = tf.placeholder(tf.float32, name='Vth')
self.__Zth = tf.placeholder(tf.float32, name='Zth')
self.__Tth = tf.placeholder(tf.float32, name='Tth')
self.__Woph = self.bonsaiObj.W.assign(self.__Wth)
self.__Voph = self.bonsaiObj.V.assign(self.__Vth)
self.__Toph = self.bonsaiObj.T.assign(self.__Tth)
self.__Zoph = self.bonsaiObj.Z.assign(self.__Zth)
self.hardThresholdGroup = tf.group(
self.__Woph, self.__Voph, self.__Toph, self.__Zoph)
def sparseTraining(self):
'''
Set up for Sparse Retraining Functionality
'''
self.__Wops = self.bonsaiObj.W.assign(self.__Wth)
self.__Vops = self.bonsaiObj.V.assign(self.__Vth)
self.__Zops = self.bonsaiObj.Z.assign(self.__Zth)
self.__Tops = self.bonsaiObj.T.assign(self.__Tth)
self.sparseRetrainGroup = tf.group(
self.__Wops, self.__Vops, self.__Tops, self.__Zops)
def runHardThrsd(self, sess):
'''
Function to run the IHT routine on Bonsai Obj
'''
currW = self.bonsaiObj.W.eval()
currV = self.bonsaiObj.V.eval()
currZ = self.bonsaiObj.Z.eval()
currT = self.bonsaiObj.T.eval()
self.__thrsdW = utils.hardThreshold(currW, self.sW)
self.__thrsdV = utils.hardThreshold(currV, self.sV)
self.__thrsdZ = utils.hardThreshold(currZ, self.sZ)
self.__thrsdT = utils.hardThreshold(currT, self.sT)
fd_thrsd = {self.__Wth: self.__thrsdW, self.__Vth: self.__thrsdV,
self.__Zth: self.__thrsdZ, self.__Tth: self.__thrsdT}
sess.run(self.hardThresholdGroup, feed_dict=fd_thrsd)
def runSparseTraining(self, sess):
'''
Function to run the Sparse Retraining routine on Bonsai Obj
'''
currW = self.bonsaiObj.W.eval()
currV = self.bonsaiObj.V.eval()
currZ = self.bonsaiObj.Z.eval()
currT = self.bonsaiObj.T.eval()
newW = utils.copySupport(self.__thrsdW, currW)
newV = utils.copySupport(self.__thrsdV, currV)
newZ = utils.copySupport(self.__thrsdZ, currZ)
newT = utils.copySupport(self.__thrsdT, currT)
fd_st = {self.__Wth: newW, self.__Vth: newV,
self.__Zth: newZ, self.__Tth: newT}
sess.run(self.sparseRetrainGroup, feed_dict=fd_st)
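    # Added note: train() below drives these two routines in a three-phase
    # schedule -- the first third of the batches is plain dense training, the
    # middle third applies runHardThrsd every `trimlevel` batches (IHT) with
    # runSparseTraining in between, and the final third only runs
    # runSparseTraining so updates stay on the last thresholded support.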
def assertInit(self):
err = "sparsity must be between 0 and 1"
assert self.sW >= 0 and self.sW <= 1, "W " + err
assert self.sV >= 0 and self.sV <= 1, "V " + err
assert self.sZ >= 0 and self.sZ <= 1, "Z " + err
assert self.sT >= 0 and self.sT <= 1, "T " + err
errMsg = "Dimension Mismatch, Y has to be [_, " + \
str(self.bonsaiObj.numClasses) + "]"
        errCont = " numClasses is 1 in the binary case by design"
assert (len(self.Y.shape) == 2 and
self.Y.shape[1] == self.bonsaiObj.numClasses), errMsg + errCont
def saveParams(self, currDir):
'''
Function to save Parameter matrices into a given folder
'''
paramDir = currDir + '/'
np.save(paramDir + "W.npy", self.bonsaiObj.W.eval())
np.save(paramDir + "V.npy", self.bonsaiObj.V.eval())
np.save(paramDir + "T.npy", self.bonsaiObj.T.eval())
np.save(paramDir + "Z.npy", self.bonsaiObj.Z.eval())
hyperParamDict = {'dataDim': self.bonsaiObj.dataDimension,
'projDim': self.bonsaiObj.projectionDimension,
'numClasses': self.bonsaiObj.numClasses,
'depth': self.bonsaiObj.treeDepth,
'sigma': self.bonsaiObj.sigma}
hyperParamFile = paramDir + 'hyperParam.npy'
np.save(hyperParamFile, hyperParamDict)
def loadModel(self, currDir):
'''
        Load the saved model parameters for use with the constructor.
        Returns two dicts: one for params and the other for hyperParams
'''
paramDir = currDir + '/'
paramDict = {}
paramDict['W'] = np.load(paramDir + "W.npy")
paramDict['V'] = np.load(paramDir + "V.npy")
paramDict['T'] = np.load(paramDir + "T.npy")
paramDict['Z'] = np.load(paramDir + "Z.npy")
hyperParamDict = np.load(paramDir + "hyperParam.npy").item()
return paramDict, hyperParamDict
# Function to get aimed model size
def getModelSize(self):
'''
Function to get aimed model size
'''
nnzZ, sizeZ, sparseZ = utils.countnnZ(self.bonsaiObj.Z, self.sZ)
nnzW, sizeW, sparseW = utils.countnnZ(self.bonsaiObj.W, self.sW)
nnzV, sizeV, sparseV = utils.countnnZ(self.bonsaiObj.V, self.sV)
nnzT, sizeT, sparseT = utils.countnnZ(self.bonsaiObj.T, self.sT)
totalnnZ = (nnzZ + nnzT + nnzV + nnzW)
totalSize = (sizeZ + sizeW + sizeV + sizeT)
hasSparse = (sparseW or sparseV or sparseT or sparseZ)
return totalnnZ, totalSize, hasSparse
def train(self, batchSize, totalEpochs, sess,
Xtrain, Xtest, Ytrain, Ytest, dataDir, currDir):
'''
The Dense - IHT - Sparse Retrain Routine for Bonsai Training
'''
resultFile = open(dataDir + '/TFBonsaiResults.txt', 'a+')
numIters = Xtrain.shape[0] / batchSize
totalBatches = numIters * totalEpochs
bonsaiObjSigmaI = 1
counter = 0
if self.bonsaiObj.numClasses > 2:
trimlevel = 15
else:
trimlevel = 5
ihtDone = 0
if (self.bonsaiObj.isRegression == True):
maxTestAcc = 100000007
else:
maxTestAcc = -10000
if self.isDenseTraining is True:
ihtDone = 1
bonsaiObjSigmaI = 1
itersInPhase = 0
header = '*' * 20
for i in range(totalEpochs):
print("\nEpoch Number: " + str(i), file=self.outFile)
'''
trainAcc -> For Regression, it is 'Mean Absolute Error'.
trainAcc -> For Classification, it is 'Accuracy'.
'''
trainAcc = 0.0
trainLoss = 0.0
numIters = int(numIters)
for j in range(numIters):
if counter == 0:
msg = " Dense Training Phase Started "
print("\n%s%s%s\n" %
(header, msg, header), file=self.outFile)
# Updating the indicator sigma
if ((counter == 0) or (counter == int(totalBatches / 3.0)) or
(counter == int(2 * totalBatches / 3.0))) and (self.isDenseTraining is False):
bonsaiObjSigmaI = 1
itersInPhase = 0
elif (itersInPhase % 100 == 0):
indices = np.random.choice(Xtrain.shape[0], 100)
batchX = Xtrain[indices, :]
batchY = Ytrain[indices, :]
batchY = np.reshape(
batchY, [-1, self.bonsaiObj.numClasses])
_feed_dict = {self.X: batchX}
Xcapeval = self.X_.eval(feed_dict=_feed_dict)
Teval = self.bonsaiObj.T.eval()
sum_tr = 0.0
for k in range(0, self.bonsaiObj.internalNodes):
sum_tr += (np.sum(np.abs(np.dot(Teval[k], Xcapeval))))
if(self.bonsaiObj.internalNodes > 0):
sum_tr /= (100 * self.bonsaiObj.internalNodes)
sum_tr = 0.1 / sum_tr
else:
sum_tr = 0.1
sum_tr = min(
1000, sum_tr * (2**(float(itersInPhase) /
(float(totalBatches) / 30.0))))
bonsaiObjSigmaI = sum_tr
itersInPhase += 1
batchX = Xtrain[j * batchSize:(j + 1) * batchSize]
batchY = Ytrain[j * batchSize:(j + 1) * batchSize]
batchY = np.reshape(
batchY, [-1, self.bonsaiObj.numClasses])
if self.bonsaiObj.numClasses > 2:
if self.useMCHLoss is True:
_feed_dict = {self.X: batchX, self.Y: batchY,
self.batch_th: batchY.shape[0],
self.sigmaI: bonsaiObjSigmaI}
else:
_feed_dict = {self.X: batchX, self.Y: batchY,
self.sigmaI: bonsaiObjSigmaI}
else:
_feed_dict = {self.X: batchX, self.Y: batchY,
self.sigmaI: bonsaiObjSigmaI}
# Mini-batch training
_, batchLoss, batchAcc = sess.run(
[self.trainStep, self.loss, self.accuracy],
feed_dict=_feed_dict)
# Classification.
if (self.bonsaiObj.isRegression == False):
trainAcc += batchAcc
trainLoss += batchLoss
# Regression.
else:
trainAcc += np.mean(batchAcc)
trainLoss += np.mean(batchLoss)
# Training routine involving IHT and sparse retraining
if (counter >= int(totalBatches / 3.0) and
(counter < int(2 * totalBatches / 3.0)) and
counter % trimlevel == 0 and
self.isDenseTraining is False):
self.runHardThrsd(sess)
if ihtDone == 0:
msg = " IHT Phase Started "
print("\n%s%s%s\n" %
(header, msg, header), file=self.outFile)
ihtDone = 1
elif ((ihtDone == 1 and counter >= int(totalBatches / 3.0) and
(counter < int(2 * totalBatches / 3.0)) and
counter % trimlevel != 0 and
self.isDenseTraining is False) or
(counter >= int(2 * totalBatches / 3.0) and
self.isDenseTraining is False)):
self.runSparseTraining(sess)
if counter == int(2 * totalBatches / 3.0):
msg = " Sparse Retraining Phase Started "
print("\n%s%s%s\n" %
(header, msg, header), file=self.outFile)
counter += 1
try:
if (self.bonsaiObj.isRegression == True):
print("\nRegression Train Loss: " + str(trainLoss / numIters) +
"\nTraining MAE (Regression): " + str(trainAcc / numIters),
file=self.outFile)
else:
print("\nClassification Train Loss: " + str(trainLoss / numIters) +
"\nTraining accuracy (Classification): " + str(trainAcc / numIters),
file=self.outFile)
except:
continue
oldSigmaI = bonsaiObjSigmaI
bonsaiObjSigmaI = 1e9
if self.bonsaiObj.numClasses > 2:
if self.useMCHLoss is True:
_feed_dict = {self.X: Xtest, self.Y: Ytest,
self.batch_th: Ytest.shape[0],
self.sigmaI: bonsaiObjSigmaI}
else:
_feed_dict = {self.X: Xtest, self.Y: Ytest,
self.sigmaI: bonsaiObjSigmaI}
else:
_feed_dict = {self.X: Xtest, self.Y: Ytest,
self.sigmaI: bonsaiObjSigmaI}
# This helps in direct testing instead of extracting the model out
testAcc, testLoss, regTestLoss, pred = sess.run(
[self.accuracy, self.loss, self.regLoss, self.prediction], feed_dict=_feed_dict)
if ihtDone == 0:
if (self.bonsaiObj.isRegression == False):
maxTestAcc = -10000
maxTestAccEpoch = i
elif (self.bonsaiObj.isRegression == True):
maxTestAcc = testAcc
maxTestAccEpoch = i
else:
if (self.bonsaiObj.isRegression == False):
if maxTestAcc <= testAcc:
maxTestAccEpoch = i
maxTestAcc = testAcc
self.saveParams(currDir)
elif (self.bonsaiObj.isRegression == True):
print("Minimum Training MAE : ", np.mean(maxTestAcc))
if maxTestAcc >= testAcc:
# For regression , we're more interested in the minimum MAE.
maxTestAccEpoch = i
maxTestAcc = testAcc
self.saveParams(currDir)
if (self.bonsaiObj.isRegression == True):
print("Testing MAE %g" % np.mean(testAcc), file=self.outFile)
else:
print("Test accuracy %g" % np.mean(testAcc), file=self.outFile)
if (self.bonsaiObj.isRegression == True):
testAcc = np.mean(testAcc)
else:
testAcc = testAcc
maxTestAcc = maxTestAcc
print("MarginLoss + RegLoss: " + str(testLoss - regTestLoss) +
" + " + str(regTestLoss) + " = " + str(testLoss) + "\n",
file=self.outFile)
self.outFile.flush()
bonsaiObjSigmaI = oldSigmaI
# sigmaI has to be set to infinity to ensure
# only a single path is used in inference
bonsaiObjSigmaI = 1e9
print("\nNon-Zero : " + str(self.getModelSize()[0]) + " Model Size: " +
str(float(self.getModelSize()[1]) / 1024.0) + " KB hasSparse: " +
str(self.getModelSize()[2]) + "\n", file=self.outFile)
if (self.bonsaiObj.isRegression == True):
maxTestAcc = np.mean(maxTestAcc)
if (self.bonsaiObj.isRegression == True):
print("For Regression, Minimum MAE at compressed" +
" model size(including early stopping): " +
str(maxTestAcc) + " at Epoch: " +
str(maxTestAccEpoch + 1) + "\nFinal Test" +
" MAE: " + str(testAcc), file=self.outFile)
resultFile.write("MinTestMAE: " + str(maxTestAcc) +
" at Epoch(totalEpochs): " +
str(maxTestAccEpoch + 1) +
"(" + str(totalEpochs) + ")" + " ModelSize: " +
str(float(self.getModelSize()[1]) / 1024.0) +
" KB hasSparse: " + str(self.getModelSize()[2]) +
" Param Directory: " +
str(os.path.abspath(currDir)) + "\n")
elif (self.bonsaiObj.isRegression == False):
print("For Classification, Maximum Test accuracy at compressed" +
" model size(including early stopping): " +
str(maxTestAcc) + " at Epoch: " +
str(maxTestAccEpoch + 1) + "\nFinal Test" +
" Accuracy: " + str(testAcc), file=self.outFile)
resultFile.write("MaxTestAcc: " + str(maxTestAcc) +
" at Epoch(totalEpochs): " +
str(maxTestAccEpoch + 1) +
"(" + str(totalEpochs) + ")" + " ModelSize: " +
str(float(self.getModelSize()[1]) / 1024.0) +
" KB hasSparse: " + str(self.getModelSize()[2]) +
" Param Directory: " +
str(os.path.abspath(currDir)) + "\n")
print("The Model Directory: " + currDir + "\n")
resultFile.close()
self.outFile.flush()
if self.outFile is not sys.stdout:
self.outFile.close()
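# Hedged usage sketch (not part of the original file): with a Bonsai graph
# object and X/Y placeholders constructed elsewhere (see the EdgeML examples),
# a driver is assumed to look roughly like
#
#     trainer = BonsaiTrainer(bonsaiObj, lW=1e-3, lT=1e-3, lV=1e-3, lZ=1e-5,
#                             sW=0.3, sT=0.62, sV=0.26, sZ=0.2,
#                             learningRate=0.01, X=X, Y=Y)
#     with tf.Session() as sess:
#         sess.run(tf.global_variables_initializer())
#         trainer.train(batchSize, totalEpochs, sess,
#                       Xtrain, Xtest, Ytrain, Ytest, dataDir, currDir)
#
# The hyperparameter values above are illustrative only.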
|
py | b407fa4e0b6a6cc7d432d11d6a62ed3ca24cafd5 | # Copyright (c) 2010 Arthur Mesh
# 2010 Christopher Nelson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from pysnmp.smi import builder, view
class Builder(object):
"""
    Build the strings from which the tree and data are gathered.
"""
def __init__(self, mib):
"""
Create the builder with the given mib module.
"""
self.mib = mib
self.mibbuilder = builder.MibBuilder().loadModules(mib)
self.viewcontroller = view.MibViewController(self.mibbuilder)
def get_parts(self):
"""
        Get the list of (label, oid) pairs for every node in the MIB module.
"""
parts = []
oid, label, suffix = self.viewcontroller.getFirstNodeName(self.mib)
parts.append( (label, oid) )
done = False
while not done:
try:
oid, label, suffix = self.viewcontroller.getNextNodeName(
label, self.mib)
parts.append( (label, oid) )
except:
done = True
return parts
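# Hedged usage sketch (not part of the original file):
#
#     b = Builder("SNMPv2-MIB")
#     for label, oid in b.get_parts():
#         print(label, oid)
#
# walks every node the MIB view controller can reach for that module.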
|
py | b407faa072ffd07abcc294ee91a3cc24e0c03af9 | """
Copyright 2017 Fair Isaac Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cvxpy.settings as s
from collections import namedtuple
from cvxpy.problems.problem import Problem
from cvxpy.utilities.deterministic import unique_list
# Used in self._cached_data to check if problem's objective or constraints have
# changed.
CachedProblem = namedtuple('CachedProblem', ['objective', 'constraints'])
# Used by pool.map to send solve result back.
SolveResult = namedtuple(
'SolveResult', ['opt_value', 'status', 'primal_values', 'dual_values'])
class XpressProblem (Problem):
"""A convex optimization problem associated with the Xpress Optimizer
Attributes
----------
objective : Minimize or Maximize
The expression to minimize or maximize.
constraints : list
The constraints on the problem variables.
"""
# The solve methods available.
REGISTERED_SOLVE_METHODS = {}
def __init__(self, objective, constraints=None) -> None:
super(XpressProblem, self).__init__(objective, constraints)
self._iis = None
def _reset_iis(self) -> None:
"""Clears the iis information
"""
self._iis = None
self._transferRow = None
def __repr__(self) -> str:
return "XpressProblem(%s, %s)" % (repr(self.objective),
repr(self.constraints))
def __neg__(self) -> "XpressProblem":
return XpressProblem(-self.objective, self.constraints)
def __add__(self, other):
if other == 0:
return self
elif not isinstance(other, XpressProblem):
raise NotImplementedError()
return XpressProblem(self.objective + other.objective,
unique_list(self.constraints + other.constraints))
def __sub__(self, other):
if not isinstance(other, XpressProblem):
raise NotImplementedError()
return XpressProblem(self.objective - other.objective,
unique_list(self.constraints + other.constraints))
def __mul__(self, other):
if not isinstance(other, (int, float)):
raise NotImplementedError()
return XpressProblem(self.objective * other, self.constraints)
def __div__(self, other):
if not isinstance(other, (int, float)):
raise NotImplementedError()
return XpressProblem(self.objective * (1.0 / other), self.constraints)
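    # Hedged usage sketch (not part of the original file): the operators above
    # combine problems by summing objectives and unioning constraint lists,
    # e.g. with cvxpy expressions built elsewhere:
    #
    #     p1 = XpressProblem(Minimize(sum_squares(x)), [x >= 0])
    #     p2 = XpressProblem(Minimize(sum(x)), [x <= 1])
    #     combined = p1 + p2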
|
py | b407fb1c79ff4753f75e9543d11ec402ddc02d66 | from typing import TextIO
from io import StringIO
def read_molecule(reader: TextIO) -> list:
"""Read a single molecule from reader and return it, or return None to
signal end of file. The first item in the result is the name of the
compound; each list contains an atom type and the X, Y, and Z coordinates
of that atom.
>>> instring = 'COMPND TEST\\nATOM 1 N 0.1 0.2 0.3\\nATOM 2 N 0.2 0.1 0.0\\nEND\\n'
>>> infile = StringIO(instring)
>>> read_molecule(infile)
['TEST', ['N', '0.1', '0.2', '0.3'], ['N', '0.2', '0.1', '0.0']]
"""
# If there isn't another line, we're at the end of the file.
line = reader.readline()
if not line:
return None
# skip comments and empty lines outside of the molecule block
while line.startswith('CMNT') or line.strip() == '':
line = reader.readline()
# Name of the molecule: "COMPND name"
parts = line.split()
name = parts[1]
# Other lines are either "END" or "ATOM num atom_type x y z"
molecule = [name]
reading = True
while reading:
line = reader.readline()
# skip comments and empty lines inside of the molecule block
while line.startswith('CMNT') or line.strip() == '':
line = reader.readline()
if line.startswith('END'):
reading = False
else:
parts = line.split()
molecule.append(parts[2:])
return molecule
def read_all_molecules(reader: TextIO) -> list:
"""Read zero or more molecules from reader, returning a list of the
molecule information.
>>> cmpnd1 = 'COMPND T1\\nATOM 1 N 0.1 0.2 0.3\\nATOM 2 N 0.2 0.1 0.0\\nEND\\n'
>>> cmpnd2 = 'COMPND T2\\nATOM 1 A 0.1 0.2 0.3\\nATOM 2 A 0.2 0.1 0.0\\nEND\\n'
>>> infile = StringIO(cmpnd1 + cmpnd2)
>>> result = read_all_molecules(infile)
>>> result[0]
['T1', ['N', '0.1', '0.2', '0.3'], ['N', '0.2', '0.1', '0.0']]
>>> result[1]
['T2', ['A', '0.1', '0.2', '0.3'], ['A', '0.2', '0.1', '0.0']]
"""
# The list of molecule information.
result = []
reading = True
while reading:
molecule = read_molecule(reader)
if molecule: # None is treated as False in an if statement
result.append(molecule)
else:
reading = False
return result
if __name__ == '__main__':
molecule_file = open('multimol.pdb', 'r')
molecules = read_all_molecules(molecule_file)
molecule_file.close()
print(molecules) |
py | b407fbdb3cdfce58dda38eb31b7b61d938dd75c3 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import logging
import unittest
import uuid
from unittest import mock
import pytest
from azure.cosmos.cosmos_client import CosmosClient
from airflow.exceptions import AirflowException
from airflow.models import Connection
from airflow.providers.microsoft.azure.hooks.cosmos import AzureCosmosDBHook
from airflow.utils import db
class TestAzureCosmosDbHook(unittest.TestCase):
# Set up an environment to test with
def setUp(self):
# set up some test variables
self.test_end_point = 'https://test_endpoint:443'
self.test_master_key = 'magic_test_key'
self.test_database_name = 'test_database_name'
self.test_collection_name = 'test_collection_name'
self.test_database_default = 'test_database_default'
self.test_collection_default = 'test_collection_default'
db.merge_conn(
Connection(
conn_id='azure_cosmos_test_key_id',
conn_type='azure_cosmos',
login=self.test_end_point,
password=self.test_master_key,
extra=json.dumps(
{
'database_name': self.test_database_default,
'collection_name': self.test_collection_default,
}
),
)
)
@mock.patch('airflow.providers.microsoft.azure.hooks.cosmos.CosmosClient', autospec=True)
def test_client(self, mock_cosmos):
hook = AzureCosmosDBHook(azure_cosmos_conn_id='azure_cosmos_test_key_id')
assert hook._conn is None
assert isinstance(hook.get_conn(), CosmosClient)
@mock.patch('airflow.providers.microsoft.azure.hooks.cosmos.CosmosClient')
def test_create_database(self, mock_cosmos):
hook = AzureCosmosDBHook(azure_cosmos_conn_id='azure_cosmos_test_key_id')
hook.create_database(self.test_database_name)
expected_calls = [mock.call().create_database('test_database_name')]
mock_cosmos.assert_any_call(self.test_end_point, {'masterKey': self.test_master_key})
mock_cosmos.assert_has_calls(expected_calls)
@mock.patch('airflow.providers.microsoft.azure.hooks.cosmos.CosmosClient')
def test_create_database_exception(self, mock_cosmos):
hook = AzureCosmosDBHook(azure_cosmos_conn_id='azure_cosmos_test_key_id')
with pytest.raises(AirflowException):
hook.create_database(None)
@mock.patch('airflow.providers.microsoft.azure.hooks.cosmos.CosmosClient')
def test_create_container_exception(self, mock_cosmos):
hook = AzureCosmosDBHook(azure_cosmos_conn_id='azure_cosmos_test_key_id')
with pytest.raises(AirflowException):
hook.create_collection(None)
@mock.patch('airflow.providers.microsoft.azure.hooks.cosmos.CosmosClient')
def test_create_container(self, mock_cosmos):
hook = AzureCosmosDBHook(azure_cosmos_conn_id='azure_cosmos_test_key_id')
hook.create_collection(self.test_collection_name, self.test_database_name)
expected_calls = [
mock.call().get_database_client('test_database_name').create_container('test_collection_name')
]
mock_cosmos.assert_any_call(self.test_end_point, {'masterKey': self.test_master_key})
mock_cosmos.assert_has_calls(expected_calls)
@mock.patch('airflow.providers.microsoft.azure.hooks.cosmos.CosmosClient')
def test_create_container_default(self, mock_cosmos):
hook = AzureCosmosDBHook(azure_cosmos_conn_id='azure_cosmos_test_key_id')
hook.create_collection(self.test_collection_name)
expected_calls = [
mock.call().get_database_client('test_database_name').create_container('test_collection_name')
]
mock_cosmos.assert_any_call(self.test_end_point, {'masterKey': self.test_master_key})
mock_cosmos.assert_has_calls(expected_calls)
@mock.patch('airflow.providers.microsoft.azure.hooks.cosmos.CosmosClient')
def test_upsert_document_default(self, mock_cosmos):
test_id = str(uuid.uuid4())
# fmt: off
(mock_cosmos
.return_value
.get_database_client
.return_value
.get_container_client
.return_value
.upsert_item
.return_value) = {'id': test_id}
# fmt: on
hook = AzureCosmosDBHook(azure_cosmos_conn_id='azure_cosmos_test_key_id')
returned_item = hook.upsert_document({'id': test_id})
expected_calls = [
mock.call()
.get_database_client('test_database_name')
.get_container_client('test_collection_name')
.upsert_item({'id': test_id})
]
mock_cosmos.assert_any_call(self.test_end_point, {'masterKey': self.test_master_key})
mock_cosmos.assert_has_calls(expected_calls)
logging.getLogger().info(returned_item)
assert returned_item['id'] == test_id
@mock.patch('airflow.providers.microsoft.azure.hooks.cosmos.CosmosClient')
def test_upsert_document(self, mock_cosmos):
test_id = str(uuid.uuid4())
# fmt: off
(mock_cosmos
.return_value
.get_database_client
.return_value
.get_container_client
.return_value
.upsert_item
.return_value) = {'id': test_id}
# fmt: on
hook = AzureCosmosDBHook(azure_cosmos_conn_id='azure_cosmos_test_key_id')
returned_item = hook.upsert_document(
{'data1': 'somedata'},
database_name=self.test_database_name,
collection_name=self.test_collection_name,
document_id=test_id,
)
expected_calls = [
mock.call()
.get_database_client('test_database_name')
.get_container_client('test_collection_name')
.upsert_item({'data1': 'somedata', 'id': test_id})
]
mock_cosmos.assert_any_call(self.test_end_point, {'masterKey': self.test_master_key})
mock_cosmos.assert_has_calls(expected_calls)
logging.getLogger().info(returned_item)
assert returned_item['id'] == test_id
@mock.patch('airflow.providers.microsoft.azure.hooks.cosmos.CosmosClient')
def test_insert_documents(self, mock_cosmos):
test_id1 = str(uuid.uuid4())
test_id2 = str(uuid.uuid4())
test_id3 = str(uuid.uuid4())
documents = [
{'id': test_id1, 'data': 'data1'},
{'id': test_id2, 'data': 'data2'},
{'id': test_id3, 'data': 'data3'},
]
hook = AzureCosmosDBHook(azure_cosmos_conn_id='azure_cosmos_test_key_id')
returned_item = hook.insert_documents(documents)
expected_calls = [
mock.call()
.get_database_client('test_database_name')
.get_container_client('test_collection_name')
.create_item({'data': 'data1', 'id': test_id1}),
mock.call()
.get_database_client('test_database_name')
.get_container_client('test_collection_name')
.create_item({'data': 'data2', 'id': test_id2}),
mock.call()
.get_database_client('test_database_name')
.get_container_client('test_collection_name')
.create_item({'data': 'data3', 'id': test_id3}),
]
logging.getLogger().info(returned_item)
mock_cosmos.assert_any_call(self.test_end_point, {'masterKey': self.test_master_key})
mock_cosmos.assert_has_calls(expected_calls, any_order=True)
@mock.patch('airflow.providers.microsoft.azure.hooks.cosmos.CosmosClient')
def test_delete_database(self, mock_cosmos):
hook = AzureCosmosDBHook(azure_cosmos_conn_id='azure_cosmos_test_key_id')
hook.delete_database(self.test_database_name)
expected_calls = [mock.call().delete_database('test_database_name')]
mock_cosmos.assert_any_call(self.test_end_point, {'masterKey': self.test_master_key})
mock_cosmos.assert_has_calls(expected_calls)
@mock.patch('airflow.providers.microsoft.azure.hooks.cosmos.CosmosClient')
def test_delete_database_exception(self, mock_cosmos):
hook = AzureCosmosDBHook(azure_cosmos_conn_id='azure_cosmos_test_key_id')
with pytest.raises(AirflowException):
hook.delete_database(None)
@mock.patch('azure.cosmos.cosmos_client.CosmosClient')
def test_delete_container_exception(self, mock_cosmos):
hook = AzureCosmosDBHook(azure_cosmos_conn_id='azure_cosmos_test_key_id')
with pytest.raises(AirflowException):
hook.delete_collection(None)
@mock.patch('airflow.providers.microsoft.azure.hooks.cosmos.CosmosClient')
def test_delete_container(self, mock_cosmos):
hook = AzureCosmosDBHook(azure_cosmos_conn_id='azure_cosmos_test_key_id')
hook.delete_collection(self.test_collection_name, self.test_database_name)
expected_calls = [
mock.call().get_database_client('test_database_name').delete_container('test_collection_name')
]
mock_cosmos.assert_any_call(self.test_end_point, {'masterKey': self.test_master_key})
mock_cosmos.assert_has_calls(expected_calls)
@mock.patch('airflow.providers.microsoft.azure.hooks.cosmos.CosmosClient')
def test_delete_container_default(self, mock_cosmos):
hook = AzureCosmosDBHook(azure_cosmos_conn_id='azure_cosmos_test_key_id')
hook.delete_collection(self.test_collection_name)
expected_calls = [
mock.call().get_database_client('test_database_name').delete_container('test_collection_name')
]
mock_cosmos.assert_any_call(self.test_end_point, {'masterKey': self.test_master_key})
mock_cosmos.assert_has_calls(expected_calls)
|
py | b407fe45649cfcd75e10723d6a0fbb54b7f9edad | #!/usr/bin/env python3
"""
Extract all compressed files in a Minecraft world directory.
python3 {this_py_file} [<world_path>] [<target_path>] [--force]
world_path If omitted (1st unnamed arg), use
current directory.
target_path If omitted (2nd unnamed arg), use (create)
"/extracted/" under current directory.
If specified, the directory must exist.
--force Delete target_path if exists!
(If force is not provided, using an existing non-empty path will cause the program to fail.)
If you get a large negative number as an error return on Windows, you are
probably using the builtin (quiet) copy of Python included with Windows.
Instead, install Python from python.org using the "All Users" and
"Add to PATH" options.
"""
import os
import sys
import platform
import shutil
import gzip
ignore = [sys.argv[0], "extract.py", "README.md", "CHANGELOG.md"]
ignore_paths = []
def count_sub_paths(path):
# See algrice' answer on <https://stackoverflow.com/questions/
# 49284015/how-to-check-if-folder-is-empty-with-python>
results = [f for f in os.listdir(path) if not f.startswith('.')]
# sys.stderr.write("{} results in {}".format(len(results), path))
return len(results)
def is_empty(path):
count_sub_paths(path) == 0
total = 0
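# extract() below walks `src` recursively, gunzip-decompressing each regular
# file into a mirrored path under `dst`; files that are not gzip data are
# skipped, and destination directories left empty afterwards are removed.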
def extract(src, dst, level=0):
# sys.stdout.write(" "*level + "(IN {})\n".format(dst))
global total
for sub in os.listdir(src):
if sub.startswith("."):
continue
if sub in ignore:
continue
# print("Trying {}...".format(src))
src_sub = os.path.join(src, sub)
if src_sub.lower() in ignore_paths:
continue
dst_sub = os.path.join(dst, sub)
if os.path.isdir(src_sub):
# sys.stderr.write(" "*level + "{}/".format(sub))
# sys.stdout.write(" ({})".format(dst_sub))
# sys.stdout.write("\n")
extract(src_sub, dst_sub, level+2)
else:
sys.stderr.write(" "*level + "{}".format(sub))
try:
# See <https://stackoverflow.com/questions
# /31028815/how-to-unzip-gz-file-using-python>
with gzip.open(src_sub, 'rb') as f_in:
if not os.path.isdir(dst):
os.makedirs(dst)
# else:
# sys.stderr.write("{} is present"
# "...".format(dst))
with open(dst_sub, 'wb') as f_out:
# os.mkdir(dst_sub)
# sys.stderr.write("new directory name"
# " is from gz filename.")
shutil.copyfileobj(f_in, f_out)
sys.stderr.write(": extracted")
# total += count_sub_paths(dst_sub)
total += 1
except gzip.BadGzipFile:
sys.stderr.write(": SKIPPED non-gz file.")
if is_empty(dst):
os.rmdir(dst)
if os.path.exists(dst_sub):
os.remove(dst_sub)
sys.stderr.write("\n")
def main():
world_path = "."
target_name = "extracted"
ignore.append(target_name)
dest_path = "."
profile = None
AppData = None
print("Running...")
if platform.system() == "Windows":
profile = os.environ.get("USERPROFILE")
AppData = os.environ.get("APPDATA")
else:
profile = os.environ.get("HOME")
AppData = profile
dot_minecraft = os.path.join(AppData, ".minecraft")
settings = {}
name = None
sequential_args = ["world_path", "target_path"]
seq_arg_i = 0
for i in range(1, len(sys.argv)):
arg = sys.argv[i]
if arg.startswith("--"):
start = 2
if name is not None:
settings[name] = True
name = None
sign_i = arg.find("=")
if sign_i > start:
                settings[arg[start:sign_i]] = arg[sign_i+1:]
else:
name = arg[start:]
else:
if name is not None:
settings[name] = arg
else:
if seq_arg_i < len(sequential_args):
name = sequential_args[seq_arg_i]
settings[name] = arg
seq_arg_i += 1
name = None
if name is not None:
settings[name] = True
name = None
if settings.get("world_path") is None:
print("* You didn't specify a world"
"name or world path as the parameter.")
settings["world_path"] = os.getcwd()
print(" * Trying current directory:"
" '{}'".format(settings["world_path"]))
else:
if not os.path.isdir(settings["world_path"]):
            # A bare world name is resolved under the default Minecraft saves
            # directory (assumed intent; dot_minecraft is otherwise unused).
            try_path = os.path.join(dot_minecraft, "saves",
                                    settings["world_path"])
if os.path.isdir(try_path):
settings["world_path"] = try_path
good_flag = "level.dat"
good_flag_path = os.path.join(settings["world_path"], good_flag)
if not os.path.isfile(good_flag_path):
raise ValueError("* The '{}' is not present, so the world"
" path does not seem to be a Minecraft world:"
" '{}'".format(good_flag,
settings["world_path"]))
if settings.get("target_path") is None:
settings["target_path"] = os.path.join(settings["world_path"],
target_name)
else:
if not os.path.isdir(settings["target_path"]):
            raise ValueError("The target directory '{}' does not exist."
                             .format(settings["target_path"]))
ignore_paths.append(settings["target_path"].lower())
print("Using settings: {}".format(settings))
print("* found '{}' (using world "
"'{}')...".format(good_flag, settings["world_path"]))
# if settings.get("target_path") is None:
# settings["target_path"] = os.path.join(dest_path, target_name)
if settings["world_path"] == settings["target_path"]:
raise RuntimeError("The target path is same as the source path.")
if os.path.isdir(settings["target_path"]):
if settings.get("force") is True:
shutil.rmtree(settings["target_path"])
if os.path.isdir(settings["target_path"]):
raise RuntimeError("Removing '{}' (with --force) failed"
".".format(settings["target_path"]))
else:
print("* removed '{}' (due to using --force option)"
".".format(settings["target_path"]))
os.mkdir(settings["target_path"])
else:
if not is_empty(settings["target_path"]):
raise RuntimeWarning("'{}' contains files. You should"
" delete it before running this"
" script or use --force"
".".format(settings["target_path"]))
else:
# Code further up checks if should exist and stops if in that
# case, so creating it now is OK.
os.mkdir(settings["target_path"])
extract(settings["world_path"], settings["target_path"])
sys.stderr.write("Extracted {} file(s).\n".format(total))
sys.stderr.write("New names are same as compressed names,"
" but are in {}.".format(settings["target_path"]))
if __name__ == "__main__":
main()
|
py | b407fec5feed77b8239ba6bfd6a13d399458f63c | #
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Tuple
import tensorflow as tf
import numpy as np
from src.logger import get_logger
log = get_logger(__name__)
class ImageDecodeError(ValueError):
pass
class ImageResizeError(ValueError):
pass
class ImageTransformError(ValueError):
pass
def preprocess_binary_image(image: bytes, channels: int = None,
target_size: Tuple[int, int] = None,
channels_first=True,
dtype=tf.dtypes.float32, scale: float = None,
standardization=False,
reverse_input_channels=True) -> np.ndarray:
"""
Preprocess binary image in PNG, JPG or BMP format, producing numpy array as a result.
:param image: Image bytes
:param channels: Number of image's channels
:param target_size: A tuple of desired height and width
:param channels_first: If set to True, image array will be in NCHW format,
NHWC format will be used otherwise
:param dtype: Data type that will be used for decoding
:param scale: If passed, decoded image array will be multiplied by this value
    :param standardization: If set to true, image array values will be standardized
to have mean 0 and standard deviation of 1
:param reverse_input_channels: If set to True, image channels will be reversed
from RGB to BGR format
:raises TypeError: if type of provided parameters are incorrect
:raises ValueError: if values of provided parameters is incorrect
:raises ImageDecodeError(ValueError): if image cannot be decoded
:raises ImageResizeError(ValueError): if image cannot be resized
:raises ImageTransformError(ValueError): if image cannot be properly transformed
:returns: Preprocessed image as numpy array
"""
params_to_check = {'channels': channels,
'scale': scale}
for param_name, value in params_to_check.items():
try:
if value is not None and value < 0:
raise ValueError(
'Invalid value {} for parameter {}.'.format(value, param_name))
except TypeError:
raise TypeError('Invalid type {} for parameter {}.'.format(
type(value), param_name))
try:
if target_size:
height, width = target_size
if height <= 0 or width <= 0:
raise ValueError('Invalid target size parameter.')
except TypeError:
raise TypeError('Invalid target size type.')
if not isinstance(dtype, tf.dtypes.DType):
raise TypeError(
'Invalid type {} for parameter dtype.'.format(type(dtype)))
try:
decoded_image = tf.io.decode_image(image, channels=channels)
decoded_image = tf.dtypes.cast(decoded_image, dtype)
except Exception as e:
raise ImageDecodeError(
'Provided image is invalid, unable to decode.') from e
if target_size:
try:
decoded_image = tf.image.resize(decoded_image, target_size)
except Exception as e:
raise ImageResizeError('Failed to resize provided binary image from: {} '
'to: {}.'.format(tf.shape(decoded_image), target_size)) from e
try:
if standardization:
decoded_image = tf.image.per_image_standardization(decoded_image)
image_array = decoded_image.numpy()
if reverse_input_channels:
image_array = image_array[..., ::-1]
if channels_first:
image_array = np.transpose(image_array, [2, 0, 1])
if scale:
array_type = image_array.dtype
image_array = image_array * scale
image_array = image_array.astype(array_type)
except Exception as e:
log.exception(str(e))
raise ImageTransformError('Failed to preprocess image, '
'check if provided parameters are correct.') from e
return image_array
if __name__ == "__main__":
import sys
if len(sys.argv) < 2:
print('Pass path to the image as the first argument')
sys.exit(1)
img_path = sys.argv[1]
with open(img_path, mode='rb') as img_file:
binary_image = img_file.read()
preprocessed_image = preprocess_binary_image(
binary_image, channels_first=False)
print(preprocessed_image)
print(preprocessed_image.shape)
# Keep in mind that matplotlib will not be able to display image in NCHW format
try:
import matplotlib.pyplot as plt
plt.imshow(preprocessed_image)
plt.show()
except ImportError:
print('Please install matplotlib if you want to inspect preprocessed image.')
|
py | b4080117aeccaece12c503fbaa4b6b8c27588bc8 | import json
import unittest
from stix_shifter_utils.stix_translation.src.json_to_stix import json_to_stix_translator
from stix_shifter_modules.azure_sentinel.entry_point import EntryPoint
from stix_shifter_utils.stix_translation.src.utils.transformer_utils import get_module_transformers
MODULE = "azure_sentinel"
entry_point = EntryPoint()
map_data = entry_point.get_results_translator().map_data
data_source = {
"type": "identity",
"id": "identity--3532c56d-ea72-48be-a2ad-1a53f4c9c6d3",
"name": "azure_sentinel",
"identity_class": "events"
}
options = {}
DATA1 = {'id': '2518268485253060642_52b1a353-2fd8-4c45-8f8a-94db98dca29d',
'azureTenantId': 'b73e5ba8-34d5-495a-9901-06bdb84cf13e',
'azureSubscriptionId': '083de1fb-cd2d-4b7c-895a-2b5af1d091e8', 'category': 'SuspiciousSVCHOSTRareGroup',
'createdDateTime': '2019-12-04T09:38:05.2024952Z',
'description': 'The system process SVCHOST was observed running a rare service group. Malware often '
'use SVCHOST to masquerade its malicious activity.',
'eventDateTime': '2019-12-04T09:37:54.6939357Z', 'lastModifiedDateTime': '2019-12-04T09:38:06.7571701Z',
'recommendedActions': ['1. Run Process Explorer and try to identify unknown running processes (see '
'https://technet.microsoft.com/en-us/sysinternals/bb896653.aspx)',
'2. Make sure the machine is completely updated and has an updated '
'anti-malware application installed',
'3. Run a full anti-malware scan and verify that the threat was removed',
'4. Install and run Microsoft’s Malicious Software Removal Tool (see '
'https://www.microsoft.com/en-us/download/malicious-software-removal-tool' '-details.aspx)',
'5. Run Microsoft’s Autoruns utility and try to identify unknown applications '
'that are configured to run at login (see '
'https://technet.microsoft.com/en-us/sysinternals/bb963902.aspx)'],
'severity': 'informational', 'status': 'newAlert', 'title': 'Rare SVCHOST service group executed',
'vendorInformation_provider': 'ASC', 'vendorInformation_subProvider': 'Detection',
'vendorInformation_vendor': 'Microsoft', 'fileStates': [{'name': 'services.exe',
'path': 'c:\\windows\\system32\\services.exe'}, {'name': 'svchost.exe',
'path': 'c:\\windows\\system32\\svchost.exe'}], 'hostStates': [{'netBiosName': 'TEST-WINDOW',
'os': 'Windows', 'commandLine': '', 'name': 'services.exe',
'path': 'c:\\windows\\system32\\services.exe'}, {'accountName': 'test-window$',
'commandLine': 'c:\\windows\\system32\\svchost.exe -k clipboardsvcgroup -p -s cbdhsvc',
'createdDateTime': '2019-12-04T09:37:54.6939357Z', 'name': 'svchost.exe',
'parentProcessName': 'services.exe', 'path': 'c:\\windows\\system32\\svchost.exe'}],
'userStates': [{'accountName': 'test-window$', 'domainName': 'WORKGROUP', 'emailRole': 'unknown',
'logonId': '0x3e7', 'onPremisesSecurityIdentifier': 'S-1-5-18', 'userPrincipalName': 'test-window$@TEST-WINDOW'}], 'event_count': '1'}
DATA2 = {'id': '2518267967999999999_13d684ad-1397-4db8-be04-9a7fe750bb1d',
'azureTenantId': 'b73e5ba8-34d5-495a-9901-06bdb84cf13e',
'azureSubscriptionId': '083de1fb-cd2d-4b7c-895a-2b5af1d091e8',
'category': 'AdaptiveNetworkHardeningInbound', 'createdDateTime': '2019-12-06T10:25:09.1750985Z',
'description': 'Azure security center has detected incoming traffic from IP addresses, which have '
'been identified as IP addresses that should be blocked by the Adaptive Network '
'Hardening control',
'eventDateTime': '2019-12-05T00:00:00Z', 'lastModifiedDateTime': '2019-12-06T10:25:12.3478085Z',
'recommendedActions': ['{"kind":"openBlade","displayValue":"Enforce rule",'
'"extension":"Microsoft_Azure_Security_R3",'
'"detailBlade":"AdaptiveNetworkControlsResourceBlade",'
'"detailBladeInputs":"protectedResourceId=/subscriptions/083de1fb-cd2d-4b7c'
'-895a-2b5af1d091e8/resourcegroups/eastus/providers/microsoft.compute'
'/virtualmachines/bigfixcentos"}'],
'severity': 'low', 'status': 'newAlert',
'title': 'Traffic from unrecommended IP addresses was detected', 'vendorInformation_provider': 'ASC',
'vendorInformation_subProvider': 'AdaptiveNetworkHardenings', 'vendorInformation_vendor': 'Microsoft',
"networkConnections": [{"applicationName": "Microsoft", "destinationAddress": "61.23.79.168", "destinationDomain": None,
"destinationLocation": None, "destinationPort": "22", "destinationUrl": None, "direction": None,
"domainRegisteredDateTime": None, "localDnsName": None, "natDestinationAddress": None, "natDestinationPort": None,
"natSourceAddress": None, "natSourcePort": None, "protocol": "tcp", "riskScore": None, "sourceAddress": "118.32.223.14",
"sourceLocation": None, "sourcePort": "9475", "status": None, "urlParameters": None}], 'event_count': '1'}
class TestAzureSentinelResultsToStix(unittest.TestCase):
"""
class to perform unit test case for azure_sentinel translate results
"""
@staticmethod
def get_first(itr, constraint):
"""
return the obj in the itr if constraint is true
"""
return next(
(obj for obj in itr if constraint(obj)),
None
)
@staticmethod
def get_first_of_type(itr, typ):
"""
to check whether the object belongs to respective stix object
"""
return TestAzureSentinelResultsToStix.get_first(itr, lambda o: isinstance(o, dict) and o.get('type') == typ)
@staticmethod
def test_common_prop():
"""
to test the common stix object properties
"""
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [DATA1], get_module_transformers(MODULE), options)
assert result_bundle['type'] == 'bundle'
result_bundle_objects = result_bundle['objects']
result_bundle_identity = result_bundle_objects[0]
assert result_bundle_identity['type'] == data_source['type']
assert result_bundle_identity['id'] == data_source['id']
assert result_bundle_identity['name'] == data_source['name']
assert result_bundle_identity['identity_class'] == data_source['identity_class']
observed_data = result_bundle_objects[1]
assert observed_data['id'] is not None
assert observed_data['type'] == "observed-data"
assert observed_data['created_by_ref'] == result_bundle_identity['id']
assert observed_data['modified'] is not None
assert observed_data['created'] is not None
assert observed_data['first_observed'] is not None
assert observed_data['last_observed'] is not None
assert observed_data['number_observed'] is not None
@staticmethod
def test_custom_property():
"""
to test the custom stix object properties
"""
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [DATA1], get_module_transformers(MODULE), options)
assert result_bundle['type'] == 'bundle'
result_bundle_objects = result_bundle['objects']
observed_data = result_bundle_objects[1]
assert 'objects' in observed_data
objects = observed_data['objects']
x_msazure_sentinel = TestAzureSentinelResultsToStix.get_first_of_type(objects.values(), 'x-msazure-sentinel')
x_msazure_sentinel_alert = TestAzureSentinelResultsToStix.get_first_of_type(objects.values(), 'x-msazure-sentinel-alert')
x_ibm_finding = TestAzureSentinelResultsToStix.get_first_of_type(objects.values(), 'x-ibm-finding')
x_oca_event = TestAzureSentinelResultsToStix.get_first_of_type(objects.values(), 'x-oca-event')
assert x_msazure_sentinel is not None, 'Custom object type not found'
assert x_msazure_sentinel.keys() == {'type', 'tenant_id', 'subscription_id'}
assert x_msazure_sentinel['tenant_id'] == 'b73e5ba8-34d5-495a-9901-06bdb84cf13e'
assert x_msazure_sentinel['subscription_id'] == '083de1fb-cd2d-4b7c-895a-2b5af1d091e8'
assert x_msazure_sentinel_alert is not None, 'Custom object type not found'
assert x_msazure_sentinel_alert.keys() == {'type', 'recommendedactions', 'status'}
assert type(x_msazure_sentinel_alert['recommendedactions']) is list
assert x_ibm_finding.keys() == {'type', 'createddatetime', 'description', 'time_observed', 'severity', 'name'}
assert x_ibm_finding['name'] == 'Rare SVCHOST service group executed'
assert x_oca_event.keys() == {'type', 'code', 'category', 'created', 'action'}
assert x_oca_event['category'] == 'SuspiciousSVCHOSTRareGroup'
# assert False
@staticmethod
def test_file_process_json_to_stix():
"""
to test file stix object properties
"""
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [DATA1], get_module_transformers(MODULE), options)
result_bundle_objects = result_bundle['objects']
result_bundle_identity = result_bundle_objects[0]
assert result_bundle_identity['type'] == data_source['type']
observed_data = result_bundle_objects[1]
assert 'objects' in observed_data
objects = observed_data['objects']
file_obj = TestAzureSentinelResultsToStix.get_first_of_type(objects.values(), 'file')
directory_obj = TestAzureSentinelResultsToStix.get_first_of_type(objects.values(), 'directory')
assert file_obj is not None, 'file object type not found'
        assert file_obj.keys() == {'type', 'name', 'parent_directory_ref'}
assert file_obj['type'] == 'file'
assert file_obj['name'] == 'services.exe'
assert file_obj['parent_directory_ref'] == '5'
assert directory_obj['path'] == 'c:\\windows\\system32'
@staticmethod
def test_network_json_to_stix():
"""
to test network stix object properties
"""
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [DATA2], get_module_transformers(MODULE), options)
result_bundle_objects = result_bundle['objects']
result_bundle_identity = result_bundle_objects[0]
assert result_bundle_identity['type'] == data_source['type']
observed_data = result_bundle_objects[1]
assert 'objects' in observed_data
objects = observed_data['objects']
network_obj = TestAzureSentinelResultsToStix.get_first_of_type(objects.values(), 'network-traffic')
assert network_obj is not None, 'network-traffic object type not found'
assert network_obj.keys() == {'type', 'dst_ref', 'dst_port', 'protocols', 'src_ref','src_port'}
assert network_obj['type'] == 'network-traffic'
assert network_obj['src_port'] == 9475
assert network_obj['dst_port'] == 22
assert network_obj['protocols'] == ['tcp']
assert network_obj['src_ref'] == '7'
assert network_obj['dst_ref'] == '5'
@staticmethod
def test_network_json_to_stix_negative():
"""
to test negative test case for stix object
"""
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [DATA2], get_module_transformers(MODULE), options)
result_bundle_objects = result_bundle['objects']
result_bundle_identity = result_bundle_objects[0]
assert result_bundle_identity['type'] == data_source['type']
observed_data = result_bundle_objects[1]
assert 'objects' in observed_data
objects = observed_data['objects']
network_obj = TestAzureSentinelResultsToStix.get_first_of_type(objects.values(), 'file')
assert network_obj is None
@staticmethod
def test_unmapped_attribute_with_mapped_attribute():
message = "\"GET /blog HTTP/1.1\" 200 2571"
data = {"message": message, "unmapped": "nothing to see here"}
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [data], get_module_transformers(MODULE), options)
result_bundle_objects = result_bundle['objects']
observed_data = result_bundle_objects[1]
assert 'objects' in observed_data
objects = observed_data['objects']
assert objects == {}
curr_obj = TestAzureSentinelResultsToStix.get_first_of_type(objects.values(), 'message')
        assert (curr_obj is None), 'message object type should not be present'
@staticmethod
def test_unmapped_attribute_alone():
data = {"unmapped": "nothing to see here"}
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [data], get_module_transformers(MODULE), options)
result_bundle_objects = result_bundle['objects']
observed_data = result_bundle_objects[1]
assert 'objects' in observed_data
objects = observed_data['objects']
assert objects == {}
|
py | b40801b39d352cd4e15e3353dac56a8ca059a51f | from django.urls import include, path, re_path
from . import views
urlpatterns = [
path('articles/2003/', views.empty_view, name='articles-2003'),
path('articles/<int:year>/', views.empty_view, name='articles-year'),
path('articles/<int:year>/<int:month>/', views.empty_view, name='articles-year-month'),
path('articles/<int:year>/<int:month>/<int:day>/', views.empty_view, name='articles-year-month-day'),
path('users/', views.empty_view, name='users'),
path('users/<id>/', views.empty_view, name='user-with-id'),
path('included_urls/', include('urlpatterns.included_urls')),
re_path(r'^regex/(?P<pk>[0-9]+)/$', views.empty_view, name='regex'),
re_path(r'^regex_optional/(?P<arg1>\d+)/(?:(?P<arg2>\d+)/)?', views.empty_view, name='regex_optional'),
path('', include('urlpatterns.more_urls')),
path('<lang>/<path:url>/', views.empty_view, name='lang-and-path'),
]
|
py | b40801ffbbd4d13fc8f1d01af8ecc6a7a80fbde9 | from flask import Flask, jsonify, request
import json
from agent import Agent, GraphDataset
from typing import Dict, Any
import torch
import torch.optim as optim
import random
from network import GraphSAC
from trainer import train
from config import Config
from utils import Writer
import logging
from tqdm import tqdm
random.seed(1)
torch.manual_seed(1)
# Get the werkzeug logger
werkzeug_logger = logging.getLogger("werkzeug")
# Change the log level to errors only
werkzeug_logger.setLevel(logging.ERROR)
app = Flask(__name__)
agents: Dict[int, Agent] = {}
model = GraphSAC()
value_optim = optim.Adam([
# {"params": model.q.parameters()}
{"params": model.q1.parameters()},
{"params": model.q2.parameters()}
# {"params": model.v.parameters()}
], Config.lr)
policy_optim = optim.Adam(model.policy.parameters(), Config.lr)
alpha_optim = optim.Adam([model.alpha], Config.alpha_update)
writer = Writer()
writer.save(model)
def train_func(batches):
policy_loss, value_loss = train(model, batches, writer, value_optim, policy_optim, alpha_optim)
tqdm.write(f"[{writer.step} step]policy_loss:{policy_loss:.5g}, value_loss:{value_loss:.5g}")
dataset = GraphDataset(train_func=train_func,
log_reward=writer.log_reward)
@app.route("/", methods=["GET"])
def hello_check() -> str:
out_text = ""
for key, value in agents.items():
out_text += "id :" + str(key) + "\n" + str(value)
return out_text
# @app.route("/", methods=["POST"])
# def init_academy():
# return "hoge"
@app.route("/new", methods=["POST"])
def new_agent() -> Any:
req_data = json.loads(request.data)
print(req_data)
id = random.randint(0, 2**20)
agent = Agent(
adj_mat=torch.Tensor(req_data["Graph"]).reshape(req_data["NodeSize"], -1),
dataset=dataset
)
agents[id] = agent
res_data: Dict[str, int] = {"Id": id}
return jsonify(res_data) # type: ignore
@app.route("/push/<int:id>", methods=["POST"])
def push_data(id):
# assert(id in agents.keys())
# print(f"ID: {id} not found")
req_data = json.loads(request.data)
agent = agents[id]
agent.push_data(
state=torch.Tensor(req_data["State"]).reshape(agent.node_size, -1),
action=torch.Tensor(req_data["Action"]).reshape(agent.node_size, -1),
reward=req_data["Reward"],
done=req_data["Done"]
)
return "ok"
@app.route("/onnx", methods=["GET"])
def get_onnx_policy():
return writer.get_onnx_policy()
@app.route("/save", methods=["GET"])
def save():
writer.save(model)
return "ok"
@app.route("/stop/<int:id>", methods=["GET"])
def stop_agent(id):
assert(id in agents.keys())
agents.pop(id)
return "ok"
@app.route("/step", methods=["GET"])
def get_step():
return str(1 + (writer.step // Config.save_freq))
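# ---------------------------------------------------------------------------
# Hedged client sketch (not part of the original service): one way a
# simulation process could drive the routes above. The host/port and the
# 2-node adjacency matrix are illustrative assumptions; the payload keys
# ("Graph", "NodeSize", "State", "Action", "Reward", "Done", "Id") mirror
# what the handlers above read. Defined only, never called here.
def _example_client(base_url: str = "http://localhost:8080") -> None:
    import requests  # assumed to be available in the client environment
    # Register a new 2-node agent and remember the id the server assigns.
    resp = requests.post(f"{base_url}/new",
                         data=json.dumps({"Graph": [0, 1, 1, 0], "NodeSize": 2}))
    agent_id = resp.json()["Id"]
    # Push a single (state, action, reward, done) transition for that agent.
    requests.post(f"{base_url}/push/{agent_id}",
                  data=json.dumps({"State": [0.0, 0.0], "Action": [0.0, 0.0],
                                   "Reward": 0.0, "Done": False}))
    # Release the agent once the episode is finished.
    requests.get(f"{base_url}/stop/{agent_id}")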
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080, debug=False)
|
py | b40803cd05aaf74181ed2ee531dbfe153791ff60 | #// -------------------------------------------------------------
#// Copyright 2010 Synopsys, Inc.
#// Copyright 2010 Cadence Design Systems, Inc.
#// Copyright 2011 Mentor Graphics Corporation
#// Copyright 2019 Tuomas Poikela (tpoikela)
#// All Rights Reserved Worldwide
#//
#// Licensed under the Apache License, Version 2.0 (the
#// "License"); you may not use this file except in
#// compliance with the License. You may obtain a copy of
#// the License at
#//
#// http://www.apache.org/licenses/LICENSE-2.0
#//
#// Unless required by applicable law or agreed to in
#// writing, software distributed under the License is
#// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
#// CONDITIONS OF ANY KIND, either express or implied. See
#// the License for the specific language governing
#// permissions and limitations under the License.
#// -------------------------------------------------------------
#//
from typing import List
from .uvm_reg import UVMReg
from .uvm_reg_model import *
from .uvm_reg_item import UVMRegItem
from .uvm_reg_sequence import UVMRegFrontdoor
from ..base.uvm_resource_db import UVMResourceDb
from ..base.sv import sv
from ..macros import uvm_fatal, uvm_error, uvm_warning
from ..base.uvm_globals import uvm_check_output_args
#//-----------------------------------------------------------------
#// CLASS: uvm_reg_indirect_data
#// Indirect data access abstraction class
#//
#// Models the behavior of a register used to indirectly access
#// a register array, indexed by a second ~address~ register.
#//
#// This class should not be instantiated directly.
#// A type-specific class extension should be used to
#// provide a factory-enabled constructor and specify the
#// ~n_bits~ and coverage models.
#//-----------------------------------------------------------------
class UVMRegIndirectData(UVMReg):
def __init__(self, name="uvm_reg_indirect", n_bits=0, has_cover=False):
"""
Function: new
Create an instance of this class
Should not be called directly,
other than via super.new().
The value of `n_bits` must match the number of bits
in the indirect register array.
function new(string name = "uvm_reg_indirect",
int unsigned n_bits,
int has_cover)
Args:
name:
n_bits:
has_cover:
"""
UVMReg.__init__(self, name,n_bits,has_cover)
self.m_idx = None
self.m_tbl = []
#endfunction: new
def build(self):
pass
def configure(self, idx, reg_a, blk_parent, regfile_parent=None):
"""
Function: configure
Configure the indirect data register.
The `idx` register specifies the index,
in the `reg_a` register array, of the register to access.
The `idx` must be written to first.
A read or write operation to this register will subsequently
read or write the indexed register in the register array.
The number of bits in each register in the register array must be
equal to `n_bits` of this register.
See <uvm_reg::configure()> for the remaining arguments.
function void configure (uvm_reg idx,
uvm_reg reg_a[],
uvm_reg_block blk_parent,
uvm_reg_file regfile_parent = None)
Args:
idx:
reg_a:
blk_parent:
regfile_parent:
"""
super().configure(blk_parent, regfile_parent, "")
self.m_idx = idx
self.m_tbl = reg_a
# Not testable using pre-defined sequences
UVMResourceDb.set("REG::" + self.get_full_name(), "NO_REG_TESTS", 1)
# Add a frontdoor to each indirectly-accessed register
# for every address map this register is in.
# foreach (m_maps[map]):
for _map in self.m_maps:
self.add_frontdoors(_map)
def add_map(self, _map):
"""
/*local*/ virtual function void add_map(uvm_reg_map map)
Args:
_map:
"""
super().add_map(_map)
self.add_frontdoors(_map)
# endfunction
def add_frontdoors(self, _map):
"""
local function void add_frontdoors(uvm_reg_map map)
Args:
_map:
"""
for i in range(len(self.m_tbl)):
fd = None # uvm_reg_indirect_ftdr_seq
if self.m_tbl[i] is None:
uvm_error(self.get_full_name(),
sv.sformatf("Indirect register #%0d is None", i))
continue
fd = uvm_reg_indirect_ftdr_seq(self.m_idx, i, self)
if self.m_tbl[i].is_in_map(_map):
self.m_tbl[i].set_frontdoor(fd, _map)
else:
_map.add_reg(self.m_tbl[i], -1, "RW", 1, fd)
def do_predict(self, rw, kind=UVM_PREDICT_DIRECT, be=-1):
"""
virtual function void do_predict (uvm_reg_item rw,
uvm_predict_e kind = UVM_PREDICT_DIRECT,
uvm_reg_byte_en_t be = -1)
Args:
rw:
kind:
UVM_PREDICT_DIRECT:
be:
"""
if self.m_idx.get() >= len(self.m_tbl):
uvm_error(self.get_full_name(), sv.sformatf(
"Address register %s has a value (%0d) greater than the maximum indirect register array size (%0d)",
self.m_idx.get_full_name(), self.m_idx.get(), len(self.m_tbl)))
rw.status = UVM_NOT_OK
return
# NOTE limit to 2**32 registers
idx = self.m_idx.get()
self.m_tbl[idx].do_predict(rw, kind, be)
def get_local_map(self, _map, caller=""):
"""
virtual function uvm_reg_map get_local_map(uvm_reg_map map, string caller="")
Args:
_map:
caller:
Returns:
"""
return self.m_idx.get_local_map(_map,caller)
def add_field(self, field):
"""
Just for good measure, to catch and short-circuit non-sensical uses
virtual function void add_field (uvm_reg_field field)
Args:
field:
"""
uvm_error(self.get_full_name(), "Cannot add field to an indirect data access register")
# virtual function void set (uvm_reg_data_t value,
# string fname = "",
# int lineno = 0)
# `uvm_error(get_full_name(), "Cannot set() an indirect data access register")
# endfunction
#
# virtual function uvm_reg_data_t get(string fname = "",
# int lineno = 0)
# `uvm_error(get_full_name(), "Cannot get() an indirect data access register")
# return 0
# endfunction
#
# virtual function uvm_reg get_indirect_reg(string fname = "",
# int lineno = 0)
# int unsigned idx = m_idx.get_mirrored_value()
# return(m_tbl[idx])
# endfunction
def needs_update(self):
return 0
async def write(self, status, value, path=UVM_DEFAULT_PATH, _map=None,
parent=None, prior=-1, extension=None, fname="", lineno=0):
"""
virtual task write(output uvm_status_e status,
input uvm_reg_data_t value,
input uvm_path_e path = UVM_DEFAULT_PATH,
input uvm_reg_map map = None,
input uvm_sequence_base parent = None,
input int prior = -1,
input uvm_object extension = None,
input string fname = "",
input int lineno = 0)
Args:
status:
value:
path:
UVM_DEFAULT_PATH:
_map:
parent:
prior:
extension:
fname:
lineno:
"""
uvm_check_output_args([status])
if path == UVM_DEFAULT_PATH:
blk = self.get_parent() # uvm_reg_block
path = blk.get_default_path()
if path == UVM_BACKDOOR:
uvm_warning(self.get_full_name(),
"Cannot backdoor-write an indirect data access register. Switching to frontdoor.")
path = UVM_FRONTDOOR
# Can't simply call super.write() because it'll call set()
# uvm_reg_item rw
rw = UVMRegItem.type_id.create("write_item", None, self.get_full_name())
await self.XatomicX(1, rw)
rw.element = self
rw.element_kind = UVM_REG
rw.kind = UVM_WRITE
rw.value[0] = value
rw.path = path
rw.map = _map
rw.parent = parent
rw.prior = prior
rw.extension = extension
rw.fname = fname
rw.lineno = lineno
await self.do_write(rw)
status.append(rw.status)
await self.XatomicX(0)
# endtask
# virtual task read(output uvm_status_e status,
# output uvm_reg_data_t value,
# input uvm_path_e path = UVM_DEFAULT_PATH,
# input uvm_reg_map map = None,
# input uvm_sequence_base parent = None,
# input int prior = -1,
# input uvm_object extension = None,
# input string fname = "",
# input int lineno = 0)
#
# if (path == UVM_DEFAULT_PATH):
# uvm_reg_block blk = get_parent()
# path = blk.get_default_path()
# end
#
# if (path == UVM_BACKDOOR):
# `uvm_warning(get_full_name(), "Cannot backdoor-read an indirect data access register. Switching to frontdoor.")
# path = UVM_FRONTDOOR
# end
#
# super.read(status, value, path, map, parent, prior, extension, fname, lineno)
# endtask
# virtual task poke(output uvm_status_e status,
# input uvm_reg_data_t value,
# input string kind = "",
# input uvm_sequence_base parent = None,
# input uvm_object extension = None,
# input string fname = "",
# input int lineno = 0)
# `uvm_error(get_full_name(), "Cannot poke() an indirect data access register")
# status = UVM_NOT_OK
# endtask
#
# virtual task peek(output uvm_status_e status,
# output uvm_reg_data_t value,
# input string kind = "",
# input uvm_sequence_base parent = None,
# input uvm_object extension = None,
# input string fname = "",
# input int lineno = 0)
# `uvm_error(get_full_name(), "Cannot peek() an indirect data access register")
# status = UVM_NOT_OK
# endtask
#
# virtual task update(output uvm_status_e status,
# input uvm_path_e path = UVM_DEFAULT_PATH,
# input uvm_reg_map map = None,
# input uvm_sequence_base parent = None,
# input int prior = -1,
# input uvm_object extension = None,
# input string fname = "",
# input int lineno = 0)
# status = UVM_IS_OK
# endtask
#
# virtual task mirror(output uvm_status_e status,
# input uvm_check_e check = UVM_NO_CHECK,
# input uvm_path_e path = UVM_DEFAULT_PATH,
# input uvm_reg_map map = None,
# input uvm_sequence_base parent = None,
# input int prior = -1,
# input uvm_object extension = None,
# input string fname = "",
# input int lineno = 0)
# status = UVM_IS_OK
# endtask
#
#endclass : uvm_reg_indirect_data
class uvm_reg_indirect_ftdr_seq(UVMRegFrontdoor):
# local uvm_reg m_addr_reg
# local uvm_reg m_data_reg
# local int m_idx
def __init__(self, addr_reg, idx, data_reg):
"""
Constructor
Args:
addr_reg:
idx:
data_reg:
"""
super().__init__("uvm_reg_indirect_ftdr_seq")
self.m_addr_reg = addr_reg # uvm_reg
self.m_data_reg = data_reg # uvm_reg
self.m_idx = idx # int
async def body(self):
"""
Body of indirect sequence
"""
arr: List[UVMRegItem] = []
if not sv.cast(arr, self.rw_info.clone(), UVMRegItem):
uvm_fatal("CAST_FAIL", "Expected UVMRegItem, got " + str(self.rw_info))
rw = arr[0]
rw.element = self.m_addr_reg
rw.kind = UVM_WRITE
rw.value[0] = self.m_idx
await self.m_addr_reg.XatomicX(1, rw)
await self.m_data_reg.XatomicX(1, rw)
# This write selects the address to write/read
await self.m_addr_reg.do_write(rw)
if rw.status == UVM_NOT_OK:
return
arr = []
if sv.cast(arr, self.rw_info.clone(), UVMRegItem):
rw = arr[0]
rw.element = self.m_data_reg
# This fetches the actual data
if self.rw_info.kind == UVM_WRITE:
await self.m_data_reg.do_write(rw)
else:
await self.m_data_reg.do_read(rw)
self.rw_info.value[0] = rw.value[0]
await self.m_addr_reg.XatomicX(0)
await self.m_data_reg.XatomicX(0)
self.rw_info.status = rw.status
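# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file). The class comment for
# UVMRegIndirectData says it should not be instantiated directly; a
# type-specific extension supplies the constructor arguments instead. The
# class name and the 8-bit width below are illustrative assumptions, and the
# commented-out wiring follows the configure() signature documented above.
class ExampleIndirectDataReg(UVMRegIndirectData):
    def __init__(self, name="example_indirect_data_reg"):
        # n_bits must match the width of the indirectly accessed registers
        super().__init__(name, n_bits=8, has_cover=False)
# Typical wiring inside a register block (sketch only):
#   data_reg = ExampleIndirectDataReg("IND_DATA")
#   data_reg.configure(idx=addr_reg, reg_a=reg_array, blk_parent=reg_block)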
|
py | b408052acbc670dc9a8f063aae74043747412bcf | """Version info"""
__version__ = "0.0.1"
|
py | b40807486268cb25a1868ef258b14ffaed9c0b6b | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Account']
class Account(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cloudwatch_role_arn: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Provides a settings of an API Gateway Account. Settings is applied region-wide per `provider` block.
> **Note:** As there is no API method for deleting account settings or resetting it to defaults, destroying this resource will keep your account settings intact
## Example Usage
```python
import pulumi
import pulumi_aws as aws
cloudwatch_role = aws.iam.Role("cloudwatchRole", assume_role_policy=\"\"\"{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": "apigateway.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
\"\"\")
demo = aws.apigateway.Account("demo", cloudwatch_role_arn=cloudwatch_role.arn)
cloudwatch_role_policy = aws.iam.RolePolicy("cloudwatchRolePolicy",
role=cloudwatch_role.id,
policy=\"\"\"{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogGroups",
"logs:DescribeLogStreams",
"logs:PutLogEvents",
"logs:GetLogEvents",
"logs:FilterLogEvents"
],
"Resource": "*"
}
]
}
\"\"\")
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] cloudwatch_role_arn: The ARN of an IAM role for CloudWatch (to allow logging & monitoring).
See more [in AWS Docs](https://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-stage-settings.html#how-to-stage-settings-console).
Logging & monitoring can be enabled/disabled and otherwise tuned on the API Gateway Stage level.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['cloudwatch_role_arn'] = cloudwatch_role_arn
__props__['throttle_settings'] = None
super(Account, __self__).__init__(
'aws:apigateway/account:Account',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
cloudwatch_role_arn: Optional[pulumi.Input[str]] = None,
throttle_settings: Optional[pulumi.Input[pulumi.InputType['AccountThrottleSettingsArgs']]] = None) -> 'Account':
"""
Get an existing Account resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] cloudwatch_role_arn: The ARN of an IAM role for CloudWatch (to allow logging & monitoring).
See more [in AWS Docs](https://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-stage-settings.html#how-to-stage-settings-console).
Logging & monitoring can be enabled/disabled and otherwise tuned on the API Gateway Stage level.
:param pulumi.Input[pulumi.InputType['AccountThrottleSettingsArgs']] throttle_settings: Account-Level throttle settings. See exported fields below.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["cloudwatch_role_arn"] = cloudwatch_role_arn
__props__["throttle_settings"] = throttle_settings
return Account(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="cloudwatchRoleArn")
def cloudwatch_role_arn(self) -> pulumi.Output[Optional[str]]:
"""
The ARN of an IAM role for CloudWatch (to allow logging & monitoring).
See more [in AWS Docs](https://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-stage-settings.html#how-to-stage-settings-console).
Logging & monitoring can be enabled/disabled and otherwise tuned on the API Gateway Stage level.
"""
return pulumi.get(self, "cloudwatch_role_arn")
@property
@pulumi.getter(name="throttleSettings")
def throttle_settings(self) -> pulumi.Output['outputs.AccountThrottleSettings']:
"""
Account-Level throttle settings. See exported fields below.
"""
return pulumi.get(self, "throttle_settings")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
py | b408075f41d2d4f241ee64301283c5c3abedda26 | # -*- coding: utf-8 -*-
#
# # Copyright (c) 2021 Facebook, inc. and its affiliates. All Rights Reserved
#
#
"""
Package wide configuration
"""
import os
import getpass
from pathlib import Path
os.environ['MKL_THREADING_LAYER'] = 'gnu' # Weird bug.
__DEBUG__ = int(os.getenv('DEBUG', 0))
__USE_CHECKPOINTS__ = int(os.getenv('USE_CHECKPOINTS', 0))
__USERNAME__ = getpass.getuser()
__PROJECT_ROOT__ = os.path.realpath(
os.path.join(
os.path.dirname(os.path.join(os.path.abspath(__file__))),
'..')) # Project Root
# SLURM configurations
__SLURM_CONFIGS__ = {
'one_gpu': dict(nodes=1, gpus_per_node=1, ntasks_per_node=1),
'distributed_4': dict(nodes=1, gpus_per_node=4, ntasks_per_node=4),
'distributed_8': dict(nodes=1, gpus_per_node=8, ntasks_per_node=8),
'distributed_16': dict(nodes=2, gpus_per_node=8, ntasks_per_node=8),
'distributed_32': dict(nodes=4, gpus_per_node=8, ntasks_per_node=8)
}
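# Hedged helper sketch (not part of the original file): look up one of the
# SLURM presets above by name; the 'one_gpu' default is an illustrative choice.
def get_slurm_config(name: str = 'one_gpu') -> dict:
    return dict(__SLURM_CONFIGS__[name])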
if __name__ == '__main__':
pass
|
py | b40807f33c55806d88e5ec36660a5999778b62dd | import evdev
e = evdev.ecodes
js_map = {
0x08: e.KEY_BACKSPACE, # BACKSPACE
0x09: e.KEY_TAB, # TAB
0x0D: e.KEY_ENTER, # ENTER
0x10: e.KEY_LEFTSHIFT, # SHIFT
0x11: e.KEY_LEFTCTRL, # CTRL
0x12: e.KEY_LEFTALT, # ALT
0x13: e.KEY_PAUSE, # PAUSE
0x14: e.KEY_CAPSLOCK, # CAPS_LOCK
0x1B: e.KEY_ESC, # ESC
0x20: e.KEY_SPACE, # SPACE
0x21: e.KEY_PAGEUP, # PAGE_UP # also NUM_NORTH_EAST
    0x22: e.KEY_PAGEDOWN,  # PAGE_DOWN # also NUM_SOUTH_EAST
0x23: e.KEY_END, # END # also NUM_SOUTH_WEST
0x24: e.KEY_HOME, # HOME # also NUM_NORTH_WEST
0x25: e.KEY_LEFT, # LEFT # also NUM_WEST
0x26: e.KEY_UP, # UP # also NUM_NORTH
0x27: e.KEY_RIGHT, # RIGHT # also NUM_EAST
0x28: e.KEY_DOWN, # DOWN # also NUM_SOUTH
0x2D: e.KEY_INSERT, # INSERT # also NUM_INSERT
0x2E: e.KEY_DELETE, # DELETE # also NUM_DELETE
0x30: e.KEY_0, # ZERO
0x31: e.KEY_1, # ONE
0x32: e.KEY_2, # TWO
0x33: e.KEY_3, # THREE
0x34: e.KEY_4, # FOUR
0x35: e.KEY_5, # FIVE
0x36: e.KEY_6, # SIX
0x37: e.KEY_7, # SEVEN
0x38: e.KEY_8, # EIGHT
0x39: e.KEY_9, # NINE
0x41: e.KEY_A, # A
0x42: e.KEY_B, # B
0x43: e.KEY_C, # C
0x44: e.KEY_D, # D
0x45: e.KEY_E, # E
0x46: e.KEY_F, # F
0x47: e.KEY_G, # G
0x48: e.KEY_H, # H
0x49: e.KEY_I, # I
0x4A: e.KEY_J, # J
0x4B: e.KEY_K, # K
0x4C: e.KEY_L, # L
0x4D: e.KEY_M, # M
0x4E: e.KEY_N, # N
0x4F: e.KEY_O, # O
0x50: e.KEY_P, # P
0x51: e.KEY_Q, # Q
0x52: e.KEY_R, # R
0x53: e.KEY_S, # S
0x54: e.KEY_T, # T
0x55: e.KEY_U, # U
0x56: e.KEY_V, # V
0x57: e.KEY_W, # W
0x58: e.KEY_X, # X
0x59: e.KEY_Y, # Y
0x5A: e.KEY_Z, # Z
0x5B: e.KEY_LEFTMETA, # META # WIN_KEY_LEFT
0x5C: e.KEY_RIGHTMETA, # WIN_KEY_RIGHT
0x60: e.KEY_KP0, # NUM_ZERO
0x61: e.KEY_KP1, # NUM_ONE
0x62: e.KEY_KP2, # NUM_TWO
0x63: e.KEY_KP3, # NUM_THREE
0x64: e.KEY_KP4, # NUM_FOUR
0x65: e.KEY_KP5, # NUM_FIVE
0x66: e.KEY_KP6, # NUM_SIX
0x67: e.KEY_KP7, # NUM_SEVEN
0x68: e.KEY_KP8, # NUM_EIGHT
0x69: e.KEY_KP9, # NUM_NINE
0x6A: e.KEY_KPASTERISK, # NUM_MULTIPLY
0x6B: e.KEY_KPPLUS, # NUM_PLUS
0x6D: e.KEY_KPMINUS, # NUM_MINUS
0x6E: e.KEY_KPDOT, # NUM_PERIOD
0x6F: e.KEY_KPSLASH, # NUM_DIVISION
0x70: e.KEY_F1, # F1
0x71: e.KEY_F2, # F2
0x72: e.KEY_F3, # F3
0x73: e.KEY_F4, # F4
0x74: e.KEY_F5, # F5
0x75: e.KEY_F6, # F6
0x76: e.KEY_F7, # F7
0x77: e.KEY_F8, # F8
0x78: e.KEY_F9, # F9
0x79: e.KEY_F10, # F10
0x7A: e.KEY_F11, # F11
0x7B: e.KEY_F12, # F12
0x90: e.KEY_NUMLOCK, # NUMLOCK
0x91: e.KEY_SCROLLLOCK, # SCROLL_LOCK
0xBA: e.KEY_SEMICOLON, # SEMICOLON
0xBC: e.KEY_COMMA, # COMMA
0xBE: e.KEY_DOT, # PERIOD
0xBF: e.KEY_SLASH, # SLASH
0xC0: e.KEY_GRAVE, # APOSTROPHE
0xDE: e.KEY_APOSTROPHE, # SINGLE_QUOTE
0xDB: e.KEY_LEFTBRACE, # OPEN_SQUARE_BRACKET
0xDC: e.KEY_BACKSLASH, # BACKSLASH
0xDD: e.KEY_RIGHTBRACE, # CLOSE_SQUARE_BRACKET
}
class Keyboard:
def __init__(self):
self.uinput = evdev.UInput()
def close(self):
self.uinput.close()
def send_key(self, js_keycode, state):
self.uinput.write(evdev.ecodes.EV_KEY, js_map[js_keycode], 1 if state else 0)
self.uinput.syn()
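# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): tap the 'A' key once
# by sending a press followed by a release. Assumes the process is allowed to
# create uinput devices (typically root or membership in the uinput group).
if __name__ == "__main__":
    kb = Keyboard()
    try:
        kb.send_key(0x41, True)   # JS keycode 0x41 ('A') down
        kb.send_key(0x41, False)  # key up
    finally:
        kb.close()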
|
py | b40808d8acf690e91dea4fdc541652e092517a5c | import hashlib
import time
from mayan.apps.django_gpg.tests.literals import (
TEST_KEY_PRIVATE_PASSPHRASE, TEST_KEY_PUBLIC_ID
)
from mayan.apps.django_gpg.tests.mixins import KeyTestMixin
from mayan.apps.documents.models import DocumentVersion
from mayan.apps.documents.tests.base import GenericDocumentTestCase
from mayan.apps.documents.tests.literals import (
TEST_DOCUMENT_PATH, TEST_SMALL_DOCUMENT_PATH
)
from ..models import DetachedSignature, EmbeddedSignature
from ..tasks import task_verify_missing_embedded_signature
from .literals import TEST_SIGNED_DOCUMENT_PATH, TEST_SIGNATURE_ID
from .mixins import SignatureTestMixin
class DetachedSignaturesTestCase(
KeyTestMixin, SignatureTestMixin, GenericDocumentTestCase
):
auto_upload_test_document = False
def test_detached_signature_upload_no_key(self):
self.test_document_path = TEST_SMALL_DOCUMENT_PATH
self._upload_test_document()
self._upload_test_detached_signature()
self.assertEqual(DetachedSignature.objects.count(), 1)
self.assertEqual(
self.test_signature.document_version,
self.test_document.latest_version
)
self.assertEqual(self.test_signature.key_id, TEST_KEY_PUBLIC_ID)
self.assertEqual(self.test_signature.public_key_fingerprint, None)
def test_detached_signature_upload_with_key(self):
self._create_test_key_public()
self.test_document_path = TEST_DOCUMENT_PATH
self._upload_test_document()
self._upload_test_detached_signature()
self.assertEqual(DetachedSignature.objects.count(), 1)
self.assertEqual(
self.test_signature.document_version,
self.test_document.latest_version
)
self.assertEqual(self.test_signature.key_id, TEST_KEY_PUBLIC_ID)
self.assertEqual(
self.test_signature.public_key_fingerprint,
self.test_key_public.fingerprint
)
def test_detached_signature_upload_post_key_verify(self):
self.test_document_path = TEST_DOCUMENT_PATH
self._upload_test_document()
self._upload_test_detached_signature()
self.assertEqual(DetachedSignature.objects.count(), 1)
self.assertEqual(
self.test_signature.document_version,
self.test_document.latest_version
)
self.assertEqual(self.test_signature.key_id, TEST_KEY_PUBLIC_ID)
self.assertEqual(self.test_signature.public_key_fingerprint, None)
self._create_test_key_public()
signature = DetachedSignature.objects.first()
self.assertEqual(
signature.public_key_fingerprint, self.test_key_public.fingerprint
)
def test_detached_signature_upload_post_no_key_verify(self):
self._create_test_key_public()
self.test_document_path = TEST_DOCUMENT_PATH
self._upload_test_document()
self._upload_test_detached_signature()
self.assertEqual(DetachedSignature.objects.count(), 1)
self.assertEqual(
self.test_signature.document_version,
self.test_document.latest_version
)
self.assertEqual(self.test_signature.key_id, TEST_KEY_PUBLIC_ID)
self.assertEqual(
self.test_signature.public_key_fingerprint,
self.test_key_public.fingerprint
)
self.test_key_public.delete()
signature = DetachedSignature.objects.first()
self.assertEqual(signature.public_key_fingerprint, None)
def test_sign_detached(self):
self._create_test_key_private()
self._upload_test_document()
test_detached_signature = DetachedSignature.objects.sign_document_version(
document_version=self.test_document.latest_version,
key=self.test_key_private,
passphrase=TEST_KEY_PRIVATE_PASSPHRASE
)
self.assertEqual(DetachedSignature.objects.count(), 1)
self.assertTrue(test_detached_signature.signature_file.file is not None)
class DocumentSignaturesTestCase(SignatureTestMixin, GenericDocumentTestCase):
auto_upload_test_document = False
def test_unsigned_document_version_method(self):
TEST_UNSIGNED_DOCUMENT_COUNT = 2
TEST_SIGNED_DOCUMENT_COUNT = 2
self.test_document_path = TEST_SMALL_DOCUMENT_PATH
for count in range(TEST_UNSIGNED_DOCUMENT_COUNT):
self._upload_test_document()
self.test_document_path = TEST_SIGNED_DOCUMENT_PATH
for count in range(TEST_SIGNED_DOCUMENT_COUNT):
self._upload_test_document()
self.assertEqual(
EmbeddedSignature.objects.unsigned_document_versions().count(),
TEST_UNSIGNED_DOCUMENT_COUNT
)
def test_method_get_absolute_url(self):
self.test_document_path = TEST_SIGNED_DOCUMENT_PATH
self._upload_test_document()
EmbeddedSignature.objects.first().get_absolute_url()
class EmbeddedSignaturesTestCase(
KeyTestMixin, SignatureTestMixin, GenericDocumentTestCase
):
auto_upload_test_document = False
def test_embedded_signature_no_key(self):
self.test_document_path = TEST_SIGNED_DOCUMENT_PATH
self._upload_test_document()
self.assertEqual(EmbeddedSignature.objects.count(), 1)
signature = EmbeddedSignature.objects.first()
self.assertEqual(
signature.document_version, self.test_document.latest_version
)
self.assertEqual(signature.key_id, TEST_KEY_PUBLIC_ID)
self.assertEqual(signature.signature_id, None)
def test_embedded_signature_post_key_verify(self):
self.test_document_path = TEST_SIGNED_DOCUMENT_PATH
self._upload_test_document()
self.assertEqual(EmbeddedSignature.objects.count(), 1)
signature = EmbeddedSignature.objects.first()
self.assertEqual(
signature.document_version, self.test_document.latest_version
)
self.assertEqual(signature.key_id, TEST_KEY_PUBLIC_ID)
self.assertEqual(signature.signature_id, None)
self._create_test_key_public()
signature = EmbeddedSignature.objects.first()
self.assertEqual(signature.signature_id, TEST_SIGNATURE_ID)
def test_embedded_signature_post_no_key_verify(self):
self._create_test_key_public()
self.test_document_path = TEST_SIGNED_DOCUMENT_PATH
self._upload_test_document()
self.assertEqual(EmbeddedSignature.objects.count(), 1)
signature = EmbeddedSignature.objects.first()
self.assertEqual(
signature.document_version, self.test_document.latest_version
)
self.assertEqual(signature.key_id, TEST_KEY_PUBLIC_ID)
self.assertEqual(signature.signature_id, TEST_SIGNATURE_ID)
self.test_key_public.delete()
signature = EmbeddedSignature.objects.first()
self.assertEqual(signature.signature_id, None)
def test_embedded_signature_with_key(self):
self._create_test_key_public()
self.test_document_path = TEST_SIGNED_DOCUMENT_PATH
self._upload_test_document()
self.assertEqual(EmbeddedSignature.objects.count(), 1)
signature = EmbeddedSignature.objects.first()
self.assertEqual(
signature.document_version,
self.test_document.latest_version
)
self.assertEqual(signature.key_id, TEST_KEY_PUBLIC_ID)
self.assertEqual(
signature.public_key_fingerprint, self.test_key_public.fingerprint
)
self.assertEqual(signature.signature_id, TEST_SIGNATURE_ID)
def test_task_verify_missing_embedded_signature(self):
# Silence converter logging
self._silence_logger(name='mayan.apps.converter.backends')
old_hooks = DocumentVersion._post_save_hooks
DocumentVersion._post_save_hooks = {}
TEST_UNSIGNED_DOCUMENT_COUNT = 2
TEST_SIGNED_DOCUMENT_COUNT = 2
self.test_document_path = TEST_SMALL_DOCUMENT_PATH
for count in range(TEST_UNSIGNED_DOCUMENT_COUNT):
self._upload_test_document()
self.test_document_path = TEST_SIGNED_DOCUMENT_PATH
for count in range(TEST_SIGNED_DOCUMENT_COUNT):
self._upload_test_document()
self.assertEqual(
EmbeddedSignature.objects.unsigned_document_versions().count(),
TEST_UNSIGNED_DOCUMENT_COUNT + TEST_SIGNED_DOCUMENT_COUNT
)
DocumentVersion._post_save_hooks = old_hooks
task_verify_missing_embedded_signature.delay()
self.assertEqual(
EmbeddedSignature.objects.unsigned_document_versions().count(),
TEST_UNSIGNED_DOCUMENT_COUNT
)
def test_embedded_signing(self):
self._create_test_key_private()
self.test_document_path = TEST_DOCUMENT_PATH
self._upload_test_document()
with self.test_document.latest_version.open() as file_object:
file_object.seek(0, 2)
original_size = file_object.tell()
file_object.seek(0)
original_hash = hashlib.sha256(file_object.read()).hexdigest()
signature = EmbeddedSignature.objects.sign_document_version(
document_version=self.test_document.latest_version,
key=self.test_key_private,
passphrase=TEST_KEY_PRIVATE_PASSPHRASE
)
self.assertEqual(EmbeddedSignature.objects.count(), 1)
with signature.document_version.open() as file_object:
file_object.seek(0, 2)
new_size = file_object.tell()
file_object.seek(0)
new_hash = hashlib.sha256(file_object.read()).hexdigest()
self.assertEqual(original_size, new_size)
self.assertEqual(original_hash, new_hash)
def test_document_no_signature(self):
self.test_document_path = TEST_SMALL_DOCUMENT_PATH
self._upload_test_document()
self.assertEqual(EmbeddedSignature.objects.count(), 0)
def test_new_signed_version(self):
self.test_document_path = TEST_SMALL_DOCUMENT_PATH
self._upload_test_document()
with open(file=TEST_SIGNED_DOCUMENT_PATH, mode='rb') as file_object:
signed_version = self.test_document.new_version(
file_object=file_object, comment=''
)
            # Artificial delay since MySQL doesn't store microsecond data in
# timestamps. Version timestamp is used to determine which version
# is the latest.
time.sleep(1)
self.assertEqual(EmbeddedSignature.objects.count(), 1)
signature = EmbeddedSignature.objects.first()
self.assertEqual(signature.document_version, signed_version)
self.assertEqual(signature.key_id, TEST_KEY_PUBLIC_ID)
|
py | b40808f80b16f72ad6fe91742d417848e3aeb9d4 | # gcp_funcs.py 3/27/2018
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Google Cloud Service Provider specific functions
#
# HELPTEXT: "Google Cloud Service Provider"
#
# See: https://cloud.google.com/sdk/docs/scripting-gcloud
#
import json
import time
import sys
from cspbaseclass import CSPBaseClass
from cspbaseclass import Which
from cspbaseclass import error, trace, trace_do, debug, debug_stop
import os
##############################################################################
# some gcloud default values that will vary based on users
#
# default_key_name:     User will need to create their own security key and
#                       specify its name here.
# region:               The gcp region that they wish to run in. Note that
#                       GPU instances might not be available at all sites
# user:                 Login user name for instance. May be hardcoded by ISP
#                       based on the image_name being selected.
##############################################################################
default_key_name = "my-security-key-name"
default_region = "my-region-name"
default_user = "my-user-name"
default_project = "my-project"
default_service_account = "my-service-account"
##############################################################################
# What image and instance type to bring up.
#
# default_image_name: Name of OS template that instance will be created with
# default_instance_type: The default name that defines the memory and cpu sizes
#                        and the gpu types for the instance.
# default_choices:       Available instance types that user can select with
# This will probably be different per region, and will
# continue to change over time. Used as a pre-check in
# command parser to verify choice before sending to csp
##############################################################################
default_image_project = "nvidia-ngc-public"
default_image_name = "nvidia-gpu-cloud-image"
default_instance_type = "n1-standard-1"
default_instance_type_choices = ['n1-standard-1', 'n1-standard-8', 'n1-standard-16', 'n1-standard-32', 'n1-standard-64']
default_maintenance_policy = "TERMINATE"
default_scopes = ["https://www.googleapis.com/auth/devstorage.read_only","https://www.googleapis.com/auth/logging.write","https://www.googleapis.com/auth/monitoring.write","https://www.googleapis.com/auth/servicecontrol","https://www.googleapis.com/auth/service.management.readonly","https://www.googleapis.com/auth/trace.append"]
default_boot_disk_size = 32
default_boot_disk_type = "pd-standard"
default_boot_disk_type_choices = ["pd-standard"]
default_min_cpu_platform = "Automatic"
default_min_cpu_platform_choices= ["Automatic", "Intel Broadwell", "Intel Skylake"]
default_subnet = "default"
default_accelerator_type = "nvidia-tesla-p100"
default_accelerator_type_choices= ["nvidia-tesla-p100"]
default_accelerator_count = 0
default_accelerator_count_choices=[0,1,2,4] # up to 4x P100s
TIMEOUT_1 = (60 * 2) # create, start, terminate
TIMEOUT_2 = (60 * 1) # stop, ping
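##############################################################################
# Hedged helper sketch -- not part of the original file.
#
# Every query in the class below follows the same pattern: run a gcloud
# command with --format=json, json.loads() the stdout, then pull fields out
# of the decoded structure. The field names here ('status', 'id', and the
# natIP path) are exactly the ones read further down; the helper itself is
# purely illustrative and is not called by this module.
##############################################################################
def parse_instance_describe(output):
    ''' illustrative parse of "gcloud compute instances describe" json output '''
    decoded = json.loads(output)
    return {
        'status': decoded['status'],
        'id':     decoded['id'],
        'ip':     decoded['networkInterfaces'][0]['accessConfigs'][0]['natIP'],
    }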
##############################################################################
# CSPClass
#
# Cloud Service Provided primitive access functions
##############################################################################
class CSPClass(CSPBaseClass):
''' Cloud Service Provider Class for gcp '''
##############################################################################
# CSPSetupOK
#
# checks to see that user has ability to create and minipulate VM's on this
# CSP. Want to check that up front, instead of later when actually talking
# to the CSP.
#
# does things like verifing that the CLI is installed on the machine, and
# whatever else is quick and easy to check
#
# Should also check to see that can actually connect with the CSP provider
# (network connection) and that network is reliable.
#
def CSPSetupOK(self):
''' quick check to verify Google gcloud command line interface is installed '''
fullpath = Which("gcloud") # does cli application exist?
if (fullpath == None):
return 1 # error, not found
else:
# TODO: verify network connection to CSP
# TODO: check login setup correctly
return 0
##############################################################################
# ArgOptions
#
# gcp specific argument parser. This extends or overrides default argument
# parsing that is set up in ncsp.py/add_common_options() function
#
# All arguments set up here and in the common code are saved/restored from
# the csp specific args file. See my_class.ArgSave/RestoreFromFile(parser)
# in the base class for implementation.
#
def ArgOptions(self, parser):
''' gcp specific option parser '''
region_list = self.GetRegionsCached()
parser.add_argument('--region', dest='region',
default=default_region, required=False,
choices=region_list, # regions change, this is queried output
help='region in which to create the VM')
parser.add_argument('--project', dest='project',
default=default_project, required=False,
help='is the project in which to create the VM')
parser.add_argument('--image_project', dest='image_project',
default=default_image_project, required=False,
help='is the image project to which the image belongs')
parser.add_argument('--service_account', dest='service_account',
default=default_service_account, required=False,
help='service account')
parser.add_argument('--maintenance_policy', dest='maintenance_policy',
default=default_maintenance_policy, required=False,
help='maintenance_policy')
parser.add_argument('--subnet', dest='subnet',
default=default_subnet, required=False,
help='subnet')
parser.add_argument('--scopes', dest='scopes',
default=default_scopes, required=False,
help='scopes')
parser.add_argument('--boot_disk_size', dest='boot_disk_size',
default=default_boot_disk_size, required=False, type=int,
help='disk boot size')
parser.add_argument('--boot_disk_type', dest='boot_disk_type',
default=default_boot_disk_type, required=False,
choices=default_boot_disk_type_choices,
help='disk boot type')
parser.add_argument('--min_cpu_platform', dest='min_cpu_platform',
default=default_min_cpu_platform, required=False,
choices=default_min_cpu_platform_choices,
help='min_cpu_platform')
parser.add_argument('--accelerator_type', dest='accelerator_type',
default=default_accelerator_type, required=False,
choices=default_accelerator_type_choices,
help='GPU accelerator type')
parser.add_argument('--accelerator_count', dest='accelerator_count',
default=default_accelerator_count, required=False, type=int,
choices=default_accelerator_count_choices,
help='Number of GPU accelerators to attach to instance')
parser.add_argument('--instance_type', dest='instance_type', # 'size' on azure, use 'instance-type' as common name
default=default_instance_type, required=False,
choices=default_instance_type_choices,
help='VM instance (type) to create')
parser.add_argument('--vpcid', dest='vpcid',
default=None, required=False,
help='gcp VPC id')
# these override the common/default values from add_common_options
# with this csp's specific values
parser.set_defaults(image_name=default_image_name);
parser.set_defaults(key_name=default_key_name)
parser.set_defaults(user=default_user);
# ping-ability makes starting/stopping more traceable, but this
        # feature is disabled by default, and explicitly needs to be
# enabled in the Networks Security Group -- see ICMP option
parser.set_defaults(pingable=1) # gcloud instances we created support pings (alibaba not)
###########################################################################
# ArgSanity
#
# CSP class specific argument checks, Called after the argument parser has run
# on the user options as a hook to verify that arguments are correct
#
# 'parser' is the structure returned from argparse.ArgumentParser()
#
# Returns 0 success
# 1 something is wrong, stop
#
def ArgSanity(self, parser, args):
''' gcp Parser Argument sanity checking '''
# print args
return 0 # do nothing for now
###########################################################################
# GetRunStatus
#
# Returns the running status of the instance as a string, like 'running'
# 'terminated', 'pending' etc.. This will be somewhat the same across
# all CSPs, but since it comes from them you should not depend upon
# an exact value out of CSP specific code
#
# Returns: string describing state
#
def GetRunStatus(self, args):
''' Returns running-state of instance from describe-instance-status '''
if (self.CheckID(args) == False):
return 1
cmd = "gcloud --format=\"json\" beta compute"
cmd += " instances describe"
cmd += " --zone \"%s\"" % args.region # "us-west1-b"
cmd += " --quiet" # 'quiet' prevents prompting "do you want to delete y/n?"
cmd += " \"%s\" " % args.vm_name # note gclould takes VM Name, not a uuid as with aws/azure..
rc, output, errval = self.DoCmd(cmd)
if (rc != 0): # check for return code
error ("Problems describe VM \"%s\"" % args.vm_name)
return rc
decoded_output = json.loads(output) # convert json format to python structure
trace(3, json.dumps(decoded_output, indent=4, sort_keys=True))
run_state = decoded_output['status']
# returns something like "RUNNING" or "STOPPED"
self.Inform(run_state)
return(run_state);
###########################################################################
# GetIPSetupCorrectly
#
# Called after the instance is created in order to get if needed, and
# verify that a public IP address has been returned for the VM
#
# Some CSP's, like azure and alibaba return the IP address from another
# function that sets up the public IP. This needs to be called in the
# CreateVM function in that case.
#
# Other CSP's like aws, return the IP address for you "free" of charge
# as part of the instance information for the VM. This might be returned
# only after the VM creation has been completed.
#
# This function is genericly called after the VM has been found to be
# running, to either simply verify that we have a valid IP address in
# the first case above, or to ask the CSP for it and then verify it
# in the second case.
#
    # public IP value will be in args.vm_ip
#
# This function can do other cross-checks to validate other setups like
# checking if the SSH key-name returned from the CSP is the same as we
# sent it. Checks like this are optional, but highly desirable.
#
# Returns: 0 success
# 1 fails, invalid IP or can't get it
#
def GetIPSetupCorrectly(self, args):
''' called after 'running' status to get IP. Does nothing for Google '''
# With google, it looks like the IP address gets changed when restarting
# from 'stop'. -- SO we must clear it in our stop command !
#
# If we don't have IP run "describe" and get it.
# If we have it, simply return it
if (args.vm_ip != ""): # this ip value should have been set in Create
# print "GetIPSetupCorrectly: already have ip:%s" % args.vm_ip
return 0 # so we don't need to get it
# don't have IP value, hopefully VM is in running state and will
# have a IP that we can get
cmd = "gcloud --format=\"json\" beta compute"
cmd += " instances describe"
cmd += " --zone \"%s\"" % args.region # "us-west1-b"
cmd += " --quiet" # 'quiet' prevents prompting "do you want to delete y/n?"
cmd += " \"%s\" " % args.vm_name # note takes VM Name, not a uuid as with aws/azure..
rc, output, errval = self.DoCmd(cmd)
if (rc != 0): # check for return code
error ("Problems describe VM \"%s\"" % args.vm_name)
return rc
decoded_output = json.loads(output) # convert json format to python structure
trace(3, json.dumps(decoded_output, indent=4, sort_keys=True))
# ip value that was all that was really needed
args.vm_ip = decoded_output['networkInterfaces'][0]['accessConfigs'][0]['natIP']
# sanity -- is VM id returned same as what we got from Create?
# got the value for free, might as well check it
vm_id = decoded_output['id']
if (vm_id != args.vm_id):
error ("Sanity - Returned vm_id:%s != vm_id value from create: %s" % (vm_id, args.vm_id))
return 1
# check status -- we should be RUNNING
status = decoded_output['status']
if (status != "RUNNING"):
error ("Shouldn't we be RUNNING? -- current status is \"$status\"")
return(0)
##############################################################################
# CSP specific Network Security Group Functions
#
# ShowSecurityGroups Displays NSG (network security groups) in region
# ExistingSecurityGroup Does NSG exist?
# CreateSecurityGroup Creates a NSG from a name, and adds rules
# DeleteSecurityGroup Deletes a NSG
##############################################################################
##############################################################################
# ShowSecurityGroups
#
# This function shows basic information about your account's security groups
# for your region.
#
# Intended to be informative only, as each CSP will probably supply different
# type of information.
#
# Returns: 0 one or more Netwroks Security Groups found in region
# 1 error, or no NSG's defined in region
#
def ShowSecurityGroups(self, args):
''' Displays all current security groups '''
error ("gcp (google cloud) does not use network security groups")
return 1 # no NSG's found
# 1 or more NSG's found
##############################################################################
# ExistingSecurityGroup
#
# Given a name of a security group in args.nsg_name, this function sees
# if it currently exists on the CSP
#
# Google cloud does not use security groups
#
# Returns: 0 do nothing
#
def ExistingSecurityGroup(self, args):
''' Does the security group name currently exist ? get it if it does'''
trace(2, "\"%s\"" % (args.nsg_name))
error ("gcp (google cloud) does not use network security groups")
return 0
##############################################################################
# CreateSecurityGroup
#
# Creates a full network security group by the name of args.nsg_name, saves the
# value in args.nsg_id
#
# Google cloud does not use security groups
#
# Returns: 0 do nothing
#
def CreateSecurityGroup(self, args):
''' creates security group. saves it in args.nsg_id '''
trace(2, "\"%s\" %s" % (args.nsg_name, args.nsg_id))
error ("gcp (google cloud) does not use network security groups")
return 1
##############################################################################
# DeleteSecurityGroup
#
# Deletes the security group specified at args.nsg_id, and clears that value
#
# Google cloud does not use security groups
#
# Returns: 0 do nothing
#
def DeleteSecurityGroup(self, args):
''' deletes the security group '''
trace(2, "\"%s\" %s" % (args.nsg_name, args.nsg_id))
error ("gcp (google cloud) does not use network security groups")
return 1
##############################################################################
# CSP specific VM functions
#
# CreateVM Creates a complete fully running VM
# StartVM Starts a VM if it was stopped, returns running
# StopVM Stops the VM if it is currently running
# RestartVM Resets VM, may not quite be same as Stop/Start
# DeleteVM Removes from the CSP a running or stopped VM
##############################################################################
##############################################################################
# CreateVM
#
# Creates a new VM, and returns when it is fully running.
#
    # Note that due to the simple way that this code saves its persistent
# data (the id, user name, ... ), only 1 instance can be created
# at a time. Nothing preventing multiple VM's other than way to save/reference
# the id values. The CSPClass.Delete function removes the saved references
#
# The "args" option specify the CSP specific name, disk size, instance type,
# or any other parameter required to fully define the VM that is to be created
#
# Before creating the VM, effort is made to verify that all the supplied
# parameters, such as the SSH key name are valid.
#
# Network Security Group (NSG) is created if needed.
#
# Returns: 0 successful, VM fully created, up and ssh-able
# 1 failure, VM not created for one of many possible reasons
#
def CreateVM(self, args):
''' Creates a new VM. 'args' holds parameters '''
if (args.vm_id != "None" and args.vm_id != None):
error("Instance \"%s\" already exists, run 'deleteVM' first, or 'clean' if stale arg list" % args.vm_id)
return 1
# make sure our persistant IP address is clear
args.vm_ip = ""
# public ssh key file, builds path from options, checks existance
# this sets args.key_file to "keyfile.pub" (better known as "id_rsa.pub")
retcode = self.CheckSSHKeyFilePath(args, ".pub")
if (retcode != 0):
return(retcode)
keyfile_pub = args.key_file
# print "keyfile_pub:%s" % keyfile_pub
# however other than in the createVM, the private Key file
# is required for all the local ssh'ing that we will be doing
retcode = self.CheckSSHKeyFilePath(args, "")
if (retcode != 0):
return(retcode)
        # ssh key file, builds path from options, checks existence
# metadata consists of user name, and the "ssh key" file
#
# Note that where we pass azure the name of our public ssh key,
        # with Google the entire public key string is passed in the metadata
        # (a standalone sketch of building this string is appended after this class)
#
# Example:
# metadata = "ssh-keys=newtonl:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDbzMfRh2nXbcwqqVjGvMgOqD3FyJHk4hGdXofLfBAsfQtZQbUg208yWqPEdFgPVyw8zwhd2WAEnaRSK6TmNOok5qgCydpjxbqoCNIfdhfOSFl+T6veiibzQ2UyWolxNPaQ4IPE4FdQsNDM37lsQNCFyZfBaqfbTSmDi5W8Odoqf7E2tfXcLD4gsFpexM4bgK43aaOCp/ekCiJi+Y13MJTw5VmLIdLgJZ/40oMRpK6nZcipbkHkVQEV9mLpTKDLG/xvb7gRzFiXbp4qgF9dWQKqIkfL4UNpcKTjYXqmdt2okoeDGVhQ0AnVM1pHKIyVulV5c17jz7wyj+0UaizAFvSh [email protected]"
#
# Note: The first few characters of the id_rsa.pub file is "ssh-rsa AAAAB3..."
# don't need to explicitly pass in "ssh-rsa" here. Don't over complicate it
#
with open(keyfile_pub, "r") as f:
ssh_rsa_data = f.read();
metadata="ssh-keys=%s:%s" % (args.user, ssh_rsa_data)
# with Google, don't need to create a network security group.
        # mostly inherit defaults from the main script
# neat thing with Google, is that we can specify GPU's at VM init time
# with other CSPs, number/type of GPU's is a function of the "instance_type"
accelerator_count = 0 # used for delay before ping below
if ( args.accelerator_type != None and args.accelerator_type != ""
and args.accelerator_type != "None" and args.accelerator_count > 0):
accelerator = "%s,count=%d" %(args.accelerator_type, args.accelerator_count)
accelerator_count = args.accelerator_count
# if adding GPUs, add additional info to the VM name
#
# Google GPU 'accelerator' types are of form: nvidia-tesla-p100 - too long for VM name which is
            # limited to 61 chars - so strip off what's after the last '-' to use in the name
#
# Remember with google, names must all be lowercase numbers/letters
if (args.vm_name.find("gpu") == -1): # haven't added "gpu" yet
type = args.accelerator_type[args.accelerator_type.rfind("-")+1:]
args.vm_name += "-%dx%sgpu" %(args.accelerator_count, type)
else:
accelerator = None # don't assign gpus
# Create the VM
# NOTE: with gcp, it's not necessary to assign it Network Security Groups
# when creating the VM's -- Called "network firewall rules", they are
# added later after the VM is created.
self.Inform("CreateVM")
cmd = "gcloud --format=\"json\" beta compute"
cmd += " --project \"%s\" " % args.project # "my-project"
cmd += "instances create \"%s\"" % args.vm_name # "pbradstr-Fri-2018Mar02-181931"
cmd += " --zone \"%s\"" % args.region # "us-west1-b"
cmd += " --quiet" # reduces noize output
cmd += " --machine-type \"%s\"" % args.instance_type # "n1-standard-1"
cmd += " --subnet \"%s\"" % args.subnet # default
cmd += " --metadata \"%s\"" % metadata
cmd += " --maintenance-policy \"%s\"" % args.maintenance_policy # "TERMINATE"
cmd += " --service-account \"%s\"" % args.service_account # "[email protected]"
# cmd += " --scopes %s" % args.scopes # https://www.googleapis.com/auth/devstorage.read_only","https://www.googleapis.com/auth/logging.write","https://www.googleapis.com/auth/monitoring.write","https://www.googleapis.com/auth/servicecontrol","https://www.googleapis.com/auth/service.management.readonly","https://www.googleapis.com/auth/trace.append" \
if ( accelerator != None ): # optional if we want GPUs
cmd += " --accelerator type=%s" % accelerator # nvidia-tesla-p100,count=1"
cmd += " --min-cpu-platform \"%s\"" % args.min_cpu_platform # "Automatic"
cmd += " --image \"%s\"" % args.image_name # "nvidia-gpu-cloud-image-20180227"
cmd += " --image-project \"%s\"" % args.image_project # "nvidia-ngc-public"
cmd += " --boot-disk-size %d" % args.boot_disk_size # 32, in GB
cmd += " --boot-disk-type \"%s\"" % args.boot_disk_type # "pd-standard"
cmd += " --boot-disk-device-name \"%s\"" % args.vm_name # assume same as VM name
# To break big command into individual options per line for debugging
# echo $V | sed -e $'s/ --/\\\n --/g'
# execute the command
rc, output, errval = self.DoCmd(cmd)
if (rc != 0): # check for return code
error ("Problems creating VM \"%s\"" % args.vm_name)
return rc
        # Get the returned information, pull out the vmID and (if possible)
        # the public IP address of the VM
        #
        # NOTE: with gcp, IP address is assigned in output from 'create' command
# don't need to poll for it (we waited for command to complete instead)
decoded_output = json.loads(output) # convert json format to python structure
trace(3, json.dumps(decoded_output, indent=4, sort_keys=True))
# FYI: reason why [0] is user here is that json output format could
# possibly supply more than one instance of data. Since our request
# is specific to one instance, the [0] grouping is kind of redundant
args.vm_id = decoded_output[0]['id'] # may not actually need the ID, all vm_name based
args.vm_ip = decoded_output[0]['networkInterfaces'][0]['accessConfigs'][0]['natIP']
# save vm ID and other fields setup here so don't use them if error later
# actually don't care if it's fully running, (that would be nice) but
# need to save the VM id here since we need to delete it in any case
self.ArgSaveToFile(args)
        # Google has a habit of reusing the IP addresses, way more than any other
        # csp that I've tested. But since this is an old IP with a new VM, if that
        # IP exists in the known_hosts file, it's going to cause problems when
        # we try to ssh into it (as will happen right away with "WaitTillRunning").
# Blow away value in known-hosts now. Note that it's also removed when
# the VM is deleted... but done here on create if forgot or removed some
# other way. (TODO: This step needed on other CSPs ? )
self.DeleteIPFromSSHKnownHostsFile(args)
# quick sanity check -- verify the name returned from the create command
# is the same as we were given
returned_name = decoded_output[0]["name"]
# print("name:%s" % returned_name)
if (decoded_output[0]["name"] != args.vm_name):
error ("sanity check: vm name returned \"%s\" != vm_name \"%s\" given to create command" % (returned_name, args.vm_name))
json.dumps(decoded_output, indent=4, sort_keys=True)
return 1
# Seeing an error here on gcloud only where
#
# 1) VM is up in gcloud web page, and can ssh into it there from the web page
# 2) the first ping in WaitTillRunning succeeds
# 3) the ssh in WaitTillRunning fails with a timeout
# 4) any further ping or ssh fails
# 5) see #1
#
        # A delay before the first ping seems to work around the problem.
        # 5 seconds is not enough, got 30% error rates. 10 seconds seems
        # to work at least with "n1-standard-1" instances and no gpus
        #
        # Adding an additional 10 seconds per GPU. Empirical value
#
delay = 10 + (accelerator_count * 10)
debug (0, "WORKAROUND: external network connect - sleep for %d seconds before ping" % (delay))
time.sleep(delay) # wait a few seconds before ANY command to vm
# Another sanity check -- gcp will return from create only once the
# vm is up and running. This code here (which comes from aws implementation)
        # waits until we can ping and ssh into the VM. It should take little
        # time here with gcp, but on the other hand it's a good confidence booster
        # to know that we have checked and verified that we can ping and ssh into
# the vm.
if (rc == 0):
rc = self.WaitTillRunning(args, "RUNNING", TIMEOUT_1)
# returns 0 only if VM is fully up and running, we have it's public IP
# and can ssh into it
debug(2, "createVM returning %d" % rc)
        return rc                           # 0: success, 1: failure
##############################################################################
# StartVM
#
# Starts a Stopped VM, returns it in a fully running state, where we have
# the correct IP address if it changed, and can ssh into the VM
#
# Returns: 0 successful, VM up and ssh-able
# 1 failure, VM not able to be started, or invalid ID supplied
#
def StartVM(self, args):
''' Starts the VM '''
rc = 1 # assume error
# check for a valid VM id, returns if it's not set, indicating that
# either a VM hasn't been created, or it was deleted.
if (self.CheckID(args) == False): # checks for a valid VM id
return 1 #
# Get run status and check current state
# The strings being checked here may be CSP specific.
status = self.GetRunStatus(args)
if (status == "RUNNING"):
return 0 # already running, simply return
elif (status == "stopping"):
buf = "%s is in %s state, can't start running now" % (args.vm_id, status)
error(buf)
elif (status == "TERMINATED" or status == "null"):
rc = 0 # ok to proceed
else:
buf = "id %s is in \"%s\" state, not sure can start running" % (args.vm_id, status)
error(buf)
if (rc != 0):
return rc # unexpected status
# start the VM
self.Inform("StartVM")
cmd = "gcloud --format=\"json\" beta compute"
cmd += " instances start"
cmd += " --zone \"%s\"" % args.region # "us-west1-b"
cmd += " --quiet" # 'quiet' prevents prompting "do you want to delete y/n?"
cmd += " \"%s\" " % args.vm_name # note takes VM Name, not a uuid as with aws/azure..
rc, output, errval = self.DoCmd(cmd)
if (rc != 0): # check for return code
error ("Problems deleting VM \"%s\"" % args.vm_name)
return rc
decoded_output = json.loads(output) # convert json format to python structure
trace(3, json.dumps(decoded_output, indent=4, sort_keys=True))
# CSP specific - verify that the VM is fully up and running, and that
# we have it's IP address and can ssh into it.
#
# Some CSP's may return from their StartVM in this state, so this call
# is optional
if (rc == 0):
rc = self.WaitTillRunning(args, "RUNNING", TIMEOUT_1) # running
# returns 0 only if VM is fully up and running, we have it's public IP
# and can ssh into it
        return rc                           # 0: success, 1: failure
##############################################################################
# StopVM
#
    # Stops a running VM. No persistent resources are deallocated, as it's expected
# that the VM will be started again.
#
# Note that most CSP's will continue to charge the customer for the allocated
# resources, even in a Stopped state.
#
# Returns: 0 VM fully stopped
# 1 unable to stop VM. May be invalid ID or connection to CSP
#
def StopVM(self, args):
''' Stop the VM '''
# check for a valid VM id, returns if it's not set, indicating that
# either a VM hasn't been created, or it was deleted.
if (self.CheckID(args) == False):
return 1
# Checks status. Note that "running" string may be CSP specific
retcode = self.CheckRunStatus(args, "RUNNING") # running
if (retcode != 0):
error ("Not running")
return retcode
# Stop the VM
self.Inform("StopVM")
cmd = "gcloud --format=\"json\" beta compute"
cmd += " instances stop"
cmd += " --zone \"%s\"" % args.region # "us-west1-b"
cmd += " --quiet" # 'quiet' prevents prompting "do you want to delete y/n?"
cmd += " \"%s\" " % args.vm_name # note takes VM Name, not a uuid as with aws/azure..
rc, output, errval = self.DoCmd(cmd)
if (rc != 0): # check for return code
error ("Problems deleting VM \"%s\"" % args.vm_name)
return rc
decoded_output = json.loads(output) # convert json format to python structure
trace(3, json.dumps(decoded_output, indent=4, sort_keys=True))
# The CSP may return from the above command once the request
# for stopping has been received. However we don't want to
# return from this function until we are actually positive that
        # the VM has completely stopped. This check will be CSP specific
if (rc == 0):
            # make sure our persistent IP address is clear -
# google changes IP address after stop. So make sure
# the next time we need it, we go and ask for it
args.vm_ip = ""
# get status
status = self.GetRunStatus(args)
# CSP specific..
            # The instance status becomes "STOPPING" after a successful API request,
            # and "TERMINATED" once it has been stopped successfully.
            if (status != "TERMINATED"):    # "STOPPING" - transient state
error("Asked VM to stop, but status = \"%s\"" % (status))
rc = 1
else:
rc = self.WaitForRunStatus(args, "TERMINATED", TIMEOUT_2) # stopped
# return 0 only when the VM is fully stopped
        return rc                           # 0: success, 1: failure
##############################################################################
# RestartVM
#
# This function restarts a currently running VM
#
# Returns with the VM in a fully running state, where we have it's public IP
# address and can ssh into it
#
# Returns: 0 successful, VM up and ssh-able
# 1 failure, VM not able to be reset, or invalid ID supplied
#
def RestartVM(self, args): # also known as 'reboot' on aws
''' Restarts the VM '''
# check for a valid VM id, returns if it's not set, indicating that
# either a VM hasn't been created, or it was deleted.
if (self.CheckID(args) == False):
return 1
# can only restart a VM if it's currently running.
# This "running" string may be CSP specific
retcode = self.CheckRunStatus(args, "RUNNING") # running
if (retcode != 0):
error ("Not running")
return retcode
# Restart the VM
self.Inform("RestartVM")
cmd = "gcloud --format=\"json\" beta compute"
cmd += " instances start"
cmd += " --zone \"%s\"" % args.region # "us-west1-b"
cmd += " --quiet" # 'quiet' prevents prompting "do you want to delete y/n?"
cmd += " \"%s\" " % args.vm_name # note takes VM Name, not a uuid as with aws/azure..
rc, output, errval = self.DoCmd(cmd)
if (rc != 0): # check for return code
error ("Problems deleting VM \"%s\"" % args.vm_name)
return rc
decoded_output = json.loads(output) # convert json format to python structure
trace(3, json.dumps(decoded_output, indent=4, sort_keys=True))
# this code is CSP specific.
#
# on aws after "reset", the status never becomes "un-running"
# anytime durring the reset procss -- so we check when it FAILS
# to ping to know if estart actually occured. Then we simply wait
# till it's back up again - pingable and ssh-able to know it's
# running
#
# Ability to ping the VM is also CSP specific, and is normally
# setup in the Network Security Group as a specific rule.
if (retcode == 0):
if (args.pingable == 1):
rc = self.WaitForPing(args, False, TIMEOUT_2)
print "Saw Pingable rc=%d" % rc
else:
time.sleep(5) # let VM go down enough so SSH stops (we hope)
rc = 0 # fake success, since ping isn't supported
if (rc != 0):
error("never went un-pingable. Did VM restart?")
else:
rc = self.WaitTillRunning(args, "RUNNING", TIMEOUT_1) # running
# returns 0 only if VM is fully up and running, we have it's public IP
# and can ssh into it
        return rc                           # 0: success, 1: failure
##############################################################################
# DeleteVM
#
    # Deletes a VM and releases all of its resources other than the Network Security
# Group.
#
# Returns: 0 success, VM and all it's resource are gone
# 1 problems..
#
def DeleteVM(self, args):
''' delete the vm and all the pieces '''
# check for a valid VM id, returns if it's not set, indicating that
# either a VM hasn't been created, or it was deleted.
if (self.CheckID(args) == False):
return 1
self.Inform("DeleteVM")
cmd = "gcloud --format=\"json\" beta compute"
cmd += " instances delete"
cmd += " --zone \"%s\"" % args.region # "us-west1-b"
cmd += " --quiet" # 'quiet' prevents prompting "do you want to delete y/n?"
cmd += " \"%s\" " % args.vm_name # note takes VM Name, not a uuid as with aws/azure..
rc, output, errval = self.DoCmd(cmd)
if (rc != 0): # check for return code
error ("Problems deleting VM \"%s\"" % args.vm_name)
return rc
        # If we allocated any other resources in CreateVM, we probably need to deallocate them here
# CSP_Sepecific_Dealloc(stuff...)
# Is error handled ok? What if problems deleting? -- instance left around?
#
# This cleans out everything in the internal args file, so that user must
# fully specify any options on the next create. This is the easiest/safest
        # way to make sure any CSP specific ID parameters, like the VM id also
# get cleared... Really Big hammer, but squishes everything fairly
#
if (rc == 0): # successful so far?
self.Clean(args) # remove file with the persistent id, ip address, ..
self.m_args_fname = "" # clear name, so won't write back args when done
        return rc                           # 0: success, 1: failure
##############################################################################
# CSP specific utility functions
#
# ShowRunning Shows all the account's running VM's
# GetRegions Returns proper list of regions
##############################################################################
##############################################################################
# ShowRunning
#
# CSP specific information function to print out the name, type, description
# and start time of all the running instances in the region
#
# Returns: 0 1 or more running instances were found in CSP's args.region
# 1 no running instances found
#
def ShowRunning(self, args):
''' Shows list of running instances within region of account '''
# CSP_SpecificShowRunning(args.region)
rc = 0
mylist = []
cmd = "gcloud --format=\"json\" beta compute instances list"
rc, output, errval = self.DoCmd(cmd)
if ( rc == 0 ):
decoded_output = json.loads(output)
items = len(decoded_output) # number of instances
lines_printed = 0
for idx in range(0, items):
status = decoded_output[idx]["status"] # UP or ??
if (status == "RUNNING"):
name = decoded_output[idx]["name"] # "gpu-stress-test"
id = decoded_output[idx]["id"] # "6069200451247196266"
machineType = decoded_output[idx]["machineType"] # "https://www.googleapis.com/compute/beta/projects/my-project/zones/us-central1-a/machineTypes/n1-standard-32-p100x4"
cpuPlatform = decoded_output[idx]["cpuPlatform"] # "Unknown CPU Platform"
creationTimestamp = decoded_output[idx]["creationTimestamp"] # "2017-08-18T16:21:42.196-07:00"
zone = decoded_output[idx]["zone"] # "https://www.googleapis.com/compute/beta/projects/my-project/zones/us-east1-d"
# pull interesting data out of longer fields that were gathered above
launch_time = creationTimestamp[0:10]
# VM machine type running on
i = machineType.rfind('/')
if (i != -1):
type = machineType[i+1:] # from last '/'
else:
type = machineType # unexpected format, take it all
# datacenter region the VM is running in
i = zone.rfind('/')
if (i != -1):
tzone = zone[i+1:] # from last '/'
else:
tzone = zone # unexpected format, take it all
if (lines_printed == 0):
print("# %s:" % self.m_class_name )
print(" %-20s %-16s %-32s %10s \"%s\"" %(id, tzone, type, launch_time, name))
lines_printed += 1
if (lines_printed == 0):
print("%s: No running instances found" % self.m_class_name )
return 0
##############################################################################
# GetRegions
#
# Returns a list of regions where VMs can be created by this CSP.
#
    # These are basically the names of the CSP's data centers... Each data center
    # may offer different resources. Don't care about that here. Just need the
# name.
#
# Used in a choice-list in the arg parser when user gives a non-default
# region name to catch invalid names before any real processing is done
#
# Returns: list of names
def GetRegions(self):
''' Returns a list of region names for the CSP '''
mylist = []
cmd = "gcloud --format=\"json\" beta compute regions list"
rc, output, errval = self.DoCmd(cmd)
if ( rc == 0 ):
decoded_output = json.loads(output)
items = len(decoded_output) # number of regions
for idx in range(0, items):
name = decoded_output[idx]["name"] # asia-east1
status = decoded_output[idx]["status"] # UP or ??
if (status == "UP"):
mylist.append(str(name)) # only include running farms
return mylist # list is empty if no regions
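# --- Editor's addition: hedged, standalone sketch (not part of the original CSP class) ---
# The CreateVM comments above describe gcp's "ssh-keys=<user>:<public key>" metadata
# format. This minimal helper only illustrates building that string; the default user
# name and key path are hypothetical placeholders.
def _example_build_ssh_metadata(user="ubuntu", pubkey_path="~/.ssh/id_rsa.pub"):
    ''' returns a gcloud --metadata "ssh-keys" value for the given user/public key '''
    import os
    with open(os.path.expanduser(pubkey_path), "r") as f:
        ssh_rsa_data = f.read().strip()          # e.g. "ssh-rsa AAAAB3... user@host"
    return "ssh-keys=%s:%s" % (user, ssh_rsa_data)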
|
py | b40809006b8700ee24ee7976d2ea4d077c569acb | from __future__ import division, print_function, unicode_literals
__copyright__ = '''\
Copyright (C) m-click.aero GmbH
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
import OpenSSL.crypto
import cryptography.hazmat.backends
import cryptography.hazmat.primitives.asymmetric.rsa
import cryptography.hazmat.primitives.hashes
import cryptography.hazmat.primitives.serialization.pkcs12
import cryptography.x509.oid
import datetime
import os
import requests.adapters
import ssl
import tempfile
import urllib3.contrib.pyopenssl
try:
from ssl import PROTOCOL_TLS as default_ssl_protocol
except ImportError:
from ssl import PROTOCOL_SSLv23 as default_ssl_protocol
def check_cert_not_after(cert):
cert_not_after = cert.not_valid_after
if cert_not_after < datetime.datetime.utcnow():
raise ValueError('Client certificate expired: Not After: {cert_not_after:%Y-%m-%d %H:%M:%SZ}'.format(**locals()))
def create_pyopenssl_sslcontext(pkcs12_data, pkcs12_password_bytes, ssl_protocol=default_ssl_protocol):
private_key, cert, ca_certs = cryptography.hazmat.primitives.serialization.pkcs12.load_key_and_certificates(
pkcs12_data,
pkcs12_password_bytes
)
check_cert_not_after(cert)
ssl_context = urllib3.contrib.pyopenssl.PyOpenSSLContext(ssl_protocol)
ssl_context._ctx.use_certificate(OpenSSL.crypto.X509.from_cryptography(cert))
if ca_certs:
for ca_cert in ca_certs:
check_cert_not_after(ca_cert)
ssl_context._ctx.add_extra_chain_cert(OpenSSL.crypto.X509.from_cryptography(ca_cert))
ssl_context._ctx.use_privatekey(OpenSSL.crypto.PKey.from_cryptography_key(private_key))
return ssl_context
def create_ssl_sslcontext(pkcs12_data, pkcs12_password_bytes, ssl_protocol=default_ssl_protocol):
private_key, cert, ca_certs = cryptography.hazmat.primitives.serialization.pkcs12.load_key_and_certificates(
pkcs12_data,
pkcs12_password_bytes
)
check_cert_not_after(cert)
ssl_context = ssl.SSLContext(ssl_protocol)
with tempfile.NamedTemporaryFile(delete=False) as c:
try:
pk_buf = private_key.private_bytes(
cryptography.hazmat.primitives.serialization.Encoding.PEM,
cryptography.hazmat.primitives.serialization.PrivateFormat.TraditionalOpenSSL,
                cryptography.hazmat.primitives.serialization.BestAvailableEncryption(pkcs12_password_bytes)  # encrypt the temp key with the same password passed to load_cert_chain below
)
c.write(pk_buf)
buf = cert.public_bytes(cryptography.hazmat.primitives.serialization.Encoding.PEM)
c.write(buf)
if ca_certs:
for ca_cert in ca_certs:
check_cert_not_after(ca_cert)
buf = ca_cert.public_bytes(cryptography.hazmat.primitives.serialization.Encoding.PEM)
c.write(buf)
c.flush()
c.close()
ssl_context.load_cert_chain(c.name, password=pkcs12_password_bytes)
finally:
os.remove(c.name)
return ssl_context
class Pkcs12Adapter(requests.adapters.HTTPAdapter):
def __init__(self, *args, **kwargs):
pkcs12_data = kwargs.pop('pkcs12_data', None)
pkcs12_filename = kwargs.pop('pkcs12_filename', None)
pkcs12_password = kwargs.pop('pkcs12_password', None)
ssl_protocol = kwargs.pop('ssl_protocol', default_ssl_protocol)
if pkcs12_data is None and pkcs12_filename is None:
raise ValueError('Both arguments "pkcs12_data" and "pkcs12_filename" are missing')
if pkcs12_data is not None and pkcs12_filename is not None:
raise ValueError('Argument "pkcs12_data" conflicts with "pkcs12_filename"')
if pkcs12_password is None:
raise ValueError('Argument "pkcs12_password" is missing')
if pkcs12_filename is not None:
with open(pkcs12_filename, 'rb') as pkcs12_file:
pkcs12_data = pkcs12_file.read()
if isinstance(pkcs12_password, bytes):
pkcs12_password_bytes = pkcs12_password
else:
pkcs12_password_bytes = pkcs12_password.encode('utf8')
self.ssl_context = create_pyopenssl_sslcontext(pkcs12_data, pkcs12_password_bytes, ssl_protocol)
super(Pkcs12Adapter, self).__init__(*args, **kwargs)
def init_poolmanager(self, *args, **kwargs):
if self.ssl_context:
kwargs['ssl_context'] = self.ssl_context
return super(Pkcs12Adapter, self).init_poolmanager(*args, **kwargs)
def proxy_manager_for(self, *args, **kwargs):
if self.ssl_context:
kwargs['ssl_context'] = self.ssl_context
return super(Pkcs12Adapter, self).proxy_manager_for(*args, **kwargs)
def request(*args, **kwargs):
pkcs12_data = kwargs.pop('pkcs12_data', None)
pkcs12_filename = kwargs.pop('pkcs12_filename', None)
pkcs12_password = kwargs.pop('pkcs12_password', None)
ssl_protocol = kwargs.pop('ssl_protocol', default_ssl_protocol)
if pkcs12_data is None and pkcs12_filename is None and pkcs12_password is None:
return requests.request(*args, **kwargs)
if 'cert' in kwargs:
raise ValueError('Argument "cert" conflicts with "pkcs12_*" arguments')
with requests.Session() as session:
pkcs12_adapter = Pkcs12Adapter(
pkcs12_data=pkcs12_data,
pkcs12_filename=pkcs12_filename,
pkcs12_password=pkcs12_password,
ssl_protocol=ssl_protocol,
)
session.mount('https://', pkcs12_adapter)
return session.request(*args, **kwargs)
def delete(*args, **kwargs):
return request('delete', *args, **kwargs)
def get(*args, **kwargs):
kwargs.setdefault('allow_redirects', True)
return request('get', *args, **kwargs)
def head(*args, **kwargs):
kwargs.setdefault('allow_redirects', False)
return request('head', *args, **kwargs)
def options(*args, **kwargs):
kwargs.setdefault('allow_redirects', True)
return request('options', *args, **kwargs)
def patch(*args, **kwargs):
return request('patch', *args, **kwargs)
def post(*args, **kwargs):
return request('post', *args, **kwargs)
def put(*args, **kwargs):
return request('put', *args, **kwargs)
def selftest():
key = cryptography.hazmat.primitives.asymmetric.rsa.generate_private_key(public_exponent=65537, key_size=4096)
cert = cryptography.x509.CertificateBuilder().subject_name(
cryptography.x509.Name([
cryptography.x509.NameAttribute(cryptography.x509.oid.NameOID.COMMON_NAME, 'test'),
])
).issuer_name(
cryptography.x509.Name([
cryptography.x509.NameAttribute(cryptography.x509.oid.NameOID.COMMON_NAME, 'test'),
])
).public_key(
key.public_key()
).serial_number(
cryptography.x509.random_serial_number()
).not_valid_before(
datetime.datetime.utcnow()
).not_valid_after(
datetime.datetime.utcnow() + datetime.timedelta(days=1)
).sign(
key,
cryptography.hazmat.primitives.hashes.SHA512(),
cryptography.hazmat.backends.default_backend()
)
pkcs12_data = cryptography.hazmat.primitives.serialization.pkcs12.serialize_key_and_certificates(
name=b'test',
key=key,
cert=cert,
cas=[cert, cert, cert],
encryption_algorithm=cryptography.hazmat.primitives.serialization.BestAvailableEncryption(b'correcthorsebatterystaple')
)
response = get(
'https://example.com/',
pkcs12_data=pkcs12_data,
pkcs12_password='correcthorsebatterystaple'
)
if response.status_code != 200:
raise Exception('Unexpected response: {response!r}'.format(**locals()))
print('Selftest succeeded.')
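# --- Editor's addition: hedged usage sketch, not part of the original module ---
# Shows mounting the Pkcs12Adapter defined above directly on a requests.Session,
# as an alternative to the module-level request()/get() helpers. The file name,
# password and URL are hypothetical placeholders.
def example_session_usage():
    with requests.Session() as session:
        session.mount('https://', Pkcs12Adapter(
            pkcs12_filename='client.p12',
            pkcs12_password='correcthorsebatterystaple',
        ))
        return session.get('https://example.com/')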
if __name__ == '__main__':
selftest()
|
py | b4080aa75354c15aa0db7448f6d5c4f2a0753bc3 | import unittest
import pkg_resources
from bp import parse_jplace
from bp.GPL import insert_multifurcating
import skbio
class InsertTests(unittest.TestCase):
package = 'bp.tests'
def setUp(self):
self.jplacedata_multiple = \
open(self.get_data_path('300/placement_mul.jplace')).read()
self.final_multiple_multifurcating = \
skbio.TreeNode.read(self.get_data_path('300/placement_mul.newick'))
def get_data_path(self, filename):
# adapted from qiime2.plugin.testing.TestPluginBase
return pkg_resources.resource_filename(self.package,
'/data/%s' % filename)
def test_insert_multifurcating(self):
exp = self.final_multiple_multifurcating
placements, backbone = parse_jplace(self.jplacedata_multiple)
obs = insert_multifurcating(placements, backbone)
self.assertEqual({n.name for n in obs.tips()},
{n.name for n in exp.tips()})
self.assertEqual(obs.compare_rfd(exp), 0)
self.assertAlmostEqual(obs.compare_tip_distances(exp), 0)
if __name__ == '__main__':
unittest.main()
|
py | b4080ac3f72edd06a895f87ad1ba63cc42f50972 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from openstack.orchestration.v1 import resource
FAKE_ID = '32e39358-2422-4ad0-a1b5-dd60696bf564'
FAKE_NAME = 'test_stack'
FAKE = {
'links': [{
'href': 'http://res_link',
'rel': 'self'
}, {
'href': 'http://stack_link',
'rel': 'stack'
}],
'logical_resource_id': 'the_resource',
'name': 'the_resource',
'physical_resource_id': '9f38ab5a-37c8-4e40-9702-ce27fc5f6954',
'required_by': [],
'resource_type': 'OS::Heat::FakeResource',
'status': 'CREATE_COMPLETE',
'status_reason': 'state changed',
'updated_time': '2015-03-09T12:15:57.233772',
}
class TestResource(testtools.TestCase):
def test_basic(self):
sot = resource.Resource()
self.assertEqual('resource', sot.resource_key)
self.assertEqual('resources', sot.resources_key)
self.assertEqual('/stacks/%(stack_name)s/%(stack_id)s/resources',
sot.base_path)
self.assertEqual('orchestration', sot.service.service_type)
self.assertFalse(sot.allow_create)
self.assertFalse(sot.allow_retrieve)
self.assertFalse(sot.allow_update)
self.assertFalse(sot.allow_delete)
self.assertTrue(sot.allow_list)
def test_make_it(self):
sot = resource.Resource(**FAKE)
self.assertEqual(FAKE['links'], sot.links)
self.assertEqual(FAKE['logical_resource_id'], sot.logical_resource_id)
self.assertEqual(FAKE['name'], sot.name)
self.assertEqual(FAKE['physical_resource_id'],
sot.physical_resource_id)
self.assertEqual(FAKE['required_by'], sot.required_by)
self.assertEqual(FAKE['resource_type'], sot.resource_type)
self.assertEqual(FAKE['status'], sot.status)
self.assertEqual(FAKE['status_reason'], sot.status_reason)
self.assertEqual(FAKE['updated_time'], sot.updated_at)
|
py | b4080c1d92ab19829293a63220a08a5bc7cee0c7 | from model.group import Group
import random
def test_modify_group_name(app, db, check_ui):
if app.group.count() == 0:
app.group.create(Group(name="test_group"))
old_groups = db.get_group_list()
random_group = random.choice(old_groups)
group = Group(name="test_text_modify", id=random_group.id)
app.group.modify_group_by_id(group.id, group)
new_groups = db.get_group_list()
assert len(old_groups) == len(new_groups)
old_groups.remove(random_group)
old_groups.append(group)
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
def clean(group):
return Group(id=group.id, name=group.name.strip())
if check_ui:
new_groups = map(clean, db.get_group_list())
assert sorted(new_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
# def test_modify_group_header(app):
# if app.group.count() == 0:
# app.group.create(Group(name="test_group"))
# old_groups = app.group.get_group_list()
# app.group.modify(Group(header="test_text_modify"))
# new_groups = app.group.get_group_list()
# assert len(old_groups) == len(new_groups)
|
py | b4080cae8cfdff611966badc508ab4bc76cc3626 | """
Factories for ecommerce models
"""
from factory import (
LazyAttribute,
SelfAttribute,
SubFactory,
Trait,
)
from factory.django import DjangoModelFactory
from factory.fuzzy import (
FuzzyChoice,
FuzzyDecimal,
FuzzyText,
)
import faker
from courses.factories import (
CourseFactory,
ProgramFactory,
)
from ecommerce.api import (
make_reference_id,
generate_cybersource_sa_signature,
)
from ecommerce.models import (
Coupon,
Line,
Order,
Receipt,
)
from micromasters.factories import UserFactory
FAKE = faker.Factory.create()
class OrderFactory(DjangoModelFactory):
"""Factory for Order"""
user = SubFactory(UserFactory)
status = FuzzyChoice(
Order.STATUSES
)
total_price_paid = FuzzyDecimal(low=0, high=12345)
class Meta:
model = Order
class Params:
fulfilled = Trait(
status=Order.FULFILLED
)
class LineFactory(DjangoModelFactory):
"""Factory for Line"""
order = SubFactory(OrderFactory)
price = SelfAttribute('order.total_price_paid')
description = FuzzyText(prefix="Line ")
course_key = FuzzyText()
class Meta:
model = Line
def gen_fake_receipt_data(order=None):
"""
Helper function to generate a fake signed piece of data
"""
data = {}
for _ in range(10):
data[FAKE.text()] = FAKE.text()
keys = sorted(data.keys())
data['signed_field_names'] = ",".join(keys)
data['unsigned_field_names'] = ''
data['req_reference_number'] = make_reference_id(order) if order else ''
data['signature'] = generate_cybersource_sa_signature(data)
return data
class ReceiptFactory(DjangoModelFactory):
"""Factory for Receipt"""
order = SubFactory(OrderFactory)
data = LazyAttribute(lambda receipt: gen_fake_receipt_data(receipt.order))
class Meta:
model = Receipt
class CouponFactory(DjangoModelFactory):
"""Factory for Coupon"""
coupon_code = FuzzyText()
coupon_type = Coupon.STANDARD
amount_type = Coupon.PERCENT_DISCOUNT
amount = FuzzyDecimal(0, 1)
class Meta:
model = Coupon
content_object = SubFactory(ProgramFactory, financial_aid_availability=True)
class Params: # pylint: disable=missing-docstring
percent = Trait(
amount_type='percent-discount',
amount=FuzzyDecimal(0, 1),
)
fixed = Trait(
amount_type='fixed-discount',
amount=FuzzyDecimal(50, 1000),
)
program = Trait(
content_object=SubFactory(ProgramFactory, financial_aid_availability=True)
)
course = Trait(
content_object=SubFactory(CourseFactory, program__financial_aid_availability=True)
)
|
py | b4080ce922eaaf7843abca72eadaedf82433961a | from urllib import parse
|
py | b4080d0b415830d2913b2945a18aadf86e411503 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['PolicyAssignmentArgs', 'PolicyAssignment']
@pulumi.input_type
class PolicyAssignmentArgs:
def __init__(__self__, *,
scope: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
enforcement_mode: Optional[pulumi.Input[Union[str, 'EnforcementMode']]] = None,
identity: Optional[pulumi.Input['IdentityArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
metadata: Optional[Any] = None,
not_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
parameters: Optional[Any] = None,
policy_assignment_name: Optional[pulumi.Input[str]] = None,
policy_definition_id: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input['PolicySkuArgs']] = None):
"""
The set of arguments for constructing a PolicyAssignment resource.
:param pulumi.Input[str] scope: The scope for the policy assignment.
:param pulumi.Input[str] description: This message will be part of response in case of policy violation.
:param pulumi.Input[str] display_name: The display name of the policy assignment.
:param pulumi.Input[Union[str, 'EnforcementMode']] enforcement_mode: The policy assignment enforcement mode. Possible values are Default and DoNotEnforce.
:param pulumi.Input['IdentityArgs'] identity: The managed identity associated with the policy assignment.
:param pulumi.Input[str] location: The location of the policy assignment. Only required when utilizing managed identity.
:param Any metadata: The policy assignment metadata.
:param pulumi.Input[Sequence[pulumi.Input[str]]] not_scopes: The policy's excluded scopes.
:param Any parameters: Required if a parameter is used in policy rule.
:param pulumi.Input[str] policy_assignment_name: The name of the policy assignment.
:param pulumi.Input[str] policy_definition_id: The ID of the policy definition or policy set definition being assigned.
:param pulumi.Input['PolicySkuArgs'] sku: The policy sku. This property is optional, obsolete, and will be ignored.
"""
pulumi.set(__self__, "scope", scope)
if description is not None:
pulumi.set(__self__, "description", description)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if enforcement_mode is not None:
pulumi.set(__self__, "enforcement_mode", enforcement_mode)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if location is not None:
pulumi.set(__self__, "location", location)
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if not_scopes is not None:
pulumi.set(__self__, "not_scopes", not_scopes)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
if policy_assignment_name is not None:
pulumi.set(__self__, "policy_assignment_name", policy_assignment_name)
if policy_definition_id is not None:
pulumi.set(__self__, "policy_definition_id", policy_definition_id)
if sku is not None:
pulumi.set(__self__, "sku", sku)
@property
@pulumi.getter
def scope(self) -> pulumi.Input[str]:
"""
The scope for the policy assignment.
"""
return pulumi.get(self, "scope")
@scope.setter
def scope(self, value: pulumi.Input[str]):
pulumi.set(self, "scope", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
This message will be part of response in case of policy violation.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
The display name of the policy assignment.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="enforcementMode")
def enforcement_mode(self) -> Optional[pulumi.Input[Union[str, 'EnforcementMode']]]:
"""
The policy assignment enforcement mode. Possible values are Default and DoNotEnforce.
"""
return pulumi.get(self, "enforcement_mode")
@enforcement_mode.setter
def enforcement_mode(self, value: Optional[pulumi.Input[Union[str, 'EnforcementMode']]]):
pulumi.set(self, "enforcement_mode", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['IdentityArgs']]:
"""
The managed identity associated with the policy assignment.
"""
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['IdentityArgs']]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The location of the policy assignment. Only required when utilizing managed identity.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def metadata(self) -> Optional[Any]:
"""
The policy assignment metadata.
"""
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[Any]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter(name="notScopes")
def not_scopes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The policy's excluded scopes.
"""
return pulumi.get(self, "not_scopes")
@not_scopes.setter
def not_scopes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "not_scopes", value)
@property
@pulumi.getter
def parameters(self) -> Optional[Any]:
"""
Required if a parameter is used in policy rule.
"""
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: Optional[Any]):
pulumi.set(self, "parameters", value)
@property
@pulumi.getter(name="policyAssignmentName")
def policy_assignment_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the policy assignment.
"""
return pulumi.get(self, "policy_assignment_name")
@policy_assignment_name.setter
def policy_assignment_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "policy_assignment_name", value)
@property
@pulumi.getter(name="policyDefinitionId")
def policy_definition_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the policy definition or policy set definition being assigned.
"""
return pulumi.get(self, "policy_definition_id")
@policy_definition_id.setter
def policy_definition_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "policy_definition_id", value)
@property
@pulumi.getter
def sku(self) -> Optional[pulumi.Input['PolicySkuArgs']]:
"""
The policy sku. This property is optional, obsolete, and will be ignored.
"""
return pulumi.get(self, "sku")
@sku.setter
def sku(self, value: Optional[pulumi.Input['PolicySkuArgs']]):
pulumi.set(self, "sku", value)
class PolicyAssignment(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
enforcement_mode: Optional[pulumi.Input[Union[str, 'EnforcementMode']]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['IdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
metadata: Optional[Any] = None,
not_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
parameters: Optional[Any] = None,
policy_assignment_name: Optional[pulumi.Input[str]] = None,
policy_definition_id: Optional[pulumi.Input[str]] = None,
scope: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['PolicySkuArgs']]] = None,
__props__=None):
"""
The policy assignment.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: This message will be part of response in case of policy violation.
:param pulumi.Input[str] display_name: The display name of the policy assignment.
:param pulumi.Input[Union[str, 'EnforcementMode']] enforcement_mode: The policy assignment enforcement mode. Possible values are Default and DoNotEnforce.
:param pulumi.Input[pulumi.InputType['IdentityArgs']] identity: The managed identity associated with the policy assignment.
:param pulumi.Input[str] location: The location of the policy assignment. Only required when utilizing managed identity.
:param Any metadata: The policy assignment metadata.
:param pulumi.Input[Sequence[pulumi.Input[str]]] not_scopes: The policy's excluded scopes.
:param Any parameters: Required if a parameter is used in policy rule.
:param pulumi.Input[str] policy_assignment_name: The name of the policy assignment.
:param pulumi.Input[str] policy_definition_id: The ID of the policy definition or policy set definition being assigned.
:param pulumi.Input[str] scope: The scope for the policy assignment.
:param pulumi.Input[pulumi.InputType['PolicySkuArgs']] sku: The policy sku. This property is optional, obsolete, and will be ignored.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PolicyAssignmentArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The policy assignment.
:param str resource_name: The name of the resource.
:param PolicyAssignmentArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PolicyAssignmentArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
enforcement_mode: Optional[pulumi.Input[Union[str, 'EnforcementMode']]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['IdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
metadata: Optional[Any] = None,
not_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
parameters: Optional[Any] = None,
policy_assignment_name: Optional[pulumi.Input[str]] = None,
policy_definition_id: Optional[pulumi.Input[str]] = None,
scope: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['PolicySkuArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PolicyAssignmentArgs.__new__(PolicyAssignmentArgs)
__props__.__dict__["description"] = description
__props__.__dict__["display_name"] = display_name
__props__.__dict__["enforcement_mode"] = enforcement_mode
__props__.__dict__["identity"] = identity
__props__.__dict__["location"] = location
__props__.__dict__["metadata"] = metadata
__props__.__dict__["not_scopes"] = not_scopes
__props__.__dict__["parameters"] = parameters
__props__.__dict__["policy_assignment_name"] = policy_assignment_name
__props__.__dict__["policy_definition_id"] = policy_definition_id
if scope is None and not opts.urn:
raise TypeError("Missing required property 'scope'")
__props__.__dict__["scope"] = scope
__props__.__dict__["sku"] = sku
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:authorization/v20190601:PolicyAssignment"), pulumi.Alias(type_="azure-native:authorization:PolicyAssignment"), pulumi.Alias(type_="azure-nextgen:authorization:PolicyAssignment"), pulumi.Alias(type_="azure-native:authorization/v20151001preview:PolicyAssignment"), pulumi.Alias(type_="azure-nextgen:authorization/v20151001preview:PolicyAssignment"), pulumi.Alias(type_="azure-native:authorization/v20160401:PolicyAssignment"), pulumi.Alias(type_="azure-nextgen:authorization/v20160401:PolicyAssignment"), pulumi.Alias(type_="azure-native:authorization/v20161201:PolicyAssignment"), pulumi.Alias(type_="azure-nextgen:authorization/v20161201:PolicyAssignment"), pulumi.Alias(type_="azure-native:authorization/v20170601preview:PolicyAssignment"), pulumi.Alias(type_="azure-nextgen:authorization/v20170601preview:PolicyAssignment"), pulumi.Alias(type_="azure-native:authorization/v20180301:PolicyAssignment"), pulumi.Alias(type_="azure-nextgen:authorization/v20180301:PolicyAssignment"), pulumi.Alias(type_="azure-native:authorization/v20180501:PolicyAssignment"), pulumi.Alias(type_="azure-nextgen:authorization/v20180501:PolicyAssignment"), pulumi.Alias(type_="azure-native:authorization/v20190101:PolicyAssignment"), pulumi.Alias(type_="azure-nextgen:authorization/v20190101:PolicyAssignment"), pulumi.Alias(type_="azure-native:authorization/v20190901:PolicyAssignment"), pulumi.Alias(type_="azure-nextgen:authorization/v20190901:PolicyAssignment"), pulumi.Alias(type_="azure-native:authorization/v20200301:PolicyAssignment"), pulumi.Alias(type_="azure-nextgen:authorization/v20200301:PolicyAssignment"), pulumi.Alias(type_="azure-native:authorization/v20200901:PolicyAssignment"), pulumi.Alias(type_="azure-nextgen:authorization/v20200901:PolicyAssignment")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PolicyAssignment, __self__).__init__(
'azure-native:authorization/v20190601:PolicyAssignment',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PolicyAssignment':
"""
Get an existing PolicyAssignment resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = PolicyAssignmentArgs.__new__(PolicyAssignmentArgs)
__props__.__dict__["description"] = None
__props__.__dict__["display_name"] = None
__props__.__dict__["enforcement_mode"] = None
__props__.__dict__["identity"] = None
__props__.__dict__["location"] = None
__props__.__dict__["metadata"] = None
__props__.__dict__["name"] = None
__props__.__dict__["not_scopes"] = None
__props__.__dict__["parameters"] = None
__props__.__dict__["policy_definition_id"] = None
__props__.__dict__["scope"] = None
__props__.__dict__["sku"] = None
__props__.__dict__["type"] = None
return PolicyAssignment(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
This message will be part of response in case of policy violation.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[Optional[str]]:
"""
The display name of the policy assignment.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="enforcementMode")
def enforcement_mode(self) -> pulumi.Output[Optional[str]]:
"""
The policy assignment enforcement mode. Possible values are Default and DoNotEnforce.
"""
return pulumi.get(self, "enforcement_mode")
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.IdentityResponse']]:
"""
The managed identity associated with the policy assignment.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
The location of the policy assignment. Only required when utilizing managed identity.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def metadata(self) -> pulumi.Output[Optional[Any]]:
"""
The policy assignment metadata.
"""
return pulumi.get(self, "metadata")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the policy assignment.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="notScopes")
def not_scopes(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The policy's excluded scopes.
"""
return pulumi.get(self, "not_scopes")
@property
@pulumi.getter
def parameters(self) -> pulumi.Output[Optional[Any]]:
"""
Required if a parameter is used in policy rule.
"""
return pulumi.get(self, "parameters")
@property
@pulumi.getter(name="policyDefinitionId")
def policy_definition_id(self) -> pulumi.Output[Optional[str]]:
"""
The ID of the policy definition or policy set definition being assigned.
"""
return pulumi.get(self, "policy_definition_id")
@property
@pulumi.getter
def scope(self) -> pulumi.Output[Optional[str]]:
"""
The scope for the policy assignment.
"""
return pulumi.get(self, "scope")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.PolicySkuResponse']]:
"""
The policy sku. This property is optional, obsolete, and will be ignored.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the policy assignment.
"""
return pulumi.get(self, "type")
|
py | b4080d485462fde5a3f02d973075fae44d4fa578 | import threading
import logging
import time
class AlarmEvent:
"""Base class for an Alarm event"""
def __init__(self, data: object):
self.data = data
logging.debug(data)
self.parent = None
def validate(self):
"""
called during setup of alarm
Override to validate each different alarm action
"""
return True
def run(self, config):
"""
DO NOT OVERRIDE
run the code for the alarm action
        then this will continually call back until it's finished and call on_finish when done
"""
self.config = config
logging.debug(self.data)
self.begin_action()
def is_finished(self):
"""
DO NOT OVERRIDE
Calls back every second to see if the alarm has finished
Only call from the holder of this event
"""
if self.parent is None:
raise ValueError("no parent specified for alarm event")
if self.test_is_finished():
logging.debug("detected that alarm event has completed its action")
self.on_finish()
return True
return False
# if self.parent:
# self.parent.on_alarm_event_exit()
def begin_action(self):
"""Override to start the event action"""
pass
def test_is_finished(self):
"""Override to test if alarm event has completed"""
pass
def on_finish(self):
"""Override to carry out clean up after end of event"""
pass
def exit(self):
"""called on exit of parent state"""
pass
def on_interrupt(self):
pass
def on_continue(self):
pass
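# --- Editor's addition: hedged example subclass, not part of the original module ---
# Illustrates how the begin_action/test_is_finished/on_finish hooks described above are
# meant to be overridden: an event that simply "runs" for a fixed number of seconds.
# Reading a "duration" key out of self.data is a hypothetical convention.
class TimedAlarmEvent(AlarmEvent):
    def begin_action(self):
        self.duration = float(self.data.get("duration", 5)) if isinstance(self.data, dict) else 5.0
        self.started_at = time.time()
        logging.debug("timed alarm event started for %s seconds", self.duration)
    def test_is_finished(self):
        return (time.time() - self.started_at) >= self.duration
    def on_finish(self):
        logging.debug("timed alarm event finished")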
|
py | b4080e26ece39dc842260399a7096a80f6930151 | """
Algorithm (made interesting by having to deal with paired reads)
- Consider paired reads (or single reads) as a unit (a list).
- If any one of the unit passes the read filter, write out the whole unit
- For unpaired reads, this is as far as the algorithm needs to go
- For paired reads in the same chromosome, this is easily done by keeping the read/read-pair in a dictionary
until both pairs are found and then flushing it out
- For reads whose mates are in different chromosomes or one mate is unmapped we have some fun
- Two workers will run into this qname (since it appears in two chromosomes)
- Our data can't cross the process boundary
- After we have finished processing a chromosome
- we send the qname, contig and pos of all residual reads back to the parent
OK - let's try a single threaded version since everything I can think of with multiple
processes eventually requires us to go through the BAM again for the residual reads since
reads can't cross process boundaries.
The single threaded version would simply keep everything in a dict until flushed out.
As a bonus, the code is considerably simpler for the single process version
"""
import time
import logging
import os
import pysam
from mitty.benchmarking.alignmentscore import score_alignment_error, load_qname_sidecar, parse_qname
__process_stop_code__ = 'SETECASTRONOMY'
logger = logging.getLogger(__name__)
def main(bam_fname, sidecar_fname, out_fname,
d_range=(-200, 200), reject_d_range=False,
v_range=(-200, 200), reject_v_range=False,
reject_reads_with_variants=False,
reject_reference_reads=False,
strict_scoring=False, do_not_index=True, processes=2):
"""This function extracts reads from a simulation BAM that match the filter critera
:param bam_fname:
:param sidecar_fname:
:param out_fname:
:param d_range:
:param reject_d_range:
:param v_range:
:param reject_v_range:
:param reject_reads_with_variants:
:param reject_reference_reads:
:param strict_scoring:
:param do_not_index:
:param processes:
:return:
"""
def _filter_pass(_r):
"""
:param _r:
:return: T/F, d_err
"""
ri = parse_qname(_r.qname, long_qname_table=long_qname_table)[1 if _r.is_read2 else 0]
is_ref_read = len(ri.v_list) == 0
if is_ref_read and reject_reference_reads:
return False, 0
if not is_ref_read and reject_reads_with_variants:
return False, 0
_d_err = score_alignment_error(_r, ri=ri, max_d=max_d, strict=strict_scoring)
in_d_err_range = d_range[0] <= _d_err <= d_range[1]
if in_d_err_range == reject_d_range:
return False, 0
if not is_ref_read:
# All variants are inside/outside v_range and we want to/do not want to reject the range
if all((v_range[0] <= v <= v_range[1]) == reject_v_range for v in ri.v_list):
return False, 0
return True, _d_err
se_bam = is_single_end_bam(bam_fname)
bam_fp = pysam.AlignmentFile(bam_fname)
long_qname_table = load_qname_sidecar(sidecar_fname)
unsorted_out_fname = out_fname + '.unsorted'
out_fp = pysam.AlignmentFile(unsorted_out_fname, 'wb', header=bam_fp.header)
in_cnt = 0
max_d = d_range[1] + 10000
read_dict = {}
t0 = time.time()
for rd in bam_fp.fetch(until_eof=True):
if rd.flag & 0b100100000000: continue # Skip supplementary or secondary alignments
in_cnt += 1
if in_cnt % 1000000 == 0:
t1 = time.time()
logger.debug(
'Processed {} reads in {:2f}s ({:2f} r/s) {}'.format(
in_cnt, t1 - t0, in_cnt / (t1 - t0), '' if se_bam else '(dict size {})'.format(len(read_dict))))
if se_bam:
keep, d_err = _filter_pass(rd)
if keep:
rd.set_tag('XD', d_err)
out_fp.write(rd)
else:
if rd.qname[:20] not in read_dict:
read_dict[rd.qname[:20]] = [None, None]
rl = read_dict[rd.qname[:20]]
rl[0 if rd.is_read1 else 1] = rd
if all(rl):
keep1, d_err1 = _filter_pass(rl[0])
keep2, d_err2 = _filter_pass(rl[1])
if keep1 or keep2:
rl[0].set_tag('XD', d_err1)
rl[1].set_tag('XD', d_err2)
out_fp.write(rl[0])
out_fp.write(rl[1])
del read_dict[rd.qname[:20]]
out_fp.close()
t1 = time.time()
logger.debug(
'Processed {} reads in {:2f}s ({:2f} r/s) {}'.format(
in_cnt, t1 - t0, in_cnt / (t1 - t0), '' if se_bam else '(dict size {})'.format(len(read_dict))))
logger.debug('Sorting {} -> {}'.format(unsorted_out_fname, out_fname))
t0 = time.time()
pysam.sort('-m', '1G', '-o', out_fname, unsorted_out_fname)
os.remove(unsorted_out_fname)
t1 = time.time()
logger.debug('... {:0.2f}s'.format(t1 - t0))
if not do_not_index:
logger.debug('BAM index {} ...'.format(bam_fname))
t0 = time.time()
pysam.index(out_fname, out_fname + '.bai')
t1 = time.time()
logger.debug('... {:0.2f}s'.format(t1 - t0))
def is_single_end_bam(bam_fname):
bam_fp = pysam.AlignmentFile(bam_fname)
r = next(bam_fp, None)
return not r.is_paired if r is not None else True # Empty BAM? Don't care |
py | b4080e93541f6fdd853451fd6f1c286c238fe1da | # Copyright 2011 Ken Pepple
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for flavors code
"""
from nova.compute import flavors
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova import test
class InstanceTypeTestCase(test.TestCase):
"""Test cases for flavor code."""
def test_will_not_get_bad_default_instance_type(self):
# ensures error raised on bad default flavor.
self.flags(default_flavor='unknown_flavor')
self.assertRaises(exception.FlavorNotFound,
flavors.get_default_flavor)
def test_flavor_get_by_None_name_returns_default(self):
# Ensure get by name returns default flavor with no name.
default = flavors.get_default_flavor()
actual = flavors.get_flavor_by_name(None)
self.assertIsInstance(default, objects.Flavor)
self.assertIsInstance(actual, objects.Flavor)
self.assertEqual(default.flavorid, actual.flavorid)
def test_will_not_get_flavor_with_bad_name(self):
# Ensure get by name returns default flavor with bad name.
self.assertRaises(exception.FlavorNotFound,
flavors.get_flavor_by_name, 10000)
def test_will_not_get_instance_by_unknown_flavor_id(self):
# Ensure get by flavor raises error with wrong flavorid.
self.assertRaises(exception.FlavorNotFound,
flavors.get_flavor_by_flavor_id,
'unknown_flavor')
def test_will_get_instance_by_flavor_id(self):
default_instance_type = flavors.get_default_flavor()
flavorid = default_instance_type.flavorid
fetched = flavors.get_flavor_by_flavor_id(flavorid)
self.assertIsInstance(fetched, objects.Flavor)
self.assertEqual(default_instance_type.flavorid, fetched.flavorid)
class InstanceTypeToolsTest(test.TestCase):
def _dict_to_metadata(self, data):
return [{'key': key, 'value': value} for key, value in data.items()]
def _test_extract_flavor(self, prefix):
instance_type = flavors.get_default_flavor()
instance_type_p = obj_base.obj_to_primitive(instance_type)
metadata = {}
flavors.save_flavor_info(metadata, instance_type, prefix)
instance = {'system_metadata': self._dict_to_metadata(metadata)}
_instance_type = flavors.extract_flavor(instance, prefix)
_instance_type_p = obj_base.obj_to_primitive(_instance_type)
props = flavors.system_metadata_flavor_props.keys()
for key in list(instance_type_p.keys()):
if key not in props:
del instance_type_p[key]
self.assertEqual(instance_type_p, _instance_type_p)
def test_extract_flavor(self):
self._test_extract_flavor('')
def test_extract_flavor_no_sysmeta(self):
instance = {}
prefix = ''
result = flavors.extract_flavor(instance, prefix)
self.assertIsNone(result)
def test_extract_flavor_prefix(self):
self._test_extract_flavor('foo_')
def test_save_flavor_info(self):
instance_type = flavors.get_default_flavor()
example = {}
example_prefix = {}
for key in flavors.system_metadata_flavor_props.keys():
example['instance_type_%s' % key] = instance_type[key]
example_prefix['fooinstance_type_%s' % key] = instance_type[key]
metadata = {}
flavors.save_flavor_info(metadata, instance_type)
self.assertEqual(example, metadata)
metadata = {}
flavors.save_flavor_info(metadata, instance_type, 'foo')
self.assertEqual(example_prefix, metadata)
def test_delete_flavor_info(self):
instance_type = flavors.get_default_flavor()
metadata = {}
flavors.save_flavor_info(metadata, instance_type)
flavors.save_flavor_info(metadata, instance_type, '_')
flavors.delete_flavor_info(metadata, '', '_')
self.assertEqual(metadata, {})
def test_flavor_numa_extras_are_saved(self):
instance_type = flavors.get_default_flavor()
instance_type['extra_specs'] = {
'hw:numa_mem.0': '123',
'hw:numa_cpus.0': '456',
'hw:numa_mem.1': '789',
'hw:numa_cpus.1': 'ABC',
'foo': 'bar',
}
sysmeta = flavors.save_flavor_info({}, instance_type)
_instance_type = flavors.extract_flavor({'system_metadata': sysmeta})
expected_extra_specs = {
'hw:numa_mem.0': '123',
'hw:numa_cpus.0': '456',
'hw:numa_mem.1': '789',
'hw:numa_cpus.1': 'ABC',
}
self.assertEqual(expected_extra_specs, _instance_type['extra_specs'])
flavors.delete_flavor_info(sysmeta, '')
self.assertEqual({}, sysmeta)
class InstanceTypeFilteringTest(test.TestCase):
"""Test cases for the filter option available for instance_type_get_all."""
def setUp(self):
super(InstanceTypeFilteringTest, self).setUp()
self.context = context.get_admin_context()
def assertFilterResults(self, filters, expected):
inst_types = objects.FlavorList.get_all(
self.context, filters=filters)
inst_names = [i.name for i in inst_types]
self.assertEqual(inst_names, expected)
def test_no_filters(self):
filters = None
expected = ['m1.tiny', 'm1.small', 'm1.medium', 'm1.large',
'm1.xlarge']
self.assertFilterResults(filters, expected)
def test_min_memory_mb_filter(self):
# Exclude tiny instance which is 512 MB.
filters = dict(min_memory_mb=513)
expected = ['m1.small', 'm1.medium', 'm1.large', 'm1.xlarge']
self.assertFilterResults(filters, expected)
def test_min_root_gb_filter(self):
# Exclude everything but large and xlarge which have >= 80 GB.
filters = dict(min_root_gb=80)
expected = ['m1.large', 'm1.xlarge']
self.assertFilterResults(filters, expected)
def test_min_memory_mb_AND_root_gb_filter(self):
# Exclude everything but large and xlarge which have >= 80 GB.
filters = dict(min_memory_mb=16384, min_root_gb=80)
expected = ['m1.xlarge']
self.assertFilterResults(filters, expected)
class CreateInstanceTypeTest(test.TestCase):
def assertInvalidInput(self, *create_args, **create_kwargs):
self.assertRaises(exception.InvalidInput, flavors.create,
*create_args, **create_kwargs)
def test_create_with_valid_name(self):
# Names can contain alphanumeric and [_.- ]
flavors.create('azAZ09. -_', 64, 1, 120)
# And they are not limited to ascii characters
# E.g.: m1.huge in simplified Chinese
flavors.create(u'm1.\u5DE8\u5927', 6400, 100, 12000)
def test_name_with_special_characters(self):
# Names can contain all printable characters
flavors.create('_foo.bar-123', 64, 1, 120)
# Ensure instance types raises InvalidInput for invalid characters.
self.assertInvalidInput('foobar\x00', 64, 1, 120)
def test_name_with_non_printable_characters(self):
        # Names cannot contain non-printable characters
self.assertInvalidInput(u'm1.\u0868 #', 64, 1, 120)
def test_name_length_checks(self):
MAX_LEN = 255
# Flavor name with 255 characters or less is valid.
flavors.create('a' * MAX_LEN, 64, 1, 120)
# Flavor name which is more than 255 characters will cause error.
self.assertInvalidInput('a' * (MAX_LEN + 1), 64, 1, 120)
# Flavor name which is empty should cause an error
self.assertInvalidInput('', 64, 1, 120)
def test_all_whitespace_flavor_names_rejected(self):
self.assertInvalidInput(' ', 64, 1, 120)
def test_flavorid_with_invalid_characters(self):
# Ensure Flavor ID can only contain [a-zA-Z0-9_.- ]
self.assertInvalidInput('a', 64, 1, 120, flavorid=u'\u2605')
self.assertInvalidInput('a', 64, 1, 120, flavorid='%%$%$@#$#@$@#$^%')
def test_flavorid_length_checks(self):
MAX_LEN = 255
# Flavor ID which is more than 255 characters will cause error.
self.assertInvalidInput('a', 64, 1, 120, flavorid='a' * (MAX_LEN + 1))
def test_memory_must_be_positive_db_integer(self):
self.assertInvalidInput('flavor1', 'foo', 1, 120)
self.assertInvalidInput('flavor1', -1, 1, 120)
self.assertInvalidInput('flavor1', 0, 1, 120)
self.assertInvalidInput('flavor1', db.MAX_INT + 1, 1, 120)
flavors.create('flavor1', 1, 1, 120)
def test_vcpus_must_be_positive_db_integer(self):
self.assertInvalidInput('flavor`', 64, 'foo', 120)
self.assertInvalidInput('flavor1', 64, -1, 120)
self.assertInvalidInput('flavor1', 64, 0, 120)
self.assertInvalidInput('flavor1', 64, db.MAX_INT + 1, 120)
flavors.create('flavor1', 64, 1, 120)
def test_root_gb_must_be_nonnegative_db_integer(self):
self.assertInvalidInput('flavor1', 64, 1, 'foo')
self.assertInvalidInput('flavor1', 64, 1, -1)
self.assertInvalidInput('flavor1', 64, 1, db.MAX_INT + 1)
flavors.create('flavor1', 64, 1, 0)
flavors.create('flavor2', 64, 1, 120)
def test_ephemeral_gb_must_be_nonnegative_db_integer(self):
self.assertInvalidInput('flavor1', 64, 1, 120, ephemeral_gb='foo')
self.assertInvalidInput('flavor1', 64, 1, 120, ephemeral_gb=-1)
self.assertInvalidInput('flavor1', 64, 1, 120,
ephemeral_gb=db.MAX_INT + 1)
flavors.create('flavor1', 64, 1, 120, ephemeral_gb=0)
flavors.create('flavor2', 64, 1, 120, ephemeral_gb=120)
def test_swap_must_be_nonnegative_db_integer(self):
self.assertInvalidInput('flavor1', 64, 1, 120, swap='foo')
self.assertInvalidInput('flavor1', 64, 1, 120, swap=-1)
self.assertInvalidInput('flavor1', 64, 1, 120,
swap=db.MAX_INT + 1)
flavors.create('flavor1', 64, 1, 120, swap=0)
flavors.create('flavor2', 64, 1, 120, swap=1)
def test_rxtx_factor_must_be_positive_float(self):
self.assertInvalidInput('flavor1', 64, 1, 120, rxtx_factor='foo')
self.assertInvalidInput('flavor1', 64, 1, 120, rxtx_factor=-1.0)
self.assertInvalidInput('flavor1', 64, 1, 120, rxtx_factor=0.0)
flavor = flavors.create('flavor1', 64, 1, 120, rxtx_factor=1.0)
self.assertEqual(1.0, flavor.rxtx_factor)
flavor = flavors.create('flavor2', 64, 1, 120, rxtx_factor=1.1)
self.assertEqual(1.1, flavor.rxtx_factor)
def test_rxtx_factor_must_be_within_sql_float_range(self):
_context = context.get_admin_context()
db.flavor_get_all(_context)
# We do * 10 since this is an approximation and we need to make sure
        # the difference is noticeable.
over_rxtx_factor = db.SQL_SP_FLOAT_MAX * 10
self.assertInvalidInput('flavor1', 64, 1, 120,
rxtx_factor=over_rxtx_factor)
flavor = flavors.create('flavor2', 64, 1, 120,
rxtx_factor=db.SQL_SP_FLOAT_MAX)
self.assertEqual(db.SQL_SP_FLOAT_MAX, flavor.rxtx_factor)
def test_is_public_must_be_valid_bool_string(self):
self.assertInvalidInput('flavor1', 64, 1, 120, is_public='foo')
flavors.create('flavor1', 64, 1, 120, is_public='TRUE')
flavors.create('flavor2', 64, 1, 120, is_public='False')
flavors.create('flavor3', 64, 1, 120, is_public='Yes')
flavors.create('flavor4', 64, 1, 120, is_public='No')
flavors.create('flavor5', 64, 1, 120, is_public='Y')
flavors.create('flavor6', 64, 1, 120, is_public='N')
flavors.create('flavor7', 64, 1, 120, is_public='1')
flavors.create('flavor8', 64, 1, 120, is_public='0')
flavors.create('flavor9', 64, 1, 120, is_public='true')
def test_flavorid_populated(self):
flavor1 = flavors.create('flavor1', 64, 1, 120)
self.assertIsNotNone(flavor1.flavorid)
flavor2 = flavors.create('flavor2', 64, 1, 120, flavorid='')
self.assertIsNotNone(flavor2.flavorid)
flavor3 = flavors.create('flavor3', 64, 1, 120, flavorid='foo')
self.assertEqual('foo', flavor3.flavorid)
def test_default_values(self):
flavor1 = flavors.create('flavor1', 64, 1, 120)
self.assertIsNotNone(flavor1.flavorid)
self.assertEqual(flavor1.ephemeral_gb, 0)
self.assertEqual(flavor1.swap, 0)
self.assertEqual(flavor1.rxtx_factor, 1.0)
def test_basic_create(self):
# Ensure instance types can be created.
ctxt = context.get_admin_context()
original_list = objects.FlavorList.get_all(ctxt)
# Create new type and make sure values stick
flavor = flavors.create('flavor', 64, 1, 120)
self.assertEqual(flavor.name, 'flavor')
self.assertEqual(flavor.memory_mb, 64)
self.assertEqual(flavor.vcpus, 1)
self.assertEqual(flavor.root_gb, 120)
# Ensure new type shows up in list
new_list = objects.FlavorList.get_all(ctxt)
self.assertNotEqual(len(original_list), len(new_list),
'flavor was not created')
def test_create_then_delete(self):
ctxt = context.get_admin_context()
original_list = objects.FlavorList.get_all(ctxt)
flavor = flavors.create('flavor', 64, 1, 120)
# Ensure new type shows up in list
new_list = objects.FlavorList.get_all(ctxt)
self.assertNotEqual(len(original_list), len(new_list),
'instance type was not created')
flavor.destroy()
self.assertRaises(exception.FlavorNotFound,
objects.Flavor.get_by_name, ctxt, flavor.name)
# Deleted instance should not be in list anymore
new_list = objects.FlavorList.get_all(ctxt)
self.assertEqual(len(original_list), len(new_list))
for i, f in enumerate(original_list):
self.assertIsInstance(f, objects.Flavor)
self.assertEqual(f.flavorid, new_list[i].flavorid)
def test_duplicate_names_fail(self):
# Ensures that name duplicates raise FlavorExists
flavors.create('flavor', 256, 1, 120, 200, 'flavor1')
self.assertRaises(exception.FlavorExists,
flavors.create,
'flavor', 64, 1, 120)
def test_duplicate_flavorids_fail(self):
# Ensures that flavorid duplicates raise FlavorExists
flavors.create('flavor1', 64, 1, 120, flavorid='flavorid')
self.assertRaises(exception.FlavorIdExists,
flavors.create,
'flavor2', 64, 1, 120, flavorid='flavorid')
|
py | b408111abb958a4668c70ba9a63f3c5ccd8830e7 | # Copyright 2019-2020 SURF.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from orchestrator.schemas.engine_settings import EngineSettingsBaseSchema, EngineSettingsSchema, GlobalStatusEnum
from orchestrator.schemas.fixed_input import FixedInputConfigurationSchema, FixedInputSchema
from orchestrator.schemas.problem_detail import ProblemDetailSchema
from orchestrator.schemas.process import (
ProcessBaseSchema,
ProcessIdSchema,
ProcessListItemSchema,
ProcessSchema,
ProcessSubscriptionBaseSchema,
ProcessSubscriptionSchema,
)
from orchestrator.schemas.product import ProductBaseSchema, ProductCRUDSchema, ProductSchema
from orchestrator.schemas.product_block import ProductBlockBaseSchema, ProductBlockEnrichedSchema
from orchestrator.schemas.resource_type import ResourceTypeBaseSchema, ResourceTypeSchema
from orchestrator.schemas.subscription import SubscriptionDomainModelSchema, SubscriptionIdSchema, SubscriptionSchema
from orchestrator.schemas.subscription_descriptions import (
SubscriptionDescriptionBaseSchema,
SubscriptionDescriptionSchema,
)
from orchestrator.schemas.workflow import SubscriptionWorkflowListsSchema, WorkflowSchema, WorkflowWithProductTagsSchema
__all__ = (
"EngineSettingsSchema",
"EngineSettingsBaseSchema",
"FixedInputConfigurationSchema",
"GlobalStatusEnum",
"ProblemDetailSchema",
"ProcessBaseSchema",
"ResourceTypeSchema",
"ResourceTypeBaseSchema",
"ProductBlockEnrichedSchema",
"ProductBlockBaseSchema",
"FixedInputSchema",
"WorkflowSchema",
"ProductBaseSchema",
"ProductSchema",
"WorkflowWithProductTagsSchema",
"SubscriptionSchema",
"SubscriptionDomainModelSchema",
"SubscriptionWorkflowListsSchema",
"SubscriptionIdSchema",
"ProcessSubscriptionSchema",
"ProcessSchema",
"ProcessIdSchema",
"ProcessSubscriptionBaseSchema",
"ProcessListItemSchema",
"SubscriptionDescriptionSchema",
"SubscriptionDescriptionBaseSchema",
"ProductCRUDSchema",
)
|
py | b40811fcf3f297373c5610d74e1531b618d04b9f | """
With these settings, tests run faster.
"""
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env(
"DJANGO_SECRET_KEY",
default="4Tz5zrVc3SuVY9QXT1Vcj1LMbIJWmf09y8kBXzS9QwVDeo7T6W78UhvrVGKnWKfg",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#test-runner
TEST_RUNNER = "django.test.runner.DiscoverRunner"
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend"
# Your stuff...
# ------------------------------------------------------------------------------
|
py | b4081203f555e091ae77733d61509eb0a5a9c64b | # Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
import csv
import geometry
import glob
import os
import sqlite3
import math
from module_base import ModuleBase
from module_mixins import IntrospectModuleMixin,\
FileOpenDialogModuleMixin
import module_utils
import vtk
import vtkgdcm
import wx
MAJOR_MARKER_SIZE = 10
MINOR_MARKER_SIZE = 7
STATE_INIT = 0
STATE_IMAGE_LOADED = 1
STATE_APEX = 2 # clicked apex
STATE_LM = 3 # clicked lower middle
STATE_NORMAL_MARKERS = 4 # after first marker has been placed
class Measurement:
filename = ''
apex = (0,0) # in pixels
lm = (0,0)
pogo_dist = 0 # distance between apex and lm in pixels
area = 0 # current area, in floating point pixels squared
class LarynxMeasurement(IntrospectModuleMixin, FileOpenDialogModuleMixin, ModuleBase):
def __init__(self, module_manager):
ModuleBase.__init__(self, module_manager)
self._state = STATE_INIT
self._config.filename = None
self._current_measurement = None
# pogo line first
# outline of larynx second
self._actors = []
# list of pointwidgets, first is apex, second is lm, others
# are others. :)
self._markers = []
self._pogo_line_source = None
self._area_polydata = None
self._view_frame = None
self._viewer = None
self._reader = vtk.vtkJPEGReader()
self._create_view_frame()
self._bind_events()
self.view()
# all modules should toggle this once they have shown their
# stuff.
self.view_initialised = True
self.config_to_logic()
self.logic_to_config()
self.config_to_view()
def _bind_events(self):
self._view_frame.start_button.Bind(
wx.EVT_BUTTON, self._handler_start_button)
self._view_frame.next_button.Bind(
wx.EVT_BUTTON, self._handler_next_button)
self._view_frame.reset_button.Bind(
wx.EVT_BUTTON, self._handler_reset_button)
self._view_frame.save_csv.Bind(
wx.EVT_BUTTON, self._handler_save_csv_button)
self._view_frame.rwi.AddObserver(
'LeftButtonPressEvent',
self._handler_rwi_lbp)
def _create_view_frame(self):
import resources.python.larynx_measurement_frame
reload(resources.python.larynx_measurement_frame)
self._view_frame = module_utils.instantiate_module_view_frame(
self, self._module_manager,
resources.python.larynx_measurement_frame.LarynxMeasurementFrame)
module_utils.create_standard_object_introspection(
self, self._view_frame, self._view_frame.view_frame_panel,
{'Module (self)' : self})
# now setup the VTK stuff
if self._viewer is None and not self._view_frame is None:
# vtkImageViewer() does not zoom but retains colour
# vtkImageViewer2() does zoom but discards colour at
# first window-level action.
# vtkgdcm.vtkImageColorViewer() does both right!
self._viewer = vtkgdcm.vtkImageColorViewer()
self._viewer.SetupInteractor(self._view_frame.rwi)
self._viewer.GetRenderer().SetBackground(0.3,0.3,0.3)
self._set_image_viewer_dummy_input()
pp = vtk.vtkPointPicker()
pp.SetTolerance(0.0)
self._view_frame.rwi.SetPicker(pp)
def close(self):
for i in range(len(self.get_input_descriptions())):
self.set_input(i, None)
# with this complicated de-init, we make sure that VTK is
# properly taken care of
self._viewer.GetRenderer().RemoveAllViewProps()
self._viewer.SetupInteractor(None)
self._viewer.SetRenderer(None)
# this finalize makes sure we don't get any strange X
# errors when we kill the module.
self._viewer.GetRenderWindow().Finalize()
self._viewer.SetRenderWindow(None)
del self._viewer
# done with VTK de-init
self._view_frame.Destroy()
del self._view_frame
ModuleBase.close(self)
def get_input_descriptions(self):
return ()
def get_output_descriptions(self):
return ()
def set_input(self, idx, input_stream):
raise RuntimeError
def get_output(self, idx):
raise RuntimeError
def logic_to_config(self):
pass
def config_to_logic(self):
pass
def view_to_config(self):
# there is no explicit apply step in this viewer module, so we
# keep the config up to date throughout (this is common for
# pure viewer modules)
pass
def config_to_view(self):
# this will happen right after module reload / network load
if self._config.filename is not None:
self._start(self._config.filename)
def view(self):
self._view_frame.Show()
self._view_frame.Raise()
# we need to do this to make sure that the Show() and Raise() above
# are actually performed. Not doing this is what resulted in the
# "empty renderwindow" bug after module reloading, and also in the
# fact that shortly after module creation dummy data rendered outside
# the module frame.
wx.SafeYield()
self.render()
# so if we bring up the view after having executed the network once,
# re-executing will not do a set_input()! (the scheduler doesn't
# know that the module is now dirty) Two solutions:
# * make module dirty when view is activated
# * activate view at instantiation. <--- we're doing this now.
def execute_module(self):
pass
def _add_normal_marker(self, world_pos):
if not len(self._markers) >= 2:
raise RuntimeError(
'There should be 2 or more markers by now!')
pw = self._add_marker(world_pos, (0,1,0), 0.005)
self._markers.append(pw)
self._markers[-1].AddObserver(
'InteractionEvent',
self._handler_nm_ie)
def _add_area_polygon(self):
pd = vtk.vtkPolyData()
self._area_polydata = pd
m = vtk.vtkPolyDataMapper()
m.SetInput(pd)
a = vtk.vtkActor()
a.SetMapper(m)
self._viewer.GetRenderer().AddActor(a)
self._actors.append(a)
def _add_pogo_line(self):
ls = vtk.vtkLineSource()
self._pogo_line_source = ls
m = vtk.vtkPolyDataMapper()
m.SetInput(ls.GetOutput())
a = vtk.vtkActor()
a.SetMapper(m)
prop = a.GetProperty()
prop.SetLineStipplePattern(0x1010)
prop.SetLineStippleRepeatFactor(1)
self._viewer.GetRenderer().AddActor(a)
self._actors.append(a)
self._update_pogo_distance()
self.render()
def _add_sphere(self, world_pos, radius, colour):
ss = vtk.vtkSphereSource()
ss.SetRadius(radius)
m = vtk.vtkPolyDataMapper()
m.SetInput(ss.GetOutput())
a = vtk.vtkActor()
a.SetMapper(m)
a.SetPosition(world_pos)
a.GetProperty().SetColor(colour)
self._viewer.GetRenderer().AddActor(a)
self.render()
def _add_marker(self, world_pos, colour, size=0.01):
"""
@param size: fraction of visible prop bounds diagonal.
"""
#self._add_sphere(world_pos, MAJOR_MARKER_SIZE, (1,1,0))
pw = vtk.vtkPointWidget()
# we're giving it a small bounding box
pw.TranslationModeOn()
b = self._viewer.GetRenderer().ComputeVisiblePropBounds()
# calculate diagonal
dx,dy = b[1] - b[0], b[3] - b[2]
diag = math.hypot(dx,dy)
d = size * diag
w = world_pos
pwb = w[0] - d, w[0] + d, \
w[1] - d, w[1] + d, \
b[4], b[5]
pw.PlaceWidget(pwb)
pw.SetPosition(world_pos)
pw.SetInteractor(self._view_frame.rwi)
pw.AllOff()
pw.GetProperty().SetColor(colour)
pw.On()
return pw
def _add_apex_marker(self, world_pos):
# this method should only be called when the list is empty!
if self._markers:
raise RuntimeError('Marker list is not empty!')
self._markers.append(self._add_marker(world_pos, (1,1,0)))
self._markers[-1].AddObserver(
'InteractionEvent',
self._handler_alm_ie)
def _add_lm_marker(self, world_pos):
if len(self._markers) != 1:
raise RuntimeError(
'Marker list should have only one entry!')
self._markers.append(self._add_marker(world_pos, (0,1,1)))
self._markers[-1].AddObserver(
'InteractionEvent',
self._handler_alm_ie)
def _create_db(self, filename):
con = sqlite3.connect(filename)
con.execute(
"""create table images
(id integer primary key, filename varchar unique)""")
con.execute(
"""create table coords
(
""")
def _handler_alm_ie(self, pw=None, vtk_e=None):
self._update_pogo_distance()
self._update_area()
def _handler_nm_ie(self, pw=None, vtk_e=None):
self._update_area()
def _handler_rwi_lbp(self, vtk_o, vtk_e):
# we only handle this if the user is pressing shift
if not vtk_o.GetShiftKey():
return
pp = vtk_o.GetPicker() # this will be our pointpicker
x,y = vtk_o.GetEventPosition()
#iapp = vtk.vtkImageActorPointPlacer()
#ren = self._viewer.GetRenderer()
#iapp.SetImageActor(our_actor)
#iapp.ComputeWorldPosition(ren, display_pos, 3xdouble,
# 9xdouble)
if not pp.Pick(x,y,0,self._viewer.GetRenderer()):
print "off image!"
else:
print pp.GetMapperPosition()
# now also get WorldPos
ren = self._viewer.GetRenderer()
ren.SetDisplayPoint(x,y,0)
ren.DisplayToWorld()
w = ren.GetWorldPoint()[0:3]
print w
# we have a picked position and a world point, now decide
# what to do based on our current state
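        # Marker-placement state machine (summary of the branches below):
        #   STATE_IMAGE_LOADED   -> first click places the apex marker            -> STATE_APEX
        #   STATE_APEX           -> second click places the LM marker + pogo line -> STATE_LM
        #   STATE_LM             -> third click adds a normal marker + area poly  -> STATE_NORMAL_MARKERS
        #   STATE_NORMAL_MARKERS -> further clicks add markers and update the area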
if self._state == STATE_IMAGE_LOADED:
# put down the apex ball
self._add_apex_marker(w)
self._state = STATE_APEX
elif self._state == STATE_APEX:
# put down the LM ball
self._add_lm_marker(w)
self._add_pogo_line()
self._state = STATE_LM
elif self._state == STATE_LM:
# now we're putting down all other markers
self._add_normal_marker(w)
# now create the polydata
self._add_area_polygon()
self._update_area()
self._state = STATE_NORMAL_MARKERS
elif self._state == STATE_NORMAL_MARKERS:
self._add_normal_marker(w)
self._update_area()
def _handler_reset_button(self, evt):
if self._current_measurement.filename:
self._start(self._current_measurement.filename,
reset=True)
def _handler_save_csv_button(self, evt):
fn = self._current_measurement.filename
if not os.path.exists(fn):
return
self._save_dacs_to_csv(fn)
def _handler_start_button(self, evt):
# let user pick image
# - close down any running analysis
# - analyze all jpg images in that dir
# - read / initialise SQL db
# first get filename from user
filename = self.filename_browse(self._view_frame,
'Select FIRST subject image to start processing',
'Subject image (*.jpg)|*.jpg;*.JPG',
style=wx.OPEN)
if filename:
self._start(filename)
def _handler_next_button(self, evt):
# write everything to to measurement files
# first the points
fn = self._current_measurement.filename
if len(self._markers) > 0:
points_name = '%s.pts' % (fn,)
f = open(points_name, 'w')
pts = [m.GetPosition()[0:3] for m in self._markers]
f.write(str(pts))
f.close()
if len(self._markers) >= 3:
# we only write the DAC if there are at least 3 markers,
# else the measurement is not valid...
# then the distance, area and cormack lehane
dac_name = '%s.dac' % (fn,)
f = open(dac_name, 'w')
clg1 = int(self._view_frame.clg1_cbox.GetValue())
d = self._current_measurement.pogo_dist
a = self._current_measurement.area
dac = [d,a,clg1]
f.write(str(dac))
f.close()
# IS there a next file?
# get ext and dir of current file
current_fn = self._current_measurement.filename
# ext is '.JPG'
ext = os.path.splitext(current_fn)[1]
dir = os.path.dirname(current_fn)
all_files = glob.glob(os.path.join(dir, '*%s' % (ext,)))
# we assume the user has this covered (filenames padded)
all_files.sort()
# find index of current file, take next image
idx = all_files.index(current_fn) + 1
if idx < len(all_files):
new_filename = all_files[idx]
else:
new_filename = all_files[0]
self._start(new_filename)
def _load_measurement(self, new_filename):
# see if there's a points file that we can use
points_name = '%s.pts' % (new_filename,)
try:
f = open(points_name)
except IOError:
pass
else:
# just evaluate what's in there, should be an array of
# three-element tuples (we're going to write away the
# world-pos coordinates)
points = eval(f.read(), {"__builtins__": {}})
f.close()
try:
self._add_apex_marker(points[0])
self._state = STATE_APEX
self._add_lm_marker(points[1])
self._add_pogo_line()
self._state = STATE_LM
self._add_normal_marker(points[2])
self._add_area_polygon()
self._update_area()
self._state = STATE_NORMAL_MARKERS
for pt in points[3:]:
self._add_normal_marker(pt)
self._update_area()
except IndexError:
pass
# now make sure everything else is updated
self._update_pogo_distance()
self._update_area()
# cormack lehane grade
dac_name = '%s.dac' % (new_filename,)
try:
f = open(dac_name)
except IOError:
pass
else:
dist, area, clg1 = eval(f.read(), {"__builtins__":{}})
f.close()
#self._current_measurement.clg1 = clg
self._view_frame.clg1_cbox.SetValue(clg1)
def render(self):
# if you call self._viewer.Render() here, you get the
# VTK-window out of main window effect at startup. So don't.
self._view_frame.rwi.Render()
def _reset_image_pz(self):
"""Reset the pan/zoom of the current image.
"""
ren = self._viewer.GetRenderer()
ren.ResetCamera()
def _save_dacs_to_csv(self, current_fn):
# make list of all filenames in current directory
# load all dacs
img_ext = os.path.splitext(current_fn)[1]
dir = os.path.dirname(current_fn)
all_images = glob.glob(os.path.join(dir, '*%s' % (img_ext,)))
all_dacs = glob.glob(os.path.join(dir, '*%s.dac' % (img_ext,)))
if len(all_dacs) == 0:
self._module_manager.log_error(
"No measurements to save yet.")
return
if len(all_dacs) % 3 != 0:
self._module_manager.log_error(
"Number of measurements not a multiple of 3!\n"
"Can't write CSV file.")
return
if len(all_dacs) != len(all_images):
self._module_manager.log_warning(
"You have not yet measured all images yet.\n"
"Will write CSV anyway, please double-check.")
# sort the dacs
all_dacs.sort()
csv_fn = os.path.join(dir, 'measurements.csv')
csv_f = open(csv_fn, 'w')
wrtr = csv.writer(csv_f, delimiter=',',
quotechar='"')
# write header row
wrtr.writerow([
'name', 'clg1 a', 'clg1 b', 'clg1 c',
'norm dist a', 'norm dist b', 'norm dist c',
'dist a', 'dist b', 'dist c',
'norm area a', 'norm area b', 'norm area c',
'area a', 'area b', 'area c'
])
# now go through all the dac files and write them out in
# multiples of three
for i in range(len(all_dacs) / 3):
three_names = []
clg = []
norm_dist = []
dist = []
norm_area = []
area = []
for j in range(3):
# get dac filename and read its contents
dfn = all_dacs[i*3 + j]
d,a,c = eval(open(dfn).read(),
{"__builtins__":{}})
# create short (extensionless) filename for creating
# the measurement title
sfn = os.path.splitext(os.path.basename(dfn))[0]
# we have to strip off the jpg as well
sfn = os.path.splitext(sfn)[0]
# store it for creating the string later
three_names.append(sfn)
if j == 0:
# if this is the first of a three-element group,
# store the distance and area to normalise the
# other two with.
nd = d
na = a
norm_dist.append(1.0)
norm_area.append(1.0)
else:
# if not, normalise and store
norm_dist.append(d / nd)
norm_area.append(a / na)
# store the pixel measurements
clg.append(c)
dist.append(d)
area.append(a)
# write out a measurement line to the CSV file
name3 = '%s-%s-%s' % tuple(three_names)
wrtr.writerow([name3] + clg +
norm_dist + dist +
norm_area + area)
csv_f.close()
def _stop(self):
# close down any running analysis
# first remove all polydatas we might have added to the scene
for a in self._actors:
self._viewer.GetRenderer().RemoveViewProp(a)
for m in self._markers:
m.Off()
m.SetInteractor(None)
del self._markers[:]
# setup dummy image input.
self._set_image_viewer_dummy_input()
# set state to initialised
self._state = STATE_INIT
def _start(self, new_filename, reset=False):
# first see if we can open the new file
new_reader = self._open_image_file(new_filename)
# if so, stop previous session
self._stop()
# replace reader and show the image
self._reader = new_reader
self._viewer.SetInput(self._reader.GetOutput())
# show the new filename in the correct image box
# first shorten it slightly: split it at the path separator,
# take the last two components (last dir comp, filename), then
# prepend a '...' and join them all together again. example
# output: .../tmp/file.jpg
short_p = os.path.sep.join(
['...']+new_filename.split(os.path.sep)[-2:])
self._view_frame.current_image_txt.SetValue(short_p)
self._config.filename = new_filename
cm = Measurement()
cm.filename = self._config.filename
self._current_measurement = cm
self._actors = []
self._reset_image_pz()
self.render()
self._state = STATE_IMAGE_LOADED
# this means that the user doesn't want the stored data, for
# example when resetting the image measurement
if not reset:
self._load_measurement(new_filename)
self.render()
# now determine our current progress by tallying up DAC files
ext = os.path.splitext(new_filename)[1]
dir = os.path.dirname(new_filename)
all_images = glob.glob(os.path.join(dir, '*%s' % (ext,)))
all_dacs = glob.glob(os.path.join(dir, '*%s.dac' % (ext,)))
progress_msg = "%d / %d images complete" % \
(len(all_dacs), len(all_images))
self._view_frame.progress_txt.SetValue(progress_msg)
def _set_image_viewer_dummy_input(self):
ds = vtk.vtkImageGridSource()
self._viewer.SetInput(ds.GetOutput())
def _open_image_file(self, filename):
# create a new instance of the current reader
# to read the passed file.
nr = self._reader.NewInstance()
nr.SetFileName(filename)
# FIXME: trap this error
nr.Update()
return nr
def _update_pogo_distance(self):
"""Based on the first two markers, update the pogo line and
recalculate the distance.
"""
if len(self._markers) >= 2:
p1,p2 = [self._markers[i].GetPosition() for i in range(2)]
self._pogo_line_source.SetPoint1(p1)
self._pogo_line_source.SetPoint2(p2)
pogo_dist = math.hypot(p2[0] - p1[0], p2[1] - p1[1])
# store pogo_dist in Measurement
self._current_measurement.pogo_dist = pogo_dist
self._view_frame.pogo_dist_txt.SetValue('%.2f' %
(pogo_dist,))
def _update_area(self):
"""Based on three or more markers in total, draw a nice
polygon and update the total area.
"""
if len(self._markers) >= 3:
# start from apex, then all markers to the right of the
# pogo line, then the lm point, then all markers to the
# left.
p1,p2 = [self._markers[i].GetPosition()[0:2] for i in range(2)]
z = self._markers[0].GetPosition()[2]
n,mag,lv = geometry.normalise_line(p1,p2)
# get its orthogonal vector
no = - n[1],n[0]
pts = [self._markers[i].GetPosition()[0:2]
for i in range(2, len(self._markers))]
right_pts = []
left_pts = []
for p in pts:
v = geometry.points_to_vector(p1,p)
# project v onto n
v_on_n = geometry.dot(v,n) * n
# then use that to determine the vector orthogonal on
# n from p
v_ortho_n = v - v_on_n
# rl is positive for right hemisphere, negative for
# otherwise
rl = geometry.dot(no, v_ortho_n)
if rl >= 0:
right_pts.append(p)
elif rl < 0:
left_pts.append(p)
vpts = vtk.vtkPoints()
vpts.InsertPoint(0,p1[0],p1[1],z)
for i,j in enumerate(right_pts):
vpts.InsertPoint(i+1,j[0],j[1],z)
if len(right_pts) == 0:
i = -1
vpts.InsertPoint(i+2,p2[0],p2[1],z)
for k,j in enumerate(left_pts):
vpts.InsertPoint(i+3+k,j[0],j[1],z)
num_points = 2 + len(left_pts) + len(right_pts)
assert(vpts.GetNumberOfPoints() == num_points)
self._area_polydata.SetPoints(vpts)
cells = vtk.vtkCellArray()
# we repeat the first point
cells.InsertNextCell(num_points + 1)
for i in range(num_points):
cells.InsertCellPoint(i)
cells.InsertCellPoint(0)
self._area_polydata.SetLines(cells)
# now calculate the polygon area according to:
# http://local.wasp.uwa.edu.au/~pbourke/geometry/polyarea/
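            # i.e. the shoelace formula: area = 1/2 * |sum_i (x_i*y_{i+1} - x_{i+1}*y_i)|;
            # the sign of the raw sum depends on vertex orientation, hence the
            # negation applied below.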
all_pts = [p1] + right_pts + [p2] + left_pts + [p1]
tot = 0
for i in range(len(all_pts)-1):
pi = all_pts[i]
pip = all_pts[i+1]
tot += pi[0]*pip[1] - pip[0]*pi[1]
area = - tot / 2.0
# store area in current measurement
self._current_measurement.area = area
self._view_frame.area_txt.SetValue('%.2f' % (area,))
|
py | b40812c588e67dabd9dcb6ac49462115ea649ab0 | # Copyright (c) 2020 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..types import XMLBase
class Angle(XMLBase):
_NAME = 'angle'
_TYPE = 'sdf'
_ATTRIBUTES = dict(
axis='0'
)
def __init__(self, default=0):
super(Angle, self).__init__()
self.reset()
@property
def axis(self):
return self.attributes['axis']
@axis.setter
def axis(self, value):
assert self._is_scalar(value) and \
value >= 0, \
'Invalid axis input, provided={}'.format(
value)
if isinstance(value, float):
assert value.is_integer(), \
'Input must be an integer, provided={}'.format(
value)
self.attributes['axis'] = int(value)
|
py | b40813504aa75fa0e463fadc126a984c46455f96 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class DeploymentGroupMetrics(Model):
"""DeploymentGroupMetrics.
:param columns_header:
:type columns_header: :class:`MetricsColumnsHeader <task-agent.v4_0.models.MetricsColumnsHeader>`
:param deployment_group:
:type deployment_group: :class:`DeploymentGroupReference <task-agent.v4_0.models.DeploymentGroupReference>`
:param rows:
:type rows: list of :class:`MetricsRow <task-agent.v4_0.models.MetricsRow>`
"""
_attribute_map = {
'columns_header': {'key': 'columnsHeader', 'type': 'MetricsColumnsHeader'},
'deployment_group': {'key': 'deploymentGroup', 'type': 'DeploymentGroupReference'},
'rows': {'key': 'rows', 'type': '[MetricsRow]'}
}
def __init__(self, columns_header=None, deployment_group=None, rows=None):
super(DeploymentGroupMetrics, self).__init__()
self.columns_header = columns_header
self.deployment_group = deployment_group
self.rows = rows
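# Example (hypothetical) construction, for illustration only:
#   metrics = DeploymentGroupMetrics(columns_header=header, deployment_group=dg_ref, rows=row_list)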
|
py | b4081453141658a75ab865f140c7f0c5c303286b | from diofant import (EX, Float, I, Integer, Lambda, Poly, Rational, RootSum,
atan, integrate, log, simplify, sqrt, symbols)
from diofant.abc import a, b, t, u, x
from diofant.integrals.rationaltools import log_to_atan, ratint, ratint_logpart
__all__ = ()
def test_ratint():
assert ratint(Integer(0), x) == 0
assert ratint(Integer(7), x) == 7*x
assert ratint(x, x) == x**2/2
assert ratint(2*x, x) == x**2
assert ratint(-2*x, x) == -x**2
assert ratint(8*x**7 + 2*x + 1, x) == x**8 + x**2 + x
f = Integer(1)
g = x + 1
assert ratint(f / g, x) == log(x + 1)
assert ratint((f, g), x) == log(x + 1)
f = x**3 - x
g = x - 1
assert ratint(f/g, x) == x**3/3 + x**2/2
f = x
g = (x - a)*(x + a)
assert ratint(f/g, x) == log(x**2 - a**2)/2
f = Integer(1)
g = x**2 + 1
assert ratint(f/g, x, extended_real=None) == atan(x)
assert ratint(f/g, x, extended_real=True) == atan(x)
assert ratint(f/g, x, extended_real=False) == I*log(x + I)/2 - I*log(x - I)/2
f = Integer(36)
g = x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2
assert ratint(f/g, x) == \
-4*log(x + 1) + 4*log(x - 2) + (12*x + 6)/(x**2 - 1)
f = x**4 - 3*x**2 + 6
g = x**6 - 5*x**4 + 5*x**2 + 4
assert ratint(f/g, x) == \
atan(x) + atan(x**3) + atan(x/2 - 3*x**3/2 + Rational(1, 2)*x**5)
f = x**7 - 24*x**4 - 4*x**2 + 8*x - 8
g = x**8 + 6*x**6 + 12*x**4 + 8*x**2
assert ratint(f/g, x) == \
(4 + 6*x + 8*x**2 + 3*x**3)/(4*x + 4*x**3 + x**5) + log(x)
assert ratint((x**3*f)/(x*g), x) == \
-(12 - 16*x + 6*x**2 - 14*x**3)/(4 + 4*x**2 + x**4) - \
5*sqrt(2)*atan(x*sqrt(2)/2) + Rational(1, 2)*x**2 - 3*log(2 + x**2)
f = x**5 - x**4 + 4*x**3 + x**2 - x + 5
g = x**4 - 2*x**3 + 5*x**2 - 4*x + 4
assert ratint(f/g, x) == \
x + Rational(1, 2)*x**2 + Rational(1, 2)*log(2 - x + x**2) - (4*x - 9)/(14 - 7*x + 7*x**2) + \
13*sqrt(7)*atan(-Rational(1, 7)*sqrt(7) + 2*x*sqrt(7)/7)/49
assert ratint(1/(x**2 + x + 1), x) == \
2*sqrt(3)*atan(sqrt(3)/3 + 2*x*sqrt(3)/3)/3
assert ratint(1/(x**3 + 1), x) == \
-log(1 - x + x**2)/6 + log(1 + x)/3 + sqrt(3)*atan(-sqrt(3)
/ 3 + 2*x*sqrt(3)/3)/3
assert ratint(1/(x**2 + x + 1), x, extended_real=False) == \
-I*sqrt(3)*log(Rational(1, 2) + x - I*sqrt(3)/2)/3 + \
I*sqrt(3)*log(Rational(1, 2) + x + I*sqrt(3)/2)/3
assert ratint(1/(x**3 + 1), x, extended_real=False) == log(1 + x)/3 + \
(-Rational(1, 6) + I*sqrt(3)/6)*log(-Rational(1, 2) + x + I*sqrt(3)/2) + \
(-Rational(1, 6) - I*sqrt(3)/6)*log(-Rational(1, 2) + x - I*sqrt(3)/2)
# issue sympy/sympy#4991
assert ratint(1/(x*(a + b*x)**3), x) == \
((3*a + 2*b*x)/(2*a**4 + 4*a**3*b*x + 2*a**2*b**2*x**2) +
(log(2*b*x) - log(2*a + 2*b*x))/a**3)
assert ratint(x/(1 - x**2), x) == -log(x**2 - 1)/2
assert ratint(-x/(1 - x**2), x) == log(x**2 - 1)/2
assert ratint((x/4 - 4/(1 - x)).diff(x), x) == x/4 + 4/(x - 1)
ans = atan(x)
assert ratint(1/(x**2 + 1), x, symbol=x) == ans
assert ratint(1/(x**2 + 1), x, symbol='x') == ans
assert ratint(1/(x**2 + 1), x, symbol=a) == ans
ans = (-sqrt(2)*log(x**2 + x*(-2 - sqrt(2)) + sqrt(2) + 2)/8 +
sqrt(2)*log(x**2 + x*(-2 + sqrt(2)) - sqrt(2) + 2)/8 -
sqrt(2)*atan(-sqrt(2)*x + 1 + sqrt(2))/4 +
sqrt(2)*atan(sqrt(2)*x - sqrt(2) + 1)/4)
assert ratint(1/((x - 1)**4 + 1), x) == ans
ans = RootSum(776887*t**7 + 27216*t**5 - 15120*t**4 + 3780*t**3 -
504*t**2 + 35*t - 1,
Lambda(t, t*log(x + 6041073312*t**6/117649 +
1006845552*t**5/117649 +
379439208*t**4/117649 -
54333252*t**3/117649 +
20337738*t**2/117649 - 529481*t/117649 +
46656/117649)))
assert ratint(1/(x**7 - x + 1), x) == ans
def test_ratint_logpart():
assert ratint_logpart(x, x**2 - 9, x, t) == \
[(Poly(x**2 - 9, x), Poly(2*t - 1, t))]
assert ratint_logpart(x**2, x**3 - 5, x, t) == \
[(Poly(x**3 - 5, x), Poly(3*t - 1, t))]
def test_sympyissue_5414():
assert ratint(1/(x**2 + 16), x) == atan(x/4)/4
def test_sympyissue_5249():
assert ratint(
1/(x**2 + a**2), x) == (-I*log(-I*a + x)/2 + I*log(I*a + x)/2)/a
def test_sympyissue_5817():
a, b, c = symbols('a,b,c', positive=True)
assert simplify(ratint(a/(b*c*x**2 + a**2 + b*a), x)) == \
sqrt(a)*atan(sqrt(
b)*sqrt(c)*x/(sqrt(a)*sqrt(a + b)))/(sqrt(b)*sqrt(c)*sqrt(a + b))
def test_sympyissue_5981():
assert integrate(1/(u**2 + 1)) == atan(u)
def test_sympyissue_10488():
a, b, x = symbols('a b x', real=True, positive=True)
assert integrate(x/(a*x + b), x) == x/a - b*log(a*x + b)/a**2
def test_log_to_atan():
f, g = (Poly(x + Rational(1, 2)), Poly(sqrt(3)/2, x, domain=EX))
fg_ans = 2*atan(2*sqrt(3)*x/3 + sqrt(3)/3)
assert log_to_atan(f, g) == fg_ans
assert log_to_atan(g, f) == -fg_ans
def test_sympyissue_13460():
assert integrate(1/(-28*x**3 - 46*x**2 - 25*x - 10),
[x, 2, 3]).evalf() == Float('-0.0013230197536986538', dps=15)
|
py | b4081506a0a5c13d401734bb35f9b381194c1c45 | # -*- coding: utf-8 -*-
from openprocurement.tender.competitivedialogue.constants import STAGE_2_EU_TYPE
from openprocurement.tender.openeu.utils import qualifications_resource
from openprocurement.tender.openeu.views.qualification_complaint_post import (
TenderQualificationComplaintPostResource as BaseTenderQualificationComplaintPostResource
)
@qualifications_resource(
name="{}:Tender Qualification Complaint Posts".format(STAGE_2_EU_TYPE),
collection_path="/tenders/{tender_id}/qualifications/{qualification_id}/complaints/{complaint_id}/posts",
path="/tenders/{tender_id}/qualifications/{qualification_id}/complaints/{complaint_id}/posts/{post_id}",
procurementMethodType=STAGE_2_EU_TYPE,
description="Tender qualification complaint posts",
)
class TenderCompetitiveDialogueEUQualificationComplaintPostResource(BaseTenderQualificationComplaintPostResource):
pass
|
py | b4081663474fc9fb4a31a8743825c3e65aefcc2d | from monopyly import *
class LazyBonesAI(PlayerAIBase):
'''
This player does nothing - just has the default behaviour
of the base AI. It does not buy properties, make deals etc.
'''
def get_name(self):
return "LazyBones"
|
py | b408174e4f2b1ad0bfe563eb734b6e66d50b54c4 | import logging
from quasimodo.data_structures.submodule_interface import SubmoduleInterface
from quasimodo.inflect_accessor import DEFAULT_INFLECT
class ToSingularSubjectSubmodule(SubmoduleInterface):
def __init__(self, module_reference):
super().__init__()
self._module_reference = module_reference
self._name = "To Singular Subject Submodule"
def process(self, input_interface):
logging.info("Turn subject to singular")
new_generated_facts = []
subjects = set([x.get() for x in input_interface.get_subjects()])
singular_maker = DEFAULT_INFLECT
for g in input_interface.get_generated_facts():
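            # A plural subject is rewritten to its singular form only when that
            # singular form already occurs as a subject elsewhere, i.e. this merges
            # e.g. facts about "elephants" into an existing "elephant" subject.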
subj = g.get_subject().get()
singular = singular_maker.to_singular(subj)
if singular not in subjects or singular == subj:
new_generated_facts.append(g)
else:
new_generated_facts.append(g.change_subject(singular))
return input_interface.replace_generated_facts(new_generated_facts)
|
py | b4081811ece0328635f1be9a49860fd8484f4ff0 | import numpy as np
import HyperUtils as hu
check_eps = 0.3
check_sig = 2.0
check_alp = np.array([0.2, 0.18, 0.16, 0.14, 0.01])
check_chi = np.array([0.9, 1.0, 1.1, 1.2, 1.0])
file = "h1epmk_ser_cbh"
name = "1D Linear Elastic-Plastic with Multisurface Kinematic Hardening - Nested Boundary HARM"
mode = 0
ndim = 1
n_y = 1
const = [100.0, 4, 0.11955, 100.0, 0.317097, 33.33333, 0.679352, 20.0, 0.848504, 10.0, 0.1]
mu = 0.1
def deriv():
global E, k, recip_k, H, R, name_const, n_int, n_inp, n_const
E = float(const[0])
n_int = int(const[1]) + 1
n_inp = int(const[1])
n_const = 2 + 2*n_int + 1
k = np.array(const[2:2 + 2*n_inp:2])
H = np.array(const[3:3 + 2*n_inp:2])
R = float(const[2*n_int])
recip_k = 1.0 / k
name_const = ["E", "N"]
for i in range(n_inp):
name_const.append("k"+str(i+1))
name_const.append("H"+str(i+1))
name_const.append("R")
deriv()
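# For reference, with the default `const` list above deriv() unpacks to:
#   E = 100.0, n_inp = 4, n_int = 5
#   k = [0.11955, 0.317097, 0.679352, 0.848504]   (const[2:10:2])
#   H = [100.0, 33.33333, 20.0, 10.0]             (const[3:11:2])
#   R = 0.1                                       (const[10])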
def alpdiff(alp): return np.array([(alp[i]-alp[i+1]) for i in range(n_inp-1)])
def f(eps,alp): return ((E*(eps-alp[0]-alp[n_inp])**2)/2.0 +
np.einsum("n,n,n->",H[:n_inp-1],alpdiff(alp),alpdiff(alp))/2.0 +
H[n_inp-1]*(alp[n_inp-1]**2)/2.0)
def dfde(eps,alp): return E*(eps-alp[0]-alp[n_inp])
def dfda(eps,alp):
temp = np.zeros(n_int)
temp[0] = -E*(eps-alp[0]-alp[n_inp])
temp[:n_inp-1] += H[:n_inp-1]*alpdiff(alp)
temp[1:n_inp] -= H[:n_inp-1]*alpdiff(alp)
temp[n_inp-1] += H[n_inp-1]*alp[n_inp-1]
temp[n_inp] = -E*(eps-alp[0]-alp[n_inp])
return temp
def d2fdede(eps,alp): return E
def d2fdeda(eps,alp):
temp = np.zeros(n_int)
temp[0] = -E
temp[n_inp] = -E
return temp
def d2fdade(eps,alp):
temp = np.zeros(n_int)
temp[0] = -E
temp[n_inp] = -E
return temp
def d2fdada(eps,alp):
temp = np.zeros([n_int,n_int])
temp[0,0] = E
for i in range(n_inp-1):
temp[i,i] += H[i]
temp[i+1,i] -= H[i]
temp[i,i+1] -= H[i]
temp[i+1,i+1] += H[i]
temp[n_inp-1,n_inp-1] += H[n_inp-1]
temp[0,n_inp] = E
temp[n_inp,0] = E
temp[n_inp,n_inp] = E
return temp
def g(sig,alp): return (-(sig**2)/(2.0*E) - sig*alp[0] - sig*alp[n_inp] +
np.einsum("n,n,n->",H[:n_inp-1],alpdiff(alp),alpdiff(alp))/2.0 +
H[n_inp-1]*(alp[n_inp-1]**2)/2.0)
def dgds(sig,alp): return -sig/E - alp[0] - alp[n_inp]
def dgda(sig,alp):
temp = np.zeros(n_int)
temp[0] = -sig
temp[:n_inp-1] += H[:n_inp-1]*alpdiff(alp)
temp[1:n_inp] -= H[:n_inp-1]*alpdiff(alp)
temp[n_inp-1] += H[n_inp-1]*alp[n_inp-1]
temp[n_inp] = -sig
return temp
def d2gdsds(sig,alp): return -1.0/E
def d2gdsda(sig,alp):
temp = np.zeros(n_int)
temp[0] = -1.0
temp[n_inp] = -1.0
return temp
def d2gdads(sig,alp):
temp = np.zeros(n_int)
temp[0] = -1.0
temp[n_inp] = -1.0
return temp
def d2gdada(sig,alp):
temp = np.zeros([n_int,n_int])
for i in range(n_inp-1):
temp[i,i] += H[i]
temp[i+1,i] -= H[i]
temp[i,i+1] -= H[i]
temp[i+1,i+1] += H[i]
temp[n_inp-1,n_inp-1] += H[n_inp-1]
return temp
#def d_f(alpr,eps,alp): return k*abs(alpr)
def y_f(chi,eps,alp):
s = dfde(eps,alp)
return np.array([np.sqrt(np.einsum("n,n,n,n->",chi[:n_inp],chi[:n_inp],recip_k,recip_k)) - 1.0 +
R*(np.abs(chi[n_inp]) - np.abs(s))])
def dydc_f(chi,eps,alp):
temp = np.zeros([n_y,n_int])
t1 = hu.floor(np.sqrt(np.einsum("n,n,n,n->",chi[:n_inp],chi[:n_inp],recip_k,recip_k)))
for i in range(n_inp):
temp[0,i] += (chi[i]/(k[i]**2)) / t1
temp[0,n_inp] += R*hu.S(chi[n_inp])
return temp
def dyde_f(chi,eps,alp):
s = dfde(eps,alp)
return -R*E*hu.S(s)*np.ones(n_y)
def dyda_f(chi,eps,alp):
s = dfde(eps,alp)
temp = np.zeros([n_y,n_int])
temp[0,0] = R*E*hu.S(s)
temp[0,n_int-1] = R*E*hu.S(s)
return temp
#def d_g(alpr,eps,alp): return k*abs(alpr)
def y_g(chi,sig,alp): return np.array([np.sqrt(np.einsum("n,n,n,n->",chi[:n_inp],chi[:n_inp],recip_k,recip_k)) - 1.0 +
R*(np.abs(chi[n_inp]) - np.abs(sig))])
def dydc_g(chi,sig,alp):
temp = np.zeros([n_y,n_int])
t1 = hu.floor(np.sqrt(np.einsum("n,n,n,n->",chi[:n_inp],chi[:n_inp],recip_k,recip_k)))
for i in range(n_inp):
temp[0,i] += (chi[i]/(k[i]**2)) / t1
temp[0,n_inp] += R*hu.S(chi[n_inp])
return temp
def dyds_g(chi,sig,alp): return -R*hu.S(sig)*np.ones(n_y)
def dyda_g(chi,sig,alp): return np.zeros([n_y,n_int])
#def w_f(chi,eps,alp): return sum([(mac(abs(chi[i]) - k[i])**2)/(2.0*mu) for i in range(n_int)])
#def dwdc_f(chi,eps,alp): return np.array([S(chi[i])*mac(abs(chi[i]) - k[i])/mu for i in range(n_int)])
#def w_g(chi,sig,alp): return sum([(mac(abs(chi[i]) - k[i])**2)/(2.0*mu) for i in range(n_int)])
#def dwdc_g(chi,sig,alp): return np.array([S(chi[i])*mac(abs(chi[i]) - k[i])/mu for i in range(n_int)])
|
py | b40818b27cf2185044309b1e85a87bdfa4db7da0 | # -*- coding: utf-8 -*-
"""The Elastic Search output module CLI arguments helper."""
from __future__ import unicode_literals
import getpass
import os
from uuid import uuid4
from plaso.cli.helpers import interface
from plaso.cli.helpers import manager
from plaso.cli.helpers import server_config
from plaso.cli import logger
from plaso.lib import errors
from plaso.output import elastic
class ElasticSearchServerArgumentsHelper(server_config.ServerArgumentsHelper):
"""Elastic Search server CLI arguments helper."""
_DEFAULT_SERVER = '127.0.0.1'
_DEFAULT_PORT = 9200
class ElasticSearchOutputArgumentsHelper(interface.ArgumentsHelper):
"""Elastic Search output module CLI arguments helper."""
NAME = 'elastic'
CATEGORY = 'output'
DESCRIPTION = 'Argument helper for the Elastic Search output modules.'
_DEFAULT_INDEX_NAME = uuid4().hex
_DEFAULT_DOCUMENT_TYPE = 'plaso_event'
_DEFAULT_FLUSH_INTERVAL = 1000
_DEFAULT_RAW_FIELDS = False
@classmethod
def AddArguments(cls, argument_group):
"""Adds command line arguments the helper supports to an argument group.
This function takes an argument parser or an argument group object and adds
to it all the command line arguments this helper supports.
Args:
argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
argparse group.
"""
argument_group.add_argument(
'--index_name', dest='index_name', type=str, action='store',
default=cls._DEFAULT_INDEX_NAME, help=(
'Name of the index in ElasticSearch.'))
argument_group.add_argument(
'--doc_type', dest='document_type', type=str,
action='store', default=cls._DEFAULT_DOCUMENT_TYPE, help=(
'Name of the document type that will be used in ElasticSearch.'))
argument_group.add_argument(
'--flush_interval', dest='flush_interval', type=int,
action='store', default=cls._DEFAULT_FLUSH_INTERVAL, help=(
'Events to queue up before bulk insert to ElasticSearch.'))
argument_group.add_argument(
'--raw_fields', dest='raw_fields', action='store_true',
default=cls._DEFAULT_RAW_FIELDS, help=(
'Export string fields that will not be analyzed by Lucene.'))
argument_group.add_argument(
'--elastic_user', dest='elastic_user', action='store',
default=None, help='Username to use for Elasticsearch authentication.')
argument_group.add_argument(
'--elastic_password', dest='elastic_password', action='store',
default=None, help=(
'Password to use for Elasticsearch authentication. WARNING: use '
'with caution since this can expose the password to other users '
'on the system. The password can also be set with the environment '
'variable PLASO_ELASTIC_PASSWORD. '))
argument_group.add_argument(
'--use_ssl', dest='use_ssl', action='store_true',
help='Enforces use of ssl.')
argument_group.add_argument(
'--ca_certificates_file_path', dest='ca_certificates_file_path',
action='store', type=str, default=None, help=(
'Path to a file containing a list of root certificates to trust.'))
argument_group.add_argument(
'--elastic_url_prefix', dest='elastic_url_prefix', type=str,
action='store', default=None, help='URL prefix for elastic search.')
ElasticSearchServerArgumentsHelper.AddArguments(argument_group)
# pylint: disable=arguments-differ
@classmethod
def ParseOptions(cls, options, output_module):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
output_module (OutputModule): output module to configure.
Raises:
BadConfigObject: when the output module object is of the wrong type.
BadConfigOption: when a configuration parameter fails validation.
"""
    elastic_output_modules = (
        elastic.ElasticsearchOutputModule,)
    if not isinstance(output_module, elastic_output_modules):
raise errors.BadConfigObject(
'Output module is not an instance of ElasticsearchOutputModule')
index_name = cls._ParseStringOption(
options, 'index_name', default_value=cls._DEFAULT_INDEX_NAME)
document_type = cls._ParseStringOption(
options, 'document_type', default_value=cls._DEFAULT_DOCUMENT_TYPE)
flush_interval = cls._ParseNumericOption(
options, 'flush_interval', default_value=cls._DEFAULT_FLUSH_INTERVAL)
raw_fields = getattr(options, 'raw_fields', cls._DEFAULT_RAW_FIELDS)
elastic_user = cls._ParseStringOption(options, 'elastic_user')
elastic_password = cls._ParseStringOption(options, 'elastic_password')
use_ssl = getattr(options, 'use_ssl', False)
ca_certificates_path = cls._ParseStringOption(
options, 'ca_certificates_file_path')
elastic_url_prefix = cls._ParseStringOption(options, 'elastic_url_prefix')
if elastic_password is None:
elastic_password = os.getenv('PLASO_ELASTIC_PASSWORD', None)
if elastic_password is not None:
logger.warning(
'Note that specifying your Elasticsearch password via '
'--elastic_password or the environment PLASO_ELASTIC_PASSWORD can '
'expose the password to other users on the system.')
if elastic_user is not None and elastic_password is None:
elastic_password = getpass.getpass('Enter your Elasticsearch password: ')
ElasticSearchServerArgumentsHelper.ParseOptions(options, output_module)
output_module.SetIndexName(index_name)
output_module.SetDocumentType(document_type)
output_module.SetFlushInterval(flush_interval)
output_module.SetRawFields(raw_fields)
output_module.SetUsername(elastic_user)
output_module.SetPassword(elastic_password)
output_module.SetUseSSL(use_ssl)
output_module.SetCACertificatesPath(ca_certificates_path)
output_module.SetURLPrefix(elastic_url_prefix)
manager.ArgumentHelperManager.RegisterHelper(ElasticSearchOutputArgumentsHelper)
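# Illustrative (hypothetical) wiring of this helper with argparse -- not part of
# plaso itself; `output_module` is assumed to be an ElasticsearchOutputModule:
#
#   parser = argparse.ArgumentParser()
#   group = parser.add_argument_group('elasticsearch output')
#   ElasticSearchOutputArgumentsHelper.AddArguments(group)
#   options = parser.parse_args(['--index_name', 'my_index', '--flush_interval', '5000'])
#   ElasticSearchOutputArgumentsHelper.ParseOptions(options, output_module)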
|
py | b408191567e6e19d061a1ee6856a563dbc1ed132 | """
Provide the functionality to group entities.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/group/
"""
import asyncio
import logging
import voluptuous as vol
from homeassistant import core as ha
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_ICON, CONF_NAME, STATE_CLOSED, STATE_HOME,
STATE_NOT_HOME, STATE_OFF, STATE_ON, STATE_OPEN, STATE_LOCKED,
STATE_UNLOCKED, STATE_OK, STATE_PROBLEM, STATE_UNKNOWN,
ATTR_ASSUMED_STATE, SERVICE_RELOAD, ATTR_NAME, ATTR_ICON)
from homeassistant.core import callback
from homeassistant.loader import bind_hass
from homeassistant.helpers.entity import Entity, async_generate_entity_id
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.event import async_track_state_change
import homeassistant.helpers.config_validation as cv
from homeassistant.util.async_ import run_coroutine_threadsafe
DOMAIN = 'group'
ENTITY_ID_FORMAT = DOMAIN + '.{}'
CONF_ENTITIES = 'entities'
CONF_VIEW = 'view'
CONF_CONTROL = 'control'
ATTR_ADD_ENTITIES = 'add_entities'
ATTR_AUTO = 'auto'
ATTR_CONTROL = 'control'
ATTR_ENTITIES = 'entities'
ATTR_OBJECT_ID = 'object_id'
ATTR_ORDER = 'order'
ATTR_VIEW = 'view'
ATTR_VISIBLE = 'visible'
SERVICE_SET_VISIBILITY = 'set_visibility'
SERVICE_SET = 'set'
SERVICE_REMOVE = 'remove'
CONTROL_TYPES = vol.In(['hidden', None])
SET_VISIBILITY_SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_VISIBLE): cv.boolean
})
RELOAD_SERVICE_SCHEMA = vol.Schema({})
SET_SERVICE_SCHEMA = vol.Schema({
vol.Required(ATTR_OBJECT_ID): cv.slug,
vol.Optional(ATTR_NAME): cv.string,
vol.Optional(ATTR_VIEW): cv.boolean,
vol.Optional(ATTR_ICON): cv.string,
vol.Optional(ATTR_CONTROL): CONTROL_TYPES,
vol.Optional(ATTR_VISIBLE): cv.boolean,
vol.Exclusive(ATTR_ENTITIES, 'entities'): cv.entity_ids,
vol.Exclusive(ATTR_ADD_ENTITIES, 'entities'): cv.entity_ids,
})
REMOVE_SERVICE_SCHEMA = vol.Schema({
vol.Required(ATTR_OBJECT_ID): cv.slug,
})
_LOGGER = logging.getLogger(__name__)
def _conf_preprocess(value):
"""Preprocess alternative configuration formats."""
if not isinstance(value, dict):
value = {CONF_ENTITIES: value}
return value
GROUP_SCHEMA = vol.Schema({
vol.Optional(CONF_ENTITIES): vol.Any(cv.entity_ids, None),
CONF_VIEW: cv.boolean,
CONF_NAME: cv.string,
CONF_ICON: cv.icon,
CONF_CONTROL: CONTROL_TYPES,
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({cv.match_all: vol.All(_conf_preprocess, GROUP_SCHEMA)})
}, extra=vol.ALLOW_EXTRA)
# List of ON/OFF state tuples for groupable states
_GROUP_TYPES = [(STATE_ON, STATE_OFF), (STATE_HOME, STATE_NOT_HOME),
(STATE_OPEN, STATE_CLOSED), (STATE_LOCKED, STATE_UNLOCKED),
(STATE_PROBLEM, STATE_OK)]
def _get_group_on_off(state):
"""Determine the group on/off states based on a state."""
for states in _GROUP_TYPES:
if state in states:
return states
return None, None
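# Illustrative sketch, not part of the original component: _get_group_on_off
# maps any member state to the ON/OFF pair used for the whole group, falling
# back to (None, None) for states it cannot group.
def _demo_get_group_on_off():
    """Hypothetical helper demonstrating the lookup; safe to remove."""
    assert _get_group_on_off(STATE_HOME) == (STATE_HOME, STATE_NOT_HOME)
    assert _get_group_on_off(STATE_OPEN) == (STATE_OPEN, STATE_CLOSED)
    assert _get_group_on_off('some_unsupported_state') == (None, None)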
@bind_hass
def is_on(hass, entity_id):
"""Test if the group state is in its ON-state."""
state = hass.states.get(entity_id)
if state:
group_on, _ = _get_group_on_off(state.state)
# If we found a group_type, compare to ON-state
return group_on is not None and state.state == group_on
return False
@bind_hass
def reload(hass):
"""Reload the automation from config."""
hass.add_job(async_reload, hass)
@callback
@bind_hass
def async_reload(hass):
"""Reload the automation from config."""
hass.async_add_job(hass.services.async_call(DOMAIN, SERVICE_RELOAD))
@bind_hass
def set_visibility(hass, entity_id=None, visible=True):
"""Hide or shows a group."""
data = {ATTR_ENTITY_ID: entity_id, ATTR_VISIBLE: visible}
hass.services.call(DOMAIN, SERVICE_SET_VISIBILITY, data)
@bind_hass
def set_group(hass, object_id, name=None, entity_ids=None, visible=None,
icon=None, view=None, control=None, add=None):
"""Create/Update a group."""
hass.add_job(
async_set_group, hass, object_id, name, entity_ids, visible, icon,
view, control, add)
@callback
@bind_hass
def async_set_group(hass, object_id, name=None, entity_ids=None, visible=None,
icon=None, view=None, control=None, add=None):
"""Create/Update a group."""
data = {
key: value for key, value in [
(ATTR_OBJECT_ID, object_id),
(ATTR_NAME, name),
(ATTR_ENTITIES, entity_ids),
(ATTR_VISIBLE, visible),
(ATTR_ICON, icon),
(ATTR_VIEW, view),
(ATTR_CONTROL, control),
(ATTR_ADD_ENTITIES, add),
] if value is not None
}
hass.async_add_job(hass.services.async_call(DOMAIN, SERVICE_SET, data))
@bind_hass
def remove(hass, name):
"""Remove a user group."""
hass.add_job(async_remove, hass, name)
@callback
@bind_hass
def async_remove(hass, object_id):
"""Remove a user group."""
data = {ATTR_OBJECT_ID: object_id}
hass.async_add_job(hass.services.async_call(DOMAIN, SERVICE_REMOVE, data))
@bind_hass
def expand_entity_ids(hass, entity_ids):
"""Return entity_ids with group entity ids replaced by their members.
Async friendly.
"""
found_ids = []
for entity_id in entity_ids:
if not isinstance(entity_id, str):
continue
entity_id = entity_id.lower()
try:
# If entity_id points at a group, expand it
domain, _ = ha.split_entity_id(entity_id)
if domain == DOMAIN:
child_entities = get_entity_ids(hass, entity_id)
if entity_id in child_entities:
child_entities = list(child_entities)
child_entities.remove(entity_id)
found_ids.extend(
ent_id for ent_id
in expand_entity_ids(hass, child_entities)
if ent_id not in found_ids)
else:
if entity_id not in found_ids:
found_ids.append(entity_id)
except AttributeError:
# Raised by split_entity_id if entity_id is not a string
pass
return found_ids
@bind_hass
def get_entity_ids(hass, entity_id, domain_filter=None):
"""Get members of this group.
Async friendly.
"""
group = hass.states.get(entity_id)
if not group or ATTR_ENTITY_ID not in group.attributes:
return []
entity_ids = group.attributes[ATTR_ENTITY_ID]
if not domain_filter:
return entity_ids
domain_filter = domain_filter.lower() + '.'
return [ent_id for ent_id in entity_ids
if ent_id.startswith(domain_filter)]
async def async_setup(hass, config):
"""Set up all groups found defined in the configuration."""
component = hass.data.get(DOMAIN)
if component is None:
component = hass.data[DOMAIN] = EntityComponent(_LOGGER, DOMAIN, hass)
await _async_process_config(hass, config, component)
async def reload_service_handler(service):
"""Remove all user-defined groups and load new ones from config."""
#auto = list(filter(lambda e: not e.user_defined, component.entities))
# fix for ais-dom groups defined in packages
auto = list(component.entities)
conf = await component.async_prepare_reload()
if conf is None:
return
await _async_process_config(hass, conf, component)
await component.async_add_entities(auto)
hass.services.async_register(
DOMAIN, SERVICE_RELOAD, reload_service_handler,
schema=RELOAD_SERVICE_SCHEMA)
async def groups_service_handler(service):
"""Handle dynamic group service functions."""
object_id = service.data[ATTR_OBJECT_ID]
entity_id = ENTITY_ID_FORMAT.format(object_id)
group = component.get_entity(entity_id)
# new group
if service.service == SERVICE_SET and group is None:
entity_ids = service.data.get(ATTR_ENTITIES) or \
service.data.get(ATTR_ADD_ENTITIES) or None
extra_arg = {attr: service.data[attr] for attr in (
ATTR_VISIBLE, ATTR_ICON, ATTR_VIEW, ATTR_CONTROL
) if service.data.get(attr) is not None}
await Group.async_create_group(
hass, service.data.get(ATTR_NAME, object_id),
object_id=object_id,
entity_ids=entity_ids,
user_defined=False,
**extra_arg
)
return
if group is None:
_LOGGER.warning("%s:Group '%s' doesn't exist!",
service.service, object_id)
return
# update group
if service.service == SERVICE_SET:
need_update = False
if ATTR_ADD_ENTITIES in service.data:
delta = service.data[ATTR_ADD_ENTITIES]
entity_ids = set(group.tracking) | set(delta)
await group.async_update_tracked_entity_ids(entity_ids)
if ATTR_ENTITIES in service.data:
entity_ids = service.data[ATTR_ENTITIES]
await group.async_update_tracked_entity_ids(entity_ids)
if ATTR_NAME in service.data:
group.name = service.data[ATTR_NAME]
need_update = True
if ATTR_VISIBLE in service.data:
group.visible = service.data[ATTR_VISIBLE]
need_update = True
if ATTR_ICON in service.data:
group.icon = service.data[ATTR_ICON]
need_update = True
if ATTR_CONTROL in service.data:
group.control = service.data[ATTR_CONTROL]
need_update = True
if ATTR_VIEW in service.data:
group.view = service.data[ATTR_VIEW]
need_update = True
if need_update:
await group.async_update_ha_state()
return
# remove group
if service.service == SERVICE_REMOVE:
await component.async_remove_entity(entity_id)
hass.services.async_register(
DOMAIN, SERVICE_SET, groups_service_handler,
schema=SET_SERVICE_SCHEMA)
hass.services.async_register(
DOMAIN, SERVICE_REMOVE, groups_service_handler,
schema=REMOVE_SERVICE_SCHEMA)
async def visibility_service_handler(service):
"""Change visibility of a group."""
visible = service.data.get(ATTR_VISIBLE)
tasks = []
for group in component.async_extract_from_service(service,
expand_group=False):
group.visible = visible
tasks.append(group.async_update_ha_state())
if tasks:
await asyncio.wait(tasks, loop=hass.loop)
hass.services.async_register(
DOMAIN, SERVICE_SET_VISIBILITY, visibility_service_handler,
schema=SET_VISIBILITY_SERVICE_SCHEMA)
return True
async def _async_process_config(hass, config, component):
"""Process group configuration."""
for object_id, conf in config.get(DOMAIN, {}).items():
name = conf.get(CONF_NAME, object_id)
entity_ids = conf.get(CONF_ENTITIES) or []
icon = conf.get(CONF_ICON)
view = conf.get(CONF_VIEW)
control = conf.get(CONF_CONTROL)
# Don't create tasks and await them all. The order is important as
# groups get a number based on creation order.
await Group.async_create_group(
hass, name, entity_ids, icon=icon, view=view,
control=control, object_id=object_id)
class Group(Entity):
"""Track a group of entity ids."""
def __init__(self, hass, name, order=None, visible=True, icon=None,
view=False, control=None, user_defined=True, entity_ids=None):
"""Initialize a group.
This Object has factory function for creation.
"""
self.hass = hass
self._name = name
self._state = STATE_UNKNOWN
self._icon = icon
self.view = view
if entity_ids:
self.tracking = tuple(ent_id.lower() for ent_id in entity_ids)
else:
self.tracking = tuple()
self.group_on = None
self.group_off = None
self.visible = visible
self.control = control
self.user_defined = user_defined
self._order = order
self._assumed_state = False
self._async_unsub_state_changed = None
@staticmethod
def create_group(hass, name, entity_ids=None, user_defined=True,
visible=True, icon=None, view=False, control=None,
object_id=None):
"""Initialize a group."""
return run_coroutine_threadsafe(
Group.async_create_group(
hass, name, entity_ids, user_defined, visible, icon, view,
control, object_id),
hass.loop).result()
@staticmethod
async def async_create_group(hass, name, entity_ids=None,
user_defined=True, visible=True, icon=None,
view=False, control=None, object_id=None):
"""Initialize a group.
This method must be run in the event loop.
"""
group = Group(
hass, name,
order=len(hass.states.async_entity_ids(DOMAIN)),
visible=visible, icon=icon, view=view, control=control,
user_defined=user_defined, entity_ids=entity_ids
)
group.entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, object_id or name, hass=hass)
# If called before the platform async_setup is called (test cases)
component = hass.data.get(DOMAIN)
if component is None:
component = hass.data[DOMAIN] = \
EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities([group], True)
return group
@property
def should_poll(self):
"""No need to poll because groups will update themselves."""
return False
@property
def name(self):
"""Return the name of the group."""
return self._name
@name.setter
def name(self, value):
"""Set Group name."""
self._name = value
@property
def state(self):
"""Return the state of the group."""
return self._state
@property
def icon(self):
"""Return the icon of the group."""
return self._icon
@icon.setter
def icon(self, value):
"""Set Icon for group."""
self._icon = value
@property
def hidden(self):
"""If group should be hidden or not."""
if self.visible and not self.view:
return False
return True
@property
def state_attributes(self):
"""Return the state attributes for the group."""
data = {
ATTR_ENTITY_ID: self.tracking,
ATTR_ORDER: self._order,
}
if not self.user_defined:
data[ATTR_AUTO] = True
if self.view:
data[ATTR_VIEW] = True
if self.control:
data[ATTR_CONTROL] = self.control
return data
@property
def assumed_state(self):
"""Test if any member has an assumed state."""
return self._assumed_state
def update_tracked_entity_ids(self, entity_ids):
"""Update the member entity IDs."""
run_coroutine_threadsafe(
self.async_update_tracked_entity_ids(entity_ids), self.hass.loop
).result()
async def async_update_tracked_entity_ids(self, entity_ids):
"""Update the member entity IDs.
This method must be run in the event loop.
"""
await self.async_stop()
self.tracking = tuple(ent_id.lower() for ent_id in entity_ids)
self.group_on, self.group_off = None, None
await self.async_update_ha_state(True)
self.async_start()
@callback
def async_start(self):
"""Start tracking members.
This method must be run in the event loop.
"""
if self._async_unsub_state_changed is None:
self._async_unsub_state_changed = async_track_state_change(
self.hass, self.tracking, self._async_state_changed_listener
)
async def async_stop(self):
"""Unregister the group from Home Assistant.
This method must be run in the event loop.
"""
if self._async_unsub_state_changed:
self._async_unsub_state_changed()
self._async_unsub_state_changed = None
async def async_update(self):
"""Query all members and determine current group state."""
self._state = STATE_UNKNOWN
self._async_update_group_state()
async def async_added_to_hass(self):
"""Handle addition to HASS."""
if self.tracking:
self.async_start()
async def async_will_remove_from_hass(self):
"""Handle removal from HASS."""
if self._async_unsub_state_changed:
self._async_unsub_state_changed()
self._async_unsub_state_changed = None
async def _async_state_changed_listener(self, entity_id, old_state,
new_state):
"""Respond to a member state changing.
This method must be run in the event loop.
"""
# removed
if self._async_unsub_state_changed is None:
return
self._async_update_group_state(new_state)
await self.async_update_ha_state()
@property
def _tracking_states(self):
"""Return the states that the group is tracking."""
states = []
for entity_id in self.tracking:
state = self.hass.states.get(entity_id)
if state is not None:
states.append(state)
return states
@callback
def _async_update_group_state(self, tr_state=None):
"""Update group state.
Optionally you can provide the only state that changed since the last
update, allowing this method to take shortcuts.
This method must be run in the event loop.
"""
# To store current states of group entities. Might not be needed.
states = None
gr_state = self._state
gr_on = self.group_on
gr_off = self.group_off
# We have not determined type of group yet
if gr_on is None:
if tr_state is None:
states = self._tracking_states
for state in states:
gr_on, gr_off = \
_get_group_on_off(state.state)
if gr_on is not None:
break
else:
gr_on, gr_off = _get_group_on_off(tr_state.state)
if gr_on is not None:
self.group_on, self.group_off = gr_on, gr_off
# We cannot determine state of the group
if gr_on is None:
return
if tr_state is None or ((gr_state == gr_on and
tr_state.state == gr_off) or
tr_state.state not in (gr_on, gr_off)):
if states is None:
states = self._tracking_states
if any(state.state == gr_on for state in states):
self._state = gr_on
else:
self._state = gr_off
elif tr_state.state in (gr_on, gr_off):
self._state = tr_state.state
if tr_state is None or self._assumed_state and \
not tr_state.attributes.get(ATTR_ASSUMED_STATE):
if states is None:
states = self._tracking_states
self._assumed_state = any(
state.attributes.get(ATTR_ASSUMED_STATE) for state
in states)
elif tr_state.attributes.get(ATTR_ASSUMED_STATE):
self._assumed_state = True
|
py | b40819279921267ced5ad65c164156a27514d8e5 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Replaces the default Django XML serializer with one that uses the built in
ToXml method for each entity.
"""
from datetime import datetime
import re
from django.conf import settings
from django.core.serializers import base
from django.core.serializers import xml_serializer
from django.db import models
from google.appengine.api import datastore_types
from google.appengine.ext import db
from python import FakeParent
from python import parse_datetime_with_microseconds
getInnerText = xml_serializer.getInnerText
class Serializer(xml_serializer.Serializer):
"""A Django Serializer class to convert datastore models to XML.
This class relies on the ToXml method of the entity behind each model to do
the hard work.
"""
def __init__(self, *args, **kwargs):
super(Serializer, self).__init__(*args, **kwargs)
self._objects = []
def handle_field(self, obj, field):
"""Fields are not handled individually."""
pass
def handle_fk_field(self, obj, field):
"""Fields are not handled individually."""
pass
def start_object(self, obj):
"""Nothing needs to be done to start an object."""
pass
def end_object(self, obj):
"""Serialize the object to XML and add to the list of objects to output.
The output of ToXml is manipulated to replace the datastore model name in
the "kind" tag with the Django model name (which includes the Django
application name) to make importing easier.
"""
xml = obj._entity.ToXml()
xml = xml.replace(u"""kind="%s" """ % obj._entity.kind(),
u"""kind="%s" """ % unicode(obj._meta))
self._objects.append(xml)
def getvalue(self):
"""Wrap the serialized objects with XML headers and return."""
str = u"""<?xml version="1.0" encoding="utf-8"?>\n"""
str += u"""<django-objects version="1.0">\n"""
str += u"".join(self._objects)
str += u"""</django-objects>"""
return str
class Deserializer(xml_serializer.Deserializer):
"""A Django Deserializer class to convert XML to Django objects.
This is a fairly manual and simplistic XML parser; it supports just enough
functionality to read the keys and fields for an entity from the XML file and
construct a model object.
"""
def next(self):
"""Replacement next method to look for 'entity'.
The default next implementation expects 'object' nodes, which is not
what the entity's ToXml output provides.
"""
for event, node in self.event_stream:
if event == "START_ELEMENT" and node.nodeName == "entity":
self.event_stream.expandNode(node)
return self._handle_object(node)
raise StopIteration
def _handle_object(self, node):
"""Convert an <entity> node to a DeserializedObject"""
Model = self._get_model_from_node(node, "kind")
data = {}
key = db.Key(node.getAttribute("key"))
if key.name():
data["key_name"] = key.name()
parent = None
if key.parent():
parent = FakeParent(key.parent())
m2m_data = {}
# Deserialize each field.
for field_node in node.getElementsByTagName("property"):
# If the field is missing the name attribute, bail (are you
# sensing a pattern here?)
field_name = field_node.getAttribute("name")
if not field_name:
raise base.DeserializationError("<field> node is missing the 'name' "
"attribute")
field = Model.properties()[field_name]
field_value = getInnerText(field_node).strip()
if isinstance(field, db.Reference):
m = re.match("tag:.*\[(.*)\]", field_value)
if not m:
raise base.DeserializationError(u"Invalid reference value: '%s'" %
field_value)
key = m.group(1)
key_obj = db.Key(key)
if not key_obj.name():
raise base.DeserializationError(u"Cannot load Reference with "
"unnamed key: '%s'" % field_value)
data[field.name] = key_obj
else:
format = '%Y-%m-%d %H:%M:%S'
if isinstance(field, db.DateProperty):
field_value = datetime.strptime(field_value, format).date()
elif isinstance(field, db.TimeProperty):
field_value = parse_datetime_with_microseconds(field_value,
format).time()
elif isinstance(field, db.DateTimeProperty):
field_value = parse_datetime_with_microseconds(field_value, format)
data[field.name] = field.validate(field_value)
# Create the new model instance with all its data, but no parent.
object = Model(**data)
# Now add the parent into the hidden attribute, bypassing the type checks
# in the Model's __init__ routine.
object._parent = parent
# When the deserialized object is saved our replacement DeserializedObject
# class will set object._parent to force the real parent model to be loaded
# the first time it is referenced.
return base.DeserializedObject(object, m2m_data)
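# Illustrative sketch, not part of the original module: the Reference handling
# above expects values shaped like "tag:...[<encoded key>]". The sample string
# below is entirely made up, purely to show what the regex captures.
def _demo_reference_key_extraction():
    """Hypothetical example of the tag:...[KEY] match; safe to remove."""
    sample = "tag:example.com,2008:somekind[ahBzfmV4YW1wbGUtYXBwLWlk]"
    match = re.match(r"tag:.*\[(.*)\]", sample)
    # group(1) is the bracketed key string that db.Key() would be built from
    return match.group(1)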
|
py | b40819c2be653ac2c4c8e4b659559d0b23014af8 | # coding: utf-8
"""
MailSlurp API
MailSlurp is an API for sending and receiving emails from dynamically allocated email addresses. It's designed for developers and QA teams to test applications, process inbound emails, send templated notifications, attachments, and more. ## Resources - [Homepage](https://www.mailslurp.com) - Get an [API KEY](https://app.mailslurp.com/sign-up/) - Generated [SDK Clients](https://www.mailslurp.com/docs/) - [Examples](https://github.com/mailslurp/examples) repository # noqa: E501
The version of the OpenAPI document: 6.5.2
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from mailslurp_client.configuration import Configuration
class PageEmailPreview(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'content': 'list[EmailPreview]',
'empty': 'bool',
'first': 'bool',
'last': 'bool',
'number': 'int',
'number_of_elements': 'int',
'pageable': 'Pageable',
'size': 'int',
'sort': 'Sort',
'total_elements': 'int',
'total_pages': 'int'
}
attribute_map = {
'content': 'content',
'empty': 'empty',
'first': 'first',
'last': 'last',
'number': 'number',
'number_of_elements': 'numberOfElements',
'pageable': 'pageable',
'size': 'size',
'sort': 'sort',
'total_elements': 'totalElements',
'total_pages': 'totalPages'
}
def __init__(self, content=None, empty=None, first=None, last=None, number=None, number_of_elements=None, pageable=None, size=None, sort=None, total_elements=None, total_pages=None, local_vars_configuration=None): # noqa: E501
"""PageEmailPreview - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._content = None
self._empty = None
self._first = None
self._last = None
self._number = None
self._number_of_elements = None
self._pageable = None
self._size = None
self._sort = None
self._total_elements = None
self._total_pages = None
self.discriminator = None
if content is not None:
self.content = content
if empty is not None:
self.empty = empty
if first is not None:
self.first = first
if last is not None:
self.last = last
if number is not None:
self.number = number
if number_of_elements is not None:
self.number_of_elements = number_of_elements
if pageable is not None:
self.pageable = pageable
if size is not None:
self.size = size
if sort is not None:
self.sort = sort
if total_elements is not None:
self.total_elements = total_elements
if total_pages is not None:
self.total_pages = total_pages
@property
def content(self):
"""Gets the content of this PageEmailPreview. # noqa: E501
:return: The content of this PageEmailPreview. # noqa: E501
:rtype: list[EmailPreview]
"""
return self._content
@content.setter
def content(self, content):
"""Sets the content of this PageEmailPreview.
:param content: The content of this PageEmailPreview. # noqa: E501
:type: list[EmailPreview]
"""
self._content = content
@property
def empty(self):
"""Gets the empty of this PageEmailPreview. # noqa: E501
:return: The empty of this PageEmailPreview. # noqa: E501
:rtype: bool
"""
return self._empty
@empty.setter
def empty(self, empty):
"""Sets the empty of this PageEmailPreview.
:param empty: The empty of this PageEmailPreview. # noqa: E501
:type: bool
"""
self._empty = empty
@property
def first(self):
"""Gets the first of this PageEmailPreview. # noqa: E501
:return: The first of this PageEmailPreview. # noqa: E501
:rtype: bool
"""
return self._first
@first.setter
def first(self, first):
"""Sets the first of this PageEmailPreview.
:param first: The first of this PageEmailPreview. # noqa: E501
:type: bool
"""
self._first = first
@property
def last(self):
"""Gets the last of this PageEmailPreview. # noqa: E501
:return: The last of this PageEmailPreview. # noqa: E501
:rtype: bool
"""
return self._last
@last.setter
def last(self, last):
"""Sets the last of this PageEmailPreview.
:param last: The last of this PageEmailPreview. # noqa: E501
:type: bool
"""
self._last = last
@property
def number(self):
"""Gets the number of this PageEmailPreview. # noqa: E501
:return: The number of this PageEmailPreview. # noqa: E501
:rtype: int
"""
return self._number
@number.setter
def number(self, number):
"""Sets the number of this PageEmailPreview.
:param number: The number of this PageEmailPreview. # noqa: E501
:type: int
"""
self._number = number
@property
def number_of_elements(self):
"""Gets the number_of_elements of this PageEmailPreview. # noqa: E501
:return: The number_of_elements of this PageEmailPreview. # noqa: E501
:rtype: int
"""
return self._number_of_elements
@number_of_elements.setter
def number_of_elements(self, number_of_elements):
"""Sets the number_of_elements of this PageEmailPreview.
:param number_of_elements: The number_of_elements of this PageEmailPreview. # noqa: E501
:type: int
"""
self._number_of_elements = number_of_elements
@property
def pageable(self):
"""Gets the pageable of this PageEmailPreview. # noqa: E501
:return: The pageable of this PageEmailPreview. # noqa: E501
:rtype: Pageable
"""
return self._pageable
@pageable.setter
def pageable(self, pageable):
"""Sets the pageable of this PageEmailPreview.
:param pageable: The pageable of this PageEmailPreview. # noqa: E501
:type: Pageable
"""
self._pageable = pageable
@property
def size(self):
"""Gets the size of this PageEmailPreview. # noqa: E501
:return: The size of this PageEmailPreview. # noqa: E501
:rtype: int
"""
return self._size
@size.setter
def size(self, size):
"""Sets the size of this PageEmailPreview.
:param size: The size of this PageEmailPreview. # noqa: E501
:type: int
"""
self._size = size
@property
def sort(self):
"""Gets the sort of this PageEmailPreview. # noqa: E501
:return: The sort of this PageEmailPreview. # noqa: E501
:rtype: Sort
"""
return self._sort
@sort.setter
def sort(self, sort):
"""Sets the sort of this PageEmailPreview.
:param sort: The sort of this PageEmailPreview. # noqa: E501
:type: Sort
"""
self._sort = sort
@property
def total_elements(self):
"""Gets the total_elements of this PageEmailPreview. # noqa: E501
:return: The total_elements of this PageEmailPreview. # noqa: E501
:rtype: int
"""
return self._total_elements
@total_elements.setter
def total_elements(self, total_elements):
"""Sets the total_elements of this PageEmailPreview.
:param total_elements: The total_elements of this PageEmailPreview. # noqa: E501
:type: int
"""
self._total_elements = total_elements
@property
def total_pages(self):
"""Gets the total_pages of this PageEmailPreview. # noqa: E501
:return: The total_pages of this PageEmailPreview. # noqa: E501
:rtype: int
"""
return self._total_pages
@total_pages.setter
def total_pages(self, total_pages):
"""Sets the total_pages of this PageEmailPreview.
:param total_pages: The total_pages of this PageEmailPreview. # noqa: E501
:type: int
"""
self._total_pages = total_pages
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PageEmailPreview):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, PageEmailPreview):
return True
return self.to_dict() != other.to_dict()
|
py | b40819f7277d0dd9b68b3b3ef6ec3a0910b23d8b | # -*- coding: utf-8 -*-
import torchvision.transforms as transforms
import os
import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
import time
import cv2
ori_image_dir = 'data_2000/Mesh_Film/npy/'
EPOCH = 1
test_BATCH_SIZE = 100
def bw2deform(bw):
assert type(bw) == np.ndarray
im_size = bw.shape[-1]
bw = ((bw + 1.)/2.)*im_size
x = np.arange(im_size)
y = np.arange(im_size)
xi, yi = np.meshgrid(x, y)
bw[0, :, :] = bw[0, :, :] - yi
bw[1, :, :] = bw[1, :, :] - xi
return bw/im_size
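# Illustrative sketch, not part of the original pipeline: for an identity
# backward map (bw[0] = row index, bw[1] = column index, rescaled to [-1, 1])
# bw2deform should return an (almost) all-zero deformation field.
def _bw2deform_identity_check(im_size=4):
    """Hypothetical self-check for bw2deform; safe to remove."""
    xi, yi = np.meshgrid(np.arange(im_size), np.arange(im_size))
    identity = np.stack([yi, xi]).astype(np.float64)  # (2, H, W), matching bw's layout
    identity = identity / im_size * 2. - 1.           # rescale pixel coords to [-1, 1]
    deform = bw2deform(identity.copy())
    return np.abs(deform).max()                       # expected to be ~0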
class filmDataset(Dataset):
def __init__(self, npy_dir, npy_dir_2=None):
self.npy_dir = npy_dir
self.npy_list = np.array([x.path for x in os.scandir(npy_dir) if x.name.endswith(".npy")])
if npy_dir_2!=None:
self.npy_list_2 = np.array([x.path for x in os.scandir(npy_dir_2) if x.name.endswith(".npy")])
self.npy_list = np.append(self.npy_list, self.npy_list_2)
self.npy_list.sort()
# self.input_size =(256, 256)
# print(self.record_files)
def __getitem__(self, index):
npy_path = self.npy_list[index]
"""loading"""
# data = np.load(self.npy_dir + '/' + npy_name, allow_pickle=True)[()]
data = np.load(npy_path, allow_pickle=True)[()]
ori = data['ori']
ab = data['ab']
# bmap = data['bmap']
depth = data['depth']
normal = data['normal']
uv = data['uv']
cmap = data['cmap']
background = data['background']
bw = data['bw']
# ori_1080 = data['ori_1080']
return torch.from_numpy(ori), \
torch.from_numpy(ab), \
torch.from_numpy(depth), \
torch.from_numpy(normal), \
torch.from_numpy(cmap), \
torch.from_numpy(uv), \
torch.from_numpy(background), \
torch.from_numpy(bw),\
# torch.from_numpy(ori_1080), \
# torch.from_numpy(bmap)
# torch.from_numpy(bmap), \
# torch.unsqueeze(torch.from_numpy(depth),0), \
def __len__(self):
return len(self.npy_list)
class filmDataset_with_name(Dataset):
def __init__(self, npy_dir, npy_dir_2=None):
self.npy_dir = npy_dir
self.npy_list = np.array([x.path for x in os.scandir(npy_dir) if x.name.endswith(".npy")])
if npy_dir_2!=None:
self.npy_list_2 = np.array([x.path for x in os.scandir(npy_dir_2) if x.name.endswith(".npy")])
self.npy_list = np.append(self.npy_list, self.npy_list_2)
self.npy_list.sort()
# self.input_size =(256, 256)
# print(self.record_files)
def __getitem__(self, index):
npy_path = self.npy_list[index]
"""loading"""
# data = np.load(self.npy_dir + '/' + npy_name, allow_pickle=True)[()]
data = np.load(npy_path, allow_pickle=True)[()]
ori = data['ori']
ab = data['ab']
# bmap = data['bmap']
depth = data['depth']
normal = data['normal']
uv = data['uv']
cmap = data['cmap']
background = data['background']
bw = data['bw']
name = npy_path.split('/')[-1].split('.')[0]
# ori_1080 = data['ori_1080']
return torch.from_numpy(ori), \
torch.from_numpy(ab), \
torch.from_numpy(depth), \
torch.from_numpy(normal), \
torch.from_numpy(cmap), \
torch.from_numpy(uv), \
torch.from_numpy(background), \
name,\
torch.from_numpy(bw),\
# torch.from_numpy(ori_1080), \
# torch.from_numpy(bmap)
# torch.from_numpy(bmap), \
# torch.unsqueeze(torch.from_numpy(depth),0), \
def __len__(self):
return len(self.npy_list)
class DeFilmDataset(Dataset):
def __init__(self, npy_dir):
self.npy_dir = npy_dir
self.npy_list = np.array([x.path for x in os.scandir(npy_dir) if x.name.endswith(".npy")])
self.npy_list.sort()
def __getitem__(self, index):
npy_path = self.npy_list[index]
"""loading"""
# data = np.load(self.npy_dir + '/' + npy_name, allow_pickle=True)[()]
data = np.load(npy_path, allow_pickle=True)[()]
ori = data['ori']
ab = data['ab']
depth = data['depth']
normal = data['normal']
uv = data['uv']
cmap = data['cmap']
background = data['background']
bw = data['bw']
deform = bw2deform(bw.copy())
name = npy_path.split('/')[-1].split('.')[0]
# ori_1080 = data['ori_1080']
return torch.from_numpy(ori), \
torch.from_numpy(ab), \
torch.from_numpy(depth), \
torch.from_numpy(normal), \
torch.from_numpy(cmap), \
torch.from_numpy(uv), \
torch.from_numpy(background), \
torch.from_numpy(bw),\
torch.from_numpy(deform),\
name,\
# torch.from_numpy(ori_1080), \
# torch.from_numpy(bmap)
# torch.from_numpy(bmap), \
# torch.unsqueeze(torch.from_numpy(depth),0), \
def __len__(self):
return len(self.npy_list)
class single_test(Dataset):
def __init__(self, npy_dir):
self.npy_dir = npy_dir
#self.npy_list = np.array([x.path for x in os.scandir(npy_dir) if x.name.endswith(".npy")])
self.npy_list = np.array([x.path for x in os.scandir(npy_dir)])
self.npy_list.sort()
self.npy_list = self.npy_list[:100]
def __getitem__(self, index):
npy_path = self.npy_list[index]
if npy_path[-3:] == 'npy':
#ori = np.load(npy_path)
ori = np.load(npy_path,allow_pickle=True)[()]['ori']
else:
ori = np.transpose((cv2.resize(cv2.imread(npy_path),(256,256))/255.*2. - 1.),(2,0,1)).astype(np.float32)
name = npy_path.split('/')[-1].split('.')[0]
return torch.from_numpy(ori), \
name,\
def __len__(self):
return len(self.npy_list)
def mainT():
device = torch.device("cuda")
#dataset_test = filmDataset(npy_dir=ori_image_dir)
ori_image_dir = '/home1/qiyuanwang/film_generate/npy_with_bw'
dataset_test = filmDataset_with_name(npy_dir=ori_image_dir)
dataset_test_loader = DataLoader(dataset_test,
batch_size=50,
num_workers=1,
shuffle=False,)
# collate_fn = collate_fn)
# collate_fn=callot.PadCollate(dim=0)) #
print('dataset_test_loader', dataset_test_loader)
for epoch in range(EPOCH):
start_time = time.time()
for i, data in enumerate(dataset_test_loader):
print('start')
"""
The format of data is a batch of tuples; each tuple holds the items returned by __getitem__ (ori, ab, depth, normal, cmap, uv, mask, name, bw).
"""
ori = data[0]
ab = data[1]
depth = data[2]
normal = data[3]
uv = data[5]
cmap = data[4]
mask = data[6]
name = data[7]
bw = data[8]
# uv = data[6]
ori, ab, depth, normal, uv, cmap = ori.to(device), ab.to(device), depth.to(device), normal.to(device), uv.to(device), cmap.to(device)
print('ori', ori.size())
print('It took {} seconds to load {} samples'.format(float(time.time()-start_time), test_BATCH_SIZE))
start_time = time.time()
print(name)
# duration = float(time.time()-start_time)
# print('It cost', duration, 'seconds')
if __name__ =='__main__':
mainT()
|
py | b4081ab58b5eaa197fc8c7381207b2a208402158 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from nefi2.model.algorithms._alg import *
import cv2
import numpy as np
__authors__ = {"Sebastian Schattner": "[email protected]"}
# Segmentation routines
THRESHOLD_FG_COLOR = 255
THRESHOLD_BG_COLOR = 0
# markers for grabcut, watershed denoting sure-fg, sure-bg and
# let-the-algorithm-figure-it-out
FG_MARKER = 255
BG_MARKER = 150
UNDECIDED_MARKER = 0
class AlgBody(Algorithm):
"""
Grabcut - Dilation Erosion Otsu algorithm implementation
"""
def __init__(self):
"""
Instance vars:
| *name* : name of the algorithm
| *parent* : name of the appropriate category
| *fg_iter* : Number of foreground iterations for markers
| *bg_iter* : Number of background iterations for markers
| *gc_iter* : Number of grabcut iterations
"""
Algorithm.__init__(self)
self.name = "Grabcut DE Otsus"
self.parent = "Segmentation"
self.fg_iter = IntegerSlider("Foreground Iteration", 0, 10, 1, 2)
self.bg_iter = IntegerSlider("Background Iteration", 0, 10, 1, 1)
self.gc_iter = IntegerSlider("GrabCut Iteration", 1, 10, 1, 5)
self.integer_sliders.append(self.fg_iter)
self.integer_sliders.append(self.bg_iter)
self.integer_sliders.append(self.gc_iter)
def process(self, args):
"""
Apply the GrabCut algorithm from the opencv package to the current
image, guided by a marker built from dilation, erosion and
Otsu's threshold.
Args:
| *args* : a list of arguments, e.g. image ndarray
"""
marker = self.erosion_dilation_marker(image=args[0],
erosion_iterations=self.fg_iter.value,
dilation_iterations=self.bg_iter.value,
threshold_strategy=self.otsus_threshold)
grabcut_marker = self.grabcut(image=args[0],
marker=marker,
grabcut_iterations=self.gc_iter.value)
seg = self.apply_mask_to_image(grabcut_marker, image=args[0])
self.result['img'] = cv2.cvtColor(seg, cv2.COLOR_RGB2GRAY)
def apply_mask_to_image(self, mask, image):
"""
Constructs the segmented image based on the original image and the
mask.
Args:
| *image* : An input image which is not altered
| *mask* : A mask containing foreground and background information
Returns:
A segmented image
"""
res = np.zeros_like(image)
res[mask == THRESHOLD_FG_COLOR] = [THRESHOLD_FG_COLOR] * 3
return res
def otsus_threshold(self, image, threshold_value=0,
threshold_type=cv2.THRESH_BINARY_INV, **_):
if len(image.shape) == 3:
greyscale_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
else:
greyscale_image = image
threshold_type += cv2.THRESH_OTSU
threshold_image = cv2.threshold(greyscale_image, threshold_value,
THRESHOLD_FG_COLOR, threshold_type)[1]
return threshold_image
def erosion_dilation_marker(self, image, erosion_iterations=2,
dilation_iterations=1,
threshold_strategy=otsus_threshold):
"""
Applies morphological transformations to obtain the marker. The areas
likely to be foreground are obtained by erosion. The areas likely to
be background are obtained by dilation.
The final marker is obtained by adding likely background to likely
foreground where areas not part of either are considered undecided.
Args:
| *image* : An input image; it is thresholded via *threshold_strategy*
Returns:
A marker subdividing image regions into likely foreground, likely
background and undecided pixels
"""
threshold_image = threshold_strategy(image)
# determine likely foreground by erosion
foreground_image = cv2.erode(threshold_image, None,
iterations=erosion_iterations)
# determine likely background by dilation
background_image_tmp = cv2.dilate(threshold_image, None,
iterations=dilation_iterations)
background_image = cv2.threshold(background_image_tmp, 0, BG_MARKER,
cv2.THRESH_BINARY_INV)[1]
# regions not part of either likely foreground nor likely background
# are considered undecided
marker = cv2.add(foreground_image, background_image)
return marker
def grabcut(self, image, marker, grabcut_iterations=5):
"""
Applies opencv's grabcut method iteratively to an input image. An
initial marker containing preliminary information on whether a pixel is
foreground, background or probably background serves as additional
input. The initial marker can be based on user input (color-picking),
or can be constructed with an automatic marker strategy. The marker is
updated and improved by the grabcut method iteratively. Finally, the
marker is used to obtain a mask classifying every pixel into foreground
or background.
Args:
| *image* : An input image which is not altered
| *marker* : A marker suitable for use with opencv's grabcut
Returns:
A mask image classifying every pixel into foreground or background
"""
# data structures grabcut needs to operate
background_model = np.zeros((1, 65), np.float64)
foreground_model = np.zeros((1, 65), np.float64)
# an empty mask to start from, matching the marker's shape and uint8 dtype
grabcut_mask = np.zeros_like(marker)
# set undecided pixels to grabcut's probably-foreground
grabcut_mask[marker == UNDECIDED_MARKER] = cv2.GC_PR_FGD
# set black pixel to grabcuts definitely background
grabcut_mask[marker == BG_MARKER] = cv2.GC_BGD
# set white pixel to grabcuts definitely foreground
grabcut_mask[marker == FG_MARKER] = cv2.GC_FGD
# run grabcut and let it figure out the undecided areas of the image
# and update the guiding grabcut_mask
cv2.grabCut(image, grabcut_mask, None, background_model,
foreground_model, grabcut_iterations,
mode = cv2.GC_INIT_WITH_MASK)
mask = np.zeros_like(grabcut_mask)
# replace probable background/foreground with definite
# background/foreground respectively and the final mask is done
mask[grabcut_mask == cv2.GC_FGD] = FG_MARKER
mask[grabcut_mask == cv2.GC_PR_FGD] = FG_MARKER
mask[grabcut_mask == cv2.GC_BGD] = BG_MARKER
mask[grabcut_mask == cv2.GC_PR_BGD] = BG_MARKER
return mask
if __name__ == '__main__':
pass
|
py | b4081bfb874775c47cc14dd1b10da5d6271a77d2 | from django.apps import AppConfig
class AccountConfig(AppConfig):
name = 'account'
def ready(self):
import account.signals |
py | b4081cd108d613643ca4c2ff8941d695b9af4d36 | # BSD 3-Clause License.
#
# Copyright (c) 2019-2022 Robert A. Milton. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Contains a suite of functions designed to test_data screening (order reduction) with high-dimensional distributions.
All functions herein are taken from https://salib.readthedocs.io.
Each function signature follows the format:
|
``def function_(X: MatrixLike, **kwargs)``
:X: The function argument, in the form of an ``(N,M)`` design Matrix.
:**kwargs: Function-specific parameters, normally fixed.
Returns: A ``Vector[0 : N-1, 1]`` evaluating ``function_(X[0 : N-1, :])``.
|
"""
from __future__ import annotations
from romcomma.base.definitions import *
from romcomma.data.storage import Repository
from romcomma.test import sampling
from SALib.test_functions import Ishigami, Sobol_G
class FunctionWithMeta:
""" A class for use with functions.sample(...). Encapsulates a function and its parameters."""
_DEFAULT = None
@classmethod
def _default(cls, **kwargs: Any) -> Dict[str, FunctionWithMeta]:
name = kwargs['name']
return {name: FunctionWithMeta(**kwargs)}
@classmethod
@property
def DEFAULT(cls) -> Dict[str, FunctionWithMeta]:
""" List of Default FunctionsWithMeta."""
if cls._DEFAULT is None:
cls._DEFAULT = {
**cls._default(name='sin.1', function=Ishigami.evaluate, loc=-np.pi, scale=2 * np.pi, A=0.0, B=0.0),
**cls._default(name='sin.2', function=Ishigami.evaluate, loc=-np.pi, scale=2 * np.pi, A=2.0, B=0.0),
**cls._default(name='ishigami', function=Ishigami.evaluate, loc=-np.pi, scale=2 * np.pi, A=7.0, B=0.1),
**cls._default(name='sobol_g', function=Sobol_G.evaluate, loc=0, scale=1, a=np.array([0, 1, 4.5, 9, 99])),
**cls._default(name='sobol_g2', function=Sobol_G.evaluate, loc=0, scale=1, a=np.array([0, 1, 4.5, 9, 99]),
alpha=np.array([2.0, 2.0, 2.0, 2.0, 2.0])),
}
return cls._DEFAULT
@property
def meta(self) -> Dict[str, Any]:
return self._meta | {key: (value.tolist() if isinstance(value, np.ndarray) else value) for key, value in self.parameters.items()}
def __call__(self, X: NP.Matrix, **kwargs) -> NP.Matrix:
kwargs.update(self.parameters)
return np.reshape(self._function(X * self._meta['scale'] + self._meta['loc'], **kwargs), (X.shape[0], 1))
def __init__(self, **kwargs):
self._meta = {key: kwargs.pop(key) for key in ('name', 'loc', 'scale')}
self._function = kwargs.pop('function')
self.parameters = kwargs.copy()
def sample(functions: Tuple[FunctionWithMeta], N: int, M: int, likelihood_variance: NP.MatrixLike, folder: PathLike,
sampling_method: Callable[[int, int, Any], NP.Matrix] = sampling.latin_hypercube, **kwargs) -> Repository:
""" Record a sample of test function responses.
Args:
functions: A tuple of test functions, of length L.
N: The number of samples (datapoints), N > 0.
M: The input dimensionality, M >= 0.
likelihood_variance: A noise (co)variance of shape (L,L) or (L,). The latter is interpreted as an (L,L) diagonal matrix.
Used to generate N random samples of Gaussian noise ~ N(0, noise_variance).
folder: The Repository.folder to create and record the results in.
sampling_method: A Callable sampling_method(N, M, **kwargs) -> X, which returns an (N,M) matrix.
kwargs: Passed directly to sampling_method.
Returns: A Repository containing N rows of M input columns and L output columns. The output is f(X) + noise.
"""
X = sampling_method(N, M, **kwargs)
likelihood_variance = np.atleast_2d(likelihood_variance)
origin_meta = {'sampling_method': sampling_method.__name__, 'noise_variance': likelihood_variance.tolist()}
noise = sampling.multivariate_gaussian_noise(N, likelihood_variance)
return apply(functions, X, noise, folder, origin_meta=origin_meta)
def apply(functions: Tuple[FunctionWithMeta], X: NP.Matrix, noise: NP.Matrix, folder: PathLike, **kwargs) -> Repository:
""" Record a sample of test function responses.
Args:
functions: A tuple of test functions, of length L.
X: An (N,M) design matrix of inputs.
noise: An (N,L) design matrix of fractional output noise per unit Y.
folder: The Repository.folder to create and record the results in.
Returns: A repo containing N rows of M input columns and L output columns. The output is f(X) + noise.
Raises: IndexError if dimensions are incompatible.
"""
X, noise = np.atleast_2d(X), np.atleast_2d(noise)
if min(X.shape) < 1:
raise IndexError(f'X.shape = {X.shape} does not consist of two non-zero dimensions.')
if min(noise.shape) < 1:
raise IndexError(f'noise.shape = {noise.shape} does not consist of two non-zero dimensions.')
if X.shape[0] != noise.shape[0]:
raise IndexError(f'X has {X.shape[0]} samples while noise has {noise.shape[0]} samples.')
if len(functions) == 1:
functions = functions * noise.shape[1]
elif len(functions) != noise.shape[1]:
raise IndexError(f'functions should be of length L, equal to noise.shape[1].')
meta = {'N': X.shape[0], 'functions': [f.meta for f in functions]}
meta = {'origin': meta | kwargs.get('origin_meta', {})}
Y = np.concatenate([f(X) for f in functions], axis=1)
std = np.reshape(np.std(Y, axis=0), (1, -1))
Y += noise * std
columns = [('X', f'X.{i:d}') for i in range(X.shape[1])] + [('Y', f'Y.{i:d}') for i in range(Y.shape[1])]
df = pd.DataFrame(np.concatenate((X, Y), axis=1), columns=pd.MultiIndex.from_tuples(columns), dtype=float)
return Repository.from_df(folder=folder, df=df, meta=meta)
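# Illustrative usage sketch, not part of the original module; the folder name
# and sample sizes below are arbitrary placeholders, and running it writes a
# small Repository to disk.
if __name__ == '__main__':
    _ishigami = FunctionWithMeta.DEFAULT['ishigami']
    # 200 latin-hypercube samples of 5 inputs, one noisy Ishigami output.
    _repo = sample(functions=(_ishigami,), N=200, M=5,
                   likelihood_variance=0.01, folder='ishigami.test')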
|
py | b4081e1e34a760c3b43f0f90eb442277ea5ec5bd | from output.models.nist_data.atomic.positive_integer.schema_instance.nistschema_sv_iv_atomic_positive_integer_max_exclusive_1_xsd.nistschema_sv_iv_atomic_positive_integer_max_exclusive_1 import NistschemaSvIvAtomicPositiveIntegerMaxExclusive1
__all__ = [
"NistschemaSvIvAtomicPositiveIntegerMaxExclusive1",
]
|
py | b4081e45181843e380dbb146dc92f5e233ccda1e | from typing import Optional, List, Any
from cleo.helpers import argument
from cleo.helpers import option
from cleo.io.null_io import NullIO
from poetry.core.utils.collections import nested_dict_set, nested_dict_get
from poetry.config.source import Source
from poetry.console.commands.command import Command
from poetry.factory import Factory
from poetry.repositories import Pool
class SourceAddCommand(Command):
name = "source add"
description = "Add source configuration for project."
arguments = [
argument(
"name",
"Source repository name.",
),
argument("url", "Source repository url."),
]
options = [
option(
"default",
"d",
"Set this source as the default (disable PyPI). A "
"default source will also be the fallback source if "
"you add other sources.",
),
option("secondary", "s", "Set this source as secondary."),
]
def handle(self) -> Optional[int]:
name = self.argument("name")
url = self.argument("url")
is_default = self.option("default")
is_secondary = self.option("secondary")
if is_default and is_secondary:
self.line_error(
"Cannot configure a source as both <c1>default</c1> and <c1>secondary</c1>."
)
return 1
new_source = Source(
name=name, url=url, default=is_default, secondary=is_secondary
)
existing_sources = self.poetry.get_sources()
for source in existing_sources:
if source.name == new_source.name:
self.line(
f"Source with name <c1>{name}</c1> already exits. Skipping addition."
)
return 0
elif source.default and is_default:
self.line_error(
f"<error>Source with name <c1>{source.name}</c1> is already set to default. "
f"Only one default source can be configured at a time.</error>"
)
return 1
self.line(f"Adding source with name <c1>{name}</c1>.")
new_source_dict = new_source.to_dict()
# ensure new source is valid. eg: invalid name etc.
self.poetry._pool = Pool()
try:
Factory.configure_sources(
self.poetry, [new_source_dict], self.poetry.config, NullIO()
)
self.poetry.pool.repository(name)
except ValueError as e:
self.line_error(
f"<error>Failed to validate addition of <c1>{name}</c1>: {e}</error>"
)
return 1
sources_path = ['tool', 'poetry', 'source']
with self.poetry.pyproject.edit() as data:
lst: List[Any] = nested_dict_get(data, sources_path)
if not lst:
lst = [new_source_dict]
nested_dict_set(data, sources_path, lst)
else:
lst.append(new_source_dict)
return 0
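# Illustrative CLI usage, not part of the original command class; the repository
# name and URL below are placeholders:
#
#   poetry source add my-mirror https://example.org/simple/
#   poetry source add --secondary my-mirror https://example.org/simple/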
|
py | b4081e6392534b98eddfb6f80d08db3ee1f82dfd | from tradenity.token_holder import AuthTokenHolder
from tradenity_django.sdk.ext.middleware import CurrentRequestMiddleware
class DjangoAuthTokenHolder(AuthTokenHolder):
AUTH_TOKEN_NAME = 'tradenity_auth_token'
def __init__(self):
super(DjangoAuthTokenHolder, self).__init__()
self._token = None
@property
def token(self):
session = CurrentRequestMiddleware.get_request().session
return session.get(self.AUTH_TOKEN_NAME, None)
@token.setter
def token(self, value):
session = CurrentRequestMiddleware.get_request().session
session[self.AUTH_TOKEN_NAME] = value
def reset(self):
session = CurrentRequestMiddleware.get_request().session
del session[self.AUTH_TOKEN_NAME]
|
py | b4081ea0df24dca96decec53a96398d1be4f27fa | import itk
filt = itk.MeshToPolyDataFilter.New()
|
py | b4081ef411b0f9c02b75894d79f32703cf3c1099 | """A Metric observes the output of a certain model, for example in the form of logits or
scores, and accumulates a particular metric with reference to some provided
targets. In the context of VisDial, we use Recall (@ 1, 5, 10), Mean Rank, Mean
Reciprocal Rank (MRR) and Normalized Discounted Cumulative Gain (NDCG).
Each ``Metric`` must at least implement three methods:
- ``observe``, update accumulated metric with currently observed outputs
and targets.
- ``retrieve`` to return the accumulated metric, and optionally reset the
internally accumulated metric (this is commonly done between two epochs
after validation).
- ``reset`` to explicitly reset the internally accumulated metric.
Caveat, if you wish to implement your own class of Metric, make sure you call
``detach`` on output tensors (like logits), else it will cause memory leaks.
"""
import torch
import numpy as np
def scores_to_ranks(scores: torch.Tensor):
"""Convert model output scores into ranks."""
batch_size, num_rounds, num_options = scores.size()
scores = scores.view(-1, num_options)
# sort in descending order - largest score gets highest rank
sorted_ranks, ranked_idx = scores.sort(1, descending=True)
# ranked_idx[i][j] holds the index of the score that would occupy position j;
# invert this mapping so that each score's own position holds its rank
ranks = ranked_idx.clone().fill_(0)
for i in range(ranked_idx.size(0)):
for j in range(num_options):
ranks[i][ranked_idx[i][j]] = j
# convert from 0-99 ranks to 1-100 ranks
ranks += 1
ranks = ranks.view(batch_size, num_rounds, num_options)
return ranks
class SparseGTMetrics(object):
"""A class to accumulate all metrics with sparse ground truth annotations.
These include Recall (@ 1, 5, 10), Mean Rank and Mean Reciprocal Rank.
"""
def __init__(self):
self._rank_list = []
self._rank_list_rnd = []
self.num_rounds = None
def observe(self, predicted_scores: torch.Tensor, target_ranks: torch.Tensor):
predicted_scores = predicted_scores.detach()
# shape: (batch_size, num_rounds, num_options)
predicted_ranks = scores_to_ranks(predicted_scores)
batch_size, num_rounds, num_options = predicted_ranks.size()
self.num_rounds = num_rounds
# collapse batch dimension
predicted_ranks = predicted_ranks.view(batch_size * num_rounds, num_options)
# shape: (batch_size * num_rounds, )
target_ranks = target_ranks.view(batch_size * num_rounds).long()
# shape: (batch_size * num_rounds, )
predicted_gt_ranks = predicted_ranks[torch.arange(batch_size * num_rounds), target_ranks]
self._rank_list.extend(list(predicted_gt_ranks.cpu().numpy()))
predicted_gt_ranks_rnd = predicted_gt_ranks.view(batch_size, num_rounds)
# predicted gt ranks
self._rank_list_rnd.append(predicted_gt_ranks_rnd.cpu().numpy())
def retrieve(self, reset: bool = True):
num_examples = len(self._rank_list)
if num_examples > 0:
# convert to numpy array for easy calculation.
__rank_list = torch.tensor(self._rank_list).float()
metrics = {
'r@1': torch.mean((__rank_list <= 1).float()).item(),
'r@5': torch.mean((__rank_list <= 5).float()).item(),
'r@10': torch.mean((__rank_list <= 10).float()).item(),
'mean': torch.mean(__rank_list).item(),
'mrr': torch.mean(__rank_list.reciprocal()).item()
}
# add round metrics
_rank_list_rnd = np.concatenate(self._rank_list_rnd)
_rank_list_rnd = _rank_list_rnd.astype(float)
r_1_rnd = np.mean(_rank_list_rnd <= 1, axis=0)
r_5_rnd = np.mean(_rank_list_rnd <= 5, axis=0)
r_10_rnd = np.mean(_rank_list_rnd <= 10, axis=0)
mean_rnd = np.mean(_rank_list_rnd, axis=0)
mrr_rnd = np.mean(np.reciprocal(_rank_list_rnd), axis=0)
for rnd in range(1, self.num_rounds + 1):
metrics['r_1' + '_round_' + str(rnd)] = r_1_rnd[rnd - 1]
metrics['r_5' + '_round_' + str(rnd)] = r_5_rnd[rnd - 1]
metrics['r_10' + '_round_' + str(rnd)] = r_10_rnd[rnd - 1]
metrics['mean' + '_round_' + str(rnd)] = mean_rnd[rnd - 1]
metrics['mrr' + '_round_' + str(rnd)] = mrr_rnd[rnd - 1]
else:
metrics = {}
if reset:
self.reset()
return metrics
def reset(self):
self._rank_list = []
self._rank_list_rnd = []
class NDCG(object):
def __init__(self):
self._ndcg_numerator = 0.0
self._ndcg_denominator = 0.0
def observe(self, predicted_scores: torch.Tensor, target_relevance: torch.Tensor):
"""Observe model output scores and target ground truth relevance and
accumulate NDCG metric.
Parameters
----------
predicted_scores: torch.Tensor
A tensor of shape (batch_size, num_options), because dense
annotations are available for 1 randomly picked round out of 10.
target_relevance: torch.Tensor
A tensor of shape same as predicted scores, indicating ground truth
relevance of each answer option for a particular round.
"""
predicted_scores = predicted_scores.detach()
# shape: (batch_size, 1, num_options)
predicted_scores = predicted_scores.unsqueeze(1)
predicted_ranks = scores_to_ranks(predicted_scores)
# shape: (batch_size, num_options)
predicted_ranks = predicted_ranks.squeeze()
batch_size, num_options = predicted_ranks.size()
k = torch.sum(target_relevance != 0, dim=-1)
# shape: (batch_size, num_options)
_, rankings = torch.sort(predicted_ranks, dim=-1)
# Sort relevance in descending order so the highest relevance gets the top rank.
_, best_rankings = torch.sort(target_relevance, dim=-1, descending=True)
# shape: (batch_size, )
batch_ndcg = []
for batch_index in range(batch_size):
num_relevant = k[batch_index]
dcg = self._dcg(
rankings[batch_index][:num_relevant],
target_relevance[batch_index],
)
best_dcg = self._dcg(
best_rankings[batch_index][:num_relevant],
target_relevance[batch_index],
)
batch_ndcg.append(dcg / best_dcg)
self._ndcg_denominator += batch_size
self._ndcg_numerator += sum(batch_ndcg)
def _dcg(self, rankings: torch.Tensor, relevance: torch.Tensor):
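# DCG = sum over positions i (starting at 1) of relevance_i / log2(i + 1);
# hence the discounts below are log2(arange(n) + 2).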
sorted_relevance = relevance[rankings].cpu().float()
discounts = torch.log2(torch.arange(len(rankings)).float() + 2)
return torch.sum(sorted_relevance / discounts, dim=-1)
def retrieve(self, reset: bool = True):
if self._ndcg_denominator > 0:
metrics = {'ndcg': float(self._ndcg_numerator / self._ndcg_denominator)}
else:
metrics = {}
if reset:
self.reset()
return metrics
def reset(self):
self._ndcg_numerator = 0.0
self._ndcg_denominator = 0.0
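# Minimal sketch, not part of the original module, exercising the metrics with
# made-up scores. Shapes follow the conventions above: the sparse metrics take
# (batch, rounds, options) scores, NDCG takes (batch, options) scores.
if __name__ == "__main__":
    _scores = torch.randn(2, 10, 100)
    _gt_ranks = torch.randint(0, 100, (2, 10))
    _sparse = SparseGTMetrics()
    _sparse.observe(_scores, _gt_ranks)
    print(_sparse.retrieve(reset=True))
    _ndcg = NDCG()
    _ndcg.observe(torch.randn(2, 100), torch.rand(2, 100))
    print(_ndcg.retrieve(reset=True))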
|
py | b4081f109e0c01502a80b90111f8373eae24c128 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class FirewallPolicyRuleCollectionGroupsOperations:
"""FirewallPolicyRuleCollectionGroupsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
firewall_policy_name: str,
rule_collection_group_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'firewallPolicyName': self._serialize.url("firewall_policy_name", firewall_policy_name, 'str'),
'ruleCollectionGroupName': self._serialize.url("rule_collection_group_name", rule_collection_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}/ruleCollectionGroups/{ruleCollectionGroupName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
firewall_policy_name: str,
rule_collection_group_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified FirewallPolicyRuleCollectionGroup.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param firewall_policy_name: The name of the Firewall Policy.
:type firewall_policy_name: str
:param rule_collection_group_name: The name of the FirewallPolicyRuleCollectionGroup.
:type rule_collection_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
firewall_policy_name=firewall_policy_name,
rule_collection_group_name=rule_collection_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'firewallPolicyName': self._serialize.url("firewall_policy_name", firewall_policy_name, 'str'),
'ruleCollectionGroupName': self._serialize.url("rule_collection_group_name", rule_collection_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}/ruleCollectionGroups/{ruleCollectionGroupName}'} # type: ignore
async def get(
self,
resource_group_name: str,
firewall_policy_name: str,
rule_collection_group_name: str,
**kwargs: Any
) -> "_models.FirewallPolicyRuleCollectionGroup":
"""Gets the specified FirewallPolicyRuleCollectionGroup.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param firewall_policy_name: The name of the Firewall Policy.
:type firewall_policy_name: str
:param rule_collection_group_name: The name of the FirewallPolicyRuleCollectionGroup.
:type rule_collection_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FirewallPolicyRuleCollectionGroup, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_07_01.models.FirewallPolicyRuleCollectionGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FirewallPolicyRuleCollectionGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'firewallPolicyName': self._serialize.url("firewall_policy_name", firewall_policy_name, 'str'),
'ruleCollectionGroupName': self._serialize.url("rule_collection_group_name", rule_collection_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('FirewallPolicyRuleCollectionGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}/ruleCollectionGroups/{ruleCollectionGroupName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
firewall_policy_name: str,
rule_collection_group_name: str,
parameters: "_models.FirewallPolicyRuleCollectionGroup",
**kwargs: Any
) -> "_models.FirewallPolicyRuleCollectionGroup":
cls = kwargs.pop('cls', None) # type: ClsType["_models.FirewallPolicyRuleCollectionGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'firewallPolicyName': self._serialize.url("firewall_policy_name", firewall_policy_name, 'str'),
'ruleCollectionGroupName': self._serialize.url("rule_collection_group_name", rule_collection_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'FirewallPolicyRuleCollectionGroup')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('FirewallPolicyRuleCollectionGroup', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('FirewallPolicyRuleCollectionGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}/ruleCollectionGroups/{ruleCollectionGroupName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
firewall_policy_name: str,
rule_collection_group_name: str,
parameters: "_models.FirewallPolicyRuleCollectionGroup",
**kwargs: Any
) -> AsyncLROPoller["_models.FirewallPolicyRuleCollectionGroup"]:
"""Creates or updates the specified FirewallPolicyRuleCollectionGroup.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param firewall_policy_name: The name of the Firewall Policy.
:type firewall_policy_name: str
:param rule_collection_group_name: The name of the FirewallPolicyRuleCollectionGroup.
:type rule_collection_group_name: str
:param parameters: Parameters supplied to the create or update
FirewallPolicyRuleCollectionGroup operation.
:type parameters: ~azure.mgmt.network.v2020_07_01.models.FirewallPolicyRuleCollectionGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either FirewallPolicyRuleCollectionGroup or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_07_01.models.FirewallPolicyRuleCollectionGroup]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.FirewallPolicyRuleCollectionGroup"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
firewall_policy_name=firewall_policy_name,
rule_collection_group_name=rule_collection_group_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('FirewallPolicyRuleCollectionGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'firewallPolicyName': self._serialize.url("firewall_policy_name", firewall_policy_name, 'str'),
'ruleCollectionGroupName': self._serialize.url("rule_collection_group_name", rule_collection_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}/ruleCollectionGroups/{ruleCollectionGroupName}'} # type: ignore
def list(
self,
resource_group_name: str,
firewall_policy_name: str,
**kwargs: Any
) -> AsyncIterable["_models.FirewallPolicyRuleCollectionGroupListResult"]:
"""Lists all FirewallPolicyRuleCollectionGroups in a FirewallPolicy resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param firewall_policy_name: The name of the Firewall Policy.
:type firewall_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either FirewallPolicyRuleCollectionGroupListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_07_01.models.FirewallPolicyRuleCollectionGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FirewallPolicyRuleCollectionGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'firewallPolicyName': self._serialize.url("firewall_policy_name", firewall_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('FirewallPolicyRuleCollectionGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}/ruleCollectionGroups'} # type: ignore
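# Minimal usage sketch for the operations above, kept as a comment because it needs live
# Azure credentials. It assumes azure-identity's async DefaultAzureCredential and the aio
# NetworkManagementClient shipped with this package version, and that this operation group
# is exposed as the client attribute 'firewall_policy_rule_collection_groups'; parameter
# names follow the method signatures defined in this file, and the resource names are
# placeholders.
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.network.v2020_07_01.aio import NetworkManagementClient
#
#   async def example(subscription_id: str) -> None:
#       async with DefaultAzureCredential() as credential:
#           async with NetworkManagementClient(credential, subscription_id) as client:
#               ops = client.firewall_policy_rule_collection_groups
#               group = await ops.get("my-rg", "my-policy", "my-rule-group")
#               print(group.name)
#               async for item in ops.list("my-rg", "my-policy"):
#                   print(item.name)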
|
py | b40820892433e3d71d101a5f4b836d9d0d8483b6 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import os
import shutil
from optparse import OptionParser
import cmd
import random
from collections import OrderedDict
from marvin.marvinInit import MarvinInit
from marvin.deployDataCenter import DeployDataCenters
from marvin.cloudstackException import GetDetailExceptionInfo
from marvin.codegenerator import CodeGenerator
from marvin.codes import (SUCCESS,
FAILED,
EXCEPTION
)
from marvin.tcExecuteEngine import TestCaseExecuteEngine
class VerifyAndExit(object):
def __init__(self, msg):
self.msg = msg
def __call__(self, original_func):
def new_function(*args, **kwargs):
exit_check = False
try:
if original_func(*args, **kwargs) == FAILED:
exit_check = True
except Exception as e:
print "===Exception.Please Check:===", e
exit_check = True
finally:
if exit_check:
print "==== %s ====" % self.msg
                    print MarvinCliHelp.print_cmds_help()
sys.exit(1)
return new_function
class MarvinCliCommands(object):
cmds_info = OrderedDict({
'deploydc':
{
'summary': 'for deploying a datacenter',
'options': ['*config-file'],
'help': 'marvincli deploydc config-file=<marvin-config-file EX: setup/dev/advanced.cfg file>',
'desc': 'deploys a data center using the config file provided'
},
'deploydc_and_runtest':
{
'summary': 'for deploying a datacenter (and) running tests, either test suite (or) directory of test suites',
'options': ['*config-file', '*tc-path', 'zone', 'hyp-type', 'required_hardware'],
'help': 'marvincli deploydc_and_runtest config-file=<path_to_marvin_cfg EX: setup/dev/advanced.cfg>'
'tc-path=<test suite or test suite folder path EX: test/integration/smoke/>'
'zone=<name of the zone> hyp-type=<hypervisor_type EX: xen,kvm,vmware etc> required_hardware=<true\\false>',
'desc': 'deploys a data center using the config file provided, and runs test cases using the test suite or directory of test suites provided. '
'If zone to run against is not provided, then default zone mentioned in config file is provided '
'If hyp-type information is not provided, first hypervisor from config file is taken. '
'If required_hardware option is not provided, then it is set to false'
},
'generateapis_from_endpoint':
{
'summary': 'for generating apis from cs end point',
'options': ['*cs-folder-path', 'end-point'],
'help': 'marvincli generateapis_from_endpoint cs-folder-path=<cloudstack code root dir EX: /root/cs-4.5/cloudstack/>'
'end-point=<CS Endpoint ip EX: localhost>',
'desc': 'generates cloudstackAPI directory with CS apis information from cloudstack endpoint. '
'If end-point information is not provided, localhost is considered as default'
},
'generateapis_from_apispecfile':
{
'summary': 'for generating apis from api spec file',
'options': ['*cs-folder-path', 'api-spec-file'],
'help': 'marvincli generateapis_from_apispecfile cs-folder-path=<cloudstack code root dir EX: /root/cs-4.5/cloudstack/>'
'api-spec-file=<api spec file EX: /etc/cloud/cli/commands.xml>',
'desc': 'generates cloudstackAPI directory with CS apis information from cloudstack api spec file. '
'If spec file information is not provided, /etc/cloud/cli/commands.xml is considered as default'
},
'runtest':
{
'summary': 'for running test cases, either test suite (or) directory of test suites',
'options': ['*config-file', '*tc-path', 'required_hardware', 'zone', 'hyp-type'],
'help': 'marvincli runtest config-file=<path_to_marvin_config> tc-path=test/integration/smoke'
'required_hardware=<true\\false> zone=<name of zone> hyp-type=<xenserver\\kvm\\vmware> etc',
'desc': 'runs marvin integration tests against CS using config file, test suite path or directory of test suites are provided as input for running tests',
},
'sync_and_install':
{
'summary': 'for syncing apis and installing marvin using cs endpoint',
'options': ['*cs-folder-path', 'end-point'],
'help': 'marvincli sync_and_install cs-folder-path = <cloudstack code root dir EX: /root/cs-4.5/cloudstack/>'
'end-point = <CS installed host ip EX: localhost>',
'desc': 'generates cloudstackAPI directory with CS apis information from cloudstack end-point (and) installs new marvin.'
'If end-point information is not provided, localhost is considered as default'
},
'build_and_install':
{
'summary': 'for building and installing marvin using spec file',
'options': ['*cs-folder-path', 'api-sync-file'],
'help': 'marvincli build_and_install cs-folder-path = <cloudstack code root dir EX: /root/cs-4.5/cloudstack/>'
'api-sync-file = <api spec file generated by cs EX: /etc/cloud/cli/commands.xml>',
'desc': 'generates cloudstackAPI directory with CS apis information from cloudstack api-spec-file (and) installs new marvin.'
'If api spec file information is not provided, /etc/cloud/cli/commands.xml is considered as default'
},
'version':
{
'summary': 'for printing marvincli version',
'options': ['-v (or) --version'],
'help': 'marvincli -v (or) marvincli --version',
'desc': 'prints the version of marvincli'
}
})
class MarvinCliHelp(object):
@classmethod
def print_cmds_help(cls):
msg = ''
for cmd_name, cmd_txt in MarvinCliCommands.cmds_info.items():
msg = msg + \
'\n----------------------------------------------------\n'
cmd_info = ShellColor.BOLD + ShellColor.RED + \
'cmd_name:%s' % str(cmd_name) + ShellColor.END
for key, value in cmd_txt.iteritems():
cmd_info = cmd_info + '\n' + \
str(key) + ' : ' + str(value).strip('\n')
msg = msg + cmd_info
# return ShellColor.BOLD + ShellColor.RED + msg + ShellColor.END
return msg
@classmethod
def print_msg(cls, msg):
if msg:
return ShellColor.BOLD + ShellColor.RED + msg + ShellColor.END
class ShellColor(object):
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
ITALICS = '\x1B[3m'
#VERSION = "4.5.1-SNAPSHOT"
class MarvinCli(cmd.Cmd, object):
def __init__(self):
self.__configFile = None
self.__deployFlag = False
self.__zone = None
self.__hypervisorType = None
self.__tcPath = None
self.__testClient = None
self.__tcRunLogger = None
self.__parsedConfig = None
self.__resultStream = None
self.__logFolderPath = None
self.__testRunner = None
self.__requiredHw = False
self.__csFolder = "."
cmd.Cmd.__init__(self)
@VerifyAndExit(
"cmd failed, may be invalid input options, please check help")
def parse_input_deploy(self, inputs=None):
'''
Parses,reads the options and verifies for the config file
'''
if inputs:
out_dict = {}
args = inputs.strip().split(' ')
for item in args:
(key, value) = item.split('=')
out_dict[key] = value
self.__configFile = out_dict.get('config-file', '')
if not self.__configFile:
return FAILED
print "\n==== Parsing Input Options Successful ===="
return SUCCESS
return FAILED
@VerifyAndExit(
"cmd failed, may be invalid input options, please check help")
def parse_input_runtcs(self, inputs):
'''
Parses,reads the options and verifies for the config file
'''
if inputs:
out_dict = {}
args = inputs.strip().split(' ')
for item in args:
(key, value) = item.split('=')
out_dict[key] = value
self.__configFile = out_dict.get('config-file', None)
self.__zone = out_dict.get("zone", None)
self.__hypervisorType = out_dict.get("hyp-type", None)
            self.__tcPath = out_dict.get("tc-path", None)
            self.__requiredHw = out_dict.get("required_hardware", out_dict.get("required-hardware", False))
if not all([self.__tcPath, self.__configFile]):
return FAILED
print "\n==== Parsing Input Options Successful ===="
return SUCCESS
return FAILED
@VerifyAndExit("Marvin initialization failed, please check")
def start_marvin(self):
'''
Initialize the Marvin
'''
try:
obj_marvininit = MarvinInit(config_file=self.__configFile,
deploy_dc_flag=self.__deployFlag,
zone=self.__zone,
hypervisor_type=self.__hypervisorType,
user_logfolder_path=None)
if obj_marvininit and obj_marvininit.init() == SUCCESS:
self.__testClient = obj_marvininit.getTestClient()
self.__tcRunLogger = obj_marvininit.getLogger()
self.__parsedConfig = obj_marvininit.getParsedConfig()
self.__resultStream = obj_marvininit.getResultFile()
self.__logFolderPath = obj_marvininit.getLogFolderPath()
return SUCCESS
return FAILED
except Exception as e:
print "====Exception Occurred under start_marvin: %s ====" % \
GetDetailExceptionInfo(e)
return FAILED
def run_test_suites(self):
print "\n==== Started Running Test Cases ===="
xunit_out_path = "/tmp/marvin_xunit_out" + \
str(random.randrange(1, 10000)) + ".xml"
marvin_tc_run_cmd = "nosetests-2.7 -s --with-marvin --marvin-config=%s --with-xunit --xunit-file=%s %s -a tags=advanced, required_hardware=%s --zone=%s --hypervisor=%s"
        # a single test suite file and a directory of suites are handed to nosetests the same way
        if os.path.isfile(self.__tcPath) or os.path.isdir(self.__tcPath):
            marvin_tc_run_cmd = marvin_tc_run_cmd % (self.__configFile, xunit_out_path, self.__tcPath,
                                                     self.__requiredHw, self.__zone, self.__hypervisorType)
os.system(marvin_tc_run_cmd)
'''
engine = TestCaseExecuteEngine(self.__testClient,
self.__parsedConfig,
tc_logger=self.__tcRunLogger)
if os.path.isfile(self.__tcPath):
engine.loadTestsFromFile(self.__tcPath)
elif os.path.isdir(self.__tcPath):
engine.loadTestsFromDir(self.__tcPath)
engine.run()
'''
print "\n==== Running Test Cases Successful ===="
@VerifyAndExit(
"cmd failed, may be invalid input options, please check help")
def do_deploydc(self, args):
try:
self.__deployFlag = True
self.parse_input_deploy(inputs=args)
self.start_marvin()
return SUCCESS
except Exception as e:
print "==== deploy cmd failed :%s ==== " % str(e)
return FAILED
@VerifyAndExit(
"cmd failed, may be invalid input options, please check help")
def do_deploydc_and_runtest(self, args):
try:
            self.do_deploydc(args)
            self.parse_input_runtcs(args)
self.run_test_suites()
return SUCCESS
except Exception as e:
print "==== deploydc cmd failed:%s ==== " % str(e)
return FAILED
@VerifyAndExit(
"cmd failed, may be invalid input options, please check help")
def do_generateapis_from_apispecfile(self, args):
api_spec_file = "/etc/cloud/cli/commands.xml"
cs_api_folder = "."
if args:
inp = args.strip().split(' ')
for items in inp:
(key, value) = items.split('=')
if key.lower() == 'api-spec-file':
if os.path.exists(value):
api_spec_file = value
elif not os.path.exists(api_spec_file):
                        print "=== Mentioned api spec file :%s does not exist ===" % str(api_spec_file)
sys.exit(1)
if key.lower() == 'cs-folder-path':
cs_api_folder = self.create_marvin_api_folder(value)
cg = CodeGenerator(cs_api_folder)
if api_spec_file:
try:
cg.generateCodeFromXML(api_spec_file)
return SUCCESS
except Exception as e:
                print "==== Generating apis from api spec file failed: %s ====" % str(e)
return FAILED
return FAILED
def create_marvin_api_folder(self, cs_folder_path='.'):
cs_api_folder = cs_folder_path + "/tools/marvin/marvin/cloudstackAPI"
        # start from a clean cloudstackAPI folder before regenerating the api modules
        if os.path.exists(cs_api_folder):
            shutil.rmtree(cs_api_folder)
        os.makedirs(cs_api_folder)
return cs_api_folder
@VerifyAndExit(
"cmd failed, may be invalid input options, please check help")
def do_generateapis_from_endpoint(self, args):
        endpoint_url = 'http://%s:8096/client/api?command=listApis&response=json'
        cs_api_folder = "."
        cs_end_point = "localhost"  # documented default when no end-point option is supplied
        if args:
            inp = args.strip().split(' ')
            for items in inp:
                (key, value) = items.split('=')
                if key.lower() in ('end-point', 'endpoint'):
                    cs_end_point = value
if key.lower() == 'cs-folder-path':
cs_api_folder = self.create_marvin_api_folder(value)
cg = CodeGenerator(cs_api_folder)
if cs_end_point:
try:
endpoint_url = endpoint_url % str(cs_end_point)
cg.generateCodeFromJSON(endpoint_url)
return SUCCESS
except Exception as e:
                print "==== Generating apis from end point failed: %s ====" % str(e)
return FAILED
return FAILED
@VerifyAndExit(
"cmd failed, may be invalid input options, please check help")
def do_runtest(self, args):
try:
self.parse_input_runtcs(args)
self.start_marvin()
self.run_test_suites()
return SUCCESS
except Exception as e:
            print "==== run test failed: %s ====" % str(e)
return FAILED
def install_marvin(self):
if self.__csFolder:
marvin_setup_file_path = self.__csFolder + "/tools/marvin/setup.py"
try:
os.system("python %s install" % str(marvin_setup_file_path))
print "==== Marvin Installed Successfully ===="
except Exception as e:
print "==== Marvin Installation Failed ===="
@VerifyAndExit(
"cmd failed, may be invalid input options, please check help")
def do_build_and_install(self, args):
try:
self.do_generateapis_from_apispecfile(args)
self.install_marvin()
return SUCCESS
except Exception as e:
            print "==== build from api spec file and install marvin failed: %s ====" % str(e)
return FAILED
@VerifyAndExit(
"cmd failed, may be invalid input options, please check help")
def do_sync_and_install(self, args):
try:
self.do_generateapis_from_endpoint(args)
self.install_marvin()
return SUCCESS
except Exception as e:
            print "==== sync from end point and install marvin failed: %s ====" % str(e)
return FAILED
class MarvinCliParser(OptionParser):
def format_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
result = []
if self.usage:
            result.append(MarvinCliHelp.print_msg("\nUsage: marvincli [cmd] [options]. See the cmds below for more information. "
                                                  "(*) signifies mandatory fields.\n\n"))
self.description = MarvinCliHelp.print_cmds_help()
if self.description:
result.append(self.format_description(formatter) + "\n")
return "".join(result)
def main():
parser = MarvinCliParser()
parser.add_option("-v", "--version",
action="store_true", dest="version", default=False,
help="prints marvin cli version information")
(options, args) = parser.parse_args()
if options.version:
        # print the version directly; the string mirrors the commented-out VERSION constant in ShellColor
        print MarvinCliHelp.print_msg("\nMarvin CLI Version: 4.5.1-SNAPSHOT")
sys.exit(0)
if len(sys.argv) > 1:
if sys.argv[1].lower() not in MarvinCliCommands.cmds_info.keys():
print "\n==== Invalid Command ===="
sys.exit(1)
args = ' '.join(args)
if '-h' in args or '--help' in args:
        print MarvinCliCommands.cmds_info[sys.argv[1]]
else:
MarvinCli().onecmd(args)
sys.exit(0)
if __name__ == "__main__":
main()
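# Example shell invocations, taken from the per-command help strings above; the paths,
# zone, hypervisor and end-point values are placeholders for the local setup:
#
#   marvincli deploydc config-file=setup/dev/advanced.cfg
#   marvincli runtest config-file=setup/dev/advanced.cfg tc-path=test/integration/smoke required_hardware=false zone=zone1 hyp-type=kvm
#   marvincli sync_and_install cs-folder-path=/root/cs-4.5/cloudstack/ end-point=localhost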
|
py | b40820e1533f14f0e57176a9b12960dd714f3422 | from .bases import *
from .optimizers import *
__all__ = ["Optimizer", "GradientDescentMixin", "optimizer_dict"]
|
py | b40820ec2b1ec589be9c0a0451c3f1bc3ec4ee49 | # import pytest
import recipeasy.data.util as data_util
def test_get_foods():
food_data = data_util.get_foods()
assert isinstance(food_data, dict)
|
py | b40821769a82ebb1aafa90f99d8443f35d4cf501 | from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('notifications', '0003_lastseennotification'),
]
operations = [
migrations.AlterField(
model_name='lastseennotification',
name='user',
field=models.OneToOneField(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE),
preserve_default=True,
),
]
|
py | b4082218a10cc703b1599705527cd2ad7fa9d663 | """
Implements counterwallet enhanced asset info and betting feed support as a counterblock plugin
Python 3.x since v1.4.0
"""
import logging
import decimal
import json
import base64
from counterblock.lib import config, util
from counterblock.lib.modules import BETTING_PRIORITY_PARSE_BROADCAST, ipfs
from counterblock.lib.processor import MessageProcessor, StartUpProcessor, CaughtUpProcessor, RollbackProcessor, API, start_task
FEED_MAX_RETRY = 3
D = decimal.Decimal
logger = logging.getLogger(__name__)
def sanitize_json_data(data):
if 'operator' in data:
data['operator']['name'] = util.sanitize_eliteness(data['operator']['name'])
if 'description' in data['operator']:
data['operator']['description'] = util.sanitize_eliteness(data['operator']['description'])
data['title'] = util.sanitize_eliteness(data['title'])
if 'description' in data:
data['description'] = util.sanitize_eliteness(data['description'])
if 'targets' in data:
for i in range(len(data['targets'])):
data['targets'][i]['text'] = util.sanitize_eliteness(data['targets'][i]['text'])
if 'description' in data['targets'][i]:
data['targets'][i]['description'] = util.sanitize_eliteness(data['targets'][i]['description'])
if 'labels' in data['targets'][i]:
data['targets'][i]['labels']['equal'] = util.sanitize_eliteness(data['targets'][i]['labels']['equal'])
data['targets'][i]['labels']['not_equal'] = util.sanitize_eliteness(data['targets'][i]['labels']['not_equal'])
if 'customs' in data:
for key in data['customs']:
if isinstance(data['customs'][key], str):
data['customs'][key] = util.sanitize_eliteness(data['customs'][key])
return data
def get_feeds_by_source_addresses(addresses):
conditions = {'source': {'$in': addresses}}
feeds = config.mongo_db.feeds.find(conditions, projection={'_id': False})
feeds_by_source = {}
for feed in feeds:
feeds_by_source[feed['source']] = feed
return feeds_by_source
def get_feed_counters(feed_address):
counters = {}
sql = 'SELECT COUNT(*) AS bet_count, SUM(wager_quantity) AS wager_quantity, SUM(wager_remaining) AS wager_remaining, status FROM bets '
sql += 'WHERE feed_address=? GROUP BY status ORDER BY status DESC'
bindings = [feed_address]
params = {
'query': sql,
'bindings': bindings
}
counters['bets'] = util.call_jsonrpc_api('sql', params)['result']
return counters
@API.add_method
def get_bets(bet_type, feed_address, deadline, target_value=None, leverage=5040):
limit = 50
bindings = []
sql = 'SELECT * FROM bets WHERE counterwager_remaining>0 AND '
sql += 'bet_type=? AND feed_address=? AND leverage=? AND deadline=? '
bindings += [bet_type, feed_address, leverage, deadline]
if target_value is not None:
sql += 'AND target_value=? '
bindings.append(target_value)
sql += 'ORDER BY ((counterwager_quantity+0.0)/(wager_quantity+0.0)) ASC LIMIT ?'
bindings.append(limit)
params = {
'query': sql,
'bindings': bindings
}
return util.call_jsonrpc_api('sql', params)['result']
@API.add_method
def get_user_bets(addresses=[], status="open"):
params = {
'filters': {
'field': 'source',
'op': 'IN',
'value': addresses
},
'status': status,
'order_by': 'tx_index',
'order_dir': 'DESC',
'limit': 100
}
bets = util.call_jsonrpc_api('get_bets', params)['result']
sources = {}
for bet in bets:
sources[bet['feed_address']] = True
return {
'bets': bets,
'feeds': get_feeds_by_source_addresses(list(sources.keys()))
}
@API.add_method
def get_feed(address_or_url=''):
conditions = {
'$or': [{'source': address_or_url}, {'info_url': address_or_url}],
'info_status': 'valid'
}
result = {}
feeds = config.mongo_db.feeds.find(conditions, projection={'_id': False}, limit=1)
for feed in feeds:
result = feed
result['counters'] = get_feed_counters(feed['source'])
if 'counters' not in result:
params = {
'filters': {
'field': 'source',
'op': '=',
'value': address_or_url
},
'order_by': 'tx_index',
'order_dir': 'DESC',
'limit': 10
}
broadcasts = util.call_jsonrpc_api('get_broadcasts', params)['result']
if broadcasts:
return {
'broadcasts': broadcasts,
'counters': get_feed_counters(address_or_url)
}
return result
@API.add_method
def get_feeds_by_source(addresses=[]):
feed = get_feeds_by_source_addresses(addresses)
return feed
@API.add_method
def parse_base64_feed(base64_feed):
decoded_feed = base64.b64decode(base64_feed)
feed = json.loads(decoded_feed)
if not isinstance(feed, dict) or 'feed' not in feed:
return False
errors = util.is_valid_json(feed['feed'], config.FEED_SCHEMA)
if len(errors) > 0:
raise Exception("Invalid json: {}".format(", ".join(errors)))
# get broadcast infos
params = {
'filters': {
'field': 'source',
'op': '=',
'value': feed['feed']['address']
},
'order_by': 'tx_index',
'order_dir': 'DESC',
'limit': 1
}
broadcasts = util.call_jsonrpc_api('get_broadcasts', params)['result']
if len(broadcasts) == 0:
raise Exception("invalid feed address")
complete_feed = {}
complete_feed['fee_fraction_int'] = broadcasts[0]['fee_fraction_int']
complete_feed['source'] = broadcasts[0]['source']
complete_feed['locked'] = broadcasts[0]['locked']
complete_feed['counters'] = get_feed_counters(broadcasts[0]['source'])
complete_feed['info_data'] = sanitize_json_data(feed['feed'])
feed['feed'] = complete_feed
return feed
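# Hypothetical illustration of the argument parse_base64_feed() expects: a base64-encoded
# JSON object with a 'feed' key whose 'address' matches an existing broadcast source. The
# exact field set is governed by config.FEED_SCHEMA; the fields and address below are
# placeholders only.
#
#   payload = {
#       "feed": {
#           "address": "1FeedSourceAddressXXXXXXXXXXXXXXXX",
#           "title": "Example feed",
#           "operator": {"name": "Example operator"},
#           "targets": [{"text": "Outcome A", "value": 1}]
#       }
#   }
#   base64_feed = base64.b64encode(json.dumps(payload).encode()).decode()
#   parse_base64_feed(base64_feed)  # returns the feed merged with broadcast/counter info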
@MessageProcessor.subscribe(priority=BETTING_PRIORITY_PARSE_BROADCAST)
def parse_broadcast(msg, msg_data):
if msg['category'] != 'broadcasts':
return
save = False
feed = config.mongo_db.feeds.find_one({'source': msg_data['source']})
if util.is_valid_url(msg_data['text'], allow_no_protocol=True) and msg_data['value'] == -1.0:
if feed is None:
feed = {}
feed['source'] = msg_data['source']
feed['info_url'] = msg_data['text']
feed['info_status'] = 'needfetch' # needfetch, valid (included in CW feed directory), invalid, error
feed['fetch_info_retry'] = 0 # retry FEED_MAX_RETRY times to fetch info from info_url
feed['info_data'] = {}
feed['fee_fraction_int'] = msg_data['fee_fraction_int']
feed['locked'] = False
feed['last_broadcast'] = {}
feed['errors'] = []
save = True
if feed['info_url'].startswith('ipfs://'):
ipfs.watch_feed(feed['info_url'])
elif feed is not None:
if msg_data['locked']:
feed['locked'] = True
else:
feed['last_broadcast'] = {
'text': msg_data['text'],
'value': msg_data['value']
}
feed['fee_fraction_int'] = msg_data['fee_fraction_int']
save = True
if save:
config.mongo_db.feeds.save(feed)
return save
def task_compile_extended_feed_info():
feeds = list(config.mongo_db.feeds.find({'info_status': 'needfetch'}))
feed_info_urls = []
def inc_fetch_retry(feed, max_retry=FEED_MAX_RETRY, new_status='error', errors=[]):
feed['fetch_info_retry'] += 1
feed['errors'] = errors
if feed['fetch_info_retry'] == max_retry:
feed['info_status'] = new_status
config.mongo_db.feeds.save(feed)
def process_feed_info(feed, info_data):
# sanity check
assert feed['info_status'] == 'needfetch'
assert 'info_url' in feed
assert util.is_valid_url(feed['info_url'], allow_no_protocol=True) # already validated in the fetch
errors = util.is_valid_json(info_data, config.FEED_SCHEMA)
if not isinstance(info_data, dict) or 'address' not in info_data:
errors.append('Invalid data format')
elif feed['source'] != info_data['address']:
errors.append('Invalid address')
if len(errors) > 0:
inc_fetch_retry(feed, new_status='invalid', errors=errors)
if feed['info_url'].startswith('ipfs://'):
ipfs.invalidate_hash(feed['info_url'])
return (False, errors)
feed['info_status'] = 'valid'
# fetch any associated images...
# TODO: parallelize this 2nd level feed image fetching ... (e.g. just compose a list here, and process it in later on)
if 'image' in info_data:
info_data['valid_image'] = util.fetch_image(
info_data['image'],
config.SUBDIR_FEED_IMAGES, feed['source'] + '_topic', fetch_timeout=5)
if 'operator' in info_data and 'image' in info_data['operator']:
info_data['operator']['valid_image'] = util.fetch_image(
info_data['operator']['image'],
config.SUBDIR_FEED_IMAGES, feed['source'] + '_owner', fetch_timeout=5)
if 'targets' in info_data:
for i in range(len(info_data['targets'])):
if 'image' in info_data['targets'][i]:
image_name = feed['source'] + '_tv_' + str(info_data['targets'][i]['value'])
info_data['targets'][i]['valid_image'] = util.fetch_image(
info_data['targets'][i]['image'], config.SUBDIR_FEED_IMAGES, image_name, fetch_timeout=5)
feed['info_data'] = sanitize_json_data(info_data)
config.mongo_db.feeds.save(feed)
return (True, None)
def feed_fetch_complete_hook(urls_data):
logger.info("Enhanced feed info fetching complete. %s unique URLs fetched. Processing...", len(urls_data))
feeds = config.mongo_db.feeds.find({'info_status': 'needfetch'})
for feed in feeds:
#logger.debug("Looking at feed %s: %s" % (feed, feed['info_url']))
if feed['info_url']:
info_url = util.normalize_content_url(feed['info_url'])
if info_url not in urls_data:
logger.warning("URL %s not properly fetched (not one of %i entries in urls_data), skipping...", info_url, len(urls_data))
continue
assert info_url in urls_data
if not urls_data[info_url][0]: # request was not successful
inc_fetch_retry(feed, max_retry=FEED_MAX_RETRY, errors=[urls_data[info_url][1]])
logger.warning("Fetch for feed at %s not successful: %s (try %i of %i)",
info_url, urls_data[info_url][1], feed['fetch_info_retry'], FEED_MAX_RETRY)
else:
result = process_feed_info(feed, urls_data[info_url][1])
if not result[0]:
logger.info("Processing for feed at %s not successful: %s", info_url, result[1])
else:
logger.info("Processing for feed at %s successful", info_url)
# compose and fetch all info URLs in all feeds with them
for feed in feeds:
assert feed['info_url']
feed_info_urls.append(util.normalize_content_url(feed['info_url']))
feed_info_urls_str = ', '.join(feed_info_urls)
feed_info_urls_str = (feed_info_urls_str[:2000] + ' ...') if len(feed_info_urls_str) > 2000 else feed_info_urls_str # truncate if necessary
if len(feed_info_urls):
logger.info('Fetching enhanced feed info for %i feeds: %s', len(feed_info_urls), feed_info_urls_str)
util.stream_fetch(
feed_info_urls, feed_fetch_complete_hook,
fetch_timeout=10, max_fetch_size=4 * 1024, urls_group_size=20, urls_group_time_spacing=20,
per_request_complete_callback=lambda url, data: logger.debug("Feed at %s retrieved, result: %s", url, data))
start_task(task_compile_extended_feed_info, delay=60 * 5) # call again in 5 minutes
@StartUpProcessor.subscribe()
def init():
# init db and indexes
# feeds (also init in enhanced_asset_info module)
config.mongo_db.feeds.ensure_index('source')
config.mongo_db.feeds.ensure_index('owner')
config.mongo_db.feeds.ensure_index('category')
config.mongo_db.feeds.ensure_index('info_url')
@CaughtUpProcessor.subscribe()
def start_tasks():
start_task(task_compile_extended_feed_info)
@RollbackProcessor.subscribe()
def process_rollback(max_block_index):
if not max_block_index: # full reparse
pass
else: # rollback
pass
|