{
"source": "jdrtommey/expvis",
"score": 3
}
|
#### File: expvis/app/get_data.py
```python
import os
import datetime
from e11 import H5Scan
def get_files(directory, year, month, day):
    """
    Given a base data directory, return a sorted list of [run_id, dataframe] pairs for
    every run folder matching the given date.
    """
    runs = []
    folder = os.path.join(directory, year, month, day)
    for subdir, dirs, files in os.walk(folder):
        if year + month + day in subdir:
            # run folders are named like '<date>_<run_id>'
            run_id = subdir.split("_")[1]
            df = H5Scan(os.path.join(subdir, files[0])).df('analysis')
            runs.append([run_id, df])
    runs.sort()
    return runs
## Build the layout for selecting data
def date_to_file(date):
    """
    given a datetime.date, produce the [year, month, day] strings used to access the
    data folders; months and days are zero-padded to two digits.
    """
    return [str(date.year), str(date.month).zfill(2), str(date.day).zfill(2)]
```
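A minimal usage sketch of the two helpers above. The module import path, the data directory, and the availability of the `e11` package (which provides `H5Scan`) are assumptions for illustration.
```python
import datetime
from get_data import get_files, date_to_file  # assumed import path

# build the zero-padded folder components for a given date ...
year, month, day = date_to_file(datetime.date(2020, 3, 7))  # ['2020', '03', '07']

# ... then collect every run recorded on that day (requires the e11 package)
runs = get_files('/path/to/data', year, month, day)
for run_id, df in runs:
    print(run_id, len(df))
```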
#### File: expvis/app/manipulate.py
```python
from bokeh.layouts import row, widgetbox, column, layout
from bokeh.models import Button, ColumnDataSource, TextInput
from bokeh.palettes import viridis
from bokeh.plotting import figure, curdoc
from bokeh.models.widgets import DatePicker, TableColumn, DataTable, RadioButtonGroup, Tabs, Panel, Slider
import numpy as np
class Manipulator:
def __init__(self,data_x,data_y,name,p):
self.name = name
self.figure = p
self.data_x = np.asarray(data_x)
self.data_y = np.asarray(data_y)
self.mani_data_x = self.data_x
self.mani_data_y = self.data_y
self.cds = ColumnDataSource(dict(x=self.mani_data_x,y=self.mani_data_y))
self.y_scale = TextInput(value='1',title="Y scale")
self.x_scale = TextInput(value='1',title="X scale")
self.y_offset = TextInput(value='0',title="Y offset")
self.x_offset = TextInput(value='0',title="X offset")
self.y_scale.on_change('value',self._on_change)
self.x_scale.on_change('value',self._on_change)
self.y_offset.on_change('value',self._on_change)
self.x_offset.on_change('value',self._on_change)
rolling_num = len(self.data_x)//10
if rolling_num ==0:
rolling_num=1
self.rolling_average = Slider(start=0,end=rolling_num,value=0,title='rolling average')
self.rolling_average.on_change('value',self._on_change)
        self.plot_line()
    def show(self):
        """Return the widget column used to control this trace."""
        return widgetbox(children=[self.x_scale, self.x_offset, self.y_scale,
                                   self.y_offset, self.rolling_average], width=200)
    def update_data(self, data_x, data_y, name=None):
        """Swap in a new data source and reset the manipulation widgets."""
        self.data_x = np.asarray(data_x)
        self.data_y = np.asarray(data_y)
        self.cds.data = dict(x=self.data_x, y=self.data_y)
        if name is not None:
            self.name = name
        self._reset()
    def _reset(self):
        """
        restore the widgets to their default settings when the data source is changed
        """
        rolling_num = len(self.data_x) // 10
        if rolling_num == 0:
            rolling_num = 1
        self.rolling_average.end = rolling_num
        self.rolling_average.value = 0
        self.y_scale.value = '1'
        self.x_scale.value = '1'
        self.x_offset.value = '0'
        self.y_offset.value = '0'
    def _on_change(self, attr, old, new):
        # scale/offset fields are evaluated with eval() so simple expressions such as '1e-3' work
        self.mani_data_y = self.data_y * eval(self.y_scale.value) + eval(self.y_offset.value)
        self.mani_data_x = self.data_x * eval(self.x_scale.value) + eval(self.x_offset.value)
        self.mani_data_y = runningMeanFast(self.mani_data_y, self.rolling_average.value)
        self.cds.data = dict(x=self.mani_data_x, y=self.mani_data_y)
def plot_line(self):
self.figure.line(x='x',y='y',source=self.cds,legend=self.name)
def runningMeanFast(x, N):
    """Simple running mean over a window of N points (N = 0 returns the data unchanged)."""
    if N > 0:
        return np.convolve(x, np.ones((N,)) / N)[(N - 1):]
    else:
        return x
#import random
#x_vals = np.linspace(0,6,5000)
#y_vals=[]
#for x in x_vals:
# y_vals.append(np.sin(x + random.uniform(-0.1,0.1)))
#x_vals2 = np.linspace(0,6,5000)
#y_vals2=[]
#for x in x_vals2:
# y_vals2.append(np.cos(x + random.uniform(-0.1,0.1)))
#
#p = figure(width=600, height=600)
##my = Manipulator(x_vals,y_vals,"001",p)
#my2 = Manipulator(x_vals2,y_vals2,"002",p)
#my_lines=[my,my2]
#my_panels = []
#for line in my_lines:
# my_panels.append(Panel(child=line.show(),title=line.name))
#manipulate_tabs=Tabs(tabs=my_panels)
#p.legend.click_policy= 'hide'
#curdoc().add_root(row(p,manipulate_tabs))
```
|
{
"source": "jdrtommey/rydprops",
"score": 3
}
|
#### File: adiabatic_solver/plugins/param_guesser.py
```python
from scipy.sparse.linalg import eigs
from numpy.linalg import eigh
import numpy as np
class Param_gen:
    def __init__(self, method, p_max, num_iters, max_fails=5, min_states=1):
        """
        Determines what the next parameter value should be.
        Parameters
        ----------
        method: string
            choose which stepping algorithm to apply
        """
        self.available_methods = {'constant': self.constant_step, 'variable': self.variable_step}
        try:
            self.func = self.available_methods[method]
        except KeyError:
            raise KeyError(method + " is not an available method, current dictionary of available methods: " + str(self.available_methods.keys()))
        self.min_states = min_states
        self.p_max = p_max
        self.step = p_max / num_iters
        self.max_fails = max_fails
        self.current_fails = 0
def __call__(self,state_list,param):
param = self.func(state_list,param)
return param
def constant_step(self,state_list,param):
return param + self.step
    def variable_step(self, state_list, param):
        """
        If it fails to find an adiabatic state it halves the step size and logs the failure.
        If it fails self.max_fails times in a row it raises an error.
        """
        if len(state_list) < self.min_states:
            print(len(state_list), self.step)  # log the failed match and current step size
            self.step = self.step / 2
            param = param - self.step
            self.current_fails = self.current_fails + 1
            if self.current_fails == self.max_fails:
                raise RuntimeError("reached maximum number of failed attempts " + str(self.max_fails))
        else:
            param = param + self.step
            self.current_fails = 0
        return param
```
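A short sketch of driving the parameter generator above with the 'constant' method; the import path is assumed, and the state list is ignored by this method so an empty list is passed purely for illustration.
```python
from param_guesser import Param_gen  # assumed import path

# step from 0 towards p_max = 10.0 in 100 equal increments of 0.1
next_param = Param_gen('constant', p_max=10.0, num_iters=100)

param = 0.0
for _ in range(5):
    param = next_param([], param)  # state_list is not used by 'constant'
print(param)  # approximately 0.5 after five steps
```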
#### File: adiabatic_solver/plugins/state_matcher.py
```python
from scipy.sparse.linalg import eigs
from numpy.linalg import eigh
import numpy as np
from numba import jit
class Matcher:
    def __init__(self, method):
        """
        Class which, given a set of eigenvalues and eigenvectors, matches these to the correct
        adiabatic states. Contains a set of default methods.
        General idea: select an algorithm; this compares each eigenvalue/vector against all the
        adiabatic states and, if it finds a match, adds that val/vec to the adiabatic state.
        A length checker then looks at the lengths of the adiabatic states and puts each one
        either in a successful list, if it has acquired an additional state, or in a failed list if not.
        Parameters
        ----------
        method: string
            choose which matching algorithm to apply
        """
        self.available_methods = {'vec': vector_algorithim, 'basic': basic_algorithim, 'energy': energy_algorithim,
                                  'circ': circular_algorithim}  # maintains a list of the current methods which have been written
        try:
            self.func = self.available_methods[method]
        except KeyError:
            raise KeyError("Not an available method, current dictionary of available methods: " + str(self.available_methods.keys()))
    def __call__(self, state_list, vals, vecs, param):
        current_length = current_length_check(state_list)  # at the start of the process check the length of the adiabatic states
        state_list = self.func(state_list, vals, vecs, param)  # compute the changes to the adiabatic states
        success, fail = length_checker(state_list, current_length)  # check which ones were successfully paired
        return success, fail
def current_length_check(state_list):
    """
    takes a list of adiabatic states and confirms their current common length
    """
    length = state_list[0].get_length()
    for state in state_list:
        if state.get_length() != length:
            raise RuntimeError("mismatch in adiabatic state length, internal error")
    return length
def length_checker(state_list, current_length):
    """
    checks the length of every adiabatic state. returns two lists:
    the successful and the failed matchings
    """
    successful_list = []
    failed_list = []
    for state in state_list:
        if state.get_length() == current_length + 1:
            successful_list.append(state)
        else:
            failed_list.append(state)
    return successful_list, failed_list
#algorithm functions
#########################
#                       #
#   vector algorithm    #
#                       #
#########################
@jit
def vector_algorithim(state_list, vals, vecs, param, x=0.01):
    """
    For each state in the current set of adiabatic states, computes a range of x% around the current
    eigenenergy and around the coefficient of its initial state, then searches all the eigenvalues and
    eigenvectors stored in vals and vecs. If it finds a state which is within the bounds of both value
    and coefficient it adds it to a candidate list. The first candidate is added to the adiabatic state
    and its index is placed in a taken list to stop it being compared to future states.
    """
taken_list = [] #stores the index of vals which have been assigned
for state in state_list:
candidate_list =[]
predicted_energy = state.get_current_value()
upperbound_energy = predicted_energy * (1+x)
lowerbound_energy = predicted_energy * (1-x)
energy_range = [lowerbound_energy,upperbound_energy]
predicted_coeff = state.get_current_coefficient()
upperbound_coeff = abs(predicted_coeff) * (1+x)
lowerbound_coeff = abs(predicted_coeff) * (1-x)
coeff_range = [upperbound_coeff,lowerbound_coeff]
for i,val in enumerate(vals):
if i not in taken_list:
vec_coeff = abs(vecs[state.index,i])
if val < np.max(energy_range) and val > np.min(energy_range):
if vec_coeff < np.max(coeff_range) and vec_coeff > np.min(coeff_range):
candidate_list.append(i)
if len(candidate_list) == 1:
vec_index = candidate_list[0]
state.add(vals[vec_index],vecs[:,vec_index],param)
taken_list.append(vec_index)
elif len(candidate_list) > 1:
vec_index = candidate_list[0]
state.add(vals[vec_index],vecs[:,vec_index],param)
taken_list.append(vec_index)
return state_list
#########################
#                       #
#    basic algorithm    #
#                       #
#########################
def basic_algorithim(state_list, vals, vecs, param):
    """
    Simply assigns the i-th eigenvalue/eigenvector to the i-th adiabatic state in the list.
    Stops when it runs out of vectors; only really useful when computing the dense space and
    not concerned about exact crossings.
    """
    #loop through and append it to the next in state_list
    for i, state in enumerate(state_list):
        try:
            state_list[i].add(vals[i], vecs[:, i], param)
        except IndexError:
            raise IndexError("Index error assigning eigenvalue index " + str(i) + " to adiabatic state")
    return state_list
#########################
#                       #
#   energy algorithm    #
#                       #
#########################
@jit
def energy_algorithim(state_list, vals, vecs, param, x=0.05):
    """
    For each state in the current set of adiabatic states, computes a range of x% around the current
    eigenenergy and searches all the eigenvalues stored in vals. Any eigenvalue within the bounds is
    added to a candidate list; the first candidate is added to the adiabatic state and its index is
    placed in a taken list to stop it being compared to future states.
    """
taken_list = [] #stores the index of vals which have been assigned
for state in state_list:
candidate_list =[]
predicted_energy = state.get_current_value()
upperbound_energy = predicted_energy * (1+x)
lowerbound_energy = predicted_energy * (1-x)
energy_range = [lowerbound_energy,upperbound_energy]
for i,val in enumerate(vals):
if i not in taken_list:
vec_coeff = abs(vecs[state.index,i])
if val < np.max(energy_range) and val > np.min(energy_range):
candidate_list.append(i)
if len(candidate_list) > 0:
vec_index = candidate_list[0]
state.add(vals[vec_index],vecs[:,vec_index],param)
taken_list.append(vec_index)
return state_list
#########################
#                       #
#  circular algorithm   #
#                       #
#########################
@jit
def circular_algorithim(state_list, vals, vecs, param):
    """
    Algorithm specifically written to find circular states, where the
    s state above is also being calculated.
    Assumes the eigenstates, sorted by eigenvalue, always map onto the adiabatic states in order.
    """
    idx = vals.argsort()  # first sort the returned eigenvalues from smallest to largest
    eigenValues = vals[idx]
    eigenVectors = vecs[:, idx]
    for i, state in enumerate(state_list):
        state.add(eigenValues[i], eigenVectors[:, i], param)
    return state_list
```
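A hedged sketch of driving the Matcher with the 'basic' algorithm. `AdiabaticState` is a hypothetical stand-in for the real adiabatic-state class, exposing only the `index`, `add()` and `get_length()` members the matcher relies on; the module itself needs numba installed to import.
```python
import numpy as np
from state_matcher import Matcher  # assumed import path

class AdiabaticState:
    """Hypothetical minimal container with the interface the matcher expects."""
    def __init__(self, index):
        self.index = index
        self.values, self.vectors, self.params = [], [], []
    def add(self, val, vec, param):
        self.values.append(val)
        self.vectors.append(vec)
        self.params.append(param)
    def get_length(self):
        return len(self.values)

states = [AdiabaticState(i) for i in range(3)]
vals = np.array([0.1, 0.2, 0.3])
vecs = np.eye(3)

match = Matcher('basic')  # assign the i-th eigenpair to the i-th state
success, fail = match(states, vals, vecs, param=0.0)
print(len(success), len(fail))  # 3 0
```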
#### File: adiabatic_solver/tests/test_diag_engine.py
```python
from ..plugins.diag_engine import Diagonaliser
import pytest
import numpy as np
from scipy.sparse import csr_matrix,isspmatrix,coo_matrix
def test_sparse_logic_numpy():
"""
correctly determines if needs to create sparse or dense matrices, given a numpy input
"""
return_vecs = False
num_eig = 3
h0 = np.random.rand(3,3)
h1 = np.random.rand(3,3)
my_diag = Diagonaliser(return_vecs,num_eig,h0,h1)
assert my_diag.sparse == False
def test_sparse_logic_csr():
"""
correctly determines if needs to create sparse or dense matrices, given a numpy input
"""
return_vecs = False
num_eig = 3
h0 = csr_matrix(np.random.rand(3,3))
h1 = csr_matrix(np.random.rand(3,3))
my_diag = Diagonaliser(return_vecs,num_eig,h0,h1)
assert my_diag.sparse == False
def test_sparse_logic_numpy_true():
"""
correctly determines if needs to create sparse or dense matrices, given a numpy input
"""
return_vecs = False
num_eig = 1
h0 = np.random.rand(3,3)
h1 = np.random.rand(3,3)
my_diag = Diagonaliser(return_vecs,num_eig,h0,h1)
assert my_diag.sparse == True
def test_sparse_logic_csr_true():
"""
correctly determines if needs to create sparse or dense matrices, given a numpy input
"""
return_vecs = False
num_eig = 1
h0 = csr_matrix(np.random.rand(3,3))
h1 = csr_matrix(np.random.rand(3,3))
my_diag = Diagonaliser(return_vecs,num_eig,h0,h1)
assert my_diag.sparse == True
def test_matrix_converter_dense_to_sparse():
"""
given dense matrices and a sparse toggle will convert to csr_matrices
"""
return_vecs = False
h0 = np.zeros((3,3))
h1= np.zeros((3,3))
num_eig = 1
my_diag = Diagonaliser(return_vecs,num_eig,h0,h1)
    assert type(my_diag.h0) == csr_matrix and type(my_diag.h1) == csr_matrix
def test_matrix_converter_dense_to_dense():
    """
    given dense matrices and a dense toggle will keep them as numpy arrays
    """
    return_vecs = False
    h0 = np.zeros((3,3))
    h1 = np.zeros((3,3))
    num_eig = 3
    my_diag = Diagonaliser(return_vecs,num_eig,h0,h1)
    assert type(my_diag.h0) == np.ndarray and type(my_diag.h1) == np.ndarray
def test_matrix_converter_sparse_to_dense():
    """
    given sparse matrices and a dense toggle will convert to numpy arrays
    """
    return_vecs = False
    h0 = csr_matrix(np.zeros((3,3)))
    h1 = csr_matrix(np.zeros((3,3)))
    num_eig = 3
    my_diag = Diagonaliser(return_vecs,num_eig,h0,h1)
    assert type(my_diag.h0) == np.ndarray and type(my_diag.h1) == np.ndarray
def test_matrix_converter_sparse_to_sparse():
    """
    given sparse matrices and a sparse toggle will keep them as csr_matrices
    """
    return_vecs = False
    h0 = csr_matrix(np.zeros((3,3)))
    h1 = csr_matrix(np.zeros((3,3)))
    num_eig = 1
    my_diag = Diagonaliser(return_vecs,num_eig,h0,h1)
    assert type(my_diag.h0) == csr_matrix and type(my_diag.h1) == csr_matrix
def test_matrix_converter_coo_to_sparse():
"""
matrices generated as coo will be accepted
"""
return_vecs = False
h0 = coo_matrix(np.zeros((3,3)))
h1= coo_matrix(np.zeros((3,3)))
num_eig = 1
my_diag = Diagonaliser(return_vecs,num_eig,h0,h1)
    assert type(my_diag.h0) == csr_matrix and type(my_diag.h1) == csr_matrix
```
#### File: rydprops/rydprop/space.py
```python
from .state import State_nlm
from scipy.sparse import coo_matrix,csr_matrix,diags
import numpy as np
class Space:
    def __init__(self, atom):
        """
        Wrapper around a python list for use with state objects. The list can only hold a single
        type of object. index() returns an index if found and None otherwise. Only a single
        instance of a state can be added to the space, i.e. n=10, l=0, ml=0 can only be added once.
        """
        self.atom = atom
        self.basis_type = self.atom.basis
        self.states = []
def __len__(self):
return len(self.states)
    def __getitem__(self, key):
        try:
            return self.states[key]
        except IndexError:
            raise IndexError("Index " + str(key) + " could not be found in space of dimension " + str(len(self)))
def __iter__(self):
return self.states.__iter__()
    def append(self, state):
        """
        Try to add a state to the list; checks that the state has the right basis type
        and is not already present.
        """
        if type(state) == self.basis_type:
            if self.index(state) is None:
                self.states.append(state)
            else:
                raise ValueError("Cannot add duplicate states to space.")
        else:
            raise TypeError("Cannot add state of basis type " + str(type(state)) + " to space of basis " + str(self.basis_type))
    def index(self, test_state):
        """
        locate the index of a state in the list. loops through the states and checks if the state has a match
        """
        try:
            return self.states.index(test_state)
        except ValueError:
            return None
def H0(self):
"""
returns the field-free hamiltonian for the space.
"""
energies = []
for state in self.states:
energies.append(self.atom.energy(state))
return diags(energies,format='coo')
```
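A small sketch of building a Space; `DummyAtom` is a hypothetical atom object exposing the two members the class relies on (`basis` and `energy()`), and the import paths are assumptions.
```python
from rydprop.state import State_nlm  # assumed import paths
from rydprop.space import Space

class DummyAtom:
    """Hypothetical atom: hydrogenic energies in the |n,l,ml> basis."""
    basis = State_nlm
    def energy(self, state):
        return -0.5 / state.n ** 2  # atomic units

space = Space(DummyAtom())
space.append(State_nlm(10, 0, 0))
space.append(State_nlm(10, 1, 0))
print(len(space))             # 2
print(space.H0().todense())   # 2x2 diagonal field-free Hamiltonian
```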
#### File: rydprops/rydprop/state.py
```python
default_basis = 'nlm'
class State_nlm:
    def __init__(self, n, l, ml):
"""
class which holds quantum numbers in the |n,l,ml> basis.
"""
self._n = n
        if l >= 0 and l < n:
            self._l = l
        else:
            raise ValueError("l must satisfy 0 <= l < n. provided l = " + str(l) + ", provided n = " + str(n))
        if abs(ml) <= l:
            self._ml = ml
        else:
            raise ValueError("ml must be between -l and l. provided ml = " + str(ml) + ", provided l = " + str(l))
@property
def n(self):
return self._n
@property
def l(self):
return self._l
@property
def ml(self):
return self._ml
def __members(self):
return (self._n,self._l,self._ml)
def __eq__(self,other_state):
"""
checks if another state_nml is equal to this state or not.
"""
if type(self) == type(other_state):
return self.__members() == other_state.__members()
else:
return False
class State_nlj:
    def __init__(self, n, l, j, mj):
        """
        class which holds quantum numbers in the |n,l,j,mj> basis.
        """
        self._n = n
        if l >= 0 and l < n:
            self._l = l
        else:
            raise ValueError("l must satisfy 0 <= l < n. provided l = " + str(l) + ", provided n = " + str(n))
        if j in [l - 0.5, l + 0.5]:
            self._j = j
        else:
            raise ValueError("j must be either l-0.5 or l+0.5. provided j = " + str(j) + ", provided l = " + str(l))
        if abs(mj) <= j:
            self._mj = mj
        else:
            raise ValueError("mj must be between -j and j. provided j = " + str(j) + ", provided mj = " + str(mj))
@property
def n(self):
return self._n
@property
def l(self):
return self._l
@property
def j(self):
return self._j
@property
def mj(self):
return self._mj
    def __members(self):
        return (self._n, self._l, self._j, self._mj)
def __eq__(self,other_state):
"""
checks if another state_nlj is equal to this state or not.
"""
if type(self) == type(other_state):
return self.__members() == other_state.__members()
else:
return False
basis_options = dict({'nlm':State_nlm,'nlj':State_nlj})
```
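A quick sketch of the state classes above; the import path is an assumption.
```python
from rydprop.state import State_nlm, basis_options  # assumed import path

s1 = State_nlm(10, 2, -1)
s2 = State_nlm(10, 2, -1)
print(s1 == s2)           # True: equality compares (n, l, ml)
print(s1.n, s1.l, s1.ml)  # 10 2 -1

Basis = basis_options['nlj']  # State_nlj
s3 = Basis(10, 1, 1.5, -0.5)
print(s3 == s1)           # False: states of different bases never compare equal
```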
|
{
"source": "jdr-tools/rulesets",
"score": 2
}
|
#### File: rulesets/routes/update.py
```python
from bson.objectid import ObjectId
from flask import jsonify, request, g
from ..models import Ruleset, Session
from .blueprints import rulesets_blueprint
@rulesets_blueprint.route('/<ruleset_id>', methods = ['PUT'])
def update(ruleset_id) -> tuple:
if 'title' in request.json:
g.ruleset.title = request.json.get('title')
if 'description' in request.json:
g.ruleset.description = request.json.get('description')
g.ruleset.save()
return jsonify(
message = 'updated',
item = {
'title': g.ruleset.title,
'description': g.ruleset.description
}
)
```
#### File: rulesets/tests/test_destroy.py
```python
import pytest, pdb
from bson.objectid import ObjectId
from rulesets.models import Account, Ruleset, Session
from tests.fixtures import client
class DestroyRequestable():
"""This class is just a superclass to easily make requests to delete rulesets."""
@pytest.fixture
def delete(self, client):
url = f"/rulesets/{str(self.id)}"
query_string = {'session_id': pytest.session.token}
return lambda: client.delete(url, query_string = query_string)
@pytest.mark.describe('Destroy - nominal case')
class TestDestroyNominalCase(DestroyRequestable):
def setup_method(self):
self.ruleset = Ruleset.objects.create(title = 'test title', description = 'test description')
self.id = self.ruleset._id
def teardown_method(self):
Ruleset.objects.delete()
@pytest.mark.it('Returns a 200 (OK) Status code')
def test_status_code(self, delete):
assert delete().status_code == 200
@pytest.mark.it('Returns the correct body')
def test_message(self, delete):
assert delete().get_json() == {'message': 'deleted'}
@pytest.mark.it('Correctly deletes the ruleset from the database')
def test_deletion(self, delete):
response = delete()
assert Ruleset.objects.raw({'_id': self.ruleset._id}).count() == 0
@pytest.mark.describe('Destroy without giving session ID')
class TestDestroyWithoutSessionId():
@pytest.mark.it('Returns a 400 (Bad Request) status code')
def test_status_code(self, client):
assert client.delete('/rulesets/test').status_code == 400
@pytest.mark.it('Returns the correct error body')
def test_response_body(self, client):
response_body = client.delete('/rulesets/test').get_json()
assert response_body == {
'status': 400,
'field': 'session_id',
'error': 'required'
}
@pytest.mark.describe('Destroy with empty session ID')
class TestDestroyWithEmptySessionId():
@pytest.mark.it('Returns a 400 (Bad Request) status code')
def test_status_code(self, client):
response = client.delete('/rulesets/test', query_string={'session_id': None})
assert response.status_code == 400
@pytest.mark.it('Returns the correct error body')
def test_response_body(self, client):
response = client.delete('/rulesets/test', query_string={'session_id': None})
assert response.get_json() == {
'status': 400,
'field': 'session_id',
'error': 'required'
}
@pytest.mark.describe('Destroy with unknown session ID')
class TestDestroyWithUnknownSessionId():
@pytest.mark.it('Returns a 404 (Not Found) status code')
def test_status_code(self, client):
response = client.delete('/rulesets/test', query_string={'session_id': str(ObjectId())})
assert response.status_code == 404
@pytest.mark.it('Returns the correct error body')
def test_response_body(self, client):
response = client.delete('/rulesets/test', query_string={'session_id': str(ObjectId())})
assert response.get_json() == {
'status': 404,
'field': 'session_id',
'error': 'unknown'
}
@pytest.mark.describe('Destroy with unknown ruleset ID')
class TestDestroyWithUnknowId(DestroyRequestable):
@classmethod
def setup_class(self):
self.id = ObjectId()
@pytest.mark.it('Returns a 404 (Not Found) status code')
def test_status_code(self, delete):
assert delete().status_code == 404
@pytest.mark.it('Returns the correct body')
def test_response_body(self, delete):
assert delete().get_json() == {
'status': 404,
'field': 'ruleset_id',
'error': 'unknown'
}
```
|
{
"source": "jdrudolph/enrich",
"score": 3
}
|
#### File: enrich/goenrich/obo.py
```python
import itertools
import networkx as nx
def _tokenize(f):
token = []
for line in f:
if line == '\n':
yield token
token = []
else:
token.append(line)
def _filter_terms(tokens):
for token in tokens:
if token[0] == '[Term]\n':
yield token[1:]
def _parse_terms(terms):
for term in terms:
obsolete = False
node = {}
parents = []
for line in term:
if line.startswith('id:'):
id = line[4:-1]
elif line.startswith('name:'):
node['name'] = line[6:-1]
elif line.startswith('namespace:'):
node['namespace'] = line[11:-1]
elif line.startswith('is_a:'):
parents.append(line[6:16])
elif line.startswith('relationship: part_of'):
parents.append(line[22:32])
elif line.startswith('is_obsolete'):
obsolete = True
break
if not obsolete:
edges = [(p, id) for p in parents] # will reverse edges later
yield (id, node), edges
else:
continue
_filename = 'db/go-basic.obo'
def ontology(file):
    """ read ontology from file
    :param file: file path or file handle
    """
O = nx.DiGraph()
if isinstance(file, str):
f = open(file)
we_opened_file = True
else:
f = file
we_opened_file = False
try:
tokens = _tokenize(f)
terms = _filter_terms(tokens)
entries = _parse_terms(terms)
nodes, edges = zip(*entries)
O.add_nodes_from(nodes)
O.add_edges_from(itertools.chain.from_iterable(edges))
O.graph['roots'] = {data['name'] : n for n, data in O.nodes.items()
if data['name'] == data['namespace']}
finally:
if we_opened_file:
f.close()
for root in O.graph['roots'].values():
for n, depth in nx.shortest_path_length(O, root).items():
node = O.nodes[n]
node['depth'] = min(depth, node.get('depth', float('inf')))
return O.reverse()
```
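A self-contained sketch of parsing a toy OBO snippet with the reader above (assuming the goenrich package is importable); the two terms are illustrative, not real Gene Ontology content.
```python
import io
import goenrich.obo

obo_text = (
    "format-version: 1.2\n"
    "\n"
    "[Term]\n"
    "id: GO:0008150\n"
    "name: biological_process\n"
    "namespace: biological_process\n"
    "\n"
    "[Term]\n"
    "id: GO:0009987\n"
    "name: cellular process\n"
    "namespace: biological_process\n"
    "is_a: GO:0008150 ! biological_process\n"
    "\n"  # trailing blank line so the tokenizer emits the final stanza
)
O = goenrich.obo.ontology(io.StringIO(obo_text))
print(O.graph['roots'])                # {'biological_process': 'GO:0008150'}
print(O.nodes['GO:0009987']['depth'])  # 1
```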
#### File: goenrich/tests/test_propagation.py
```python
import unittest
import networkx as nx
import pandas as pd
import goenrich
from goenrich.enrich import propagate
class TestPropagationExample(unittest.TestCase):
def test_correctness_on_small_example(self):
r""" Example graph
r
/ \
c1 c2
\ / \
\ / \
c3 c4
"""
O = nx.DiGraph([('c4', 'c2'), ('c3', 'c1'), ('c3', 'c2'),
('c1', 'r'), ('c2', 'r')])
r = set([6])
c1 = set([])
c2 = set([4,5])
c3 = set([1,2,3])
c4 = set([0])
x = { 'r' : r, 'c1' : c1, 'c2' : c2, 'c3' : c3, 'c4' : c4 }
b = 'background'
propagate(O, x, b)
self.assertSetEqual(O.nodes['c3'][b], c3)
self.assertSetEqual(O.nodes['c2'][b], c4 | c3 | c2)
self.assertSetEqual(O.nodes['c1'][b], c3 | c1)
self.assertSetEqual(O.nodes['r'][b], c4 | c3 | c2 | c1 | r)
class TestPropagationReal(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestPropagationReal, self).__init__(*args, **kwargs)
self.gene2go = goenrich.read.gene2go('db/gene2go.gz')
self.O = goenrich.obo.ontology('db/go-basic.obo')
def test_on_gene2go_head(self):
test = self.gene2go.head(100)
values = {k: set(v) for k,v in test.groupby('GO_ID')['GeneID']}
propagate(self.O, values, 'head')
def test_if_runs_trough_on_real_data(self):
values = {k: set(v) for k,v in self.gene2go.groupby('GO_ID')['GeneID']}
propagate(self.O, values, 'real')
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jdrudolph/networkx",
"score": 4
}
|
#### File: examples/subclass/printgraph.py
```python
__author__ = """<NAME> (<EMAIL>)"""
# Copyright (C) 2004-2015 by
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# All rights reserved.
# BSD license.
#
__docformat__ = "restructuredtext en"
from networkx import Graph
from networkx.exception import NetworkXException, NetworkXError
import networkx.convert as convert
from copy import deepcopy
class PrintGraph(Graph):
"""
Example subclass of the Graph class.
Prints activity log to file or standard output.
"""
def __init__(self, data=None, name='', file=None, **attr):
Graph.__init__(self, data=data,name=name,**attr)
if file is None:
import sys
self.fh=sys.stdout
else:
self.fh=open(file,'w')
def add_node(self, n, attr_dict=None, **attr):
Graph.add_node(self,n,attr_dict=attr_dict,**attr)
self.fh.write("Add node: %s\n"%n)
def add_nodes_from(self, nodes, **attr):
for n in nodes:
self.add_node(n, **attr)
def remove_node(self,n):
Graph.remove_node(self,n)
self.fh.write("Remove node: %s\n"%n)
def remove_nodes_from(self, nodes):
adj = self.adj
for n in nodes:
self.remove_node(n)
def add_edge(self, u, v, attr_dict=None, **attr):
Graph.add_edge(self,u,v,attr_dict=attr_dict,**attr)
self.fh.write("Add edge: %s-%s\n"%(u,v))
def add_edges_from(self, ebunch, attr_dict=None, **attr):
for e in ebunch:
u,v=e[0:2]
self.add_edge(u,v,attr_dict=attr_dict,**attr)
def remove_edge(self, u, v):
Graph.remove_edge(self,u,v)
self.fh.write("Remove edge: %s-%s\n"%(u,v))
def remove_edges_from(self, ebunch):
for e in ebunch:
u,v=e[0:2]
self.remove_edge(u,v)
def clear(self):
self.name = ''
self.adj.clear()
self.node.clear()
self.graph.clear()
self.fh.write("Clear graph\n")
def subgraph(self, nbunch, copy=True):
# subgraph is needed here since it can destroy edges in the
# graph (copy=False) and we want to keep track of all changes.
#
# Also for copy=True Graph() uses dictionary assignment for speed
# Here we use H.add_edge()
bunch =set(self.nbunch_iter(nbunch))
if not copy:
# remove all nodes (and attached edges) not in nbunch
self.remove_nodes_from([n for n in self if n not in bunch])
self.name = "Subgraph of (%s)"%(self.name)
return self
else:
# create new graph and copy subgraph into it
H = self.__class__()
H.name = "Subgraph of (%s)"%(self.name)
# add nodes
H.add_nodes_from(bunch)
# add edges
seen=set()
for u,nbrs in self.adjacency_iter():
if u in bunch:
for v,datadict in nbrs.items():
if v in bunch and v not in seen:
dd=deepcopy(datadict)
H.add_edge(u,v,dd)
seen.add(u)
# copy node and graph attr dicts
H.node=dict( (n,deepcopy(d))
for (n,d) in self.node.items() if n in H)
H.graph=deepcopy(self.graph)
return H
if __name__=='__main__':
G=PrintGraph()
G.add_node('foo')
G.add_nodes_from('bar',weight=8)
G.remove_node('b')
G.remove_nodes_from('ar')
print(G.nodes(data=True))
G.add_edge(0,1,weight=10)
print(G.edges(data=True))
G.remove_edge(0,1)
    G.add_edges_from(list(zip(list(range(3)), list(range(1, 4)))), weight=10)
print(G.edges(data=True))
    G.remove_edges_from(list(zip(list(range(3)), list(range(1, 4)))))
print(G.edges(data=True))
G=PrintGraph()
G.add_path(list(range(10)))
print("subgraph")
H1=G.subgraph(list(range(4)),copy=False)
H2=G.subgraph(list(range(4)),copy=False)
print(H1.edges())
print(H2.edges())
```
#### File: algorithms/flow/mincost.py
```python
__author__ = """<NAME>. <<EMAIL>>"""
# Copyright (C) 2010 <NAME>. <<EMAIL>>
# All rights reserved.
# BSD license.
__all__ = ['min_cost_flow_cost',
'min_cost_flow',
'cost_of_flow',
'max_flow_min_cost']
import networkx as nx
def min_cost_flow_cost(G, demand = 'demand', capacity = 'capacity',
weight = 'weight'):
"""Find the cost of a minimum cost flow satisfying all demands in digraph G.
G is a digraph with edge costs and capacities and in which nodes
have demand, i.e., they want to send or receive some amount of
flow. A negative demand means that the node wants to send flow, a
    positive demand means that the node wants to receive flow. A flow on
the digraph G satisfies all demand if the net flow into each node
is equal to the demand of that node.
Parameters
----------
G : NetworkX graph
DiGraph on which a minimum cost flow satisfying all demands is
to be found.
demand: string
Nodes of the graph G are expected to have an attribute demand
that indicates how much flow a node wants to send (negative
demand) or receive (positive demand). Note that the sum of the
        demands should be 0, otherwise the problem is not feasible. If
this attribute is not present, a node is considered to have 0
demand. Default value: 'demand'.
capacity: string
Edges of the graph G are expected to have an attribute capacity
that indicates how much flow the edge can support. If this
attribute is not present, the edge is considered to have
infinite capacity. Default value: 'capacity'.
weight: string
Edges of the graph G are expected to have an attribute weight
that indicates the cost incurred by sending one unit of flow on
that edge. If not present, the weight is considered to be 0.
Default value: 'weight'.
Returns
-------
flowCost: integer, float
Cost of a minimum cost flow satisfying all demands.
Raises
------
NetworkXError
This exception is raised if the input graph is not directed or
not connected.
NetworkXUnfeasible
This exception is raised in the following situations:
* The sum of the demands is not zero. Then, there is no
flow satisfying all demands.
* There is no flow satisfying all demand.
NetworkXUnbounded
This exception is raised if the digraph G has a cycle of
negative cost and infinite capacity. Then, the cost of a flow
satisfying all demands is unbounded below.
See also
--------
cost_of_flow, max_flow_min_cost, min_cost_flow, network_simplex
Examples
--------
A simple example of a min cost flow problem.
>>> import networkx as nx
>>> G = nx.DiGraph()
>>> G.add_node('a', demand = -5)
>>> G.add_node('d', demand = 5)
>>> G.add_edge('a', 'b', weight = 3, capacity = 4)
>>> G.add_edge('a', 'c', weight = 6, capacity = 10)
>>> G.add_edge('b', 'd', weight = 1, capacity = 9)
>>> G.add_edge('c', 'd', weight = 2, capacity = 5)
>>> flowCost = nx.min_cost_flow_cost(G)
>>> flowCost
24
"""
return nx.network_simplex(G, demand = demand, capacity = capacity,
weight = weight)[0]
def min_cost_flow(G, demand = 'demand', capacity = 'capacity',
weight = 'weight'):
"""Return a minimum cost flow satisfying all demands in digraph G.
G is a digraph with edge costs and capacities and in which nodes
have demand, i.e., they want to send or receive some amount of
flow. A negative demand means that the node wants to send flow, a
    positive demand means that the node wants to receive flow. A flow on
the digraph G satisfies all demand if the net flow into each node
is equal to the demand of that node.
Parameters
----------
G : NetworkX graph
DiGraph on which a minimum cost flow satisfying all demands is
to be found.
demand: string
Nodes of the graph G are expected to have an attribute demand
that indicates how much flow a node wants to send (negative
demand) or receive (positive demand). Note that the sum of the
        demands should be 0, otherwise the problem is not feasible. If
this attribute is not present, a node is considered to have 0
demand. Default value: 'demand'.
capacity: string
Edges of the graph G are expected to have an attribute capacity
that indicates how much flow the edge can support. If this
attribute is not present, the edge is considered to have
infinite capacity. Default value: 'capacity'.
weight: string
Edges of the graph G are expected to have an attribute weight
that indicates the cost incurred by sending one unit of flow on
that edge. If not present, the weight is considered to be 0.
Default value: 'weight'.
Returns
-------
flowDict: dictionary
Dictionary of dictionaries keyed by nodes such that
        flowDict[u][v] is the flow on edge (u, v).
Raises
------
NetworkXError
This exception is raised if the input graph is not directed or
not connected.
NetworkXUnfeasible
This exception is raised in the following situations:
* The sum of the demands is not zero. Then, there is no
flow satisfying all demands.
* There is no flow satisfying all demand.
NetworkXUnbounded
This exception is raised if the digraph G has a cycle of
negative cost and infinite capacity. Then, the cost of a flow
satisfying all demands is unbounded below.
See also
--------
cost_of_flow, max_flow_min_cost, min_cost_flow_cost, network_simplex
Examples
--------
A simple example of a min cost flow problem.
>>> import networkx as nx
>>> G = nx.DiGraph()
>>> G.add_node('a', demand = -5)
>>> G.add_node('d', demand = 5)
>>> G.add_edge('a', 'b', weight = 3, capacity = 4)
>>> G.add_edge('a', 'c', weight = 6, capacity = 10)
>>> G.add_edge('b', 'd', weight = 1, capacity = 9)
>>> G.add_edge('c', 'd', weight = 2, capacity = 5)
>>> flowDict = nx.min_cost_flow(G)
"""
return nx.network_simplex(G, demand = demand, capacity = capacity,
weight = weight)[1]
def cost_of_flow(G, flowDict, weight = 'weight'):
"""Compute the cost of the flow given by flowDict on graph G.
Note that this function does not check for the validity of the
flow flowDict. This function will fail if the graph G and the
flow don't have the same edge set.
Parameters
----------
G : NetworkX graph
DiGraph on which a minimum cost flow satisfying all demands is
to be found.
weight: string
Edges of the graph G are expected to have an attribute weight
that indicates the cost incurred by sending one unit of flow on
that edge. If not present, the weight is considered to be 0.
Default value: 'weight'.
flowDict: dictionary
Dictionary of dictionaries keyed by nodes such that
        flowDict[u][v] is the flow on edge (u, v).
Returns
-------
cost: Integer, float
The total cost of the flow. This is given by the sum over all
edges of the product of the edge's flow and the edge's weight.
See also
--------
max_flow_min_cost, min_cost_flow, min_cost_flow_cost, network_simplex
"""
return sum((flowDict[u][v] * d.get(weight, 0)
for u, v, d in G.edges_iter(data = True)))
def max_flow_min_cost(G, s, t, capacity = 'capacity', weight = 'weight'):
"""Return a maximum (s, t)-flow of minimum cost.
G is a digraph with edge costs and capacities. There is a source
node s and a sink node t. This function finds a maximum flow from
s to t whose total cost is minimized.
Parameters
----------
G : NetworkX graph
DiGraph on which a minimum cost flow satisfying all demands is
to be found.
s: node label
Source of the flow.
t: node label
Destination of the flow.
capacity: string
Edges of the graph G are expected to have an attribute capacity
that indicates how much flow the edge can support. If this
attribute is not present, the edge is considered to have
infinite capacity. Default value: 'capacity'.
weight: string
Edges of the graph G are expected to have an attribute weight
that indicates the cost incurred by sending one unit of flow on
that edge. If not present, the weight is considered to be 0.
Default value: 'weight'.
Returns
-------
flowDict: dictionary
Dictionary of dictionaries keyed by nodes such that
        flowDict[u][v] is the flow on edge (u, v).
Raises
------
NetworkXError
This exception is raised if the input graph is not directed or
not connected.
NetworkXUnbounded
This exception is raised if there is an infinite capacity path
from s to t in G. In this case there is no maximum flow. This
exception is also raised if the digraph G has a cycle of
negative cost and infinite capacity. Then, the cost of a flow
is unbounded below.
See also
--------
cost_of_flow, min_cost_flow, min_cost_flow_cost, network_simplex
Examples
--------
>>> G = nx.DiGraph()
>>> G.add_edges_from([(1, 2, {'capacity': 12, 'weight': 4}),
... (1, 3, {'capacity': 20, 'weight': 6}),
... (2, 3, {'capacity': 6, 'weight': -3}),
... (2, 6, {'capacity': 14, 'weight': 1}),
... (3, 4, {'weight': 9}),
... (3, 5, {'capacity': 10, 'weight': 5}),
... (4, 2, {'capacity': 19, 'weight': 13}),
... (4, 5, {'capacity': 4, 'weight': 0}),
... (5, 7, {'capacity': 28, 'weight': 2}),
... (6, 5, {'capacity': 11, 'weight': 1}),
... (6, 7, {'weight': 8}),
... (7, 4, {'capacity': 6, 'weight': 6})])
>>> mincostFlow = nx.max_flow_min_cost(G, 1, 7)
>>> mincost = nx.cost_of_flow(G, mincostFlow)
>>> mincost
373
>>> from networkx.algorithms.flow import maximum_flow
>>> maxFlow = maximum_flow(G, 1, 7)[1]
>>> nx.cost_of_flow(G, maxFlow) >= mincost
True
>>> mincostFlowValue = (sum((mincostFlow[u][7] for u in G.predecessors(7)))
... - sum((mincostFlow[7][v] for v in G.successors(7))))
>>> mincostFlowValue == nx.maximum_flow_value(G, 1, 7)
True
"""
maxFlow = nx.maximum_flow_value(G, s, t, capacity = capacity)
H = nx.DiGraph(G)
H.add_node(s, demand = -maxFlow)
H.add_node(t, demand = maxFlow)
return min_cost_flow(H, capacity = capacity, weight = weight)
```
#### File: networkx/generators/stochastic.py
```python
from __future__ import division
import warnings
import networkx as nx
from networkx.utils import not_implemented_for
__author__ = "<NAME> <<EMAIL>>"
__all__ = ['stochastic_graph']
@not_implemented_for('multigraph')
@not_implemented_for('undirected')
def stochastic_graph(G, copy=True, weight='weight'):
"""Returns a right-stochastic representation of the directed graph ``G``.
A right-stochastic graph is a weighted digraph in which for each node, the
sum of the weights of all the out-edges of that node is 1. If the graph is
already weighted (for example, via a ``'weight'`` edge attribute), the
reweighting takes that into account.
Parameters
-----------
G : directed graph
A NetworkX DiGraph
copy : boolean, optional
If this is ``True``, then this function returns a new instance of
:class:`networkx.Digraph`. Otherwise, the original graph is modified
in-place (and also returned, for convenience).
weight : edge attribute key (optional, default='weight')
Edge attribute key used for reading the existing weight and setting the
new weight. If no attribute with this key is found for an edge, then the
        edge weight is assumed to be 1. If an edge has a weight, it must be
        a positive number.
"""
if copy:
W = nx.DiGraph(G)
else:
# Reference the original graph, don't make a copy.
W = G
degree = W.out_degree(weight=weight)
for (u, v, d) in W.edges(data=True):
if degree[u] == 0:
warnings.warn('zero out-degree for node %s' % u)
d[weight] = 0
else:
d[weight] = d.get(weight, 1) / degree[u]
return W
```
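A quick usage sketch of the function above via the packaged networkx API.
```python
import networkx as nx

G = nx.DiGraph()
G.add_edge('a', 'b', weight=3)
G.add_edge('a', 'c', weight=1)
W = nx.stochastic_graph(G, copy=True)
print(W['a']['b']['weight'], W['a']['c']['weight'])  # 0.75 0.25
```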
|
{
"source": "jdrudolph/perseuspy",
"score": 3
}
|
#### File: io/perseus/matrix.py
```python
import numpy as np
import pandas as pd
from collections import OrderedDict
separator = '\t'
def multi_numeric_converter(numbers):
return [float(num) for num in numbers.split(';') if num != '']
converters = {'M': multi_numeric_converter}
perseus_to_dtype = {'E' : float, 'T' : str, 'C' : 'category', 'N' : float}
def dtype_to_perseus(dtype):
if type(dtype) is pd.core.dtypes.dtypes.CategoricalDtype:
return 'C'
else:
mapping = {np.dtype('float'): 'N', np.dtype('str'): 'T',
np.dtype('object'): 'T', np.dtype('int64'): 'N',
np.dtype('bool'): 'C'}
return mapping[dtype]
def read_annotations(path_or_file, separator='\t', reset=True):
"""
Read all annotations from the specified file.
>>> annotations = read_annotations(path_or_file, separator)
>>> colnames = annotations['Column Name']
>>> types = annotations['Type']
>>> annot_row = annotations['Annot. row name']
:param path_or_file: Path or file-like object
:param separator: Column separator
:param reset: Reset the file after reading. Useful for file-like, no-op for paths.
:returns: Ordered dictionary of annotations.
"""
annotations = OrderedDict({})
with PathOrFile(path_or_file, 'r', reset=reset) as f:
annotations['Column Name'] = f.readline().strip().split(separator)
for line in f:
if line.startswith('#!{'):
tokens = line.strip().split(separator)
_name, first_value = tokens[0].split('}')
name = _name.replace('#!{', '')
values = [first_value] + tokens[1:]
if name == 'Type':
colnames = annotations['Column Name']
annotations['dtype'] = {colnames[i]: perseus_to_dtype[x] for i, x in enumerate(values) if x in perseus_to_dtype}
annotations['converters'] = {colnames[i]: converters[x] for i, x in enumerate(values) if x in converters}
annotations[name] = values
return annotations
def annotation_rows(prefix, annotations):
"""
Helper function to extract N: and C: rows from annotations and pad their values
"""
ncol = len(annotations['Column Name'])
return {name.replace(prefix, '', 1) : values + [''] * (ncol - len(values))
for name, values in annotations.items() if name.startswith(prefix)}
def create_column_index(annotations):
"""
Create a pd.MultiIndex using the column names and any categorical rows.
Note that also non-main columns will be assigned a default category ''.
"""
_column_index = OrderedDict({'Column Name' : annotations['Column Name']})
categorical_rows = annotation_rows('C:', annotations)
_column_index.update(categorical_rows)
numerical_rows = {name: [float(x) if x != '' else float('NaN') for x in values]
for name, values in annotation_rows('N:', annotations).items()} # to floats
_column_index.update(numerical_rows)
column_index = pd.MultiIndex.from_tuples(list(zip(*_column_index.values())), names=list(_column_index.keys()))
if len(column_index.names) == 1:
# flatten single-level index
name = column_index.names[0]
column_index = column_index.get_level_values(name)
return column_index
def read_perseus(path_or_file, **kwargs):
"""
Read a Perseus-formatted matrix into a pd.DataFrame.
Annotation rows will be converted into a multi-index.
By monkey-patching the returned pd.DataFrame a `to_perseus`
method for exporting the pd.DataFrame is made available.
:param path_or_file: File path or file-like object
:param kwargs: Keyword arguments passed as-is to pandas.read_csv
:returns: The parsed data frame
"""
annotations = read_annotations(path_or_file, separator)
column_index = create_column_index(annotations)
if 'usecols' in kwargs:
usecols = kwargs['usecols']
if type(usecols[0]) is str:
usecols = sorted([list(column_index).index(x) for x in usecols])
column_index = column_index[usecols]
kwargs['dtype'] = dict(kwargs.get('dtype', {}), **annotations.get('dtype', {}))
kwargs['converters'] = dict(kwargs.get('converters', {}), **annotations.get('converters', {}))
df = pd.read_csv(path_or_file, sep=separator, comment='#', **kwargs)
df.columns = column_index
return df
def to_perseus(df, path_or_file, main_columns=None,
separator=separator,
convert_bool_to_category=True,
numerical_annotation_rows = set([])):
"""
Save pd.DataFrame to Perseus text format.
:param df: pd.DataFrame.
:param path_or_file: File name or file-like object.
    :param main_columns: Main columns. Will be inferred if set to None. All numeric columns up-until the first non-numeric column are considered main columns.
:param separator: For separating fields, default='\t'.
    :param convert_bool_to_category: Convert bool columns of True/False to category columns '+'/'', default=True.
:param numerical_annotation_rows: Set of column names to be interpreted as numerical annotation rows, default=set([]).
"""
_df = df.copy()
if not _df.columns.name:
_df.columns.name = 'Column Name'
column_names = _df.columns.get_level_values('Column Name')
annotations = {}
main_columns = _infer_main_columns(_df) if main_columns is None else main_columns
annotations['Type'] = ['E' if column_names[i] in main_columns else dtype_to_perseus(dtype)
for i, dtype in enumerate(_df.dtypes)]
# detect multi-numeric columns
for i, column in enumerate(_df.columns):
valid_values = [value for value in _df[column] if value is not None]
if len(valid_values) > 0 and all(type(value) is list for value in valid_values):
annotations['Type'][i] = 'M'
_df[column] = _df[column].apply(lambda xs: ';'.join(str(x) for x in xs))
    if convert_bool_to_category:
        for i, column in enumerate(_df.columns):
            if _df.dtypes[i] == np.dtype('bool'):
                # map True -> '+' and False -> '' as expected by Perseus categorical columns;
                # avoids chained assignment on a bool-typed column
                _df[column] = _df[column].map({True: '+', False: ''})
annotation_row_names = set(_df.columns.names) - {'Column Name'}
for name in annotation_row_names:
annotation_type = 'N' if name in numerical_annotation_rows else 'C'
annotations['{}:{}'.format(annotation_type, name)] = _df.columns.get_level_values(name)
with PathOrFile(path_or_file, 'w') as f:
f.write(separator.join(column_names) + '\n')
for name, values in annotations.items():
f.write('#!{{{name}}}{values}\n'.format(name=name, values=separator.join([str(x) for x in values])))
_df.to_csv(f, header=None, index=False, sep=separator)
class PathOrFile():
"""Small context manager for file paths or file-like objects
:param path_or_file: Path to a file or file-like object
:param mode: Set reading/writing mode
:param reset: Reset file-like to initial position. Has no effect on path."""
def __init__(self, path_or_file, mode = None, reset=False):
self.path_or_file = path_or_file
self.mode = mode
self.isPath = isinstance(path_or_file, str)
self.reset = reset and not self.isPath
if self.reset:
self.position = self.path_or_file.seek(0, 1)
def __enter__(self):
if self.isPath:
self.open_file = open(self.path_or_file, self.mode)
return self.open_file
else:
self.open_file = None
return self.path_or_file
def __exit__(self, *args):
if self.open_file:
self.open_file.close()
if self.reset:
self.path_or_file.seek(self.position)
_numeric_dtypes = {np.dtype('float32'), np.dtype('float64'), np.dtype('int32'), np.dtype('int64')}
def _infer_main_columns(df, index_level='Column Name', numeric_dtypes=_numeric_dtypes):
"""
All numeric columns up-until the first non-numeric column are considered main columns.
:param df: The pd.DataFrame
:param index_level: Name of the index level of the column names. Default 'Column Name'
:param numeric_dtypes: Set of numpy.dtype containing all numeric types. Default int/float.
:returns: The names of the infered main columns
"""
columns = df.columns.get_level_values(index_level)
main_columns = []
for i,dtype in enumerate(df.dtypes):
if dtype in numeric_dtypes:
main_columns.append(columns[i])
else:
break
return main_columns
def main_df(infile, df):
    """Return only the main ('E' type) columns of df, using the annotations in infile."""
    annotations = read_annotations(infile)
    main_index = []
    i = 0
    for c_type in annotations['Type']:
        if c_type == 'E':
            main_index.append(i)
        i = i + 1
    main_dataframe = df.iloc[:, main_index[0]:main_index[-1] + 1]
    return main_dataframe
```
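A round-trip sketch using the reader and writer above; `example_matrix.txt` is a hypothetical output path and perseuspy is assumed to be importable.
```python
import pandas as pd
from perseuspy.io.perseus.matrix import read_perseus, to_perseus

df = pd.DataFrame({
    'Intensity A': [1.0, 2.0, 3.0],
    'Intensity B': [4.0, 5.0, 6.0],
    'Gene name': ['a', 'b', 'c'],
})
# the two float columns are inferred as main ('E') columns, the string column as text ('T')
to_perseus(df, 'example_matrix.txt')

df2 = read_perseus('example_matrix.txt')
print(list(df2.columns))  # ['Intensity A', 'Intensity B', 'Gene name']
```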
#### File: io/perseus/network.py
```python
from os import path, makedirs
import uuid
import networkx as nx
from collections import OrderedDict
from perseuspy.io.perseus.matrix import read_perseus
import pandas as pd
import warnings
def read_networks(folder):
"""
Read perseus network collection folder format
>>> network_table, networks = read_networks(folder)
:param folder: Path to network collection
:returns: Network table and dictionary with 'name', 'edge_table', and 'node_table' keys.
"""
network_table = read_perseus(path.join(folder, "networks.txt"))
networks = {}
for name, guid in network_table[['Name', 'GUID']].values:
networks[guid] = {
'name': name,
'guid': guid,
'node_table': read_perseus(path.join(folder, "{}_nodes.txt".format(guid))),
'edge_table': read_perseus(path.join(folder, "{}_edges.txt".format(guid)))
}
return network_table, networks
def from_perseus(network_table, networks):
"""
Create networkx graph from network tables
>>> from perseuspy import read_networks, nx
>>> network_table, networks = read_networks(folder)
>>> graphs = nx.from_perseus(network_table, networks)
"""
graphs = []
for guid, graph_attr in zip(network_table['GUID'], network_table.values):
network = networks[guid]
edge_table = network['edge_table']
if edge_table[['Source', 'Target']].duplicated().any():
warnings.warn('Duplicate edges were found and ignored in network {}'.format(network['name']))
G = nx.from_pandas_edgelist(edge_table, 'Source', 'Target', True, create_using=nx.DiGraph())
for attr, value in zip(network_table.columns, graph_attr):
G.graph[attr] = value
node_table = network['node_table']
if node_table['Node'].duplicated().any():
warnings.warn('Duplicate nodes were found and ignored in network {}'.format(network['name']))
node_column = node_table['Node']
for name, attributes in zip(node_column, node_table.values):
if name not in G:
G.add_node(name)
            for attr, value in zip(node_table.columns, attributes):
                G.nodes[name][attr] = value
graphs.append(G)
return graphs
def to_perseus(graphs):
"""
Create a network table and the network dictionary for export to Perseus.
:param graphs: Collection of networkx graphs
>>> from perseuspy import nx
>>> G = nx.random_graphs.barabasi_albert_graph(10, 3)
>>> network_table, networks = nx.to_perseus([G])
"""
graph_attributes = []
networks = {}
for graph in graphs:
attributes = dict(graph.graph)
attributes.update({"Name" : attributes.get("Name", attributes.get("name", "networkx graph")),
"GUID": attributes.get("GUID", str(uuid.uuid4()))})
graph_attributes.append(attributes)
if len(graph) > 0:
edge_table = pd.DataFrame([dict(data, **{"Source": str(f), "Target": str(t)}) for f,t,data in graph.edges(data=True)])
edge_table.columns.name = "Column Name"
node_table = pd.DataFrame([dict(data, **{"Node": str(n)}) for n,data in graph.nodes(data=True)])
node_table.columns.name = "Column Name"
else:
edge_table = pd.DataFrame(columns=pd.Index(['Source', 'Target'], name='Column Name'))
node_table = pd.DataFrame(columns=pd.Index(['Node'], name='Column Name'))
guid = attributes['GUID']
networks[guid] = {
'edge_table': edge_table,
'node_table': node_table,
'name': attributes['Name'],
'guid': guid }
network_table = pd.DataFrame(graph_attributes)
network_table.columns.name = "Column Name"
return network_table, networks
def write_networks(folder, network_table, networks):
"""
Writing networkTable, nodes and edges to Perseus readable format.
:param folder: Path to output directory.
:param network_table: Network table.
:param networks: Dictionary with node and edge tables, indexed by network guid.
"""
makedirs(folder, exist_ok=True)
network_table.to_perseus(path.join(folder, 'networks.txt'), main_columns=[])
for guid, network in networks.items():
network['node_table'].to_perseus(path.join(folder, '{}_nodes.txt'.format(guid)), main_columns=[])
network['edge_table'].to_perseus(path.join(folder, '{}_edges.txt'.format(guid)), main_columns=[])
```
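A round-trip sketch for the helpers above; `network_out` is a hypothetical output folder, and writing relies on the `DataFrame.to_perseus` monkey-patch that perseuspy installs on import, as described in matrix.py.
```python
import networkx as nx
from perseuspy.io.perseus.network import to_perseus, write_networks, read_networks, from_perseus

G = nx.DiGraph(name='toy network')
G.add_edge('a', 'b', score=0.9)
G.add_edge('b', 'c', score=0.1)

network_table, networks = to_perseus([G])
write_networks('network_out', network_table, networks)

# read the collection back and rebuild the networkx graphs
table, nets = read_networks('network_out')
graphs = from_perseus(table, nets)
print(list(graphs[0].edges(data=True)))
```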
#### File: perseuspy/perseuspy/parameters.py
```python
import xml.etree.ElementTree as ET
def parse_parameters(filename):
""" parse the parameters.xml file.
:param filename: 'parameters.xml'
    :returns tree: an 'xml.etree.ElementTree' object.
"""
return ET.parse(filename)
def _simple_string_value(tree, kind, name):
""" base function for extracting a simple parameter value.
:param tree: the parameters tree.
:param kind: the xml-tag name of the parameter.
:param name: the name of the parameter.
:returns value: the content of the parameter 'Value' as string."""
query = ".//{kind}[@Name='{name}']/Value".format(kind=kind, name=name)
return tree.find(query).text
def stringParam(parameters, name):
""" string parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. """
return _simple_string_value(parameters, 'StringParam', name)
def fileParam(parameters, name):
""" file parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. """
return _simple_string_value(parameters, 'FileParam', name)
def intParam(parameters, name):
""" integer parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. """
return int(_simple_string_value(parameters, 'IntParam', name))
def boolParam(parameters, name):
""" boolean parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. """
value = _simple_string_value(parameters, 'BoolParam', name)
if value not in {'true', 'false'}:
raise ValueError('BoolParam Value has to be either "true" or "false", was {}.'.format(value))
return value == 'true'
def doubleParam(parameters, name):
""" double parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. """
return float(_simple_string_value(parameters, 'DoubleParam', name))
def singleChoiceParam(parameters, name, type_converter = str):
""" single choice parameter value. Returns -1 if no value was chosen.
:param parameters: the parameters tree.
:param name: the name of the parameter.
:param type_converter: function to convert the chosen value to a different type (e.g. str, float, int). default = 'str'"""
param = parameters.find(".//SingleChoiceParam[@Name='{name}']".format(name=name))
value = int(param.find('Value').text)
values = param.find('Values')
if value < 0:
return value
return type_converter(values[value].text)
def multiChoiceParam(parameters, name, type_converter = str):
""" multi choice parameter values.
:param parameters: the parameters tree.
:param name: the name of the parameter.
:param type_converter: function to convert the chosen value to a different type (e.g. str, float, int). default = 'str'
:returns dictionary: value -> values
"""
param = parameters.find(".//MultiChoiceParam[@Name='{name}']".format(name=name))
value = param.find('Value')
values = param.find('Values')
return [type_converter(values[int(item.text)].text) for item in value.findall('Item')]
def singleChoiceWithSubParams(parameters, name, type_converter = str):
""" single choice with sub parameters value and chosen subparameters. Returns -1 and None if no value was chosen.
:param parameters: the parameters tree.
:param name: the name of the parameter.
:param type_converter: function to convert the chosen value to a different type (e.g. str, float, int). default = 'str'"""
param = parameters.find(".//SingleChoiceWithSubParams[@Name='{name}']".format(name=name))
value = int(param.find('Value').text)
values = param.find('Values')
if value < 0:
return value, None
return type_converter(values[value].text), param.findall('SubParams/Parameters')[value]
def boolWithSubParams(parameters, name):
""" single choice with sub parameters value and chosen subparameters. Returns -1 and None if no value was chosen.
:param parameters: the parameters tree.
:param name: the name of the parameter.
"""
param = parameters.find(".//BoolWithSubParams[@Name='{name}']".format(name=name))
str_value = param.find('Value').text
if str_value not in {'true', 'false'}:
raise ValueError('BoolParamWithSubParams Value has to be either "true" or "false", was {}'.format(str_value))
value = str_value == 'true'
choice = 'SubParamsTrue' if value else 'SubParamsFalse'
return value, param.find('{}/Parameters'.format(choice))
```
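A sketch of reading a few parameter types; the XML layout below is an assumption inferred from the XPath queries used above rather than an official Perseus parameters file, and the import path is assumed.
```python
import xml.etree.ElementTree as ET
from perseuspy.parameters import intParam, boolParam, singleChoiceParam  # assumed import path

xml = """
<Parameters>
  <IntParam Name="Number of bins"><Value>25</Value></IntParam>
  <BoolParam Name="Logarithmic"><Value>true</Value></BoolParam>
  <SingleChoiceParam Name="Side">
    <Value>1</Value>
    <Values><Item>left</Item><Item>right</Item></Values>
  </SingleChoiceParam>
</Parameters>
"""
tree = ET.ElementTree(ET.fromstring(xml))
print(intParam(tree, 'Number of bins'))  # 25
print(boolParam(tree, 'Logarithmic'))    # True
print(singleChoiceParam(tree, 'Side'))   # right
```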
#### File: jdrudolph/perseuspy/setup.py
```python
from setuptools import setup, find_packages
import os
HERE = os.path.dirname(__file__)
def read(fname):
return open(os.path.join(HERE, fname)).read()
# creates version_string
exec(open(os.path.join(HERE, "perseuspy", "version.py")).read())
setup(name='perseuspy',
version=version_string, # read from version.py
description='Utilities for integrating python scripts into Perseus workflows',
long_description=read('README.rst'),
url='http://www.github.com/jdrudolph/perseuspy',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=find_packages(),
install_requires=['pandas >= 0.24.0', 'networkx >= 2.1'],
test_suite = 'nose.collector',
tests_require= ['nose']
)
```
|
{
"source": "jdrugo/LCSBookCode",
"score": 2
}
|
#### File: jdrugo/LCSBookCode/cl.py
```python
from numpy import double, array, zeros, ones, identity, empty, vstack, \
dot, sum, log, power, concatenate, abs, max, exp, clip, nan_to_num, \
pi, inf, isnan
from numpy.linalg import pinv, det, LinAlgError
from numpy.random import uniform, randint
from scipy.special import gammaln, psi
from math import log as math_log
# fact from http://importantshock.wordpress.com/2006/11/03/one-line-factorial-function-in-python/
import operator
def fact(x): return reduce(operator.mul, xrange(2, x+1), 1)
# constants for later use
ln2pi = log(2.0 * pi)
double_max = nan_to_num(double(inf))
double_min = nan_to_num(double(-inf))
ln_double_max = log(double_max)
exp_min = double(-708.3964) # for double=float64 type
class Classifier(object):
"""A single classifier, if used needs to have matches() overridden.
"""
# noise prior Gam(tau_a, tau_b) parameters
tau_a = 1.e-2
tau_b = 1.e-4
# weight vector hyperprior Gam(a_a, a_b) parameters
a_a = 1.e-2
a_b = 1.e-4
# constant for variational bound
L_tau_const = -gammaln(tau_a) + tau_a * log(tau_b)
L_a_const = -gammaln(a_a) + a_a * log(a_b)
varbound_const = - gammaln(tau_a) + tau_a * (log(tau_b) + ln2pi) \
- gammaln(a_a) + a_a * log(a_b)
# stopping criteria for classifier update
max_iter = 20
max_dL = 1.e-4
def __init__(self, X, Dy, m_param):
"""Initialises classifier with given input matrix X, for Dy outputs.
The classifier is not trained, but its matching vector is initialised,
using the parameter vector m_param.
"""
N, Dx = X.shape
# weight matrix model N(wj, Sig), wj is rows of W
self.W = zeros((Dy, Dx), double)
self.Sig = zeros((Dx, Dx), double)
# noise precision model Gam(tau_ak, tau_bk)
self.tau_ak = Classifier.tau_a
self.tau_bk = Classifier.tau_b
# weight prior model Gam(a_ak, a_bk)
self.a_ak = Classifier.a_a
self.a_bk = Classifier.a_b
# evaluate matching
self.m_param = m_param
self.m = self.init_matching(X, m_param)
# cached values
self.res = zeros(N, double)
self.var = zeros(N, double)
self.ln_Sig_det = -inf
def matches(self, x):
"""Returns the degree to which the classifier matches the input.
In the base class it always returns 1.0.
"""
return 1.0
def __str__(self):
"""Returns a string representation of its matching function.
In the base class it always returns 'all'.
"""
return "all"
def init_matching(self, X, m_param):
"""Initialises the matching vector for the given input matrix by
calling matches() for each input in the input matrix X. m_param is
the matching parameter vector.
"""
return array(
[self.matches(X[n,:]) for n in xrange(X.shape[0])],
double)
def update(self, X, Y, resp = None):
"""Updates the classifier model with given input matrix X, output
matrix Y, and responsibilities resp. If the responsibilities are not
given, then the classifier's matching vector is used instead. The
method returns (iterations, L) where iterations is the number of
iterations that the update required, or a negative number if the
update didn't converge within the maximum number of updates. L is the
variational bound of the classifier at the last iteration, and
convergence is tested by monitoring the change of this value.
"""
if resp is None:
resp = self.m
# dimensions
N, Dx = X.shape
Dy = Y.shape[1]
DxDy = Dx * Dy
# local cache
tau_a, tau_b = Classifier.tau_a, Classifier.tau_b
a_a, a_b = Classifier.a_a, Classifier.a_b
L_tau_const, L_a_const = Classifier.L_tau_const, Classifier.L_a_const
max_dL, max_iter = Classifier.max_dL, Classifier.max_iter
# responsibility-weighted input and output
resp_mat = resp.reshape((resp.shape[0], 1))
Xk, Yk = X * resp_mat, Y * resp_mat
resp_sum = sum(resp)
# initial values
a_ak, a_bk = self.a_ak, self.a_bk
tau_ak, tau_bk, W = self.tau_ak, self.tau_bk, self.W
L, dL, i = -inf, max_dL + 1.0, 0
while (i < max_iter) and (dL > max_dL):
# update weight vector model
prec_mat = identity(Dx, double) * (a_ak / a_bk) + dot(X.T, Xk)
Sig = pinv(prec_mat)
W = dot(dot(Y.T, Xk), Sig)
ln_Sig_det = log(det(Sig))
# update noise precision model
tau_ak = tau_a + 0.5 * resp_sum
tau_bk = tau_b + 0.5 / float(Dy) * (sum(Yk * Y) -
sum(W * dot(W, prec_mat)))
# alternative (more expensive) to noise precision model update
#res = sum(power(Y - dot(X, W.T), 2.0), 1)
#tau_bk = tau_b + 0.5 / float(Dy) * (dot(resp, res) +
# a_ak / a_bk * sum(W * W))
# update weight prior model
a_ak = a_a + 0.5 * DxDy
a_bk = 0.5 * (tau_ak / tau_bk * sum(W * W) + Dy * Sig.trace())
# residual and variances (for the variational bound)
res = sum(power(Y - dot(X, W.T), 2.0), 1)
var = sum(X * dot(X, Sig), 1)
# variational bound components
prev_L = L
psi_tau_ak, ln_tau_bk = psi(tau_ak), log(tau_bk)
E_tau = tau_ak / tau_bk
L_tau = float(Dy) * (L_tau_const + (tau_a - tau_ak) * psi_tau_ak -
tau_a * ln_tau_bk - tau_b * E_tau +
gammaln(tau_ak) + tau_ak)
L_Y = 0.5 * float(Dy) * \
(psi_tau_ak - ln_tau_bk - ln2pi) * resp_sum - \
0.5 * dot(resp, (E_tau * res + float(Dy) * var))
L_Wa = L_a_const + gammaln(a_ak) - a_ak * log(a_bk) + \
0.5 * (DxDy + float(Dy) * ln_Sig_det)
L = L_tau + L_Y + L_Wa
# monitor change of variational bound
dL = L - prev_L
if dL < 0.0:
raise Exception, "Variational bound decreased by %f" % -dL
# next iteration
i += 1
# copy values into object variables
self.Sig, self.W = Sig, W
self.tau_ak, self.tau_bk = tau_ak, tau_bk
self.a_ak, self.a_bk = a_ak, a_bk
# cache residuals and uncertainties, and covariance determinant
self.res = sum(power(Y - dot(X, W.T), 2.0), 1)
self.var = sum(X * dot(X, Sig), 1)
self.ln_Sig_det = ln_Sig_det
# return number of iterations taken
if (dL > max_dL):
return (-1, L)
else:
return (i, L)
def pred(self, x):
"""Returns the prediction mean vector for the given input x.
"""
return dot(self.W, x)
def pred_var(self, x):
"""Returns the prediction mean vector and variance for the given
input x, as the tuple (mean vector, variance). The variance is
the same for all elements of the output vector.
"""
var = 2 * self.tau_bk / (self.tau_ak - 1.0) * \
(1.0 + dot(dot(x, self.Sig), x))
return (dot(self.W, x), var)
def var_bound(self, resp = None):
"""Returns the variational bound. If not responsibilities resp are
given, then the matching values are taken instead.
"""
if resp is None:
resp = self.m
# cache some values locally
Dy, Dx = self.W.shape
tau_a, tau_b = Classifier.tau_a, Classifier.tau_b
tau_ak, tau_bk = self.tau_ak, self.tau_bk
a_a, a_b = Classifier.a_a, Classifier.a_b
a_ak, a_bk = self.a_ak, self.a_bk
psi_tau_ak, ln_tau_bk = psi(tau_ak), log(tau_bk)
E_tau = tau_ak / tau_bk
# E(ln p(Y|Wk, Tk, zk))
E_Y = 0.5 * float(Dy) * (psi_tau_ak - ln_tau_bk - ln2pi) * sum(resp) -\
0.5 * dot(resp, E_tau * self.res + float(Dy) * self.var)
# related to alpha and W
E_Wa = Classifier.L_a_const + \
gammaln(a_ak) - a_ak * log(a_bk) + \
0.5 * Dy * (Dx + self.ln_Sig_det)
# related to tau
E_tau = Dy * (Classifier.L_tau_const +
(tau_a - tau_ak) * psi_tau_ak -
tau_a * ln_tau_bk - tau_b * E_tau +
gammaln(tau_ak) + tau_ak)
return E_Y + E_Wa + E_tau
def var_bound_test(self, resp = None):
"""Returns the variational bound and prints all of its components to
the standard output.
"""
if resp is None:
resp = self.m
# cache some values locally
Dy, Dx = self.W.shape
DxDy = float(Dx * Dy)
tau_a, tau_b = Classifier.tau_a, Classifier.tau_b
tau_ak, tau_bk = self.tau_ak, self.tau_bk
W, Sig, ln_Sig_det = self.W, self.Sig, self.ln_Sig_det
a_a, a_b = Classifier.a_a, Classifier.a_b
a_ak, a_bk = self.a_ak, self.a_bk
psi_tau_a, ln_tau_b = psi(tau_a), log(tau_b)
psi_tau_ak, ln_tau_bk = psi(tau_ak), log(tau_bk)
psi_a_a, ln_a_b = psi(a_a), log(a_b)
psi_a_ak, ln_a_bk = psi(a_ak), log(a_bk)
E_tau, E_a = tau_ak / tau_bk, a_ak / a_bk
# E(ln p(Y|Wk, Tk, zk))
E_p_Y = 0.5 * float(Dy) * \
(psi_tau_ak - ln_tau_bk - ln2pi) * sum(resp) - \
0.5 * dot(resp, E_tau * self.res + float(Dy) * self.var)
# E(ln p(Wk, Tk | Ak))
E_p_W_T = 0.5 * DxDy * (psi_a_ak - ln_a_bk +
psi_tau_ak - ln_tau_bk - ln2pi) - \
0.5 * E_a * (E_tau * sum(W * W) + float(Dy) * Sig.trace()) +\
float(Dy) * (- gammaln(tau_a) + tau_a * ln_tau_b +
(tau_a - 1.0) * (psi_tau_ak - ln_tau_bk) -
tau_b * E_tau)
# E(ln p(Ak))
E_p_A = - gammaln(a_a) + a_a * ln_a_b + \
(a_a - 1.0) * (psi_a_ak - ln_a_bk) - a_b * E_a
# E(ln q(Wk, Tk))
E_q_W_T = 0.5 * DxDy * (psi_tau_ak - ln_tau_bk - ln2pi - 1.0) - \
0.5 * float(Dy) * ln_Sig_det + \
float(Dy) * (- gammaln(tau_ak) +
(tau_ak - 1.0) * psi_tau_ak +
ln_tau_bk - tau_ak)
# E(ln q(Ak))
E_q_A = - gammaln(a_ak) + (a_ak - 1.0) * psi_a_ak + ln_a_bk - a_ak
L = E_p_Y + E_p_W_T + E_p_A - E_q_W_T - E_q_A
# output and return
print "E(ln p(Y | Wk, Tk, zk)) = %8.3f" % E_p_Y
print " E(ln p(Wk, Tk | Ak)) = %8.3f" % E_p_W_T
print " E(ln p(Ak)) = %8.3f" % E_p_A
print " - E(ln q(Wk, Tk)) = %8.3f" % -E_q_W_T
print " - E(ln q(Ak)) = %8.3f" % -E_q_A
print " Lk(q) = %8.3f" % L
return L
class Gating(object):
"""The gating network, used to combine the prediction of the different
classifiers. It can be trained either independently of the classifiers,
or in combination with them.
"""
# prior model for gating weight prior
b_a = 1.e-2
b_b = 1.e-4
# constant for variational bound
L_b_const = -gammaln(b_a) + b_a * log(b_b)
# convergence criteria for IRLS and full update
irls_max_iter = 40
irls_max_dKL = 1.e-8
max_iter = 40
max_dL = 1.e-2
def __init__(self, cls, N, Dv):
"""Initialises the gating network for the given set cls of classifiers.
N is the number of training samples available, and Dv is the size
of the gating feature vector.
"""
self.cls = cls
K = len(cls)
# responsibilities are initialised equiprobably
self.R = ones((N, K), double) / double(K)
# gating weight vectors are columns of V
self.V = ones((Dv, K), double)
# prior model in weight vector (beta). Each row represents the
# parameters (a, b) for the kth gating weight prior
self.b = ones((K, 2), double)
self.b[:,0] *= Gating.b_a
self.b[:,1] *= Gating.b_b
# cached values for computing variational bound
self.ln_cov_det = 0.0
self.cov_Tr = zeros(K, double)
def gating_matrix(self, Xf, V = None):
"""Returns the matrix G of gating weights for the given feature
matrix Xf, with each column corresponding to the gating weights
for each state of one classifier. If V is given, then its values
are used rather than the gating weights of the gating network.
"""
if V is None:
V = self.V
cls = self.cls
K = len(cls)
# limit the activation so that we won't get an over/underflow when
# computing the exponential. From below we have to make sure
# that it is larger than log(0.0) such that exp(it) > 0. From above, it
# needs to be smaller than log(double_max / K) as we need to make
# sure that sum(exp(it)) <= K * exp(it) < inf (for the normalisation
# step)
G = dot(Xf, V)
G = exp(clip(G, exp_min, ln_double_max - log(K)))
for k in xrange(K):
# apply matching to k'th column
G[:,k] *= cls[k].m
# normalise G
G /= sum(G, 1).reshape(G.shape[0], 1)
# due to matching it could be that the gating for particular states
# was 0.0 for all classifiers, causing 0.0 / 0.0 = nan. Hence, we are
# going to remove those by gating these states equally to all
# classifiers.
G[isnan(G)] = 1.0 / float(K)
return G
def hessian(self, Xf, G):
"""Return the hessian matrix for the given gating feature matrix Xf,
and the gating weight matrix G.
"""
Dv, K = self.V.shape
N = Xf.shape[0]
b_identity = identity(Dv, double)
E_b = self.b[:,0] / self.b[:,1]
H = empty((K * Dv,) * 2, double)
# fill hessian block-wise
for k in xrange(K):
gk = G[:,k]
kb = k * Dv
# create block elements (k,j) and (j,k) (they are the same)
for j in xrange(k):
# get - (gk * gj) (element-wise)
gkj = - gk * G[:,j]
Hkj = dot(Xf.T, Xf * gkj.reshape(N, 1))
# distribute block
jb = j * Dv
H[kb:kb+Dv, jb:jb+Dv] = Hkj
H[jb:jb+Dv, kb:kb+Dv] = Hkj
# create diagonal entry (k,k)
gkk = gk * (1.0 - gk)
Hkk = dot(Xf.T, Xf * gkk.reshape(N, 1)) + b_identity * E_b[k]
H[kb:kb+Dv, kb:kb+Dv] = Hkk
return H
def update_weights(self, Xf):
"""Applies the IRLS algorithm to update the gating weights. The
Newton-Raphson steps are performed until the KL-distance between
the desired responsibilities and the real gating converges, or until
the maximum number of iterations is reached. The method returns
(iterations, KL), where iterations is the number of iterations
required, and KL is the Kullback-Leibler divergence after the
last update. If the maximum number of iterations is exceeded,
then -1 is returned for the iterations.
"""
R = self.R
N, Dv = Xf.shape
b_mean = self.b[:,0] / self.b[:,1]
V = self.V
K = V.shape[1]
# iterate until convergence
max_iter, max_dKL = Gating.irls_max_iter, Gating.irls_max_dKL
i, KL, dKL = 0, -inf, max_dKL + 1.0
G = self.gating_matrix(Xf, V)
while (dKL > max_dKL) and (i < max_iter):
# target vector is the column-ravelled (Xf' (G - R) + E[b] * V),
# where last product is element-wise for each row in V
e = (dot(Xf.T, G - R) + V * b_mean).T.ravel()
# get inverted hessian
try:
H_inv = pinv(self.hessian(Xf, G))
except LinAlgError, e:
print "LinAlgError on computing pseudo-inverse of hessian"
raise e
# update gating weight vector
V -= dot(H_inv, e).reshape(K, Dv).T
# get gating vector for updated V (for KL divergence)
G = self.gating_matrix(Xf, V)
# update Kullback-Leibler divergence between G and R
prev_KL = KL
KL = sum(R * nan_to_num(log(G / R)))
# as the IRLS is not a variational algorithm, it is not
# guaranteed to converge monotonically
dKL = abs(KL - prev_KL)
# next iteration
i += 1
# update gating weight model
self.V = V
# get new hessian for updated gating weights to
# compute values that are required for the variational bound.
# We cannot use the last hessian from the IRLS iteration, as the
# weights have been updated since then.
try:
H_inv = pinv(self.hessian(Xf, G))
except LinAlgError, e:
print "LinAlgError on computing pseudo-inverse of hessian"
raise e
self.ln_cov_det = log(det(H_inv))
cov_Tr = self.cov_Tr
for k in xrange(K):
kb = k * Dv
cov_Tr[k] = H_inv[kb:kb+Dv, kb:kb+Dv].trace()
# return number of iterations
if dKL > max_dKL:
return (-1, KL)
else:
return (i, KL)
def update_resp(self, Xf):
"""Updates the responsibilities matrix, based on the current
goodness-of-fit of the classifiers, and the current gating weight
vectors. Xf is the gating feature matrix.
"""
R, cls = self.R, self.cls
Dy = float(cls[0].W.shape[0])
# fill R with goodness-of-fit data from classifiers
for k in xrange(R.shape[1]):
cl = cls[k]
tau_ak, tau_bk = cl.tau_ak, cl.tau_bk
# k'th column is exp( Dy/2 E[ln Tk] - 1/2 (E[Tk] res + Dy var) )
R[:,k] = exp(0.5 * (Dy * (psi(tau_ak) - log(tau_bk)) -
((tau_ak / tau_bk) * cl.res + Dy * cl.var)))
# multiply with current gating
R *= self.gating_matrix(Xf)
# normalise row vectors
R /= sum(R, 1).reshape(R.shape[0], 1)
def update_gating(self, Xf):
"""Updates the gating weight vectors and priors until convergence,
based on the current classifier models. Convergence is determined
by monitoring the variational bound of the gating network. The method
returns (iterations, L), where iterations is the number of iterations
that are performed until convergence, and L is the variational
bound after the last iteration. If the maximum number of iterations
was exceeded, then (-1, L) is returned.
"""
N, Dv = Xf.shape
K = len(self.cls)
# local caches
b_a, b_b = Gating.b_a, Gating.b_b
L_b_const = Gating.L_b_const
b = self.b
max_dL, max_iter = Gating.max_dL, Gating.max_iter
# pre-update b_a, as it stays the same in each iteration
b[:,0] = Gating.b_a + 0.5 * float(Dv)
gammaln_b_a = sum(gammaln(b[:,0]))
# start iteration
i, dL, L = 0, max_dL + 1.0, -inf
while (dL > max_dL) and (i < max_iter):
# update responsibilities
self.update_resp(Xf)
# update priors (only b_b, as b_a was updated before)
V = self.V
b[:,1] = b_b + 0.5 * (sum(V * V, 0) + self.cov_Tr)
# update gating weights
id, KL = self.update_weights(Xf)
if id < 0:
#print "Iteration limit exceeded when updating gating weights"
pass
# get new variational bound
L_prev = L
# E_V_b = E(ln p(V | b)) + E(ln p(b)) - E(ln q(b)) - E(ln q(V))
E_V_b = K * L_b_const + gammaln_b_a - sum(b[:,0] * log(b[:,1])) + \
0.5 * self.ln_cov_det + 0.5 * K * Dv
L = E_V_b + KL
#print L, exp(self.V)
# as we are using an approximation, the variational bound
# might decrease, so we're not checking and need to take the abs()
dL = abs(L - L_prev)
# next iteration
i += 1
if dL > max_dL:
return (-1, L)
else:
return (i, L)
def var_bound(self, Xf):
"""Returns the variational bound of classifiers and gating network.
"""
cls, b, R = self.cls, self.b, self.R
N, Dv = Xf.shape
K = len(cls)
# get the classifier variational bound
cl_var_bound = 0.0
for k in xrange(K):
cl_var_bound += cls[k].var_bound(R[:,k])
# get the gating network variational bound
# E(ln p(V | b)) + E(ln p(b)) - E(ln q(b)) - E(ln q(V))
E_V_b = K * Gating.L_b_const + \
sum(gammaln(b[:,0])) - sum(b[:,0] * log(b[:,1])) + \
0.5 * self.ln_cov_det + 0.5 * K * Dv
# E(ln p(Z | V)) - E(ln q(Z))
E_Z = KL = sum(R * nan_to_num(log(self.gating_matrix(Xf) / R)))
return cl_var_bound + E_V_b + E_Z
def ln_model_prob(self, Xf):
"""Returns the ln of the model probability, which is the
variational bound / K! to account for symmetries in the classifier
matching function.
"""
# we need to use the log function from the math module,
# as the one from the numpy module cannot handle numbers of type 'long'
return self.var_bound(Xf) - math_log(fact(len(self.cls)))
def gating_weights(self, xf, x):
"""Returns the gating vector for the given input x and gating
features xf.
"""
# for detailed comments see gating_matrix()
cls = self.cls
K = len(cls)
g = dot(xf, self.V)
g = exp(clip(g, exp_min, ln_double_max - log(K)))
for k in xrange(K):
g[k] *= cls[k].matches(x)
g /= sum(g)
g[isnan(g)] = 1.0 / K
return g
def pred(self, x, xf):
"""Returns the prediction mean for a new input x with gating
features xf.
"""
g = self.gating_weights(xf, x)
means = vstack([cl.pred(x) for cl in self.cls])
return dot(g, means)
def pred_var(self, x, xf):
"""Returns the prediction mean and variance for a new input x with
gating feature xf. The return value is (means, variances).
"""
g = self.gating_weights(xf, x)
# get means and variances
means_vars = [cl.pred_var(x) for cl in self.cls]
means = vstack([m_v[0] for m_v in means_vars])
variances = array([m_v[1] for m_v in means_vars], double)
# get mean and variance vector
mean = dot(g, means)
var = dot(g, variances.reshape(variances.shape[0], 1) +
power(means, 2.0)) - power(mean, 2.0)
return (mean, var)
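# Note (added): the mixed variance above is the standard mixture identity
# Var[y] = sum_k g_k * (var_k + mean_k^2) - mean^2, i.e. the law of total
# variance taken over the gating weights g.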
```
#### File: jdrugo/LCSBookCode/exp2.py
```python
import sys
from numpy import double, array, ones, empty, arange, empty, hstack, \
sqrt, exp, sort, sum, inf, power, dot, linspace, sin, pi
from numpy.random import random, randn, binomial, uniform, normal
import Gnuplot
from ga import GeneticAlgorithm_TS
from cls import RBF1DClStore, RBF1DIndv, \
SoftIntervalClStore, SoftInterval1DIndv
from mcmc import SampleModelPosterior
from experiments import read_data, write_data, write_raw_data, plot_cls, \
GA_experiment, MCMC_experiment
waterhouse_data_file = "exp2_waterhouse.data"
waterhouse_data_raw_file = "exp2_waterhouse_raw.data"
waterhouse_data_points = 200
own_data_file = "exp2_own.data"
own_data_raw_file = "exp2_own_raw.data"
own_data_points = 300
noise_data_file = "exp2_noise.data"
noise_data_raw_file = "exp2_noise_raw.data"
noise_data_points = 200
sinus_data_file = "exp2_sinus.data"
sinus_data_raw_file = "exp2_sinus_raw.data"
sinus_data_points = 300
def write_waterhouse_data():
"""Generates the data set and writes it to the data_file.
"""
# generate the data x, y
#var = 0.44
var = 0.20
#var = 0.05
x = sort(random(waterhouse_data_points) * 4.0)
y = 4.26 * (exp(-x) - 4 * exp(-2 * x) + 3 * exp(-3 * x)) \
+ sqrt(var) * randn(waterhouse_data_points)
# write the data
write_data(x, y, waterhouse_data_file)
def write_waterhouse_raw_data():
"""Writes the raw data without noise.
"""
x = linspace(0, 4, 1000)
y = 4.26 * (exp(-x) - 4 * exp(-2 * x) + 3 * exp(-3 * x))
write_data(x, y, waterhouse_data_raw_file)
def read_waterhouse_data():
return read_data(waterhouse_data_file)
def own_f(x):
"""Returns f(x) for given x.
"""
# functions are
# f1(x) = 0.05 + 0.5 x
# f2(x) = 2 - 4 x
# f3(x) = -1.5 + 2.5 x
fns = array([[0.05, 0.5], [2.0, -4.0], [-1.5, 2.5]], double)
# gaussian basis functions are given by (mu, var, weight):
# (0.2, 0.05), (0.5, 0.01), (0.8, 0.05)
gbfs = array([[0.2, 0.05, 0.5], [0.5, 0.01, 1.0], [0.8, 0.05, 0.4]], double)
# plain function values
fx = fns[:,0] + x * fns[:,1]
#print "%f\t%f\t%f\t%f" % (x, fx[0], fx[1], fx[2])
# mixing weights
mx = gbfs[:,2] * exp(-0.5 / gbfs[:,1] * power(x - gbfs[:,0], 2.0))
mx /= sum(mx)
#print "%f\t%f\t%f\t%f" % (x, mx[0], mx[1], mx[2])
# return mixed function
return dot(fx, mx)
def write_own_data():
"""Generates 'artificial' dataset and writes it to file.
"""
noise = 0.1
x = uniform(size = own_data_points)
y = array([own_f(x_n) for x_n in x], double) + \
normal(size = own_data_points) * noise
write_data(x, y, own_data_file)
def write_own_raw_data():
"""Writes raw classifier and function to file.
"""
x = linspace(0, 1.0, 1000)
y = array([own_f(x_n) for x_n in x], double)
W = array([[0.05, 0.5], [2.0, -4.0], [-1.5, 2.5]], double)
X = hstack((ones(len(x), double).reshape(len(x), 1),
x.reshape(len(x), 1)))
Y = dot(X, W.T)
write_raw_data(x, hstack([y.reshape(len(x), 1), Y]), own_data_raw_file)
def read_own_data():
return read_data(own_data_file)
def noise_f(x):
"""function with different noise levels.
"""
if x > 0:
return -1.0 + 2.0 * x
else:
return -1.0 - 2.0 * x
def write_noise_data():
"""Generates function with different leven of noise in different
areas of the function.
"""
l_noise, u_noise = 0.6, 0.1
x = uniform(-1.0, 1.0, size = noise_data_points)
y = array([noise_f(xn) + \
(normal(0.0, l_noise) if xn < 0 else normal(0.0, u_noise)) \
for xn in x], double)
write_data(x, y, noise_data_file)
def write_noise_raw_data():
"""Writes the basic function.
"""
x = linspace(-1, 1, 1000)
y = array([noise_f(x_n) for x_n in x], double)
write_data(x, y, noise_data_raw_file)
def read_noise_data():
return read_data(noise_data_file)
def write_sinus_data():
"""Generates sinusoid data with some noise.
"""
x = uniform(-1.0, 1.0, size = sinus_data_points)
y = sin(2 * pi * x) + normal(0.0, 0.15, size = sinus_data_points)
write_data(x, y, sinus_data_file)
def write_sinus_raw_data():
"""Generate sinusoid data without noise.
"""
x = linspace(-1.0, 1.0, 1000)
y = sin(2 * pi * x)
write_data(x, y, sinus_data_raw_file)
def read_sinus_data():
return read_data(sinus_data_file)
def exp2a():
"""Running GA on waterhouse data.
"""
X, Y = read_waterhouse_data()
N = X.shape[0]
Xf = ones(N, double).reshape(N, 1)
cl_store = RBF1DClStore(0.0, 4.0)
# run the experiment for 250 epochs with 20 individuals in the population.
GA_experiment(X, Y, Xf, 250,
[1 + binomial(4, 0.5) for p in xrange(20)],
cl_store, RBF1DIndv,
'exp2a_fitness.data', 'exp2a_cls.data')
def exp2b():
"""Running MCMC on waterhouse data.
"""
X, Y = read_waterhouse_data()
N = X.shape[0]
Xf = ones(N, double).reshape(N, 1)
cl_store = RBF1DClStore(0.0, 4.0)
MCMC_experiment(X, Y, Xf, 500, 10, 0.25,
1 + binomial(4, 0.5),
cl_store,
'exp2b_varbound.data', 'exp2b_cls.data')
def exp2c():
"""Running GA on own data.
"""
X, Y = read_own_data()
N = X.shape[0]
Xf = ones(N, double).reshape(N, 1)
cl_store = RBF1DClStore(0.0, 1.0)
# run the experiment for 250 epochs with 20 individuals in the population.
GA_experiment(X, Y, Xf, 250,
[1 + binomial(8, 0.5) for p in xrange(20)],
cl_store, RBF1DIndv,
'exp2c_fitness.data', 'exp2c_cls.data')
def exp2d():
"""Running MCMC on own data.
"""
X, Y = read_own_data()
N = X.shape[0]
Xf = ones(N, double).reshape(N, 1)
cl_store = RBF1DClStore(0.0, 1.0)
MCMC_experiment(X, Y, Xf, 500, 10, 0.25,
1 + binomial(8, 0.5),
cl_store,
'exp2d_varbound.data', 'exp2d_cls.data')
def exp2e():
"""Running GA on noisy data, using soft interval classifiers.
"""
X, Y = read_noise_data()
N = X.shape[0]
Xf = ones(N, double).reshape(N, 1)
cl_store = SoftIntervalClStore(-1.0, 1.0)
# run the experiment for 250 epochs with 20 individuals in the population.
GA_experiment(X, Y, Xf, 250,
[1 + binomial(8, 0.5) for p in xrange(20)],
cl_store, SoftInterval1DIndv,
'exp2e_fitness.data', 'exp2e_cls.data')
def exp2f():
"""Running MCMC on noisy data, using soft interval classifiers.
"""
X, Y = read_noise_data()
N = X.shape[0]
Xf = ones(N, double).reshape(N, 1)
cl_store = SoftIntervalClStore(-1.0, 1.0)
MCMC_experiment(X, Y, Xf, 500, 10, 0.25,
1 + binomial(8, 0.5),
cl_store,
'exp2f_varbound.data', 'exp2f_cls.data')
def exp2g():
"""Running GA on sinusoid data, using soft interval classifiers.
"""
X, Y = read_sinus_data()
N = X.shape[0]
Xf = ones(N, double).reshape(N, 1)
cl_store = SoftIntervalClStore(-1.0, 1.0)
# run the experiment for 250 epochs with 20 individuals in the population.
GA_experiment(X, Y, Xf, 250,
[1 + binomial(8, 0.5) for p in xrange(20)],
cl_store, SoftInterval1DIndv,
'exp2g_fitness.data', 'exp2g_cls.data')
def exp2h():
"""Running MCMC on sinusoid data, using soft interval classifiers.
"""
X, Y = read_sinus_data()
N = X.shape[0]
Xf = ones(N, double).reshape(N, 1)
cl_store = SoftIntervalClStore(-1.0, 1.0)
MCMC_experiment(X, Y, Xf, 500, 10, 0.25,
1 + binomial(8, 0.5),
cl_store,
'exp2h_varbound.data', 'exp2h_cls.data')
# run experiments from arguments
if __name__ == '__main__':
exp_modes = {'gen1': lambda: write_waterhouse_data(),
'gen2': lambda: write_own_data(),
'gen3': lambda: write_noise_data(),
'gen4': lambda: write_sinus_data(),
'raw1': lambda: write_waterhouse_raw_data(),
'raw2': lambda: write_own_raw_data(),
'raw3': lambda: write_noise_raw_data(),
'raw4': lambda: write_sinus_raw_data(),
'a': lambda: exp2a(),
'b': lambda: exp2b(),
'c': lambda: exp2c(),
'd': lambda: exp2d(),
'e': lambda: exp2e(),
'f': lambda: exp2f(),
'g': lambda: exp2g(),
'h': lambda: exp2h()}
for argv in sys.argv[1:]:
if not exp_modes.has_key(argv):
print "--- Unkown experiment: %s" % argv
else:
print "--- Running '%s'" % argv
exp_modes[argv]()
```
#### File: jdrugo/LCSBookCode/experiments.py
```python
import sys
from numpy import double, array, ones, empty, hstack, inf, linspace, sqrt, \
isnan, nan_to_num
from math import log
import Gnuplot
from cl import fact
from ga import GeneticAlgorithm_TS
from mcmc import SampleModelPosterior
def write_data(x, y, filename):
"""Writes the data to the file with given filename.
"""
assert(x.shape[0] == y.shape[0])
assert(len(x.shape) == 1 and len(y.shape) == 1)
f = open(filename, 'w')
for n in xrange(x.shape[0]):
print >>f, "%0.6f\t%0.6f" % (x[n], y[n])
f.close()
def write_raw_data(x, Y, filename):
"""Same as write_data, but Y is now a matrix of N rows.
"""
assert(x.shape[0] == Y.shape[0])
assert(len(x.shape) == 1 and len(Y.shape) == 2)
f = open(filename, 'w')
data_str = "\t".join(["%0.6f"] * (1 + Y.shape[1]))
for n in xrange(x.shape[0]):
print >>f, data_str % tuple([x[n]] + Y[n,:].tolist())
f.close()
def read_data(filename):
"""Returns input and output matrix (X, Y) by reading the data from
the file with the given filename.
"""
f = open(filename, 'r')
x, y = [], []
for l in f.readlines():
if l[-1] == '\n':
l = l[:-1]
l = l.strip()
if l == '':
continue
xn, yn = map(float, l.split('\t'))
x.append(xn)
y.append(yn)
X = hstack((ones(len(x), double).reshape(len(x), 1),
array(x, double).reshape(len(x), 1)
))
return (X, array(y, double).reshape(len(y), 1))
def plot_cls(X, Y, gate, filename=""):
"""Plots the data, the classifier prediction, and the mixed prediction.
If a filename is given, then the prediction data is also written to a file
with the given filename. The method returns the plot object. The plot is
closed if this object is deleted. It is assumed that the second column
of X contains the full range, and y is of shape (N, 1). The method only
works with classifiers that model straight lines.
"""
cls = gate.cls
N, K = X.shape[0], len(cls)
x = X[:,1]
y, min_x, max_x = Y.reshape(N), x.min(), x.max()
Xf = ones(N, double).reshape(N, 1)
# get the original function
plot_data = [ Gnuplot.Data(x.tolist(), y.tolist(), title="f(x)") ,]
# get classifier predictions
N = 100
x = linspace(min_x, max_x, N)
Pred = empty((N, K+3), double) # +3 for mix and its standard deviation
xf = ones(1, double)
for k in xrange(K):
for n in xrange(N):
Pred[n, k] = cls[k].pred(array([1, x[n]], double))
plot_data.append(
Gnuplot.Data(x.tolist(), Pred[:,k].tolist(),
title="f%i(x)" % (k + 1),
with="lines"))
# get mixed prediction with variance
for n in xrange(N):
mean, var = gate.pred_var(array([1, x[n]], double), xf)
Pred[n, K] = mean[0]
Pred[n, K+1], Pred[n, K+2] = mean[0] - sqrt(var[0]), \
mean[0] + sqrt(var[0])
plot_data.append(
Gnuplot.Data(x.tolist(), Pred[:,K].tolist(),
title="pred", with="lines"))
plot_data.append(
Gnuplot.Data(x.tolist(), Pred[:,K+1].tolist(),
title="pred-", with="lines"))
plot_data.append(
Gnuplot.Data(x.tolist(), Pred[:,K+2].tolist(),
title="pred+", with="lines"))
# plot the graph
g = Gnuplot.Gnuplot()
g.plot(*plot_data)
# write to file, if requested
if filename != "":
data_str = '\t'.join(["%0.6f"] * (K + 4))
f = open(filename, 'w')
for n in xrange(N):
print >>f, data_str % tuple([x[n]] + list(Pred[n,:]))
return g
def print_cls(cls):
"""Prints the classifiers in the population to the standard output.
"""
for k in xrange(len(cls)):
print "% 2d: %s" % (k + 1, str(cls[k]))
def GA_experiment(X, Y, Xf, epochs, Ks, cl_store, indv_class,
fitness_file = "", best_file = ""):
"""Performs a GA experiment by running 'epochs' epochs. The initial
population is initialised with individuals of size K, where the Ks are
given by the sequence Ks. If the fitness_file is given, then the best,
worst, and average fitness, and the average number of classifiers is
written to the file. If best_file is given, then the best final
individual is written to this file, using plot_cls(). The tournament
size is always 5, and mutation and crossover probability are 0.4 and
0.8 respectively.
"""
# create initial population
pop = []
for K in Ks:
pop.append(indv_class(cl_store, X, Y, Xf,
[cl_store.random_cl_key() for k in xrange(K)]))
# initialise GA
GA = GeneticAlgorithm_TS(pop, 5, 0.4, 0.4, 0.00)
gr = None
fitnesses = empty((epochs, 4), double)
best_varbound, best_cls = -inf, None
# run epochs
print "Running epoch %6d" % 0,
sys.stdout.flush()
for epoch in xrange(epochs):
#print "\033[7D%6d" % (epoch + 1),
#sys.stdout.flush()
# create new populations and get stats
fitnesses[epoch,0:3] = GA.next_gen()
Ks = array([len(indv.chrom) for indv in GA.pop], double)
fitnesses[epoch,3] = Ks.mean()
# print population structure
pop, pop_f = GA.pop, GA.pop_f
for k in xrange(len(pop)):
print pop_f[k], pop[k].chrom
print "----- %d" % (epoch + 1)
# store best individual
if best_varbound < fitnesses[epoch, 0]:
best_cls = GA.best_indv()
best_varbound = fitnesses[epoch, 0]
# generate graph every 10th epoch
if epoch % 10 == 0:
del(gr)
gr = plot_cls(X, Y, GA.best_indv().gate)
print
# write fitnesses to file
if fitness_file:
fitnesses[isnan(fitnesses)] = -inf
fitnesses = nan_to_num(fitnesses)
f = open(fitness_file, 'w')
print >>f,"# Epoch, Max, Min, Avg fitness, average K"
for epoch in xrange(epochs):
print >>f, "%d\t%0.6f\t%0.6f\t%0.6f\t%0.6f" % \
(epoch + 1, fitnesses[epoch, 0], fitnesses[epoch, 1],
fitnesses[epoch, 2], fitnesses[epoch, 3])
# write best individual to file
if best_file:
gr = plot_cls(X, Y, best_cls.gate, best_file)
else:
gr = plot_cls(X, Y, best_cls.gate)
# print best individual
print "Best individual:"
print_cls(best_cls.gate.cls)
print "Variational bound: %0.6f" % best_varbound
raw_input('Please press return to continue...\n')
del gr
def MCMC_experiment(X, Y, Xf, inner_steps, outer_steps, del_add_prob,
K, cl_store, varbound_file = "", best_file = ""):
"""Performs an MCMC experiment by running outer_steps runs of
inner_steps steps each, and reinitialising the population before each
run with K classifiers. The probability for adding and deleting classifiers
is given by del_add_prob, and the classifier store cl_store is used.
If varbound_file is given, then the variational bound of each step, as
well as the current number of classifiers is written to that file.
If best_file is given, then the best set of classifiers is written
to that file.
"""
best_cls, best_varbound = None, -inf
varbound_plot = Gnuplot.Gnuplot()
cls_plot = None
varbounds = empty((inner_steps * outer_steps, 2), double)
total_actions, total_rejects = [0, 0, 0], [0, 0, 0]
step = 0
for outer in xrange(outer_steps):
# use sys to get immediate output
print "Running outer loop %d, inner %6d" % (outer + 1, 0),
sys.stdout.flush()
rejects, accepts = [0, 0, 0], [0, 0, 0]
best_inner_cls, best_inner_varbound = None, -inf
# initialise sampler
cls = [cl_store.random_cl(X, Y) for k in xrange(K)]
sampler = SampleModelPosterior(del_add_prob, cls, cl_store, X, Y, Xf)
for inner in xrange(inner_steps):
if inner % 10 == 0:
print "\033[7D%6d" % inner,
sys.stdout.flush()
# perform next step
act, accepted = sampler.next()
# create some stats
total_actions[act] += 1
if accepted == False:
rejects[act] += 1
total_rejects[act] += 1
else:
accepts[act] += 1
# store cls if better (inner loop)
if sampler.var_bound > best_inner_varbound:
best_inner_cls = sampler.gate
best_inner_varbound = sampler.var_bound
varbounds[step, 0] = sampler.var_bound
varbounds[step, 1] = len(sampler.cls)
step += 1
# store cls if better (outer loop)
if best_inner_varbound > best_varbound:
best_cls = best_inner_cls
best_varbound = best_inner_varbound
# print stats
print
print " Accepted Rejected"
print "Change %8d %8d" % (accepts[0], rejects[0])
print "Remove %8d %8d" % (accepts[1], rejects[1])
print "Add %8d %8d" % (accepts[2], rejects[2])
print_cls(best_inner_cls.cls)
print "Variational bound: %0.6f" % best_inner_varbound
print
# plot graphs
del(cls_plot)
cls_plot = plot_cls(X, Y, best_inner_cls)
varbound_plot.plot(
Gnuplot.Data(varbounds[:step,0], with='lines'),
Gnuplot.Data(varbounds[:step,1] * 10, with='lines'))
# need to remove previous plot
del(cls_plot)
# write varbounds to file
if varbound_file:
f = open(varbound_file, 'w')
print >>f,"# Step, Varbound, K"
for step in xrange(inner_steps * outer_steps):
print >>f, "%d\t%0.6f\t%0.6f" % \
(step + 1, varbounds[step, 0], varbounds[step, 1])
# write best population to file
if best_file:
cls_plot = plot_cls(X, Y, best_cls, best_file)
else:
cls_plot = plot_cls(X, Y, best_cls)
# write stats to standard output
print " Total Rejected"
print "Change %8d %4.1f%%" % (total_actions[0], float(total_rejects[0]) /
total_actions[0] * 100.0)
print "Remove %8d %4.1f%%" % (total_actions[1], float(total_rejects[1]) /
total_actions[1] * 100.0)
print "Add %8d %4.1f%%" % (total_actions[2], float(total_rejects[2]) /
total_actions[2] * 100.0)
print "Total %8d %4.1f%%" % (total_actions[0] + total_actions[1] +
total_actions[2],
float(total_rejects[0] + total_rejects[1] +
total_rejects[2]) /
(total_actions[0] + total_actions[1] +
total_actions[2]) * 100.0)
print "Overall best population:"
print_cls(best_cls.cls)
print "Variational bound: %0.6f" % best_varbound
print "L(q) - ln K!: %0.6f" % (best_varbound - log(fact(len(best_cls.cls))))
print
raw_input('Please press return to continue...\n')
```
|
{
"source": "jdruiter/djmail",
"score": 2
}
|
#### File: djmail/djmail/models.py
```python
from __future__ import unicode_literals
from django.db import models
from . import utils
class Message(models.Model):
STATUS_DRAFT = 10
STATUS_PENDING = 20
STATUS_SENT = 30
STATUS_FAILED = 40
STATUS_DISCARDED = 50
STATUS_CHOICES = (
(STATUS_DRAFT, 'Draft'),
(STATUS_SENT, 'Sent'),
(STATUS_FAILED, 'Failed'),
(STATUS_DISCARDED, 'Discarded'), )
PRIORITY_LOW = 20
PRIORITY_STANDARD = 50
PRIORITY_CHOICES = (
(PRIORITY_LOW, 'Low'),
(PRIORITY_STANDARD, 'Standard'),
)
uuid = models.CharField(max_length=40, primary_key=True)
from_email = models.CharField(max_length=1024, blank=True)
to_email = models.TextField(blank=True)
body_text = models.TextField(blank=True)
body_html = models.TextField(blank=True)
subject = models.CharField(max_length=1024, blank=True)
data = models.TextField(blank=True, editable=False)
retry_count = models.SmallIntegerField(default=-1)
status = models.SmallIntegerField(choices=STATUS_CHOICES, default=STATUS_DRAFT)
priority = models.SmallIntegerField(choices=PRIORITY_CHOICES, default=PRIORITY_STANDARD)
created_at = models.DateTimeField(auto_now_add=True)
sent_at = models.DateTimeField(null=True, default=None)
exception = models.TextField(editable=True, blank=True)
def get_email_message(self):
return utils.deserialize_email_message(self.data)
@classmethod
def from_email_message(cls, email_message, save=False):
def get_body_key(body_type):
"""Declare HTML body subtype as text/html else as text/plain."""
return 'body_{}'.format('html' if body_type.split('/')[-1] == 'html' else 'text')
kwargs = {
"from_email": utils.force_text(email_message.from_email),
"to_email": ",".join(utils.force_text(x) for x in email_message.to),
"subject": utils.force_text(email_message.subject),
"data": utils.serialize_email_message(email_message),
get_body_key(email_message.content_subtype):
utils.force_text(email_message.body)
}
# Update the body (if missing) from the alternatives
for alt_body, alt_type in getattr(email_message, 'alternatives', None) or []:
kwargs.setdefault(get_body_key(alt_type), alt_body)
instance = cls(**kwargs)
if save:
instance.save()
return instance
class Meta:
ordering = ['-created_at']
verbose_name = 'Message'
verbose_name_plural = 'Messages'
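# Illustrative usage sketch (added; not part of the original module). The
# addresses are hypothetical and serialisation relies on djmail.utils:
#
#   from django.core.mail import EmailMessage
#   email = EmailMessage('Subject', 'Body', 'from@example.com', ['to@example.com'])
#   msg = Message.from_email_message(email, save=False)
#   rebuilt = msg.get_email_message()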
```
|
{
"source": "jdruiter/saleor",
"score": 2
}
|
#### File: dashboard/customer/views.py
```python
from __future__ import unicode_literals
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import permission_required
from django.db.models import Count, Max
from django.shortcuts import get_object_or_404, redirect
from django.template.response import TemplateResponse
from django.utils.translation import pgettext_lazy
from ...core.utils import get_paginator_items
from ...userprofile.models import User
from ..views import staff_member_required
from .filters import CustomerFilter
@staff_member_required
@permission_required('userprofile.view_user')
def customer_list(request):
customers = (
User.objects
.prefetch_related('orders', 'addresses')
.select_related('default_billing_address', 'default_shipping_address')
.annotate(
num_orders=Count('orders', distinct=True),
last_order=Max('orders', distinct=True))
.order_by('email'))
customer_filter = CustomerFilter(request.GET, queryset=customers)
customers = get_paginator_items(
customer_filter.qs, settings.DASHBOARD_PAGINATE_BY,
request.GET.get('page'))
ctx = {'customers': customers, 'filter': customer_filter}
return TemplateResponse(request, 'dashboard/customer/list.html', ctx)
@staff_member_required
@permission_required('userprofile.view_user')
def customer_details(request, pk):
queryset = User.objects.prefetch_related(
'orders', 'addresses').select_related(
'default_billing_address', 'default_shipping_address')
customer = get_object_or_404(queryset, pk=pk)
customer_orders = customer.orders.all()
ctx = {'customer': customer, 'customer_orders': customer_orders}
return TemplateResponse(request, 'dashboard/customer/detail.html', ctx)
@staff_member_required
@permission_required('userprofile.edit_staff')
@permission_required('userprofile.edit_user')
def customer_promote_to_staff(request, pk):
customer = get_object_or_404(User, pk=pk)
if request.method == 'POST':
customer.is_staff = True
customer.save()
msg = pgettext_lazy(
'Dashboard message',
'Customer %s promoted to staff member') % customer
messages.success(request, msg)
return redirect('dashboard:customer-details', pk=customer.pk)
return TemplateResponse(
request, 'dashboard/customer/modal/confirm_promote.html',
{'customer': customer})
```
#### File: search/backends/postgresql.py
```python
from __future__ import unicode_literals
from . import postgresql_dashboard, postgresql_storefront
def search_storefront(phrase):
return postgresql_storefront.search(phrase)
def search_dashboard(phrase):
return postgresql_dashboard.search(phrase)
```
#### File: saleor/search/forms.py
```python
from __future__ import unicode_literals
from django import forms
from django.utils.translation import pgettext
from .backends import picker
class SearchForm(forms.Form):
q = forms.CharField(
label=pgettext('Search form label', 'Query'), required=True)
def search(self):
search = picker.pick_backend()
return search(self.cleaned_data['q'])
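# Illustrative usage sketch (added; not part of the original module): the form
# simply delegates to whichever search backend the picker selects.
#
#   form = SearchForm(data={'q': 'shoes'})
#   if form.is_valid():
#       results = form.search()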
```
#### File: tests/dashboard/test_customer.py
```python
from __future__ import unicode_literals
from django.urls import reverse
from saleor.userprofile.models import User
def test_customer_promote_to_staff_without_POST(admin_client, customer_user):
assert User.objects.filter(is_staff=True).count() == 1
url = reverse(
'dashboard:customer-promote', kwargs={'pk': customer_user.pk})
admin_client.get(url)
assert User.objects.filter(is_staff=True).count() == 1
def test_customer_promote_to_staff(admin_client, customer_user):
assert User.objects.filter(is_staff=True).count() == 1
url = reverse(
'dashboard:customer-promote', kwargs={'pk': customer_user.pk})
data = {'pk': customer_user.pk}
response = admin_client.post(url, data)
assert User.objects.filter(is_staff=True).count() == 2
assert response['Location'] == reverse('dashboard:customer-details',
kwargs={'pk': customer_user.pk})
```
#### File: saleor/tests/test_asynchronous_tasks.py
```python
from __future__ import unicode_literals
import celery
import pytest
import mock
from saleor.order.emails import (send_order_confirmation,
send_payment_confirmation)
@celery.shared_task
def dummy_task(x):
return x+1
@pytest.mark.integration
def test_task_running_asynchronously_on_worker(celery_worker):
assert dummy_task.delay(42).get(timeout=10) == 43
@pytest.mark.django_db
@pytest.mark.integration
@mock.patch('saleor.order.emails.send_templated_mail')
def test_email_sending_asynchronously(email_send, transactional_db, celery_app,
celery_worker):
order = send_order_confirmation.delay('<EMAIL>', '/nowhere/to/go')
payment = send_payment_confirmation.delay('<EMAIL>', '/nowhere/')
order.get()
payment.get()
```
|
{
"source": "jdrumgoole/atlascli",
"score": 2
}
|
#### File: atlascli/test/test_config.py
```python
import shutil
import unittest
import os
from atlascli.atlasapi import AtlasAPI
from atlascli.config import Config
class TestConfig(unittest.TestCase):
def setUp(self):
if Config.PUBLIC_KEY_ENV in os.environ:
del os.environ[Config.PUBLIC_KEY_ENV]
if Config.PRIVATE_KEY_ENV in os.environ:
del os.environ[Config.PRIVATE_KEY_ENV]
def test_config_filename(self):
cfg = Config()
self.assertEqual(cfg.filename, cfg.default_config_filename)
cfg.save_config()
os.unlink(cfg.filename)
cfg = Config(filename="test.cfg")
self.assertEqual(cfg.filename, "test.cfg")
cfg.save_config()
os.unlink("test.cfg")
def test_load_config(self):
org="tester"
cfg = Config()
cfg.load(private_key=None, public_key=None, filename="atlascli.cfg.test")
self.assertTrue(cfg.filename == "atlascli.cfg.test")
cfg.save_config_file_keys("public_xxx", "private_xxx", org=org)
new_cfg = Config(filename="atlascli.cfg.test")
self.assertEqual(cfg.get_config_file_keys(org=org), new_cfg.get_config_file_keys(org=org))
new_cfg.save_config_file_keys("public_yyy", "private_yyy", org="new org")
cfg = Config()
cfg.load(private_key=None, public_key=None, filename="atlascli.cfg.test")
self.assertEqual(cfg.get_config_file_keys(org=org), new_cfg.get_config_file_keys(org=org))
self.assertEqual(cfg.get_config_file_keys(org="new org"), new_cfg.get_config_file_keys(org="new org"))
os.unlink("atlascli.cfg.test")
#print(cfg)
def test_command_line(self):
os.environ[Config.PUBLIC_KEY_ENV] = "PUB"
os.environ[Config.PRIVATE_KEY_ENV] = "PRI"
cfg = Config()
cfg.load_from_args(public_key="boojum", private_key="taco")
self.assertEqual(cfg.get_public_key(), "boojum")
self.assertEqual(cfg.get_private_key(), "taco")
def test_env(self):
os.environ[Config.PUBLIC_KEY_ENV] = "PUB"
os.environ[Config.PRIVATE_KEY_ENV] = "PRI"
cfg = Config()
cfg.load_from_env()
self.assertEqual(cfg.get_public_key(), "PUB")
self.assertEqual(cfg.get_private_key(), "PRI")
cfg = Config()
cfg.load_from_args(public_key="boojum", private_key="taco")
self.assertEqual(cfg.get_public_key(), "boojum")
self.assertEqual(cfg.get_private_key(), "taco")
cfg = Config()
cfg.load_from_file(input_file="test_atlascli.cfg")
self.assertEqual(cfg.get_public_key(), "bogus")
self.assertEqual(cfg.get_private_key(), "20d81ed9-f22c-47d4-a97b-FFFFFFFFFFFF")
def test_cfg(self):
cfg = Config()
cfg.load_from_file(input_file="test_atlascli.cfg")
self.assertEqual(cfg.get_public_key(), "bogus")
self.assertEqual(cfg.get_private_key(), "20d81ed9-f22c-47d4-a97b-FFFFFFFFFFFF")
shutil.copyfile("test_atlascli.cfg", "atlascli.cfg")
cfg = Config()
cfg.load_from_file()
self.assertEqual(cfg.get_public_key(), "bogus")
self.assertEqual(cfg.get_private_key(), "20d81ed9-f22c-47d4-a97b-FFFFFFFFFFFF")
self.assertEqual(cfg.get_public_key(org="Open Data at MongoDB"), "Zombie")
os.unlink("atlascli.cfg")
def test_bad_cfg_file(self):
with self.assertRaises(ValueError):
_ = Config().load_from_file(AtlasAPI.random_name())
def test_key(self):
org = "tester"
cfg = Config(filename="atlascli.cfg.test")
with self.assertRaises(ValueError):
cfg.get_public_key("dumbo")
with self.assertRaises(ValueError):
cfg.get_private_key("dumbo")
with self.assertRaises(ValueError):
_,_ = cfg.get_config_file_keys("wrong org")
def test_org(self):
org = "Open Data at MongoDB"
cfg = Config(default_org=org)
cfg.load_from_file(input_file="test_atlascli.cfg")
self.assertTrue(cfg.get_public_key(org).startswith("Zo"))
public_key, private_key = cfg.get_config_file_keys(org)
self.assertTrue(public_key.startswith("Zo"))
self.assertTrue(private_key.startswith("9f3"))
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jdrumgoole/gdelttools",
"score": 3
}
|
#### File: gdelttools/gdelttools/gdeltfile.py
```python
import hashlib
import os
import sys
import zipfile
from enum import Enum
from typing import List
from requests import exceptions
#from gdelttools.mongoimport import MongoImport
from gdelttools.web import WebDownload
def compute_md5(file):
hasher = hashlib.md5()
with open(file, 'rb') as input:
buf = input.read()
hasher.update(buf)
return hasher.hexdigest()
class GDELTChecksumError(ValueError):
pass
class GDELTZipError(ValueError):
pass
class GDELTFilter(Enum):
all = "all"
gkg = "gkg"
mentions = "mentions"
export = "export"
def __str__(self):
return self.value
class GDELTFile:
TEST_FILES = """150383 297a16b493de7cf6ca809a7cc31d0b93 http://data.gdeltproject.org/gdeltv2/20150218230000.export.CSV.zip
318084 bb27f78ba45f69a17ea6ed7755e9f8ff http://data.gdeltproject.org/gdeltv2/20150218230000.mentions.CSV.zip
10768507 ea8dde0beb0ba98810a92db068c0ce99 http://data.gdeltproject.org/gdeltv2/20150218230000.gkg.csv.zip
"""
def __init__(self, url:str, size:int, md5:str, gfilter: GDELTFilter = GDELTFilter.all):
self._url = url
self._size = size
self._md5 = md5
filename_with_args = url.split('/')[-1]
self._zip_filename = filename_with_args.split('?')[0]
self._csv_filename = os.path.splitext(self._zip_filename)[0]
self._wd = WebDownload()
self._filter = gfilter
@property
def filter(self):
return self._filter
@filter.setter
def filter(self, rhs:GDELTFilter):
self._filter = rhs
@property
def url(self):
return self._url
@property
def size(self):
return self._size
@property
def md5(self):
return self._md5
@property
def zip_filename(self):
return self._zip_filename
@property
def csv_filename(self):
return self._csv_filename
def download_file(self):
print(f"{self.url} --> ", end="")
self._zip_filename = self._wd.download_url(self.url)
if not self.is_valid_checksum():
raise GDELTChecksumError(self.url)
print(f"{self.zip_filename}")
return self.zip_filename
def is_valid_checksum(self):
computed_md5 = compute_md5(self._zip_filename)
return computed_md5 == self.md5
def process_zip_file(self, overwrite: bool = True):
#
# If overwrite is set, always download and extract.
# Otherwise reuse an existing CSV or zip file if present.
if overwrite:
self._zip_filename = self.download_file()
self._csv_filename = self.extract_csv_file()
return self.csv_filename
elif os.path.exists(self.csv_filename):
print(f"{self.csv_filename} exists")
elif os.path.exists(self.zip_filename):
print(f"{self.zip_filename} exists")
csv_filename = self.extract_csv_file()
else:
self._zip_filename = self.download_file()
self._csv_filename = self.extract_csv_file()
return self._csv_filename
@staticmethod
def unzip(filename: str):
zfilename = None
with zipfile.ZipFile(filename) as archive:
if len(archive.namelist()) > 1:
raise GDELTZipError(f"More than one file in archive: {filename}")
for zfilename in archive.namelist():
text = archive.read(zfilename) # .decode(encoding="utf-8")
print(f"extracting: '{zfilename}'")
with open(zfilename, "wb") as output_file:
output_file.write(text)
return zfilename
def extract_csv_file(self):
self._csv_filename = GDELTFile.unzip(self.zip_filename)
return self._csv_filename
@classmethod
def get_input_files(cls, f: str, last : int):
lines = []
with open(f, "r") as input_file:
for input_line in input_file:
lines.append(input_line)
if last > 0:
section = len(lines) - (last * 3) # three files per set, gkg, export, mentions
lines = lines[section:] # slice the last days
return lines
def download_gdelt_files(file_list: List[str], last=None, filter:GDELTFilter=GDELTFilter.all, overwrite=False):
if last is None or last < 0:
last = 0
csv_files: List[str] = []
for f in file_list:
lines = GDELTFile.get_input_files(f, last)
# with open(f, "r") as input_file:
# for input_line in input_file:
# lines.append(input_line)
#
# if last > 0:
# section = len(lines) - (last * 3) # three files per set, gkg, exports, mentions
# lines = lines[section:] # slice the last days
#importer = MongoImport()
for l in lines:
try:
size, md5, zipurl = l.split()
gdelt_file = GDELTFile(zipurl, int(size), md5, filter)
# print(f"{size}:{sha}:{zip}")
if (gdelt_file.filter.value in zipurl) or (gdelt_file.filter == GDELTFilter.all):
f = gdelt_file.process_zip_file(overwrite)
#importer.command(f)
csv_files.append(f)
except zipfile.BadZipfile as e:
print(gdelt_file.zip_filename)
print(e)
sys.exit(1)
except exceptions.HTTPError as e:
print(f"Error for {zipurl}")
print(e)
sys.exit(1)
except GDELTChecksumError as e:
print(f"'{gdelt_file.md5}' checksum for {gdelt_file.url} doesn't match\n"
f" checksum for {gdelt_file.zip_filename}")
sys.exit(1)
return csv_files
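# Illustrative usage sketch (added; not part of the original module). It
# assumes 'masterfilelist.txt' is a GDELT master file list where each line
# holds size, md5 and URL, as expected by GDELTFile.get_input_files:
#
#   csv_files = download_gdelt_files(["masterfilelist.txt"], last=1,
#                                    filter=GDELTFilter.export, overwrite=False)
#   print(csv_files)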
```
|
{
"source": "jdrumgoole/mongodb-airline-app",
"score": 3
}
|
#### File: mongodb-airline-app/airline/seatlog.py
```python
from airline.eventlog import EventLog
from airline.seat import Seat
from airline.person import Person
from enum import Enum
class SeatLog(EventLog):
"""
Collection of all the seats allocated and unallocated on a set of planes
"""
class Seat_Type(Enum):
Free = 0
Booked = 1
def __init__(self, database, collection_name="seat_log"):
super().__init__(database, collection_name)
def add_flight(self, flight ):
return self.insert_many(flight.dict())
def allocate_seat(self, seat, person: Person) -> object:
"""
:rtype: object
"""
seat.allocate(person)
self.insert_one( seat.dict())
return seat
def find_seats(self, flight_no, seat_type = Seat_Type.Free, limit=0):
"""
:type seat_type: Enum
"""
if seat_type is seat_type.Free:
person_query = { "$eq" : None}
else:
person_query = { "$ne" : None }
cursor = self._collection.find( {"flight_no": flight_no,
"person" : person_query }).limit( limit )
for i in cursor:
yield Seat.make_seat(i)
def deallocate_seat(self, seat):
seat.deallocate()
self.insert_one(seat.dict())
return seat
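# Illustrative usage sketch (added; not part of the original module). It uses
# Flight and Person from the sibling airline modules (see the tests below),
# a hypothetical local MongoDB instance, and made-up flight and person data:
#
#   import pymongo
#   from airline.flight import Flight
#   from airline.person import Person
#
#   db = pymongo.MongoClient()["airline"]
#   log = SeatLog(db)
#   log.add_flight(Flight("EI100", 10, Flight.six_seats))
#   seat = next(log.find_seats("EI100", SeatLog.Seat_Type.Free, limit=1))
#   log.allocate_seat(seat, Person("Joe", "Bloggs", "1-May-2000"))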
```
#### File: mongodb-airline-app/test/test_seatlog.py
```python
import unittest
import pymongo
from airline.flight import Flight
from airline.seatlog import SeatLog
from airline.person import Person
class TestSeatLog(unittest.TestCase):
def setUp(self):
self._client = pymongo.MongoClient()
self._database = self._client[ "TEST_SEATLOG"]
self._log = SeatLog(self._database)
self._log.initalise()
def tearDown(self):
pass
def test_flight(self):
f = Flight( "EI172", 50, Flight.six_seats)
self._log.add_flight(f)
seats = list( self._log.find_seats( "EI172", SeatLog.Seat_Type.Free))
self.assertEqual(len(seats), 50 * len( Flight.six_seats))
self._log.allocate_seat( seats[0], Person( "Joe", "Drumgoole", "1-May-2000"))
if __name__ == "__main__" :
unittest.main()
```
|
{
"source": "jdrumgoole/mongodb_formatter",
"score": 3
}
|
#### File: mongodb_formatter/pymongo_formatter/formatter.py
```python
import pymongo
import sys
import contextlib
from datetime import datetime
import pprint
import csv
from mongodb_formatter.nested_dict import Nested_Dict
class Transform(object):
def __init__(self):
pass
def __call__(self, doc):
return doc
class Date_Transform(Transform):
def __init__(self, field, tformat=None):
super().__init__()
self._field = field
if tformat is None:
self._tformat = "%d-%b-%Y %H:%M"
else:
self._tformat = tformat
def __call__(self, doc):
d = Nested_Dict(doc)
if d.has_key(self._field):
value = d.get_value(self._field)
if isinstance(value, datetime):
d.set_value(self._field, value.strftime(self._tformat))
else:
d.set_value(self._field, datetime.fromtimestamp(value / 1000))
return d.dict_value()
class Cursor_Processor(object):
def __init__(self, cursor):
self._cursor = cursor
self._xform_list = []
def add_transform(self, xform:Transform):
self._xform_list.append(xform)
def transform(self, doc):
for i in self._xform_list:
doc = i(doc)
return doc
def process(self):
for i in self._cursor:
yield self.transform(i)
class Doc_Formatter(object):
@staticmethod
def date_map_field(doc, field, time_format=None):
'''
Given a field that contains a datetime we want it to be output as a string otherwise
pprint and other functions will abandon ship when they meet BSON time objects
'''
if time_format is None:
time_format = "%d-%b-%Y %H:%M"
d = Nested_Dict(doc)
if d.has_key(field):
value = d.get_value(field)
if isinstance(value, datetime):
d.set_value(field, value.strftime(time_format))
else:
d.set_value(field, datetime.fromtimestamp(value / 1000))
return d.dict_value()
@staticmethod
def select_fields(doc, field_list):
'''
Take 'doc' and create a new doc using only keys from the 'fields' list.
Supports referencing fields using dotted notation "a.b.c" so we can parse
nested fields the way MongoDB does. The nested field class is a hack. It should
be a sub-class of dict.
'''
if field_list is None or len(field_list) == 0:
return doc
newDoc = Nested_Dict({})
oldDoc = Nested_Dict(doc)
for i in field_list:
if oldDoc.has_key(i):
# print( "doc: %s" % doc )
# print( "i: %s" %i )
newDoc.set_value(i, oldDoc.get_value(i))
return newDoc.dict_value()
@staticmethod
def date_map(doc, datemap_list, time_format=None):
'''
For all the datetime fields in "datemap" find that key in doc and map the datetime object to
a strftime string. This way pprint and others will print out readable datetimes.
'''
if datemap_list:
for i in datemap_list:
doc = Doc_Formatter.date_map_field(doc, i, time_format=time_format)
return doc
def format(self,doc):
new_doc = Doc_Formatter.select_fields( doc, self._select_fields)
return Doc_Formatter.date_map( new_doc, self._date_fields)
def __init__(self, doc, select_fields, date_fields):
self._select_fields = select_fields
self._date_fields = date_fields
self._doc = doc
def __call__(self):
return self.format( self._doc)
class CursorFormatter(object):
'''
Format and output the documents of a cursor. If filename is given, write the output to that file, otherwise write to stdout.
'''
def __init__(self, cursor, filename="", formatter="json"):
self._filename = filename
self._formatter = formatter
self._cursor = cursor
self._results = []
def results(self):
return self._results
@contextlib.contextmanager
def _smart_open(self, filename=None):
if filename and filename != '-':
fh = open(filename, 'w')
else:
fh = sys.stdout
try:
yield fh
finally:
if fh is not sys.stdout:
fh.close()
def mapper(self, doc, field_map, date_map, time_format=None):
return Doc_Formatter.date_map(
Doc_Formatter.select_fields(doc, field_map), date_map, time_format)
def printCSVCursor(self, fieldnames, datemap, time_format=None):
'''
Output CSV format. items are separated by commas. We only output the fields listed
in the 'fieldnames'. We datemap fields listed in 'datemap'. If a datemap listed field
is not a datetime object we will throw an exception.
'''
with self._smart_open(self._filename) as output:
writer = csv.DictWriter(output, fieldnames=fieldnames)
writer.writeheader()
count = 0
for i in self._cursor:
self._results.append(i)
count = count + 1
d = Doc_Formatter.select_fields(i, fieldnames)
d = Doc_Formatter.date_map(d, datemap, time_format)
# x = {}
# for k, v in d.items():
#
# if type(v) is unicode:
# x[k] = v
# else:
# x[k] = str(v).encode('utf8')
# writer.writerow({k: v.encode('utf8') for k, v in x.items()})
writer.writerow(d)
return count
def printJSONCursor(self, fieldnames, datemap, time_format=None):
"""
Output plain JSON objects.
:param fieldnames: fieldnames to include in output
:param datemap: fieldnames to map dates to date strings
:param time_format: field names to map to a specific time format
:return:
"""
count = 0
with self._smart_open(self._filename) as output:
for i in self._cursor:
# print( "processing: %s" % i )
# print( "fieldnames: %s" % fieldnames )
self._results.append(i)
d = Doc_Formatter.select_fields(i, fieldnames)
# print( "processing select_fields: %s" % d )
d = Doc_Formatter.date_map(d, datemap, time_format)
pprint.pprint(d, output)
count = count + 1
return count
def printCursor(self, fieldnames=None, datemap=None, time_format=None):
'''
Output a cursor to a filename or stdout if filename is "-".
fmt defines whether we output CSV or JSON.
'''
if self._format == 'csv':
count = self.printCSVCursor(fieldnames, datemap, time_format)
else:
count = self.printJSONCursor( fieldnames, datemap, time_format)
return count
def output(self, fieldNames=None, datemap=None, time_format=None):
'''
        Output all fields named in the fieldNames list. Fields listed in datemap must hold
        datetime values and are mapped to formatted date strings.
'''
        count = self.printCursor(fieldNames, datemap, time_format)
# print( "Wrote %i records" % count )
```
|
{
"source": "jdrumgoole/mongodb_random_data_generator",
"score": 3
}
|
#### File: mongodb_random_data_generator/mongodbrdg/randomdata.py
```python
from datetime import timedelta
from mimesis import Generic
from mimesis.enums import Gender
import random
from datetime import datetime
class User:
interests = ["Soccer", "Golf", "Football", "Stamp Collecting", "skydiving",
"Board gaming", "Darts", "Swimmming", "Triathlon", "Running",
"Reading", "politics"]
def __init__(self, locale: str = "en",
user_id_start:int=0,
user_id_end:int=1000,
max_friends:int = 0,
start_year:int = None,
end_year:int = None,
seed: int = None) -> object:
self._locale = locale
self._seed = seed
if self._seed:
self._generic = Generic(self._locale, self._seed)
else:
self._generic = Generic(self._locale)
self._user_id_start = user_id_start
self._user_id_end = user_id_end
self._max_friends = max_friends
assert self._user_id_start < self._user_id_end
if start_year is None:
self._start_year = 2015
else:
self._start_year = start_year
if end_year is None:
self._end_year = 2019
else:
self._end_year = end_year
@property
def user_id_start(self):
return self._user_id_start
@property
def user_id_end(self):
return self._user_id_end
@property
def size(self):
return self._user_id_end - self._user_id_start
@property
def start_year(self):
return self._start_year
@property
def end_year(self):
return self._end_year
def make_friends(self):
friends:set=set()
for i in range(random.randint(0, self._max_friends)):
friend = random.randint( self._user_id_start, self._user_id_end)
friends.add(friend)
return list(friends)
def make_one_user(self, user_id:int=0):
person = self._generic.person
address = self._generic.address
business = self._generic.business
internet = self._generic.internet
datetime = self._generic.datetime
user = {}
gender = random.choice(list(Gender))
gender_string = str(gender).split(".")[1]
user["first_name"] = person.name(gender)
user["last_name"] = person.surname(gender)
user["gender"] = gender_string
user["company"] = business.company()
email_domain = "".join(user['company'].lower().split(" "))
user["email"] = f"{user['first_name']}.{user['last_name']}@{email_domain}{internet.top_level_domain()}"
year = random.randint(2000, 2018)
user["registered"] = datetime.datetime(start=self._start_year, end=self._end_year)
user["user_id"] = user_id
user["country"]= address.country()
user["city"] = address.city()
user["phone"] = person.telephone()
user["location"] = { "type": "Point", "coordinates" : [address.longitude(), address.latitude()]}
user["language"] = person.language()
if self._max_friends > 0:
user["friends"] = self.make_friends()
sample_size = random.randint(0,5)
user["interests"] = random.sample(User.interests, sample_size)
return user
def make_users(self):
for i in range(self._user_id_start, self._user_id_end):
yield self.make_one_user(i)
class Sessions:
def __init__(self, user:dict, total:int):
self._user_id = user["user_id"]
self._start_time = user["registered"]
self._total = total
@property
def total(self):
return self._total
@property
def total_documents(self):
return self._total * 2
@staticmethod
def future_random_time(now, days=0, hours=0, minutes=0, seconds=0, milliseconds=0, basis=1):
        return now + timedelta(days=random.randint(basis, basis + days),
                               hours=random.randint(basis, basis + hours),
                               minutes=random.randint(basis, basis + minutes),
                               seconds=random.randint(basis, basis + seconds),
                               milliseconds=random.randint(basis, basis + milliseconds))
def make_session(self, start_ts):
login_ts = self.future_random_time( now=start_ts, minutes=180, seconds=59, milliseconds=999)
logout_ts = self.future_random_time(now=login_ts, minutes=180, seconds=59, milliseconds=999)
login_session = {"user_id": self._user_id,
"login": login_ts}
logout_session = {"user_id": self._user_id,
"logout": logout_ts }
return login_session, logout_session
def make_sessions(self):
start = self._start_time
for i in range(self._total):
s1,s2 = self.make_session(start)
start = s2["logout"]
yield s1,s2
```
|
{
"source": "jdrumgoole/mongodbutils",
"score": 3
}
|
#### File: mongodbutils/pymongoshell/mongoclient.py
```python
import pprint
import sys
from functools import wraps
# import pprint
import pymongo
from pymongoshell.pager import Pager, FileNotOpenError
from pymongoshell.version import VERSION
from pymongoshell.errorhandling import handle_exceptions, MongoDBShellError, CollectionNotSetError
if sys.platform.startswith("win"):  # sys.platform is 'win32' on Windows, never 'Windows'
db_name_excluded_chars = r'/\. "$*<>:|?'
else:
db_name_excluded_chars = r'/\. "$'
class HandleResults:
def __init__(self, pager: Pager):
        self._pager = pager  # use the Pager passed in rather than creating a new one
def is_result_type(self, result):
return type(result) in [pymongo.results.InsertOneResult,
pymongo.results.InsertManyResult,
pymongo.results.UpdateResult,
pymongo.results.DeleteResult,
pymongo.results.BulkWriteResult
]
def handle(self, result):
"""
result is a pymongo result Type.
"""
if type(result) is pymongo.results.InsertOneResult:
self.handle_InsertOneResult(result)
elif type(result) is pymongo.results.InsertManyResult:
self.handle_InsertManyResult(result)
elif type(result) is pymongo.results.UpdateResult:
self.handle_UpdateResult(result)
elif type(result) is pymongo.results.DeleteResult:
self.handle_DeleteResult(result)
elif type(result) is pymongo.results.BulkWriteResult:
self.handle_BulkWriteResult(result)
else:
raise TypeError(result)
def handle_InsertOneResult(self, result: pymongo.results.InsertOneResult):
print(f"Inserted: {result.inserted_id}")
def handle_InsertManyResult(self, result: pymongo.results.InsertManyResult):
doc = pprint.pformat(result.inserted_ids)
self._pager.paginate_lines(doc.splitlines())
def handle_UpdateResult(self, result: pymongo.results.UpdateResult):
self._pager.paginate_doc(result.raw_result)
def handle_DeleteResult(self, result: pymongo.results.DeleteResult):
self._pager.paginate_doc(result.raw_result)
def handle_BulkWriteResult(self, result: pymongo.results.BulkWriteResult):
doc = pprint.pformat(result.bulk_api_result)
self._pager.paginate_doc(doc)
class MongoClient:
"""
Simple command line MongoDB proxy for use in the Python shell.
"""
def __init__(self,
banner: str = True,
host: str = "mongodb://localhost:27017",
serverSelectionTimeoutMS: int = 5000,
*args: object,
**kwargs: object) -> object:
"""
        Create a new client object with a default database and
        collection.
        :param banner: print a startup banner if True
        :param host: A properly formatted MongoDB URI
        :param serverSelectionTimeoutMS: server selection timeout in milliseconds
        :param *args, **kwargs: Passed through to pymongo.MongoClient
>>> import pymongoshell
>>> c = pymongoshell.MongoClient()
pymongoshell 1.1.0b7
Server requests set to timeout after 5.0 seconds
>>> c
pymongoshell.MongoClient(banner=True,
database_name='test',
collection_name='test',
host= 'mongodb://localhost:27017')
Access the internal MongoClient object:
>>> c.client
MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True, serverselectiontimeoutms=5000)
>>>
"""
# We use __setattr__ so that we can protect against someone mistakenly typing a field
# wrong and the class just adding that field. For example you might want
# to define a different collection by saying
# >>> c=MongoClient()
# pymongoshell 1.1.0b5
# Using collection 'test.test'
# Server selection timeout set to 5.0 seconds
# >>> c.collection="demo.zipcodes"
# Without protection you might inadvertently say
# >>> c.collections="demo.zipcodes" # note the plural.
# With us trapping that with __setattr__ Python would just
# add another member to the class. The overhead of _setattr__
# protection is we must use the extended form to add members
# in the first place.
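        # A minimal sketch (an assumption, not the original implementation) of the
        # kind of guard described above:
        #
        #     def __setattr__(self, name, value):
        #         if name not in self.__dict__:
        #             raise AttributeError(f"{name} is not a member of MongoClient")
        #         object.__setattr__(self, name, value)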
object.__setattr__(self, "_banner", banner)
object.__setattr__(self, "_mongodb_uri", host)
client = pymongo.MongoClient(host=self._mongodb_uri, serverSelectionTimeoutMS=serverSelectionTimeoutMS, *args,
**kwargs)
object.__setattr__(self, "_client", client)
uri_dict = pymongo.uri_parser.parse_uri(self._mongodb_uri)
object.__setattr__(self, "_uri_dict", uri_dict)
'''
The dict returned by parse_uri.
{
'nodelist': <list of (host, port) tuples>,
'username': <username> or None,
'password': <password> or None,
'database': <database name> or None,
'collection': <collection name> or None,
'options': <dict of MongoDB URI options>,
'fqdn': <fqdn of the MongoDB+SRV URI> or None
}
'''
object.__setattr__(self, "_username", self._uri_dict['username'])
object.__setattr__(self, "_password", self._uri_dict['password'])
object.__setattr__(self, "_database_name", self._uri_dict['database'])
object.__setattr__(self, "_collection_name", self._uri_dict['collection'])
object.__setattr__(self, "_options", self._uri_dict['options'])
if "fqdn" in self._uri_dict: # older versions of PyMongo don't support fqdn.
object.__setattr__(self, "_fqdn", self._uri_dict['fqdn'])
else:
object.__setattr__(self, "_fqdn", None)
# # if we don't parse a database out of the URL lets see if we got one
# # from the __init__ parameters.
# if self._database_name is None:
# self._database_name = database_name
# if self._collection_name is None:
# self._collection_name = collection_name
object.__setattr__(self, "_collection", None)
object.__setattr__(self, "_database", None)
object.__setattr__(self, "_result", None)
if self._database_name:
object.__setattr__(self, "_database", self._client[self._database_name])
else:
self._database_name = "test"
self._database = self._client[self._database_name]
if self._collection_name:
self._set_collection(self._collection_name)
else:
self._set_collection("test")
#
# self._collection = self._database[self._collection_name]
object.__setattr__(self, "_output_filename", None)
object.__setattr__(self, "_output_file", None)
object.__setattr__(self, "_line_numbers", True)
object.__setattr__(self, "_paginate", True)
object.__setattr__(self, "_pretty_print", True)
object.__setattr__(self, "_pager", Pager(line_numbers=self._line_numbers,
pretty_print=self._pretty_print,
paginate=self._paginate))
object.__setattr__(self, "_handle_result", HandleResults(self._pager))
object.__setattr__(self, "_overlap", 0)
if self._banner:
self.shell_version()
if not self._collection_name:
print(f"Please set a default collection by assigning one to .collection")
print(f"Server requests set to timeout after {serverSelectionTimeoutMS / 1000} seconds")
@staticmethod
def shell_version():
print(f"pymongoshell {VERSION}")
@staticmethod
def valid_mongodb_name(name):
"""
Check that the name for a database has no illegal
characters
:param name: the name of the database
        :return: the name if it is valid, otherwise None
"""
for char in db_name_excluded_chars:
if char in name:
return None
return name
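    # e.g. MongoClient.valid_mongodb_name("zipcodes") returns "zipcodes", while
    # MongoClient.valid_mongodb_name("bad$name") returns None because '$' is excluded.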
@property
def client(self):
"""
:return: the MongoDBClient object
"""
return self._client
@property
def uri(self):
"""
:return: The URI used to create the Proxy object
"""
return self._mongodb_uri
@property
def database(self):
"""
Assign to this property to set the current default database.
:return: Return the default database object associated with the Proxy
"""
return self._database
@database.setter
def database(self, database_name):
"""
Set the default database for this Proxy object.
:param database_name: A string naming the database
"""
if database_name and MongoClient.valid_mongodb_name(database_name):
self._database = self.client[database_name]
else:
raise MongoDBShellError(f"'{database_name}' is not a valid database name")
@property
def database_name(self):
"""
:return: The name of the default database
"""
return self._database_name
def parse_full_name(self, name):
"""
Take in a name in potential x.y format. Validate that the components
are valid database and/or collection names
:param name: A collection name in bare format or db_name.col_name format
:return: database_name, collection_name
"""
if "." in name:
collection_name: str
database_name: str
database_name, _, collection_name = name.partition(".")
if self.valid_mongodb_name(database_name):
self._database_name = database_name
if self.valid_mongodb_name(collection_name):
self._collection_name = collection_name
return self._database_name, self._collection_name
else:
raise MongoDBShellError(f"'{collection_name}' is not a valid collection name")
else:
raise MongoDBShellError(f"'{database_name}' is not a valid database name")
else:
if self.valid_mongodb_name(name):
self._collection_name = name
return self._database_name, self._collection_name
else:
raise MongoDBShellError(f"'{name}' is not a valid collection name")
# @handle_exceptions("_set_collection")
def _set_collection(self, name: str):
'''
Set a collection name. The name parameter can be a bare
collection name or it can specify a database in the format
"<database_name>.<collection_name>".
:param name: The collection name
:return: The mongodb collection object
'''
self._database_name, self._collection_name = self.parse_full_name(name)
self._database = self._client[self._database_name]
self._collection = self._database[self._collection_name]
return self._collection
@property
def collection(self):
"""
Assign to `collection` to reset the current default collection.
Return the default collection object associated with the `MongoDB` object.
"""
if self._collection:
return self._collection
else:
return None
@property
def collection_name(self):
"""
:return: The name of the default collection
"""
if self._database_name is None:
return ""
elif self._collection_name is None:
return f"{self._database_name}"
else:
return f"{self._database_name}.{self._collection_name}"
@collection.setter
@handle_exceptions("collection.setter")
def collection(self, db_collection_name):
"""
Set the default collection for the database associated with the `MongoDB`
object. The user can specify a database and a collection by using a dot
notation <database_name.collection_name>.
:param db_collection_name: the name of the database and collection
"""
self._set_collection(db_collection_name)
print(f"Now using collection '{self.collection_name}'")
        if not self._database.list_collection_names():
            print(f"Info: You have specified an empty database '{self._database_name}'")
return self
@handle_exceptions("is_master")
def is_master(self):
"""
Run the pymongo is_master command for the current server.
:return: the is_master result doc.
"""
return self._pager.paginate_doc(self._database.command("ismaster"))
def count_documents(self, filter=None, *args, **kwargs):
filter_arg = filter or {}
        return self.collection.count_documents(filter=filter_arg, *args, **kwargs)
def rename(self, new_name, **kwargs):
if not self.valid_mongodb_name(new_name):
print(f"{new_name} cannot be used as a collection name")
return None
old_name = self._collection.name
db_name = self._collection.database.name
self._collection.rename(new_name, **kwargs)
print(f"renamed collection '{db_name}.{old_name}' to '{db_name}.{new_name}'")
def command(self, cmd):
result = self._database.command(cmd)
self._pager.paginate_doc(result)
def list_database_names(self):
"""
List all the databases on the default server.
"""
self._pager.paginate_lines(self.client.list_database_names())
def dbstats(self):
"""
Run dbstats command for database
See https://docs.mongodb.com/manual/reference/method/db.stats/
"""
pprint.pprint(self.database.command("dbstats"))
def coll_stats(self, scale=1024, verbose=False):
"""
Run collection stats for collection.
see https://docs.mongodb.com/manual/reference/command/collStats/
:param scale: Scale at which to report sizes
:param verbose: used for extended report on legacy MMAPV1 storage engine
:return: JSON doc with stats
"""
if self._collection_name in self.database.list_collection_names():
stats = self.database.command({"collStats": self._collection_name,
"scale": scale,
"verbose": verbose})
self._pager.paginate_doc(stats)
else:
print(f"'{self.collection_name}'is not a valid collection")
def _get_collections(self, db_names=None):
"""
Internal function to return all the collections for every database.
include a list of db_names to filter the list of collections.
"""
if db_names:
db_list = db_names
else:
db_list = self.client.list_database_names()
for db_name in db_list:
db = self.client.get_database(db_name)
for col_name in db.list_collection_names():
yield f"{db_name}.{col_name}"
def list_collection_names(self, database_name=None):
if database_name:
self._pager.paginate_lines(self._get_collections([database_name]))
else:
self._pager.paginate_lines(self._get_collections())
@property
def lcols(self):
"""
Shorthand for list_collection_names
"""
self.list_collection_names()
@property
def ldbs(self):
"""
Shorthand for list_database_names()
"""
self.list_database_names()
@property
def version(self):
return f"{self.__module__.__name__}1.2.1b5"
@staticmethod
def confirm_yes(message):
"""
Return true if user confirms yes. A correct response
is 'y' or 'Y'. All other chars will return false.
:param message: A string
:return: bool.
"""
response = input(f"{message} [y/Y]: ")
        return response.upper() == "Y"
# def command(self, *args, **kwargs, ):
# try:
# self._pager.paginate_doc(self.database.command(*args, **kwargs))
# except OperationFailure as e:
# print(f"Error: {e}")
# def create_index(self, name):
# name = self._collection.create_index(name)
# print(f"Created index: '{name}'")
@handle_exceptions("drop_collections")
def drop_collection(self, confirm=True):
if confirm and self.confirm_yes(f"Drop collection: '{self._database_name}.{self._collection_name}'"):
return self._collection.drop()
else:
return self._collection.drop()
def drop_database(self, confirm=True):
if confirm and self.confirm_yes(f"Drop database: '{self._database_name}'"):
result = self._client.drop_database(self.database)
else:
result = self._client.drop_database(self.database)
print(f"dropped database: '{self._database_name}'")
@property
def overlap(self):
"""
        Get and set the pager overlap value
        :return: `overlap`
"""
return self._overlap
@overlap.setter
def overlap(self, value):
self._overlap = value
@property
def line_numbers(self):
"""
Get and set the line_numbers boolean
:return: `line_numbers` (True|False)
"""
return self._line_numbers
@line_numbers.setter
def line_numbers(self, state):
self._pager.line_numbers = state
self._line_numbers = state
@property
def pretty_print(self):
"""
Get and set the pretty print boolean
:return: `pretty_print` (True|False)
"""
return self._pager.pretty_print
@pretty_print.setter
def pretty_print(self, state):
self._pager.pretty_print = state
@property
def paginate(self):
return self._pager.paginate
@paginate.setter
def paginate(self, state):
"""
:param state: True, turn on pagination
:return:
"""
self._pager.paginate = state
@property
def output_file(self):
"""
:return: The name of the output file
"""
return self._pager.output_file
@output_file.setter
def output_file(self, filename):
"""
:param filename: file to output `pager` output to.
:return:
"""
self._pager.output_file = filename
def write_file(self, s):
try:
self._pager.write_file(s)
except FileNotOpenError:
print("before writing create a file by assigning a name to 'output_file' e.g.")
print(">> x=MongoClient()")
print(">> x.output_file='operations.log'")
print(">> x.write('hello')")
def __str__(self):
if self._client:
client_str = f"'{self.uri}'"
else:
client_str = "No client created"
if self._database_name:
db_str = f"'{self.database_name}'"
else:
db_str = "no database set"
if self._collection_name:
col_str = f"'{self.collection_name}'"
else:
col_str = "no client set"
return f"client : {client_str}\n" + \
f"db : {db_str}\n" + \
f"collection : {col_str}"
def __repr__(self):
return f"pymongoshell.MongoClient(banner={self._banner},\n" \
f" database_name='{self._database_name}',\n" \
f" collection_name='{self._collection_name}',\n" \
f" host= '{self._mongodb_uri}')"
@staticmethod
def has_attr(col, name):
'''
Can't use the built in hasattr to check if name is a member
of a collection object as built hasattr uses getattr and in
the Collection object getattr is overwritten to allow collections
to be generated by specifying col.<name>.
:param col: a pymongo.Collection object
:param name: a candidate name
:return: the specified object or None
'''
if name in dir(col):
return getattr(col, name)
else:
return None
def process_result(self, result):
if result is None:
print("None")
elif type(result) in [pymongo.command_cursor.CommandCursor, pymongo.cursor.Cursor]:
self._pager.print_cursor(result)
elif self._handle_result.is_result_type(result):
self._handle_result.handle(result)
elif type(result) is dict:
self._pager.paginate_doc(result)
elif type(result) is list:
self._pager.paginate_list(result)
else:
print(result)
self._result = result
@property
def result(self):
return self._result
def interceptor(self, func):
assert callable(func)
@handle_exceptions(func.__name__)
@wraps(func)
def inner_func(*args, **kwargs):
# print(f"{func.__name__}({args}, {kwargs})")
result = func(*args, **kwargs)
self.process_result(result)
# print(f"inner_func.__name__ : {inner_func.__name__}")
return inner_func
def __getattr__(self, item):
if self._collection is None:
return self._set_collection(item)
else:
db_name, col_name = self.parse_full_name(item)
# print(f"item:{item}")
# print(f"col_name:{col_name}")
func = self.has_attr(self._collection, col_name)
if callable(func):
return self.interceptor(func)
else:
self._collection = self._set_collection(item)
return self
def __del__(self):
self._pager.close()
def __getitem__(self, name):
self._set_collection(name)
return self
def __call__(self, *args, **kwargs):
"""This is only here so that some API misusages are easier to debug.
"""
raise TypeError("'Collection' object is not callable. If you meant to "
"call the '%s' method on a 'Collection' object it is "
"failing because no such method exists." %
self._collection_name)
if __name__ == "__main__":
import doctest
doctest.testmod()
oc = pymongo.MongoClient()
db = oc["dbtest"]
col = db["coltest"]
c = MongoClient()
c.collection = "test.test"
c.is_master()
d1 = {"name": "Heracles"}
d2 = {"name": "Orpheus"}
d3 = {"name": "Jason"}
d4 = {"name": "Odysseus"}
d5 = {"name": "Achilles"}
d6 = {"name": "Menelaeus"}
c.insert_one(d1)
c.insert_many([d2, d3, d4, d5])
c.drop_collection(confirm=False)
p1 = {"name": "<NAME>",
"social": ["twitter", "instagram", "linkedin"],
"mobile": "+353 87xxxxxxx",
"email": "<EMAIL>"}
p2 = {"name": "<NAME>",
"social": ["twitter", "linkedin"],
"mobile": "+1 12345678",
"email": "<EMAIL>"}
p3 = {"name": "<NAME>",
"social": ["instagram"],
"mobile": "+1 67891011",
"email": "<EMAIL>"}
c.insert_many([p1, p2, p3])
c.count_documents()
c.count_documents({"name": "<NAME>"})
c.create_index("social")
c.drop_index("social_1")
c.update_one({"name": "<NAME>"}, {"$set": {"age": 35}})
c.update_one({"name": "<NAME>"}, {"$set": {"age": 35}})
c.update_many({"social": "twitter"}, {"$set": {"followers": 1000}})
c = MongoClient(
host="mongodb+srv://readonly:[email protected]/test?retryWrites=true&w=majority")
try:
c.find(1) # force an error
except TypeError:
pass
c.bongo # make a collection
try:
c.bongospongo() # call an invalid method
except TypeError:
pass
#
c = MongoClient()
c.ldbs
c.collection = "dummy.data"
c.insert_one({"name": "<NAME>"})
c.ldbs
c.drop_database(confirm=False)
c.ldbs
```
#### File: mongodbutils/tests/test_pager.py
```python
import unittest
from contextlib import contextmanager
import string
import random
import os
from io import StringIO
import sys
from pymongoshell.pager import Pager
from pymongoshell.pager import LineNumbers
@contextmanager
def captured_output():
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
def randomString(string_length: int = 10):
"""Generate a random string of fixed length """
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for _ in range(string_length))
def find(fn: str, s: str):
with open(fn) as my_file:
return s in my_file.read()
class TestPager(unittest.TestCase):
def test_LineNumber(self):
x = LineNumbers()
self.assertEqual(str(x), "")
x = LineNumbers(1)
self.assertEqual(str(x), "1 : ")
#print(f"'{LineNumbers(1).prefix()}'")
def test_pager(self):
pager = Pager(line_numbers=True)
lines = pager.line_to_paragraph("aaa", line_number=1)
self.assertEqual(len(lines), 1)
self.assertEqual(lines[0], LineNumbers(1).prefix() + "aaa")
lines = pager.line_to_paragraph("")
self.assertEqual(len(lines), 0)
lines = pager.line_to_paragraph("12345abcde12345", width=5, line_number=1)
self.assertEqual(len(lines), 1)
self.assertEqual(lines[0], LineNumbers(1).prefix())
lines = pager.line_to_paragraph("12345abcde12345", 10, line_number=1)
self.assertEqual(len(lines), 3)
self.assertEqual(lines[0], LineNumbers(1).prefix() + "12345")
self.assertEqual(lines[1], LineNumbers(2).prefix() + "abcde")
self.assertEqual(lines[2], LineNumbers(3).prefix() + "12345")
def test_file(self):
pager = Pager()
name = randomString(10)
rs = randomString(10)
pager.output_file = name
self.assertTrue(find(name, "# opening"))
pager.output_file = None
self.assertTrue(find(name, "# closing"))
pager.output_file = name
pager.write_file(f"{rs}\n")
pager.close()
self.assertTrue(find(name, rs))
os.unlink(name)
def test_paginate(self):
line = '12345678901234567890' # len(line) = 20
lines_in = [line for _ in range(3)]
assert len(line) == 20
pager = Pager()
#print("lines in")
#print(lines_in)
with captured_output() as (out, err):
pager.paginate_lines(lines_in, default_terminal_cols=20, default_terminal_lines=24)
# # print("out.getvalue()")
# print(f"{out.getvalue().splitlines()}")
lines = out.getvalue().splitlines()
self.assertEqual(len(lines[0]), 20)
test_output = "1 : 123456789012345\n" \
"2 : 67890\n" \
"3 : 123456789012345\n" \
"4 : 67890\n" \
"5 : 123456789012345\n" \
"6 : 67890\n"
# print("test_output")
# print(f"{test_output.splitlines()}")
self.assertEqual(len(out.getvalue()), len(test_output))
self.assertEqual(out.getvalue(), test_output, out.getvalue())
def test_list_to_line(self):
pager = Pager()
test_list = [1, 2, 3, 4]
l = Pager.list_to_lines([1, 2, 3, 4])
self.assertEqual(len(l), len(test_list))
self.assertEqual("[1,", l[0])
self.assertEqual("4]", l[3])
if __name__ == '__main__':
unittest.main()
```
#### File: mongodbutils/tests/testshell.py
```python
import unittest
import sys
from contextlib import contextmanager
from io import StringIO
from datetime import datetime
import pymongo
from pymongoshell.mongoclient import MongoClient
@contextmanager
def captured_output():
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
class TestShell(unittest.TestCase):
def setUp(self):
with captured_output() as (out, err):
self._c = MongoClient(banner=False)
self._c.collection = "testshell.test"
def tearDown(self):
with captured_output() as (out, err):
self._c.drop_collection(confirm=False)
def test_Client(self):
with captured_output() as (out, err):
c = MongoClient(banner=False)
self.assertEqual("", err.getvalue())
def test_ismaster(self):
with captured_output() as (out, err):
self._c.is_master()
self.assertEqual("", err.getvalue(), err.getvalue())
self.assertTrue("'ismaster': True," in out.getvalue(), out.getvalue())
def test_retrywrites(self):
with captured_output() as (out, err):
self._c.is_master()
self.assertTrue("'ismaster': True," in out.getvalue(), out.getvalue())
def test_find_one(self):
with captured_output() as (out, err):
c = MongoClient(banner=False, host="mongodb+srv://readonly:[email protected]/test?retryWrites=true&w=majority")
c.collection = "demo.zipcodes"
self.assertTrue("zipcodes" in c.database.list_collection_names())
with captured_output() as (out, err):
c.line_numbers = False
c.find_one()
self.assertTrue('PALMER' in out.getvalue())
self.assertEqual("", err.getvalue())
def test_find(self):
with captured_output() as (out, err):
c = MongoClient(banner=False, host="mongodb+srv://readonly:<EMAIL>/test?retryWrites=true&w=majority")
c.collection = "demo.zipcodes"
c.pretty_print = False
c.paginate = False
c.line_numbers = False
c.find(limit=50)
#print(out.getvalue())
self.assertEqual(len(out.getvalue().splitlines()), 51) # error line
self.assertTrue('01069' in out.getvalue())
self.assertTrue('01970' in out.getvalue())
self.assertEqual("", err.getvalue())
def test_line_numbers(self):
with captured_output() as (out, err):
c = MongoClient(banner=False, host="mongodb+srv://readonly:<EMAIL>/test?retryWrites=true&w=majority")
c.collection = "demo.zipcodes"
c.pretty_print = False
c.paginate = False
c.line_numbers = False
c.find(limit=2)
for i in out.getvalue().splitlines()[1:]:
# print(i)
self.assertTrue(i.startswith("{"))
with captured_output() as (out, err):
c.line_numbers = True
c.find(limit=2)
counter = 0
for i in out.getvalue().splitlines():
# print(i)
counter = counter + 1
self.assertTrue(i.startswith(str(counter)))
def test_insert_one(self):
with captured_output() as (out, err):
now = datetime.utcnow()
self._c.insert_one({"ts": now})
doc = self._c.collection.find_one({"ts": now})
self.assertTrue(self._c.collection.find_one({"ts": now}))
id_str = str(doc["_id"])
self.assertTrue(id_str in out.getvalue(), out.getvalue())
self.assertTrue("Inserted:" in out.getvalue(), out.getvalue())
with captured_output() as (out, err):
self._c.insert_one(doc)
self.assertTrue("DuplicateKeyError" in err.getvalue(), err.getvalue())
self._c.drop_collection(confirm=False)
def test_insert_many(self):
with captured_output() as (out, err):
many = [{"a": 1}, {"a": 1}, {"a": 3}]
self._c.insert_many(many)
self.assertTrue(self._c.collection.find_one({"a": 3}))
self._c.delete_many({"a": 1})
self._c.delete_one({"a": 3})
self.assertFalse(self._c.collection.find_one({"a": 3}))
self._c.drop_collection(confirm=False)
def test_update_one(self):
with captured_output() as (out, err):
self._c.insert_many( [{"a": 1}, {"a": 1}, {"a": 3}])
orig_doc = self._c.collection.find_one({"a":1})
self._c.update_one( {"a":1}, {"$inc" : {"a" :1}})
mod_doc = self._c.collection.find_one({"a":2})
self.assertEqual(orig_doc["_id"],mod_doc["_id"])
self.assertTrue("'nModified': 1" in out.getvalue())
self._c.drop_collection(confirm=False)
def test_update_many(self):
with captured_output() as (out, err):
self._c.collection.insert_many( [{"a": 1}, {"a": 1}, {"a": 3}])
orig_doc = self._c.collection.find_one({"a":1})
modified_count = self._c.update_many( {"a":1}, {"$inc" : {"a" :1}})
mod_docs = list(self._c.collection.find({"a":2}))
self.assertEqual(orig_doc["_id"],mod_docs[0]["_id"])
self.assertTrue("'nModified': 2" in out.getvalue())
self._c.drop_collection(confirm=False)
def test_aggregate(self):
with captured_output() as (out, err):
self._c.insert_many([{"a": 1}, {"a": 1}, {"a": 3}])
doc = self._c.collection.find_one({"a": 3})
self._c.aggregate([{"$match": {"a": 3}}])
self.assertTrue(str(doc["_id"]) in out.getvalue())
self._c.drop_collection(confirm=False)
def test_drop_database(self):
with captured_output() as (out, err):
self._c.collection = "dropme.test"
self._c.insert_one({"dummy":"data"})
self._c.drop_database(confirm=False)
self.assertTrue("dropped database: 'dropme'" in out.getvalue())
def test_database_collection_assign(self):
client = MongoClient(banner=False)
with captured_output() as (out, err):
client.collection = "test.jdrumgoole"
self.assertEqual(client.collection_name, "test.jdrumgoole")
self.assertEqual(client.database_name, "test")
client.drop_collection(confirm=False)
@staticmethod
def set_collection(client, name):
client.collection = name
return client
def test_exceptions(self):
with captured_output() as (out, err):
self._c.collection = "new$db.jdrumgoole"
self.assertTrue("MongoDBShellError: 'new$db' is not a valid database name" in err.getvalue(), err.getvalue())
with captured_output() as (out, err):
self._c.collection = "newdb.jdr$umgoole"
self.assertTrue( "MongoDBShellError: 'jdr$umgoole' is not a valid collection name" in err.getvalue(), err.getvalue())
def test_database_url(self):
with captured_output() as (out, err):
c=MongoClient(host="mongodb+srv://readonly:[email protected]/covid19")
#c.collection="statistics"
c.collection="covid19.statistics"
self.assertEqual(c.database_name, "covid19")
self.assertEqual(c.collection_name, "covid19.statistics")
def test_output(self):
with captured_output() as (out, err):
c = MongoClient(banner=False, host="mongodb+srv://readonly:<EMAIL>/test?retryWrites=true&w=majority")
c.collection = "demo.zipcodes"
c.output_file = "test_output.txt"
c.paginate=False
c.find(limit=10)
def test_distinct(self):
with captured_output() as (out, err):
c = MongoClient(banner=False,
host="mongodb+srv://readonly:<EMAIL>/test?retryWrites=true&w=majority")
c.collection = "demo.zipcodes"
c.paginate = False
c.distinct("city")
self.assertEqual(16584, len(c.result))
self.assertEqual(c.result[0], "AARON")
self.assertEqual(c.result[-1], "ZWOLLE")
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jdrumgoole/pymag",
"score": 3
}
|
#### File: pymag/pymag/cursor.py
```python
import contextlib
import csv
import pprint
import sys
from datetime import datetime
from nesteddict import NestedDict
import pymongo
class CursorFormatter(object):
'''
    Output a set of cursor elements by iterating over them.
    If filename is a file name, output the content to that file; otherwise write to stdout.
'''
def __init__(self, cursor, filename="", formatter="json", results=None):
'''
        Take data from 'cursor' and output it via 'filename' ('' or '-' writes to stdout).
'''
self._results = []
self._cursor = cursor
if (isinstance(cursor, pymongo.cursor.Cursor) or
isinstance(cursor, pymongo.command_cursor.CommandCursor)):
self._format = formatter
self._filename = filename
if results:
self._results = results
else:
raise ValueError("aggregate argument to CursorFormatter is not of class pymongo cursor")
def results(self):
return self._results
@contextlib.contextmanager
def _smart_open(self, filename=None):
if filename and filename != '-':
fh = open(filename, 'w')
else:
fh = sys.stdout
try:
yield fh
finally:
if fh is not sys.stdout:
fh.close()
@staticmethod
def dateMapField(doc, field, time_format=None):
'''
Given a field that contains a datetime we want it to be output as a string otherwise
        pprint and other functions will abandon ship when they meet BSON time objects
'''
if time_format is None:
time_format = "%d-%b-%Y %H:%M"
d = NestedDict(doc)
if field in d:
value = d[field]
if isinstance(value, datetime):
d[field] = value.strftime(time_format)
else:
d[field] = datetime.fromtimestamp(value/1000)
return dict(d)
@staticmethod
def fieldMapper(doc, fields):
"""
Take 'doc' and create a new doc using only keys from the 'fields' list.
Supports referencing fields using dotted notation "a.b.c" so we can parse
nested fields the way MongoDB does.
"""
if fields is None or len(fields) == 0:
return doc
new_doc = NestedDict()
old_doc = NestedDict(doc)
for i in fields:
if i in old_doc:
# print( "doc: %s" % doc )
# print( "i: %s" %i )
new_doc[i] = old_doc[i]
return dict(new_doc)
@staticmethod
def dateMapper(doc, date_map, time_format=None):
'''
        For all the field names in "date_map" find that key in doc and map the datetime value to
        a strftime string so that pprint and others will print out readable datetimes.
        '''
        if date_map:
            for field in date_map:
                # 'field' is a field name; dateMapField checks whether the stored value
                # is a datetime before converting it, and returns the updated doc.
                doc = CursorFormatter.dateMapField(doc, field, time_format=time_format)
        return doc
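    # Illustrative pipeline (not from the original source), assuming a doc such as
    # {"name": "x", "ts": datetime(2020, 1, 1)}:
    #   d = CursorFormatter.fieldMapper(doc, ["name", "ts"])
    #   d = CursorFormatter.dateMapper(d, ["ts"])   # "ts" becomes e.g. "01-Jan-2020 00:00"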
def printCSVCursor(self, c, fieldnames, datemap, time_format=None):
'''
Output CSV format. items are separated by commas. We only output the fields listed
in the 'fieldnames'. We datemap fields listed in 'datemap'. If a datemap listed field
        is not a datetime object we will throw an exception.
'''
with self._smart_open(self._filename) as output:
writer = csv.DictWriter(output, fieldnames=fieldnames)
writer.writeheader()
count = 0
for i in c:
self._results.append(i)
count = count + 1
d = CursorFormatter.fieldMapper(i, fieldnames)
d = CursorFormatter.dateMapper(d, datemap, time_format)
writer.writerow(d)
return count
def printJSONCursor(self, c, fieldnames, datemap, time_format=None):
"""
        Output plain json objects.
:param c: collection
:param fieldnames: fieldnames to include in output
:param datemap: fieldnames to map dates to date strings
:param time_format: field names to map to a specific time format
:return:
"""
count = 0
with self._smart_open(self._filename) as output:
for i in c:
# print( "processing: %s" % i )
# print( "fieldnames: %s" % fieldnames )
self._results.append(i)
d = CursorFormatter.fieldMapper(i, fieldnames)
# print( "processing fieldmapper: %s" % d )
d = CursorFormatter.dateMapper(d, datemap, time_format)
pprint.pprint(d, output)
count = count + 1
return count
def printCursor(self, c, fieldnames=None, datemap=None, time_format=None):
'''
Output a cursor to a filename or stdout if filename is "-".
fmt defines whether we output CSV or JSON.
'''
if self._format == 'csv':
count = self.printCSVCursor(c, fieldnames, datemap, time_format)
else:
count = self.printJSONCursor(c, fieldnames, datemap, time_format)
return count
def output(self, fieldNames=None, datemap=None, time_format=None, aggregate=True):
'''
        Output all fields named in the fieldNames list. Fields listed in datemap must hold
        datetime values and are mapped to formatted date strings.
'''
count = self.printCursor(self._cursor, fieldNames, datemap, time_format)
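# Illustrative usage (assumes a pymongo collection named 'col'; not part of the
# original module):
#   fmt = CursorFormatter(col.find({}), filename="-", formatter="json")
#   fmt.output(fieldNames=["name", "ts"], datemap=["ts"])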
```
#### File: pymag/pymag/typedlist.py
```python
from collections import UserList
class TypedList(UserList):
    def __init__(self, type_constraint=None, seq=None):
        self._type = type_constraint
        super().__init__([self._validate(x) for x in (seq or [])])
def _validate(self, x):
if self._type:
if not isinstance(x, self._type):
raise ValueError(f"{x} is not an instance of {self._type}")
else:
return x
else:
return x
@property
def item_type(self):
return self._type
def __add__(self, rhs):
if isinstance(rhs, TypedList):
return TypedList(list.__add__(self, rhs))
else:
raise ValueError(f"{rhs} is not an instance of {self.__class__.__name__}")
def __iadd__(self, rhs):
if isinstance(rhs, TypedList):
return TypedList(list.__iadd__(self, rhs))
else:
raise ValueError(f"{rhs} is not an instance of {self.__class__.__name__}")
def __setitem__(self, key, value):
if self._validate(value):
super().__setitem__(key, value)
def append(self, value):
if self._validate(value):
super().append(value)
def extend(self, l):
if type(l) == TypedList:
if self.item_type == l.item_type:
super().extend(l)
else:
raise ValueError(f"{l.item_type} does not match {self.item_type}")
else:
raise ValueError(f"{l} is not a TypedList")
def insert(self, index, value):
if self._validate(value):
super().insert(index, value)
if __name__ == "__main__":
l = TypedList(int, [])
l.append(1)
```
#### File: pymag/test/test_typedlist.py
```python
import unittest
from pymag.typedlist import TypedList
class TestTypedList(unittest.TestCase):
def test_init(self):
t = TypedList()
self.assertTrue(isinstance(t, list))
t = TypedList(int)
self.assertRaises(ValueError, t.append, "hello")
def test_overrides(self):
t = TypedList(tuple)
t = t + TypedList((1, 2))
self.assertEqual(t[0], (1, 2))
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jdrumgoole/pymongodbimport",
"score": 3
}
|
#### File: pymongodbimport/pymongoimport/pymongomultiimport_main.py
```python
import argparse
import multiprocessing
import os
import sys
import time
from collections import OrderedDict
from multiprocessing import Process
import pymongo
from pymongoimport.argparser import add_standard_args
from pymongoimport.audit import Audit
from pymongoimport.logger import Logger
from pymongoimport.pymongoimport_main import Importer
def strip_arg(arg_list, remove_arg, has_trailing=False):
"""
Remove arg and arg argument from a list of args. If has_trailing is true then
remove --arg value else just remove --arg.
Args:
arg_list (list) : List of args that we want to remove items from
remove_arg (str) : Name of arg to remove. Must match element in `arg_list`.
        has_trailing (boolean) : If the arg named by `remove_arg` takes a value, remove
                                 that trailing value as well
"""
try:
location = arg_list.index(remove_arg)
if has_trailing:
del arg_list[location + 1]
del arg_list[location]
except ValueError:
pass
return arg_list
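# For example (illustrative only):
#   strip_arg(["--poolsize", "4", "--drop"], "--poolsize", has_trailing=True)
# returns ["--drop"], removing both the flag and its trailing value.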
def chunker(seq, size):
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
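# e.g. list(chunker([1, 2, 3, 4, 5], 2)) yields [[1, 2], [3, 4], [5]].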
def multi_import(*argv):
"""
.. function:: multi_import ( *argv )
Import CSV files using multiprocessing
:param argv: list of command lines
"""
usage_message = '''
A master script to manage uploading of a single data file as multiple input files. Multi-import
will optionally split a single file (specified by the --single argument) or optionally upload an
already split list of files passed in on the command line.
    Each file is uploaded by a separate pymongoimport subprocess.
'''
parser = argparse.ArgumentParser(usage=usage_message)
parser = add_standard_args(parser)
parser.add_argument("--poolsize", type=int, default=multiprocessing.cpu_count(),
help="The number of parallel processes to run")
parser.add_argument("--forkmethod", choices=["spawn", "fork", "forkserver"], default="spawn",
help="The model used to define how we create subprocesses (default:'spawn')")
args = parser.parse_args(*argv)
multiprocessing.set_start_method(args.forkmethod)
log = Logger("multi_import").log()
Logger.add_file_handler("multi_import")
Logger.add_stream_handler("multi_import")
child_args = sys.argv[1:]
children = OrderedDict()
log.info("filenames:%s", args.filenames)
if len(args.filenames) == 0:
log.info("no input files")
sys.exit(0)
if args.poolsize:
poolsize = args.poolsize
child_args = strip_arg(child_args, "--poolsize", True)
    client = pymongo.MongoClient(args.host)
    if args.restart:
        log.info("Ignoring --drop overridden by --restart")
    elif args.drop:
        log.info("Dropping database : %s", args.database)
        client.drop_database(args.database)
        child_args = strip_arg(child_args, "--drop")
if args.audit:
audit = Audit(client)
batch_ID = audit.start_batch({"command": sys.argv})
else:
audit = None
batch_ID = None
start = time.time()
process_count = 0
log.info("Poolsize:{}".format(poolsize))
log.info("Fork using:'%s'", args.forkmethod)
subprocess = Importer(audit=audit, batch_ID=batch_ID, args=args)
subprocess.setup_log_handlers()
try:
#
# Should use a Pool here but Pools need top level functions which is
# ugly.
#
proc_list = []
for arg_list in chunker(args.filenames, poolsize): # blocks of poolsize
proc_list = []
for i in arg_list:
if os.path.isfile(i):
log.info(f"Processing:'{i}'")
proc = Process(target=subprocess.run, args=(i,), name=i)
proc.start()
proc_list.append(proc)
else:
log.warning(f"No such file: '{i}' ignoring")
for proc in proc_list:
proc.join()
except KeyboardInterrupt:
log.info("Keyboard interrupt...")
        for proc in proc_list:
            log.info("terminating process: '%s'", proc.name)
            proc.terminate()
finish = time.time()
log.info("Total elapsed time:%f" % (finish - start))
if __name__ == '__main__':
multi_import(sys.argv[1:])
```
#### File: pymongodbimport/pymongoimport/restart.py
```python
import socket
import sys
from datetime import datetime
from enum import Enum
import pymongo
from pymongoimport.canonical_path import Canonical_Path
class Restart_State(Enum):
undefined = 0
start = 1
inprogress = 2
finish = 3
class Restarter(object):
"""
Track insertion of a collection of docs by adding the last inserted
ID into a collection called "restartlog". Each time we insert we add
a doc with a timestamp and an ID field and a count of the number of
entries inserted to date. The doc also contains a batch
start time.
    This class assumes the object ID is defined by default as per the MongoDB docs
(https://docs.mongodb.com/manual/reference/method/ObjectId/). In this case in a single run
of the pymongoimport the IDs will contain identical host and process components. We can use
these fields to identify inserts that happened in the previous run. So we search for all inserts
with an ID greater than the ID in the restartlog.
We then scan that list of inserts for a matching ID
(remember we may be running multiple batch uploads in parallel) to find the inserts related to this
batch restart. Once we have that list of matching inserts then we have the count of objects inserted.
    Now we know where the restart position should be (restartlog count + len(list of matching inserts)).
We can now skip() to that position in the input file and update the restart log.
"""
def __init__(self, database, input_filename, batch_size, cmd=None):
"""
Constructor
"""
self._audit = database["AUDIT"]
self._name = Canonical_Path(input_filename)
self._batch_size = batch_size
self._hostname = socket.gethostname()
if cmd is None:
self._cmd = " ".join(sys.argv)
else:
self._cmd = cmd
self._restartDoc = self._audit.find_one({"name": self._name(),
"state": "inprogress"})
if self._restartDoc is None:
self.start()
@staticmethod
def split_ID(doc_id):
"""
Split a MongoDB Object ID
a 4-byte value representing the seconds since the Unix epoch,
a 3-byte machine identifier,
a 2-byte process id, and
A 3-byte counter, starting with a random value.
"""
id_str = str(doc_id)
# epoch 0 machine 1 process ID 2 counter 3
return (id_str[0:8], id_str[8:14], id_str[14:18], id_str[18:24])
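    # Worked example with a hypothetical 24-character id:
    #   Restarter.split_ID("5f2d1c3a9a8b7c1d2e3f4a5b")
    #   -> ("5f2d1c3a", "9a8b7c", "1d2e", "3f4a5b")
    # i.e. the epoch-seconds, machine, process-id and counter hex substrings.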
def start(self):
self._audit.insert_one({"name": self._name(),
"ts": datetime.utcnow(),
"batch_size": self._batch_size,
"command": self._cmd,
"state": Restart_State.start})
def update(self, doc_id, count):
self._audit.insert_one({"name": self._name(),
"count": count,
"ts": datetime.utcnow(),
"doc_id": doc_id,
"state": Restart_State.inprogress})
def restart(self, collection):
"""
Get the restart doc. Now find any docs created after the restart doc was created
within the same process and machine. Count those so we know where we are.
Return the new doc count that we can skip too.
"""
self._restartDoc = self._audit.find_one({"name": self._name(),
"state": Restart_State.inprogress})
if self._restartDoc is None: # skip nothing, nothing to restart
return 0
count = self._restartDoc["count"]
(_, machine, pid, _) = Restarter.split_ID(self._restartDoc["doc_id"])
cursor = collection.find({"_id": {"$gt": self._restartDoc["doc_id"]}})
for i in cursor:
(_, i_machine, i_pid, _) = Restarter.split_ID(i["_id"])
if i_machine == machine and i_pid == pid:
count = count + 1
if count == self._restartDoc["batch_size"]:
# we have the full batch, we can't have inserted more than
# this before updating the restart doc
break
return count
def finish(self):
self._restartDoc = self._audit.insert_one({"name": self._name(),
"ts": datetime.utcnow(),
"state": Restart_State.finish})
def _find_last(self, col, doc):
if "ts" in doc:
            cursor = col.find(doc).sort("ts", pymongo.DESCENDING).limit(1)
for c in cursor:
return c
return None
else:
raise ValueError("_find_last requires a timestamp field 'ts'")
def get_state(self, name):
        doc = self._audit.find_one({"name": name,
                                    "state": Restart_State.finish},
                                   sort=[("ts", pymongo.DESCENDING)])
        if doc:
            return Restart_State.finish
        doc = self._audit.find_one({"name": name,
                                    "state": Restart_State.inprogress})
        if doc:
            return Restart_State.inprogress
        doc = self._audit.find_one({"name": name,
                                    "state": Restart_State.start})
        if doc:
            return Restart_State.start
        return Restart_State.undefined
def reset(self):
self._restartDoc = self._audit.find_one_and_update({"name": self._name()},
{"$set": {"timestamp": datetime.utcnow(),
"batch_size": self._batch_size,
"count": 0,
"last_doc_id": 0,
"state": "inprogress"}})
```
#### File: pymongodbimport/pymongoimport/root.py
```python
import os
class Root(object):
"""
What is the root directory for this project
"""
def __init__(self):
"""
Constructor
"""
self._root = os.path.join(os.getenv("HOME"), "GIT", "pymongoimport")
def root(self):
return self._root
def root_path(self, *path):
return os.path.join(self._root, *path)
if __name__ == "__main__":
r = Root()
print("root : '%s'" % r.root())
# print( "root path: '%s'" % r.root_path( "a", "b"))
```
#### File: pymongodbimport/test/test_fieldfile.py
```python
import os
import unittest
from typing import Dict
from datetime import datetime
import pymongo
import dateutil
from pymongoimport.fieldfile import FieldFile
from pymongoimport.filewriter import FileWriter
from pymongoimport.filereader import FileReader
from pymongoimport.filesplitter import LineCounter
from pymongoimport.logger import Logger
from pymongoimport.type_converter import Converter
from pymongoimport.linetodictparser import LineToDictParser
path_dir = os.path.dirname(os.path.realpath(__file__))
def f(path):
return os.path.join(path_dir, path)
class Test(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(Test, self).__init__(*args, **kwargs)
Logger.add_null_hander()
def setUp(self):
self._client = pymongo.MongoClient(host="mongodb://localhost:27017")
self._db = self._client["FC_TEST"]
self._col = self._db["FC_TEST"]
def tearDown(self):
self._db.drop_collection("FC_TEST")
def test_FieldConfig(self):
fc = FieldFile(f("data/test_fieldconfig.tff"))
self.assertEqual(len(fc.fields()), 4)
self.assertEqual(fc.fields()[0], "Test 1")
self.assertEqual(fc.fields()[3], "Test 4")
fc:FieldFile = FieldFile(f("data/uk_property_prices.tff"))
self.assertEqual(len(fc.fields()), 16)
self.assertEqual(fc.fields()[0], "txn")
self.assertEqual(fc.fields()[2], "Date of Transfer")
self.assertEqual(fc.fields()[14], "PPD Category Type")
def test_delimiter_no_header(self):
start_count = self._col.count_documents({})
fc = FieldFile(f("data/10k.tff"))
parser = LineToDictParser(fc)
reader = FileReader(f("data/10k.txt"), has_header=False, delimiter="|")
bw = FileWriter(self._col, reader=reader, parser=parser)
bw.write()
self.assertEqual(self._col.count_documents({}) - start_count, 10000)
def test_fieldfile_nomatch(self):
fc = FieldFile(f("data/AandE_Data_2011-04-10.tff"))
parser = LineToDictParser(fc)
reader = FileReader(f('data/inventory.csv'), has_header=True)
bw = FileWriter(self._col, reader=reader, parser=parser)
with self.assertRaises(ValueError):
bw.write()
def test_new_delimiter_and_timeformat_header(self):
start_count = self._col.count_documents({})
fc = FieldFile(f("data/mot.tff"))
parser = LineToDictParser(fc)
reader = FileReader(f('data/mot_test_set_small.csv'), has_header=False, delimiter="|")
self.assertTrue(type(reader.name) == str)
bw = FileWriter(self._col, reader=reader, parser=parser)
total, elapsed = bw.write()
lines = LineCounter(f('data/mot_test_set_small.csv')).line_count
inserted_count = self._col.count_documents({}) - start_count
self.assertEqual(inserted_count, total)
self.assertEqual(inserted_count, lines)
def test_delimiter_header(self):
start_count = self._col.count_documents({})
fc = FieldFile(f("data/AandE_Data_2011-04-10.tff"))
parser = LineToDictParser(fc)
reader = FileReader(f('data/AandE_Data_2011-04-10.csv'), has_header=True)
bw = FileWriter(self._col, reader=reader, parser=parser)
bw.write()
self.assertEqual(self._col.count_documents({}) - start_count, 300)
def test_generate_field_filename(self):
gfc = FieldFile.generate_field_file(f('data/inventory.csv'), ext="xx")
self.assertEqual(gfc.field_filename, f("data/inventory.xx"))
rfc = FieldFile(gfc.field_filename)
self.assertTrue("Inventory Item" in rfc.fields())
self.assertTrue("Amount" in rfc.fields())
self.assertTrue("Last Order", rfc.fields())
self.assertEqual(len(rfc.fields()), 3)
os.unlink(gfc.field_filename)
fc = FieldFile.generate_field_file(f('data/inventory.csv'))
self.assertEqual(fc.field_filename, f("data/inventory.tff"))
os.unlink(fc.field_filename)
fc = FieldFile.generate_field_file(f('data/inventory.csv.1'))
self.assertEqual(fc.field_filename, f("data/inventory.csv.tff"), fc.field_filename)
os.unlink(fc.field_filename)
fc = FieldFile.generate_field_file(f('data/yellow_tripdata_2015-01-06-200k.csv.1'))
self.assertEqual(fc.field_filename, f("data/yellow_tripdata_2015-01-06-200k.csv.tff"), fc.field_filename)
os.unlink(fc.field_filename)
fc = FieldFile.generate_field_file(f('data/yellow_tripdata_2015-01-06-200k.csv.10'))
self.assertEqual(fc.field_filename, f("data/yellow_tripdata_2015-01-06-200k.csv.tff"), fc.field_filename)
os.unlink(fc.field_filename)
fc = FieldFile.generate_field_file(f('data/test_results_2016_10.txt.1'))
self.assertEqual(fc.field_filename, f("data/test_results_2016_10.txt.tff"), fc.field_filename)
os.unlink(fc.field_filename)
def test_nyc_2016_genfieldfile(self):
fc = FieldFile.generate_field_file(f('data/2018_Yellow_Taxi_Trip_Data_1000.csv'),
delimiter=";")
fc_new = FieldFile(fc.field_filename)
self.assertEqual(fc.fields(), fc_new.fields())
os.unlink(fc.field_filename)
def test_reader(self):
fc = FieldFile.generate_field_file(f("data/inventory.csv"), f("data/inventory_test.tff"))
ff = FieldFile(fc.field_filename)
reader = FileReader(f("data/inventory.csv"), has_header=True)
parser = LineToDictParser(ff)
for i, row in enumerate(reader.readline(), 1):
doc = parser.parse_list(row, i)
for field in ff.fields():
self.assertTrue(field in doc, f"'{field}'")
os.unlink(fc.field_filename)
ff = FieldFile(f("data/uk_property_prices.tff"))
reader = FileReader(f("data/uk_property_prices.csv"), has_header=True)
parser = LineToDictParser(ff)
        for i, row in enumerate(reader.readline(), 1):
doc = parser.parse_list(row, i)
for field in ff.fields():
if field == "txn": # converted to _id field
continue
self.assertTrue(field in doc, f"{field} not present")
self.assertTrue(type(doc["Price"]) == int)
self.assertTrue(type(doc["Date of Transfer"]) == datetime)
def test_generate_fieldfile(self):
fc = FieldFile.generate_field_file(f("data/inventory.csv"), ext="testff")
self.assertEqual(fc.field_filename, f("data/inventory.testff"), fc.field_filename)
self.assertTrue(os.path.isfile(f("data/inventory.testff")), f("data/inventory.testff"))
parser = LineToDictParser(fc)
reader = FileReader(f("data/inventory.csv"), has_header=True)
start_count = self._col.count_documents({})
writer = FileWriter(self._col, reader=reader, parser=parser)
write_count, elapsed = writer.write()
line_count = LineCounter(f("data/inventory.csv")).line_count
new_inserted_count = self._col.count_documents({}) - start_count
self.assertEqual(new_inserted_count, write_count) # header must be subtracted
self.assertEqual(new_inserted_count, line_count - 1) # header must be subtracted
os.unlink(f("data/inventory.testff"))
def test_date(self):
config = FieldFile(f("data/inventory_dates.tff"))
parser = LineToDictParser(config, locator=False) # screws up comparison later if locator is true
reader = FileReader(f("data/inventory.csv"), has_header=True)
start_count = self._col.count_documents({})
writer = FileWriter(self._col, reader=reader, parser=parser)
docs_written, elapsed = writer.write()
line_count = LineCounter(f("data/inventory.csv")).line_count
self.assertEqual(self._col.count_documents({}) - start_count, line_count - 1) # header must be subtracted
self.assertEqual(self._col.count_documents({}), docs_written)
nuts_doc = self._col.find_one({"Last Order": dateutil.parser.parse("29-Feb-2016")})
self.assertTrue(nuts_doc)
def testFieldDict(self):
d = FieldFile(f("data/testresults.tff")).field_dict
self.assertTrue("TestID" in d)
self.assertTrue("FirstUseDate" in d)
self.assertTrue("Colour" in d)
self.assertTrue(d["TestID"]["type"] == "int")
def test_duplicate_id(self):
self.assertRaises(ValueError, FieldFile, f("data/duplicate_id.tff"))
if __name__ == "__main__":
unittest.main()
```
#### File: pymongodbimport/test/test_linecounter.py
```python
import os
import unittest
from pymongoimport.filesplitter import LineCounter
from pymongoimport.liner import make_line_file
class MyTestCase(unittest.TestCase):
def _test_file(self, count, doseol=False,filename="liner.txt", unlink=True):
f = make_line_file(count=count, doseol=doseol, filename=filename)
self.assertEqual(count, LineCounter(f).line_count)
if unlink:
os.unlink(f)
def test_Line_Counter(self):
self._test_file(1, filename="1.txt")
self._test_file(2, filename="2.txt")
self._test_file(512, filename="3.txt")
self._test_file(65000, filename="5.txt")
self._test_file(1, doseol=True, filename="6.txt")
self._test_file(10, filename="7.txt", doseol=True)
self._test_file(65000, filename="8.txt",doseol=True)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jdrumgoole/python-atlasapi",
"score": 2
}
|
#### File: python-atlasapi/atlasapi/measurements.py
```python
from datetime import datetime
from typing import Tuple, NewType, List, Optional
from dateutil.parser import parse
from atlasapi.atlas_types import OptionalFloat
from atlasapi.lib import _GetAll, logger
class AtlasMeasurementTypes(_GetAll):
"""
Helper class for all available atlas measurements.
All classes and embedded classes have a get_all class method that returns an iterator of all measurements
and sub measurements.
"""
connections = 'CONNECTIONS'
class Asserts(_GetAll):
regular = 'ASSERT_REGULAR'
warning = 'ASSERT_WARNING'
msg = 'ASSERT_MSG'
user = 'ASSERT_USER'
class Cache(_GetAll):
bytes_read = 'CACHE_BYTES_READ_INTO'
bytes_written = 'CACHE_BYTES_WRITTEN_FROM'
dirty = 'CACHE_DIRTY_BYTES'
used = 'CACHE_USED_BYTES'
class Cursors(_GetAll):
open = 'CURSORS_TOTAL_OPEN'
timed_out = 'CURSORS_TOTAL_TIMED_OUT'
class Db(_GetAll):
storage = 'DB_STORAGE_TOTAL'
data_size = 'DB_DATA_SIZE_TOTAL'
class DocumentMetrics(_GetAll):
returned = 'DOCUMENT_METRICS_RETURNED'
inserted = 'DOCUMENT_METRICS_INSERTED'
updated = 'DOCUMENT_METRICS_UPDATED'
deleted = 'DOCUMENT_METRICS_DELETED'
class ExtraInfo(_GetAll):
page_faults = 'EXTRA_INFO_PAGE_FAULTS'
class GlobalLockCurrentQueue(_GetAll):
total = 'GLOBAL_LOCK_CURRENT_QUEUE_TOTAL'
readers = 'GLOBAL_LOCK_CURRENT_QUEUE_READERS'
writers = 'GLOBAL_LOCK_CURRENT_QUEUE_WRITERS'
class Memory(_GetAll):
resident = 'MEMORY_RESIDENT'
virtual = 'MEMORY_VIRTUAL'
mapped = 'MEMORY_MAPPED'
class Network(_GetAll):
bytes_id = 'NETWORK_BYTES_IN'
bytes_out = 'NETWORK_BYTES_OUT'
num_requests = 'NETWORK_NUM_REQUESTS'
class Opcounter(_GetAll):
cmd = 'OPCOUNTER_CMD'
query = 'OPCOUNTER_QUERY'
update = 'OPCOUNTER_UPDATE'
delete = 'OPCOUNTER_DELETE'
getmore = 'OPCOUNTER_GETMORE'
insert = 'OPCOUNTER_INSERT'
class Repl(_GetAll):
cmd = 'OPCOUNTER_REPL_CMD'
update = 'OPCOUNTER_REPL_UPDATE'
delete = 'OPCOUNTER_REPL_DELETE'
insert = 'OPCOUNTER_REPL_INSERT'
class Operations(_GetAll):
scan_and_order = 'OPERATIONS_SCAN_AND_ORDER'
class ExecutionTime(_GetAll):
reads = 'OP_EXECUTION_TIME_READS'
writes = 'OP_EXECUTION_TIME_WRITES'
commands = 'OP_EXECUTION_TIME_COMMANDS'
class Oplog(_GetAll):
master_time = 'OPLOG_MASTER_TIME'
rate = 'OPLOG_RATE_GB_PER_HOUR'
class QueryExecutor(_GetAll):
scanned = 'QUERY_EXECUTOR_SCANNED'
scanned_objects = 'QUERY_EXECUTOR_SCANNED_OBJECTS'
class QueryTargetingScanned(_GetAll):
per_returned = 'QUERY_TARGETING_SCANNED_PER_RETURNED'
objects_per_returned = 'QUERY_TARGETING_SCANNED_OBJECTS_PER_RETURNED'
class TicketsAvailable(_GetAll):
reads = 'TICKETS_AVAILABLE_READS'
writes = 'TICKETS_AVAILABLE_WRITES'
class CPU(_GetAll):
class Process(_GetAll):
user = 'PROCESS_CPU_USER'
kernel = 'PROCESS_CPU_KERNEL'
children_user = 'PROCESS_CPU_CHILDREN_USER'
children_kernel = 'PROCESS_CPU_CHILDREN_KERNEL'
class ProcessNormalized(_GetAll):
user = 'PROCESS_NORMALIZED_CPU_USER'
kernel = 'PROCESS_NORMALIZED_CPU_KERNEL'
children_user = 'PROCESS_NORMALIZED_CPU_CHILDREN_USER'
children_kernel = 'PROCESS_NORMALIZED_CPU_CHILDREN_KERNEL'
class System(_GetAll):
user = 'SYSTEM_CPU_USER'
kernel = 'SYSTEM_CPU_KERNEL'
nice = 'SYSTEM_CPU_NICE'
iowait = 'SYSTEM_CPU_IOWAIT'
irq = 'SYSTEM_CPU_IRQ'
softirq = 'SYSTEM_CPU_SOFTIRQ'
guest = 'SYSTEM_CPU_GUEST'
steal = 'SYSTEM_CPU_STEAL'
class SystemNormalized(_GetAll):
user = 'SYSTEM_NORMALIZED_CPU_USER'
kernel = 'SYSTEM_NORMALIZED_CPU_KERNEL'
nice = 'SYSTEM_NORMALIZED_CPU_NICE'
iowait = 'SYSTEM_NORMALIZED_CPU_IOWAIT'
irq = 'SYSTEM_NORMALIZED_CPU_IRQ'
softirq = 'SYSTEM_NORMALIZED_CPU_SOFTIRQ'
guest = 'SYSTEM_NORMALIZED_CPU_GUEST'
steal = 'SYSTEM_NORMALIZED_CPU_STEAL'
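# Illustrative usage sketch (not part of the original module). Assuming the inherited
# _GetAll.get_all() classmethod yields every measurement string defined on a class and
# its nested classes, the CPU metrics could be enumerated like this:
#
#     for measurement_name in AtlasMeasurementTypes.CPU.get_all():
#         print(measurement_name)   # e.g. 'PROCESS_CPU_USER', 'SYSTEM_NORMALIZED_CPU_STEAL', ...
#
# The exact return type of get_all() is defined in atlasapi.lib and is an assumption here.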
class AtlasMeasurementValue(object):
def __init__(self, value_dict):
"""
Class for holding a measurement value
:type value_dict: dict
:param value_dict: An Atlas standard Measurement value dictionary.
"""
timestamp = value_dict.get('timestamp', None)
value = value_dict.get('value', None)
try:
self.timestamp = parse(timestamp)
except (ValueError, TypeError):
            logger.warning('Could not parse "{}" as a datetime.'.format(timestamp))
self.timestamp = None
try:
if value is None:
self.value = None
self.value = float(value)
except ValueError as e:
self.value = None
logger.warning('Could not parse the metric value "{}". Error was {}'.format(value, e))
except TypeError:
logger.info('Value is none.')
self.value = None
@property
def value_int(self):
try:
return int(self.value)
except Exception:
return None
@property
def value_float(self):
try:
return float(self.value)
except Exception:
return None
def as_dict(self):
return dict(timestamp=self.timestamp.__str__(), value=self.value, value_int=self.value_int,
value_float=self.value_float)
@property
def as_tuple(self) -> Tuple[datetime, OptionalFloat]:
"""
Returns a MeasurementValue as a tuple, timestamp first.
:rtype: Tuple[datetime,OptionalFloat]
:return: A tuple with a datetime and a float
"""
return self.timestamp, self.value
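# Illustrative sketch (not part of the original module): building a value from an
# Atlas-style measurement dict. The ISO-8601 timestamp format used here is an assumption.
#
#     mv = AtlasMeasurementValue({'timestamp': '2021-01-01T00:00:00Z', 'value': '42.5'})
#     mv.as_tuple     # (datetime.datetime(2021, 1, 1, ...), 42.5)
#     mv.value_int    # 42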
ListOfAtlasMeasurementValues = NewType('ListOfAtlasMeasurementValues', List[Optional[AtlasMeasurementValue]])
class AtlasMeasurement(object):
    def __init__(self, name, period, granularity, measurements=None):
"""
:type measurements: ListOfAtlasMeasurementValues
:type granularity: AtlasGranularities
:type period: AtlasPeriods
:type name: atlasapi.measurements.AtlasMeasurementTypes
"""
self.name = name
self.period = period
self.granularity = granularity
        self._measurements = measurements if measurements is not None else []
@property
def measurements(self):
for item in self._measurements:
yield item
@measurements.setter
def measurements(self, value):
if type(value) == list:
self._measurements.extend(value)
else:
self._measurements.append(value)
@measurements.deleter
def measurements(self):
self._measurements = []
def measurements_as_tuples(self):
if isinstance(self._measurements[0], AtlasMeasurementValue):
for item in self._measurements:
yield item.as_tuple
@property
def date_start(self):
"""
:rtype: datetime
"""
seq = [x.timestamp for x in self._measurements]
return min(seq)
@property
def date_end(self):
"""
:rtype: datetime
"""
seq = [x.timestamp for x in self._measurements]
return max(seq)
@property
def measurements_count(self):
"""
:rtype: int
"""
return len(self._measurements)
@property
def as_dict(self):
"""
Returns the measurement as a dict, including the computed properties.
:rtype: dict
"""
return dict(measurements=self._measurements, date_start=self.date_start, date_end=self.date_end, name=self.name,
period=self.period, granularity=self.granularity, measurements_count=self.measurements_count
)
def __hash__(self):
return hash(self.name+'-'+self.period)
def __eq__(self, other):
"""
        Measurements are considered duplicates if their name and period are the same.
:param other:
:return:
"""
if isinstance(other, AtlasMeasurement):
return ((self.name == other.name) and (self.period == other.period))
OptionalAtlasMeasurement = NewType('OptionalAtlasMeasurement', Optional[AtlasMeasurement])
```
|
{
"source": "jdrumgoole/semver_manager",
"score": 3
}
|
#### File: semver_manager/semvermanager/command.py
```python
import os
import queue
class CommandError(ValueError):
pass
class OperationError(ValueError):
pass
class Operation:
def __init__(self, name=None, q=None):
if name:
self._name = name
else:
self._name = __class__.__qualname__
if q:
self._q = q
else:
self._q = queue.Queue()
@property
def q(self):
return self._q
def print_queue(self):
        for item in self.items():
print(item)
def items(self):
while not self.q.empty():
item = self.q.get()
yield item
class QueryError(ValueError):
pass
class Query(Operation):
def __init__(self, name=None, q=None):
if name:
self._name = name
else:
            self._name = self.__class__.__qualname__
if q:
self._q = q
else:
self._q = queue.SimpleQueue()
def __call__(self, *args, **kwargs):
return self
class Command(Operation):
@staticmethod
def null_func(*args, **kwargs):
pass
def __init__(self, name=None, q=None):
        super().__init__(name, q)
def __call__(self, *args, **kwargs):
"""
        Process the arguments and return self; subclasses override this to perform their operation and put results on the queue.
:param args: args
:param kwargs: keyword args
:return: self
"""
return self
@property
def name(self):
return self._name
class EchoCommand(Command):
def __init__(self, name=None, q=None):
        super().__init__(name, q)
if name:
self._name = name
else:
self._name = self.__class__.__qualname__
def __call__(self, *args, **kwargs):
arg_string = ", ".join([str(x) for x in args])
kw_string = ", ".join([f"{k}='{v}'" for k, v in kwargs.items()])
self.q.put(arg_string)
self.q.put(kw_string)
return self
class StatCommand(Command):
def __call__(self, filename):
try:
self.q.put(os.stat(filename))
except FileNotFoundError as e:
raise CommandError(e)
return self
class OperationRunner:
def __init__(self, op):
self._commands = {}
self.add(op)
def add(self, op):
if isinstance(op, Operation):
self._commands[op.name] = op
else:
raise OperationError(f"{op} is not an instance of Operation")
def __call__(self, files, *args, **kwargs):
for i in files:
for name, cmd in self._commands.items():
try:
yield cmd(i, *args, **kwargs)
except CommandError as e:
print(f"ERROR: command '{name}' : {e}")
```
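A minimal usage sketch for the command/queue pattern above (illustrative only; it assumes the package is importable as `semvermanager`, and the file names passed to the runner are placeholders):

```python
from semvermanager.command import EchoCommand, OperationRunner

# Run a single EchoCommand over a list of "files"; each call returns the command,
# whose queue then holds the stringified positional and keyword arguments.
runner = OperationRunner(EchoCommand())
for cmd in runner(["a.txt", "b.txt"], flavour="demo"):
    for item in cmd.items():
        print(item)   # "a.txt" then "flavour='demo'", then the same for "b.txt"
```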
#### File: semver_manager/semvermanager/__init__.py
```python
import os
import re
import sys
import argparse
from typing import List
from .command import Command, Query, QueryError, CommandError, OperationRunner, EchoCommand
class VersionError(ValueError):
"""Exception for handling errors in Version Class"""
pass
class Version:
"""
Handle creation and storage of SEMVER version numbers. In this case
SEMVERs must be of the form a.b.c-tag, Where a,b and c are integers
in the range 0-n and tag is one of `Version.TAGS`.
Version numbers may be bumped by using the various bump functions.
Bumping minor zeros patch, bumping major zeros minor.
"""
TAGS = {0: "alpha", 1: "beta", 2: ""}
FIELDS = ["major", "minor", "patch", "tag", "tag_version"]
FILENAME = "VERSION"
def __init__(self, major=0, minor=0, patch=0, tag="alpha", tag_version=0, lhs="VERSION", separator="="):
"""
:param major: 0-n
:param minor: 0-n
:param patch: 0-n
:param tag: member of Version.TAGs.values()
:param tag_version: The version of the tag value (e.g. alpha0, alpha1 etc.)
        :param lhs: str The candidate str for the left-hand side (label) of a VERSION line
        :param separator: str the separator string between the field name and the version
"""
if isinstance(lhs, str):
self._lhs = lhs
else:
raise VersionError(f"{lhs} is not a str type")
if isinstance(major, int) and major >= 0:
self._major = major
else:
raise VersionError(f"{major} is not an int type or is a negative int")
if isinstance(minor, int) and minor >= 0:
self._minor = minor
else:
raise VersionError(f"{minor} is not an int type or is a negative int")
if isinstance(patch, int) and patch >= 0:
self._patch = patch
else:
raise VersionError(f"{patch} is not an int type or is a negative int")
self._separator = separator
self._tag_index = None
self._tag = None
for k, v in Version.TAGS.items():
if tag == v:
self._tag = v
self._tag_index = k
        if isinstance(tag_version, int) and tag_version >= 0:
self._tag_version = tag_version
else:
raise VersionError(f"{tag_version} is not an int or is negative")
if self._tag_index is None:
raise VersionError(f"'{tag}' is not a valid version tag")
def bump(self, field):
self.bump_map()[field]()
def bump_major(self):
self._patch = 0
self._minor = 0
self._major += 1
def bump_minor(self):
self._patch = 0
self._minor += 1
def bump_patch(self):
self._patch += 1
def bump_tag(self):
if self._tag_index == len(Version.TAGS) - 1:
self._tag_index = 0
else:
self._tag_index += 1
self._tag = Version.TAGS[self._tag_index]
if self._tag == "": # prod
self._tag_version = 0
def bump_tag_version(self):
if self._tag != "":
self.tag_version = self.tag_version + 1
else:
raise VersionError("tag is not 'alpha' or 'beta' no bumping allowed for tag_version")
return self._tag_version
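    # Worked example (illustrative) of the bump semantics described in the class docstring:
    #
    #     v = Version(1, 2, 3, tag="alpha", tag_version=0)   # 1.2.3-alpha0
    #     v.bump("patch")   # -> 1.2.4-alpha0
    #     v.bump("minor")   # -> 1.3.0-alpha0  (bumping minor zeros patch)
    #     v.bump("major")   # -> 2.0.0-alpha0  (bumping major zeros minor and patch)
    #     v.bump("tag")     # -> 2.0.0-beta0   (alpha -> beta)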
@property
def lhs(self):
return self._lhs
@property
def major(self):
return self._major
@major.setter
def major(self, value):
assert isinstance(value, int) and value >= 0
self._major = value
@property
def minor(self):
return self._minor
@minor.setter
def minor(self, value):
assert isinstance(value, int) and value >= 0
self._minor = value
@property
def patch(self):
return self._patch
@patch.setter
def patch(self, value):
assert isinstance(value, int) and value >= 0
self._patch = value
@property
def tag(self):
return self._tag
@tag.setter
def tag(self, value):
assert self._tag in self.TAGS.values()
self._tag = value
@property
def tag_version(self):
return self._tag_version
@tag_version.setter
def tag_version(self, value):
assert isinstance(value, int) and value >= 0
self._tag_version = value
def bump_map(self):
"""
a mapping of field names to corresponding bump methods
:return: a dict of field names to bump methods
"""
return {"major": self.bump_major,
"minor": self.bump_minor,
"patch": self.bump_patch,
"tag": self.bump_tag,
"tag_version": self.bump_tag_version}
def field_map(self):
"""
Mapping of field names to field values.
:return: A dict of field names to their properties.
"""
return {"major": self.major,
"minor": self.minor,
"patch": self.patch,
"tag": self.tag,
"tag_version": self._tag_version}
def field(self, field):
"""
Return the mapping from a field to its corresponding
property.
:param field: str in Version.FIELDS
:return:
"""
if field not in self.FIELDS:
raise VersionError(f"No such field name'{field}'")
return self.field_map()[field]
@staticmethod
def update(filename, version, lhs="VERSION", separator="="):
"""
Find any line starting with "VERSION" and replace that line with
the new `version`.
:param filename: A path to a file containing at least one VERSION line
:param version: The new version object
:param lhs: The label string
        :param separator: label<separator>value
        :return: A tuple (filename, list of line numbers that were updated)
"""
count = 0 # Number of replacements
lines: List[int] = [] # line numbers of replacement lines
with open(filename, "r") as input_file:
with open(filename+".temp", "w") as output_file:
for i, line in enumerate(input_file, 1):
candidate = line.strip()
if candidate.startswith(lhs):
try:
v = Version.parse_version(line, lhs, separator=separator)
if v:
output_file.write(f"{str(version)}\n")
lines.append(i)
count = count + 1
except VersionError:
output_file.write(line)
else:
output_file.write(line)
os.rename(filename, filename+".old")
os.rename(filename+".temp", filename)
return filename, lines
def write(self, filename):
"""
Write a single line containing the version object to filename.
This will overwrite the existing file if it exists.
:param filename: The file to create with the new version object
:return: A tuple of the filename and the version object
"""
with open(filename, "w") as file:
file.write(f"{str(self)}\n")
return filename, self
@staticmethod
def find(filename, lhs="VERSION", separator="="):
"""Look for the first instance of a VERSION definition in a file
and try and parse it as a `Version`"""
version = None
with open(filename, "r") as file:
for line in file:
line = line.strip()
if line.startswith(lhs):
version = Version.parse_version(line, lhs=lhs, separator=separator)
break
return version
def read(self, filename, lhs=None, separator=None):
"""
Read a single line from filename and parse it as version string.
:param filename: a file containing a single line VERSION string.
:param lhs : override the class field string
        :param separator: the character separating the VERSION label from the value
:return: a Version object
:raises VersionError if it fails to parse the file.
"""
with open(filename, "r") as file:
line = file.readline()
            line = line.rstrip()
if not lhs:
lhs = self._lhs
if not separator:
separator = self._separator
return self.parse_version(line, lhs, separator)
# try:
# _, rhs = line.split(self._separator)
# except ValueError as e:
# raise VersionError(e)
#
# try:
# version, tag = rhs.split("-")
# tag = tag.strip()
# tag = tag.strip("\"\'")
# version = version.strip() # whitespace
# version = version.strip("\"\'") # quotes
# except ValueError as e:
# raise VersionError(e)
#
# try:
# major, minor, patch = [int(x) for x in version.split('.')]
# except ValueError as e:
# raise VersionError(e)
#
# return Version(major, minor, patch, tag, tag_version=t separator=separator)
@staticmethod
def parse_version(line: str, lhs: str = "VERSION", separator="=") -> object:
tag_version = 0
line = line.strip()
if line.startswith(lhs):
try:
version_label, rhs = line.split(separator)
version_label = version_label.strip()
rhs = rhs.strip()
if version_label != lhs:
raise VersionError(f"{line} has wrong left hand side {version_label}")
except ValueError as e:
raise VersionError(f"{e} : in '{line}'")
else:
rhs = line
try:
if "-" in rhs:
version, tag = rhs.split("-")
tag = tag.strip()
tag = tag.strip("\"\'")
match = re.match(r"([a-z]+)([0-9]+)", tag, re.I)
if match:
tag, tag_version = match.groups()
tag_version = int(tag_version)
version = version.strip()
version = version.strip("\"\'")
else:
version = rhs.strip()
version = version.strip("\"\'")
tag = ""
tag_version = 0
except ValueError as e:
raise VersionError(f"{e} : in '{rhs}'")
try:
major, minor, patch = [int(x) for x in version.split('.')]
except ValueError as e:
raise VersionError(f"{e} : in {lhs} '{version}'")
return Version(major, minor, patch, tag, tag_version, lhs=lhs, separator=separator)
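    # Parsing sketch (illustrative): lines of the form handled above parse as follows.
    #
    #     Version.parse_version("VERSION = '0.4.1-beta2'")
    #     # -> Version(0, 4, 1, 'beta', 2, 'VERSION', '=')
    #
    #     Version.parse_version("VERSION = '1.0.0'")
    #     # -> tag '' (production), tag_version 0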
def __eq__(self, other):
return self.major == other.major and \
self.minor == other.minor and \
self.patch == other.patch and \
self.tag == other.tag and \
self.tag_version == other.tag_version
def __str__(self):
if self.tag == "":
return f"{self._lhs} {self._separator} '{self._major}.{self._minor}.{self._patch}'"
else:
return f"{self._lhs} {self._separator} '{self._major}.{self._minor}.{self._patch}-{self._tag}{self._tag_version}'"
def __repr__(self):
return f"{self.__class__.__qualname__}({self.major}, {self.minor}, {self.patch}, '{self.tag}', {self.tag_version}, '{self._lhs}', '{self._separator}')"
@property
def bare_version(self):
if self.tag == "":
return f'{self._major}.{self._minor}.{self._patch}'
else:
return f'{self._major}.{self._minor}.{self._patch}-{self._tag}{self.tag_version}'
class BumpCommand(Command):
def __call__(self, filename, label, separator, bump_field):
if not os.path.isfile(filename):
return False, f"No such file:'{filename}' can't bump {bump_field} version"
v = Version.find(filename, label, separator)
if v:
v.bump(bump_field)
Version.update(filename, v, label, separator)
self.q.put((filename, v))
else:
raise VersionError(f"No label or version in {filename}")
return self
class UpdateCommand(Command):
def __call__(self, filename, version, label, separator):
if not os.path.isfile(filename):
raise CommandError(f"No such file:'{filename}' can't bump {label} version")
filename, lines = Version.update(filename=filename, version=version, lhs=label)
self.q.put((filename, lines))
return self
class MakeCommand(Command):
def __init__(self, overwrite):
super().__init__()
self._overwrite = overwrite
def __call__(self, filename, version_label, separator):
v = Version(lhs=version_label, separator=separator)
        f = filename
        if self._overwrite or not os.path.isfile(filename):
            f, v = v.write(filename)
        elif os.path.isfile(filename):
            answer = input(f"Overwrite file '{filename}'? (Y/N) [N]: ")
            if len(answer) > 0 and answer.strip().lower() == 'y':
                f, v = v.write(filename)
else:
f = filename
v = None
return f, v
class GetVersionQuery(Query):
    def __call__(self, filename):
        try:
            if os.path.isfile(filename):
                v = Version.find(filename)
                self.q.put((filename, v))
        except FileNotFoundError as e:
            raise QueryError(e)
        return self
def main(args=None):
if args is None:
args = sys.argv
parser = argparse.ArgumentParser()
parser.add_argument(
"--version",
help="Specify a version in the form major.minor.patch-tag<tag_version>"
)
parser.add_argument(
"--make",
default=False,
action="store_true",
help="Make a new version file")
parser.add_argument(
"--bump",
choices=Version.FIELDS,
help=f"Bump a version field based on the arg {Version.FIELDS}")
parser.add_argument(
"--getversion",
default=False,
action="store_true",
help="Report the current version in the specified files")
parser.add_argument(
"--bareversion",
default=False,
action="store_true",
help="Return the unquoted version string with 'VERSION=' removed")
parser.add_argument(
"--overwrite",
default=False,
action="store_true",
help="overwrite files without checking [default: %(default)s]"
)
parser.add_argument(
"--update",
default=False,
action="store_true",
help="Update multiple version strings in file"
)
parser.add_argument(
"--label",
default="VERSION",
help="field used to determine which line is the version line [default: %(default)s]"
)
parser.add_argument(
"--separator",
default="=",
help="Character used to separate the version label from the version [default: %(default)s]"
)
parser.add_argument(
"filenames",
nargs='*',
help="Files to use as version file"
)
args = parser.parse_args(args)
if args.version:
version = Version.parse_version("VERSION=" + args.version, lhs=args.label)
if args.make:
cmd_runner = OperationRunner(MakeCommand(args.overwrite))
for f, v in cmd_runner(args.filenames, args.label, args.separator):
if v:
print(f"Created version {v} in '{f}'")
else:
print(f"Failed to create version file '{f}'")
if args.getversion:
cmd_runner = OperationRunner(GetVersionQuery())
for cmd in cmd_runner(args.filenames):
for filename, item in cmd.items():
                if args.bareversion:
                    print(f"Version in {filename} is {item.bare_version}")
else:
print(f"Version in {filename} is {item}")
if args.bump:
if args.bump in Version.FIELDS:
cmd_runner = OperationRunner(BumpCommand())
for cmd in cmd_runner(args.filenames, args.label, args.separator, args.bump):
for filename, version in cmd.items():
if version:
print(f"Processed version {version} in file : '{filename}'")
else:
print(f"Could not process '{filename}'")
else:
print(f"{args.bump} is not a valid version field, choose one of {Version.FIELDS}")
sys.exit(1)
if args.update:
cmd_runner = OperationRunner(UpdateCommand())
        for cmd in cmd_runner(args.filenames, version, args.label, args.separator):
for filename, lines in cmd.items():
print(f"Processed {version} in {filename} at lines {lines}")
if __name__ == "__main__":
main(sys.argv[1:]) # clip off the program name
```
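The `main()` entry point above can also be driven programmatically with an argv-style list. A minimal sketch, assuming the package is installed and importable as `semvermanager` and using `VERSION` as an example file name:

```python
from semvermanager import main

# Create a fresh VERSION file (starts at 0.0.0-alpha0), then bump its patch field.
main(["--make", "--overwrite", "VERSION"])
main(["--bump", "patch", "VERSION"])
```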
#### File: semver_manager/semvermanager/semvermgr.py
```python
import argparse
import os
import sys
from semvermanager import Version
from semvermanager.command import Command, OperationRunner
class BumpCommand(Command):
def __call__(self, filename, label, separator, bump_field):
if not os.path.isfile(filename):
print(f'No such file: \'{filename}\' cannot bump {bump_field} version')
return filename, None
v = Version.find(filename, label, separator)
if v:
print(f"Bumping '{bump_field}' value from {v.field(bump_field)} ", end="")
v.bump(bump_field)
print(f"to {v.field(bump_field)} in '{filename}'")
Version.update(filename, v, label, separator)
print(f"new version: {v}")
else:
print(f"Couldn't bump value in {filename}")
return filename, v
class MakeCommand(Command):
def __init__(self, overwrite):
super().__init__()
self._overwrite = overwrite
def __call__(self, filename, version_label, separator):
v = Version(lhs=version_label, separator=separator)
if self._overwrite or not os.path.isfile(filename):
f, v = v.write(filename)
elif os.path.isfile(filename):
answer = input(f"Overwrite file '{filename}' (Y/N [N]: ")
if len(answer) > 0 and answer.strip().lower() == 'y':
f, v = v.write(filename)
else:
f = filename
v = None
return f, v
def script_main():
main(sys.argv)
def main(args=None):
if not args:
args = sys.argv
parser = argparse.ArgumentParser()
parser.add_argument(
"--version",
help="Specify a version in the form major.minor.patch-tag"
)
parser.add_argument(
"--make",
default=False,
action="store_true",
help="Make a new version file")
parser.add_argument(
"--bump",
choices=Version.FIELDS,
help="Bump a version field")
parser.add_argument(
"--getversion",
default=False,
action="store_true",
help="Report the current version in the specified file")
parser.add_argument(
"--bareversion",
default=False,
action="store_true",
help="Return the unquoted version strin with VERSION=")
parser.add_argument(
"--overwrite",
default=False,
action="store_true",
help="overwrite files without checking [default: %(default)s]"
)
parser.add_argument(
"--update",
default=False,
action="store_true",
help="Update multiple version strings in file"
)
parser.add_argument(
"--label",
default="VERSION",
help="field used to determine which line is the version line [default: %(default)s]"
)
parser.add_argument(
"--separator",
default="=",
help="Character used to separate the version label from the version [default: %(default)s]"
)
parser.add_argument(
"filenames",
nargs='*',
help="Files to use as version file"
)
args = parser.parse_args(args)
if args.version:
version = Version.parse_version("VERSION="+args.version, lhs=args.label)
if args.make:
cmd_runner = OperationRunner(MakeCommand(args.overwrite))
if not args.filenames:
args.filenames = ["VERSION"] # make a default file
for f, v in cmd_runner(args.filenames, args.label, args.separator):
if v:
print(f"Created version {v} in '{f}'")
else:
print(f"Failed to create version file '{f}'")
    if args.getversion:
        for filename in args.filenames:
            if os.path.isfile(filename):
                v = Version.find(filename)
                print(v)
            else:
                print(f"No such version file: '{filename}'")
    if args.bareversion:
        for filename in args.filenames:
            if os.path.isfile(filename):
                v = Version.find(filename, args.label)
                print(v.bare_version)
            else:
                print(f"No such version file: '{filename}'")
if args.bump:
if args.bump in Version.FIELDS:
cmd_runner = OperationRunner(BumpCommand())
for filename, v in cmd_runner(args.filenames, args.label, args.separator, args.bump):
if v:
print(f"Processed version {v} in file : '{filename}'")
else:
print(f"Couldn't process '{filename}'")
# if not os.path.isfile(args.filename):
# print(f"No such file:'{args.filename}' can't bump {args.bump} version")
# sys.exit(1)
# v = Version.find(args.filename, args.versionlabel)
# print(f"Bumping '{args.bump}' value from {v.field(args.bump)} ", end="")
# v.bump(args.bump)
# print(f"to {v.field(args.bump)} in '{args.filename}'")
# Version.update(args.filename, v, args.versionlabel)
# print(f"new version: {v}")
else:
print(f"{args.bump} is not a valid version field, choose one of {Version.FIELDS}")
sys.exit(1)
    if args.update:
        for filename in args.filenames:
            print(f"Updating '{filename}' with version '{version}'")
            Version.update(filename=filename, version=version, lhs=args.label)
if __name__ == "__main__":
main(sys.argv[1:]) # clip off the program name
```
#### File: semver_manager/tests/test_command.py
```python
import unittest
from semvermanager import command
class TestCommand(unittest.TestCase):
def test_command(self):
cmd = command.EchoCommand()
cmd(1, 2, 3, 4, this="that", these="those")
self.assertEqual(cmd.q.get(), "1, 2, 3, 4")
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jdrumgoole/thonto",
"score": 3
}
|
#### File: thonto/test/test_functojs.py
```python
import os
import unittest
from thonto.functojs import FuncToJS
from .simple_function import simple_function
def dummy_function(x: int, y: int, z: int):
return [x, y, z]
class TestJSCode(unittest.TestCase):
@staticmethod
def error_function():
pass
def test_functojs(self):
func_str = "function (x) {print ('{}({})'.format (simple_function.__name__, x));};"
converter = FuncToJS(simple_function)
self.assertEqual(func_str, converter.js_code_min)
os.unlink(converter.hash_path)
def test_Code(self):
javascript_code = "function (x, y, z) {return [x, y, z];};"
self.assertRaises(ValueError, FuncToJS, TestJSCode.error_function)
x = FuncToJS(dummy_function, "bish", "bash", "bosh")
self.assertEqual(javascript_code, x.js_code_min)
os.unlink(x.hash_path)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jdrusso/westpa",
"score": 3
}
|
#### File: westpa/analysis/trajectories.py
```python
import concurrent.futures as cf
import functools
import inspect
import operator
import os
import mdtraj
import numpy as np
from tqdm import tqdm
from typing import Callable
from westpa.analysis.core import Walker, Trace
class Trajectory:
"""A callable that returns the trajectory of a walker or trace.
Parameters
----------
fget : callable
Function for retrieving a single trajectory segment. Must take a
:class:`Walker` instance as its first argument and accept a boolean
keyword argument `include_initpoint`. The function should return a
sequence (e.g., a list or ndarray) representing the trajectory of
the walker. If `include_initpoint` is True, the trajectory segment
should include its initial point. Otherwise, the trajectory segment
should exclude its initial point.
fconcat : callable, optional
Function for concatenating trajectory segments. Must take a sequence
of trajectory segments as input and return their concatenation. The
default concatenation function is :func:`concatenate`.
"""
def __init__(self, fget=None, *, fconcat=None):
if fget is None:
return functools.partial(self.__init__, fconcat=fconcat)
if 'include_initpoint' not in inspect.signature(fget).parameters:
raise ValueError("'fget' must accept a parameter 'include_initpoint'")
self._fget = fget
self.fconcat = fconcat
self._segment_collector = SegmentCollector(self)
@property
def segment_collector(self):
"""SegmentCollector: Segment retrieval manager."""
return self._segment_collector
@property
def fget(self):
"""callable: Function for getting trajectory segments."""
return self._fget
@property
def fconcat(self):
"""callable: Function for concatenating trajectory segments."""
return self._fconcat
@fconcat.setter
def fconcat(self, value):
if value is None:
value = concatenate
elif not isinstance(value, Callable):
raise TypeError("'fconcat' must be a callable object")
self._fconcat = value
def __call__(self, obj, include_initpoint=True, **kwargs):
if isinstance(obj, Walker):
value = self.fget(obj, include_initpoint=include_initpoint, **kwargs)
self._validate_segment(value)
return value
if isinstance(obj, Trace):
initpoint_mask = np.full(len(obj), False)
initpoint_mask[0] = include_initpoint
segments = self.segment_collector.get_segments(obj, initpoint_mask, **kwargs)
return self.fconcat(segments)
raise TypeError('argument must be a Walker or Trace instance')
def _validate_segment(self, value):
if not hasattr(value, '__getitem__'):
msg = f"{type(value).__name__!r} object can't be concatenated"
raise TypeError(msg)
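# Minimal usage sketch (illustrative, not part of the module): wrap a per-segment reader
# in a Trajectory. The reader below is a stand-in; a real fget would load coordinates
# from disk for the given walker (walker.pcoords is assumed here purely for illustration).
#
#     def pcoord_fget(walker, include_initpoint=True):
#         pcoords = walker.pcoords
#         return pcoords if include_initpoint else pcoords[1:]
#
#     pcoord_trajectory = Trajectory(pcoord_fget)
#     segment = pcoord_trajectory(walker)    # a single Walker -> one segment
#     whole = pcoord_trajectory(trace)       # a Trace -> concatenated segments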
class SegmentCollector:
"""An object that manages the retrieval of trajectory segments.
Parameters
----------
trajectory : Trajectory
The trajectory to which the segment collector is attached.
use_threads : bool, default False
Whether to use a pool of threads to retrieve trajectory segments
        asynchronously. Setting this parameter to True may be
useful when segment retrieval is an I/O bound task.
max_workers : int, optional
Maximum number of threads to use. The default value is specified in the
`ThreadPoolExecutor <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_
documentation.
show_progress : bool, default False
Whether to show a progress bar when retrieving multiple segments.
"""
def __init__(self, trajectory, use_threads=False, max_workers=None, show_progress=False):
self.trajectory = trajectory
self.use_threads = use_threads
self.max_workers = max_workers
self.show_progress = show_progress
@property
def trajectory(self):
return self._trajectory
@trajectory.setter
def trajectory(self, value):
if not isinstance(value, Trajectory):
msg = f'trajectory must be an instance of {Trajectory}'
raise TypeError(msg)
self._trajectory = value
@property
def use_threads(self):
return self._use_threads
@use_threads.setter
def use_threads(self, value):
if not isinstance(value, bool):
raise TypeError('use_threads must be True or False')
self._use_threads = value
@property
def max_workers(self):
return self._max_workers
@max_workers.setter
def max_workers(self, value):
if value is None:
self._max_workers = None
return
if value <= 0:
raise ValueError('max_workers must be greater than 0')
self._max_workers = value
@property
def show_progress(self):
return self._show_progress
@show_progress.setter
def show_progress(self, value):
if not isinstance(value, bool):
raise ValueError('show_progress must be True or False')
self._show_progress = value
def get_segments(self, walkers, initpoint_mask=None, **kwargs):
"""Retrieve the trajectories of multiple walkers.
Parameters
----------
walkers : sequence of Walker
The walkers for which to retrieve trajectories.
initpoint_mask : sequence of bool, optional
A Boolean mask indicating whether each trajectory segment should
include (True) or exclude (False) its initial point. Default is
all True.
Returns
-------
list of sequences
The trajectory of each walker.
"""
if initpoint_mask is None:
initpoint_mask = np.full(len(walkers), True)
else:
initpoint_mask = np.asarray(initpoint_mask, dtype=bool)
get_segment = functools.partial(self.trajectory, **kwargs)
tqdm_kwargs = dict(
desc='Retrieving segments',
disable=(not self.show_progress),
position=0,
total=len(walkers),
)
if self.use_threads:
with cf.ThreadPoolExecutor(self.max_workers) as executor:
future_to_key = {
executor.submit(get_segment, walker, include_initpoint=i): key
for key, (walker, i) in enumerate(zip(walkers, initpoint_mask))
}
futures = list(tqdm(cf.as_completed(future_to_key), **tqdm_kwargs))
futures.sort(key=future_to_key.get)
segments = (future.result() for future in futures)
else:
it = (get_segment(walker, include_initpoint=i) for walker, i in zip(walkers, initpoint_mask))
segments = tqdm(it, **tqdm_kwargs)
return list(segments)
class BasicMDTrajectory(Trajectory):
"""A MD trajectory stored as in the
`Basic NaCl tutorial <https://github.com/westpa/westpa_tutorials/tree/main/basic_nacl>`_.
Parameters
----------
traj_filename : str, default 'seg.dcd'
parent_filename : str, default 'parent.xml'
top : str or mdtraj.Topology, default 'bstate.pdb'
"""
def __init__(self, traj_filename='seg.dcd', parent_filename='parent.xml', top='bstate.pdb'):
self.traj_filename = traj_filename
self.parent_filename = parent_filename
self.top = top
def fget(walker, include_initpoint=True, atom_indices=None, sim_root='.'):
seg_dir = os.path.join(
sim_root,
'traj_segs',
format(walker.iteration.number, '06d'),
format(walker.index, '06d'),
)
if isinstance(self.top, str):
top = os.path.join(seg_dir, self.top)
else:
top = self.top
path = os.path.join(seg_dir, self.traj_filename)
if top is not None:
traj = mdtraj.load(path, top=top)
else:
traj = mdtraj.load(path)
if include_initpoint:
path = os.path.join(seg_dir, self.parent_filename)
parent = mdtraj.load(path, top=traj.top)
traj = parent.join(traj, check_topology=False)
if atom_indices is not None:
traj.atom_slice(atom_indices, inplace=True)
return traj
super().__init__(fget)
self.segment_collector.use_threads = True
self.segment_collector.show_progress = True
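# Usage sketch (illustrative): for a simulation laid out as in the basic NaCl tutorial,
# with traj_segs/<iter>/<seg>/seg.dcd segment files and a bstate.pdb topology in each
# segment directory, a trace can be stitched into a single mdtraj.Trajectory like this:
#
#     trajectory = BasicMDTrajectory()            # defaults shown in the class above
#     traj = trajectory(trace, sim_root='.')      # returns an mdtraj.Trajectory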
def concatenate(segments):
"""Return the concatenation of a sequence of trajectory segments.
Parameters
----------
segments : sequence of sequences
A sequence of trajectory segments.
Returns
-------
sequence
The concatenation of `segments`.
"""
if isinstance(segments[0], np.ndarray):
return np.concatenate(segments)
if isinstance(segments[0], mdtraj.Trajectory):
return segments[0].join(segments[1:], check_topology=False)
return functools.reduce(operator.concat, segments)
```
#### File: cli/core/w_run.py
```python
import argparse
import logging
import traceback
import westpa
import westpa.work_managers as work_managers
from westpa.work_managers import make_work_manager
log = logging.getLogger('w_run')
def entry_point():
parser = argparse.ArgumentParser('w_run', 'start/continue a WEST simulation')
westpa.rc.add_args(parser)
parser.add_argument(
'--oneseg',
dest='only_one_segment',
action='store_true',
help='only propagate one segment (useful for debugging propagators)',
)
work_managers.environment.add_wm_args(parser)
args = parser.parse_args()
westpa.rc.process_args(args)
work_managers.environment.process_wm_args(args)
run_simulation()
def run_simulation():
work_manager = westpa.rc.work_manager = make_work_manager()
# Load the sim manager and other drivers
sim_manager = westpa.rc.get_sim_manager()
system = westpa.rc.get_system_driver()
data_manager = westpa.rc.get_data_manager()
we_driver = westpa.rc.get_we_driver()
propagator = westpa.rc.get_propagator()
propagator.system = system
data_manager.system = system
we_driver.system = system
sim_manager.data_manager = data_manager
sim_manager.system = system
sim_manager.propagator = propagator
sim_manager.we_driver = we_driver
with work_manager:
if work_manager.is_master:
work_manager.install_sigint_handler()
sim_manager.load_plugins()
log.debug('preparing run')
sim_manager.prepare_run()
try:
log.debug('beginning run')
sim_manager.run()
log.debug('finalizing run')
sim_manager.finalize_run()
except KeyboardInterrupt:
westpa.rc.pstatus('interrupted; shutting down')
except Exception:
westpa.rc.pstatus('exception caught; shutting down')
log.error(traceback.format_exc())
else:
work_manager.run()
if __name__ == '__main__':
entry_point()
```
#### File: core/binning/mab.py
```python
import numpy as np
from westpa.core.binning import FuncBinMapper
def map_mab(coords, mask, output, *args, **kwargs):
'''Binning which adaptively places bins based on the positions of extrema segments and
bottleneck segments, which are where the difference in probability is the greatest
along the progress coordinate. Operates per dimension and places a fixed number of
evenly spaced bins between the segments with the min and max pcoord values. Extrema and
bottleneck segments are assigned their own bins.'''
pca = kwargs.pop("pca", False)
bottleneck = kwargs.pop("bottleneck", True)
nbins_per_dim = kwargs.get("nbins_per_dim")
ndim = len(nbins_per_dim)
if not np.any(mask):
return output
allcoords = np.copy(coords)
allmask = np.copy(mask)
weights = None
isfinal = None
splitting = False
# the segments should be sent in by the driver as half initial segments and half final segments
# allcoords contains all segments
# coords should contain ONLY final segments
if coords.shape[1] > ndim:
if coords.shape[1] > ndim + 1:
isfinal = allcoords[:, ndim + 1].astype(np.bool_)
else:
isfinal = np.ones(coords.shape[0], dtype=np.bool_)
coords = coords[isfinal, :ndim]
weights = allcoords[isfinal, ndim + 0]
mask = mask[isfinal]
splitting = True
# in case where there is no final segments but initial ones in range
if not np.any(mask):
coords = allcoords[:, :ndim]
mask = allmask
weights = None
splitting = False
varcoords = np.copy(coords)
originalcoords = np.copy(coords)
if pca and len(output) > 1:
colavg = np.mean(coords, axis=0)
for i in range(len(coords)):
for j in range(len(coords[i])):
varcoords[i][j] = coords[i][j] - colavg[j]
covcoords = np.cov(np.transpose(varcoords))
eigval, eigvec = np.linalg.eigh(covcoords)
eigvec = eigvec[:, np.argmax(np.absolute(eigvec), axis=1)]
for i in range(len(eigvec)):
if eigvec[i, i] < 0:
eigvec[:, i] = -1 * eigvec[:, i]
for i in range(ndim):
for j in range(len(output)):
coords[j][i] = np.dot(varcoords[j], eigvec[:, i])
maxlist = []
minlist = []
difflist = []
flipdifflist = []
for n in range(ndim):
# identify the boundary segments
maxcoord = np.max(coords[mask, n])
mincoord = np.min(coords[mask, n])
maxlist.append(maxcoord)
minlist.append(mincoord)
# detect the bottleneck segments, this uses the weights
if splitting:
temp = np.column_stack((originalcoords[mask, n], weights[mask]))
sorted_indices = temp[:, 0].argsort()
temp = temp[sorted_indices]
for p in range(len(temp)):
if temp[p][1] == 0:
temp[p][1] = 10 ** -39
fliptemp = np.flipud(temp)
difflist.append(None)
flipdifflist.append(None)
maxdiff = 0
flipmaxdiff = 0
for i in range(1, len(temp) - 1):
comprob = 0
flipcomprob = 0
j = i + 1
while j < len(temp):
comprob = comprob + temp[j][1]
flipcomprob = flipcomprob + fliptemp[j][1]
j = j + 1
diff = -np.log(comprob) + np.log(temp[i][1])
if diff > maxdiff:
difflist[n] = temp[i][0]
maxdiff = diff
flipdiff = -np.log(flipcomprob) + np.log(fliptemp[i][1])
if flipdiff > flipmaxdiff:
flipdifflist[n] = fliptemp[i][0]
flipmaxdiff = flipdiff
# assign segments to bins
# the total number of linear bins + 2 boundary bins each dim
boundary_base = np.prod(nbins_per_dim)
bottleneck_base = boundary_base + 2 * ndim
for i in range(len(output)):
if not allmask[i]:
continue
special = False
holder = 0
if splitting:
for n in range(ndim):
coord = allcoords[i][n]
if bottleneck:
if coord == difflist[n]:
holder = bottleneck_base + 2 * n
special = True
break
elif coord == flipdifflist[n]:
holder = bottleneck_base + 2 * n + 1
special = True
break
if coord == minlist[n]:
holder = boundary_base + 2 * n
special = True
break
elif coord == maxlist[n]:
holder = boundary_base + 2 * n + 1
special = True
break
if not special:
for n in range(ndim):
coord = allcoords[i][n]
nbins = nbins_per_dim[n]
minp = minlist[n]
maxp = maxlist[n]
bins = np.linspace(minp, maxp, nbins + 1)
bin_number = np.digitize(coord, bins) - 1
if isfinal is None or not isfinal[i]:
if bin_number >= nbins:
bin_number = nbins - 1
elif bin_number < 0:
bin_number = 0
elif bin_number >= nbins or bin_number < 0:
raise ValueError("Walker out of boundary")
holder += bin_number * np.prod(nbins_per_dim[:n])
output[i] = holder
return output
class MABBinMapper(FuncBinMapper):
'''Adaptively place bins in between minimum and maximum segments along
    the progress coordinate. Extrema and bottleneck segments are assigned
to their own bins.'''
def __init__(self, nbins, bottleneck=True, pca=False):
kwargs = dict(nbins_per_dim=nbins, bottleneck=bottleneck, pca=pca)
ndim = len(nbins)
n_total_bins = np.prod(nbins) + ndim * (2 + 2 * bottleneck)
super().__init__(map_mab, n_total_bins, kwargs=kwargs)
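# Configuration sketch (illustrative): a 1-D progress coordinate with 5 linear bins gets
# 2 boundary bins and 2 bottleneck bins for that dimension, 9 bins in total
# (np.prod([5]) + 1 * (2 + 2 * True)).
#
#     mapper = MABBinMapper([5], bottleneck=True, pca=False)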
```
#### File: westext/hamsm_restarting/restart_driver.py
```python
import h5py
import logging
import operator
import numpy as np
import westpa
from westpa.cli.core import w_init
from westpa.cli.core import w_run
from westpa.core.extloader import get_object
from westpa.core.segment import Segment
from westpa import analysis
import json
import os
import shutil
import pickle
import importlib.util
import tqdm
import mdtraj as md
from rich.logging import RichHandler
from matplotlib import pyplot as plt
# Ensure this is installed via pip. msm_we's setup.py is all set up for that.
# Navigate to the folder where msm_we is, and run python3 -m pip install .
# If you're doing development on msm_we, add the -e flag to pip, i.e. "python3 -m pip install -e ."
# -e will install it in editable mode, so changes to msm_we will take effect next time it's imported.
# Otherwise, if you modify the msm_we code, you'll need to re-install it through pip.
from msm_we import msm_we
import ray
import tempfile
EPS = np.finfo(np.float64).eps
log = logging.getLogger(__name__)
log.setLevel("INFO")
log.propagate = False
log.addHandler(RichHandler())
msm_we_logger = logging.getLogger("msm_we.msm_we")
msm_we_logger.setLevel("INFO")
# Map structure types to extensions.
# This tells the plugin what extension to put on generated start-state files.
STRUCT_EXTENSIONS = {
md.formats.PDBTrajectoryFile: "pdb",
md.formats.AmberRestartFile: "rst7",
}
EXTENSION_LOCKFILE = 'doing_extension'
def check_target_reached(h5_filename):
"""
Check if the target state was reached, given the data in a WEST H5 file.
Parameters
----------
h5_filename: string
Path to a WESTPA HDF5 data file
"""
with h5py.File(h5_filename, 'r') as h5_file:
# Get the key to the final iteration. Need to do -2 instead of -1 because there's an empty-ish final iteration
# written.
for iteration_key in list(h5_file['iterations'].keys())[-2:0:-1]:
endpoint_types = h5_file[f'iterations/{iteration_key}/seg_index']['endpoint_type']
if Segment.SEG_ENDPOINT_RECYCLED in endpoint_types:
log.debug(f"recycled segment found in file {h5_filename} at iteration {iteration_key}")
return True
return False
def fix_deprecated_initialization(initialization_state):
"""
I changed my initialization JSON schema to use underscores instead of hyphens so I can directly expand it into
keywords arguments to w_init. This just handles any old-style JSON files I still had, so they don't choke and die.
"""
log.debug(f"Starting processing, dict is now {initialization_state}")
# Some of my initial files had this old-style formatting. Handle it for now, but remove eventually
for old_key, new_key in [
('tstate-file', 'tstate_file'),
('bstate-file', 'bstate_file'),
('sstate-file', 'sstate_file'),
('segs-per-state', 'segs_per_state'),
]:
if old_key in initialization_state.keys():
log.warning(
f"This initialization JSON file uses the deprecated " f"hyphenated form for {old_key}. Replace with underscores."
)
value = initialization_state.pop(old_key)
initialization_state[new_key] = value
log.debug(f"Finished processing, dict is now {initialization_state}")
return initialization_state
# TODO: Break this out into a separate module, let it be specified (if it's necessary) as a plugin option
# This may not always be required -- i.e. you may be able to directly output to the h5 file in your propagator
def prepare_coordinates(plugin_config, h5file, we_h5filename):
"""
Copy relevant coordinates from trajectory files into <iteration>/auxdata/coord of the h5 file.
Directly modifies the input h5 file.
Adds ALL coordinates to auxdata/coord.
Adapted from original msmWE collectCoordinates.py script.
Parameters
----------
plugin_config: YAMLConfig object
Stores the configuration options provided to the plugin in the WESTPA configuration file
h5file: h5py.File
WESTPA h5 data file
we_h5filename: string
Name of the WESTPA h5 file
"""
refPDBfile = plugin_config.get('ref_pdb_file')
modelName = plugin_config.get('model_name')
# TODO: Don't need this explicit option, use WEST_SIM_ROOT or something
WEfolder = plugin_config.get('we_folder')
parentTraj = plugin_config.get('parent_traj_filename')
childTraj = plugin_config.get('child_traj_filename')
pcoord_ndim = plugin_config.get('pcoord_ndim', 1)
model = msm_we.modelWE()
log.info('Preparing coordinates...')
# Only need the model to get the number of iterations and atoms
# TODO: Replace this with something more lightweight, get directly from WE
log.debug(f'Doing collectCoordinates on WE file {we_h5filename}')
model.initialize(
[we_h5filename],
refPDBfile,
modelName,
# Pass some dummy arguments -- these aren't important, this model is just created for convenience
# in the coordinate collection. Dummy arguments prevent warnings from being raised.
basis_pcoord_bounds=None,
target_pcoord_bounds=None,
tau=1,
pcoord_ndim=pcoord_ndim,
_suppress_boundary_warning=True,
)
model.get_iterations()
log.debug(f"Found {model.maxIter} iterations")
n_iter = None
for n_iter in tqdm.tqdm(range(1, model.maxIter + 1)):
nS = model.numSegments[n_iter - 1].astype(int)
coords = np.zeros((nS, 2, model.nAtoms, 3))
dsetName = "/iterations/iter_%08d/auxdata/coord" % int(n_iter)
coords_exist = False
try:
dset = h5file.create_dataset(dsetName, np.shape(coords))
except (RuntimeError, ValueError):
log.debug('coords exist for iteration ' + str(n_iter) + ' NOT overwritten')
coords_exist = True
continue
for iS in range(nS):
trajpath = WEfolder + "/traj_segs/%06d/%06d" % (n_iter, iS)
try:
coord0 = np.squeeze(md.load(f'{trajpath}/{parentTraj}', top=model.reference_structure.topology)._xyz)
except OSError:
log.warning("Parent traj file doesn't exist, loading reference structure coords")
coord0 = np.squeeze(model.reference_structure._xyz)
coord1 = np.squeeze(md.load(f'{trajpath}/{childTraj}', top=model.reference_structure.topology)._xyz)
coords[iS, 0, :, :] = coord0
coords[iS, 1, :, :] = coord1
if not coords_exist:
dset[:] = coords
log.debug(f"Wrote coords for {n_iter} iterations.")
def msmwe_compute_ss(plugin_config, west_files):
"""
Prepare and initialize an msm_we model, and use it to predict a steady-state distribution.
1. Load coordinate data
2. Perform dimensionality reduction
3. Compute flux and transition matrices
4. Compute steady-state distribution (via eigenvectors of transition matrix)
5. Compute target-state flux
TODO
----
This function does far too many things. Break it up a bit.
Parameters
----------
plugin_config: YAMLConfig object
Stores the configuration options provided to the plugin in the WESTPA configuration file
    west_files: list of str
        Paths to the WESTPA HDF5 data files used to build the model and estimate steady state.
Returns
-------
ss_alg: np.ndarray
The steady-state distribution
ss_flux: float
Flux into target state
model: modelWE object
The modelWE object produced for analysis.
"""
n_lag = 0
log.debug("Initializing msm_we")
# TODO: Refactor this to use westpa.core.extloader.get_object
# I'm reinventing the wheel a bit here, I can replace almost all this code w/ that
# ##### Monkey-patch modelWE with the user-override functions
override_file = plugin_config.get('user_functions')
# First, import the file with the user-override functions
# This is a decently janky implementation, but it seems to work, and I don't know of a better way of doing it.
# This is nice because it avoids mucking around with the path, which I think is a Good Thing.
# We're given a path to the user-specified file containing overrides
# This comes from https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
# I don't think the name provided here actually matters
user_override_spec = importlib.util.spec_from_file_location("override_module", override_file)
user_overrides = importlib.util.module_from_spec(user_override_spec)
# Make the functions that were overriden in override_file available in the namespace under user_overrides
user_override_spec.loader.exec_module(user_overrides)
# So now we can do the actual monkey-patching of modelWE.
# We monkey-patch at the module level rather than just override the function in the instanced object
# so that the functions retain access to self.
msm_we.modelWE.processCoordinates = user_overrides.processCoordinates
# ##### Done with monkey-patching.
model = msm_we.modelWE()
streaming = plugin_config.get('streaming', False)
refPDBfile = plugin_config.get('ref_pdb_file')
modelName = plugin_config.get('model_name')
n_clusters = plugin_config.get('n_clusters')
tau = plugin_config.get('tau', None)
pcoord_ndim = plugin_config.get('pcoord_ndim', 1)
basis_pcoord_bounds = np.array(plugin_config.get('basis_pcoord_bounds', np.nan), dtype=float)
target_pcoord_bounds = np.array(plugin_config.get('target_pcoord_bounds', np.nan), dtype=float)
if np.isnan(basis_pcoord_bounds).any() or np.isnan(target_pcoord_bounds).any():
log.critical(
"Target and/or basis pcoord bounds were not specified. "
"Set them using the 'basis_pcoord_bounds' or 'target_pcoord_bounds' parameters. "
"'basis/target_pcoord1_min/max' and 'basis/target_pcoord1' are no longer supported. "
"See https://jdrusso.github.io/msm_we/api.html#msm_we.msm_we.modelWE.initialize for details."
)
if tau is None:
log.warning('No tau provided to restarting plugin. Defaulting to 1.')
tau = 1
# Fire up the model object
model.initialize(
west_files,
refPDBfile,
modelName,
basis_pcoord_bounds=basis_pcoord_bounds,
target_pcoord_bounds=target_pcoord_bounds,
tau=tau,
pcoord_ndim=pcoord_ndim,
)
model.dimReduceMethod = plugin_config.get('dim_reduce_method')
model.n_lag = n_lag
log.debug("Loading in iteration data.. (this could take a while)")
# First dimension is the total number of segments
model.get_iterations()
model.get_coordSet(model.maxIter)
model.dimReduce()
first_iter, last_iter = model.first_iter, model.maxIter
clusterFile = modelName + "_clusters_s" + str(first_iter) + "_e" + str(last_iter) + "_nC" + str(n_clusters) + ".h5"
# TODO: Uncomment this to actually load the clusterFile if it exists. For now, disable for development.
exists = os.path.isfile(clusterFile)
exists = False
log.warning("Skipping any potential cluster reloading!")
log.info(f"Launching Ray with {plugin_config.get('n_cpus', 1)} cpus")
ray_tempdir_root = plugin_config.get('ray_temp_dir', None)
if ray_tempdir_root is not None:
ray_tempdir = tempfile.TemporaryDirectory(dir=ray_tempdir_root)
log.info(f"Using {ray_tempdir.name} as temp_dir for Ray")
ray.init(
num_cpus=plugin_config.get('n_cpus', 1), _temp_dir=ray_tempdir.name, ignore_reinit_error=True, include_dashboard=False
)
else:
ray.init(num_cpus=plugin_config.get('n_cpus', 1), ignore_reinit_error=True, include_dashboard=False)
# If a cluster file with the name corresponding to these parameters exists, load clusters from it.
if exists:
log.debug("loading clusters...")
model.load_clusters(clusterFile)
# Otherwise, do the clustering (which will create and save to that file)
else:
# FIXME: This gives the wrong shape, but loading from the clusterfile gives the right shape
log.debug("clustering coordinates into " + str(n_clusters) + " clusters...")
model.cluster_coordinates(n_clusters, streaming=streaming)
first_iter = 1
model.get_fluxMatrix(n_lag, first_iter, last_iter) # extracts flux matrix, output model.fluxMatrixRaw
log.debug(f"Unprocessed flux matrix has shape {model.fluxMatrixRaw.shape}")
model.organize_fluxMatrix() # gets rid of bins with no connectivity, sorts along p1, output model.fluxMatrix
model.get_Tmatrix() # normalizes fluxMatrix to transition matrix, output model.Tmatrix
log.debug(f"Processed flux matrix has shape {model.fluxMatrix.shape}")
model.get_steady_state() # gets steady-state from eigen decomp, output model.pSS
model.get_steady_state_target_flux() # gets steady-state target flux, output model.JtargetSS
# Why is model.pss sometimes the wrong shape? It's "occasionally" returned as a nested array.
# Squeeze fixes it and removes the dimension of length 1, but why does it happen in the first place?
if type(model.pSS) is np.matrix:
ss_alg = np.squeeze(model.pSS.A)
else:
ss_alg = np.squeeze(model.pSS)
ss_flux = model.JtargetSS
log.debug("Got steady state:")
log.debug(ss_alg)
log.debug(ss_flux)
log.info("Completed flux matrix calculation and steady-state estimation!")
log.info("Starting block validation")
num_validation_groups = plugin_config.get('n_validation_groups', 2)
num_validation_blocks = plugin_config.get('n_validation_blocks', 4)
try:
model.do_block_validation(num_validation_groups, num_validation_blocks, use_ray=True)
except Exception as e:
log.exception(e)
log.error("Failed block validation! Continuing with restart, but BEWARE!")
ray.shutdown()
return ss_alg, ss_flux, model
class RestartDriver:
"""
WESTPA plugin to automatically handle estimating steady-state from a WE run, re-initializing a new WE run in that
steady-state, and then running that initialized WE run.
Data from the previous run will be stored in the restart<restart_number>/ subdirectory of $WEST_SIM_ROOT.
This plugin depends on having the start-states implementation in the main WESTPA code, which allows initializing
a WE run using states that are NOT later used for recycling.
These are used so that when the new WE run is initialized, initial structure selection is chosen by w_init, using
weights assigned to the start-states based on MSM bin weight and WE segment weight.
Since it closes out the current WE run and starts a new one, this plugin should run LAST, after all other plugins.
"""
def __init__(self, sim_manager, plugin_config):
"""
Initialize the RestartDriver plugin.
        Pulls the data_manager and sim_manager from the WESTPA run that just completed, along with the plugin configuration provided in west.cfg.
"""
westpa.rc.pstatus("Restart plugin initialized")
if not sim_manager.work_manager.is_master:
westpa.rc.pstatus("Reweighting not master, skipping")
return
self.data_manager = sim_manager.data_manager
self.sim_manager = sim_manager
self.plugin_config = plugin_config
self.restart_file = plugin_config.get('restart_file', 'restart.dat')
self.initialization_file = plugin_config.get('initialization_file', 'restart_initialization.json')
self.extension_iters = plugin_config.get('extension_iters', 0)
self.max_total_iterations = westpa.rc.config.get(['west', 'propagation', 'max_total_iterations'], default=None)
self.base_total_iterations = self.max_total_iterations
self.coord_len = plugin_config.get('coord_len', 2)
self.n_restarts = plugin_config.get('n_restarts', -1)
self.n_runs = plugin_config.get('n_runs', 1)
# Number of CPUs available for parallelizing msm_we calculations
self.parallel_cpus = plugin_config.get('n_cpus', 1)
self.ray_tempdir = plugin_config.get('ray_temp_dir', None)
# .get() might return this as a bool anyways, but be safe
self.debug = bool(plugin_config.get('debug', False))
if self.debug:
log.setLevel("DEBUG")
msm_we_logger.setLevel("DEBUG")
# Default to using all restarts
self.restarts_to_use = plugin_config.get('n_restarts_to_use', self.n_restarts)
assert self.restarts_to_use > 0 or self.restarts_to_use == -1, "Invalid number of restarts to use"
if self.restarts_to_use >= 1:
assert (
self.restarts_to_use == self.restarts_to_use // 1
), "If choosing a decimal restarts_to_use, must be between 0 and 1."
struct_filetype = plugin_config.get('struct_filetype', 'mdtraj.formats.PDBTrajectoryFile')
self.struct_filetype = get_object(struct_filetype)
# This should be low priority, because it closes the H5 file and starts a new WE run. So it should run LAST
# after any other plugins.
self.priority = plugin_config.get('priority', 100) # I think a big number is lower priority...
sim_manager.register_callback(sim_manager.finalize_run, self.prepare_new_we, self.priority)
# Initialize data
self.ss_alg = None
self.ss_dist = None
self.model = None
def get_original_bins(self):
"""
Obtains the WE bins and their probabilities at the end of the previous iteration.
Returns
-------
bins : np.ndarray
Array of WE bins
binprobs: np.ndarray
WE bin weights
"""
we_driver = self.sim_manager.we_driver
bins = we_driver.next_iter_binning
n_bins = len(bins)
binprobs = np.fromiter(map(operator.attrgetter('weight'), bins), dtype=np.float64, count=n_bins)
return bins, binprobs
@property
def cur_iter(self):
"""
Get the current WE iteration.
Returns
-------
int: The current iteration. Subtract one, because in finalize_run the iter has been incremented
"""
return self.sim_manager.n_iter - 1
@property
def is_last_iteration(self):
"""
Get whether this is, or is past, the last iteration in this WE run.
Returns
-------
bool: Whether the current iteration is the final iteration
"""
final_iter = self.sim_manager.max_total_iterations
return self.cur_iter >= final_iter
def prepare_extension_run(self, run_number, restart_state, first_extension=False):
"""
Copy the necessary files for an extension run (versus initializing a fresh run)
Parameters
----------
run_number: int
The index of this run (should be 1-indexed!)
restart_state: dict
Dictionary holding the current state of the restarting procedure
first_extension: bool
True if this is the first run of an extension set. If True, then back up west.cfg, and write the extended
west.cfg file.
"""
log.debug(f"Linking run files from restart0/run{run_number}")
# Copy traj_segs, seg_logs, and west.h5 for restart0/runXX back into ./
# Later: (May only need to copy the latest iteration traj_segs, to avoid tons of back and forth)
try:
shutil.rmtree('traj_segs')
shutil.rmtree('seg_logs')
except OSError as e:
if str(e) == 'Cannot call rmtree on a symbolic link':
os.unlink('traj_segs')
os.unlink('seg_logs')
os.remove(self.data_manager.we_h5filename)
os.symlink(f'restart0/run{run_number}/traj_segs', 'traj_segs')
os.symlink(f'restart0/run{run_number}/seg_logs', 'seg_logs')
if first_extension:
# Get lines to make a new west.cfg by extending west.propagation.max_total_iterations
with open('west.cfg', 'r') as west_config:
lines = west_config.readlines()
for i, line in enumerate(lines):
# Parse out the number of maximum iterations
if 'max_total_iterations' in line:
max_iters = [int(i) for i in line.replace(':', ' ').replace('\n', ' ').split() if i.isdigit()]
new_max_iters = max_iters[0] + self.extension_iters
new_line = f"{line.split(':')[0]}: {new_max_iters}\n"
lines[i] = new_line
break
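# Worked example (hypothetical values): with extension_iters = 10, an in-memory line reading
# "max_total_iterations: 100" becomes "max_total_iterations: 110".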
with open(self.restart_file, 'w') as fp:
json.dump(restart_state, fp)
log.info("First WE extension run ready!")
westpa.rc.pstatus(
f"\n\n===== Restart {restart_state['restarts_completed']}, "
+ f"Run {restart_state['runs_completed'] + 1} extension running =====\n"
)
# TODO: I can't just go straight into a w_run here. w_run expects some things to be set I think, that aren't.
# I can do w_init, and do a new simulation just fine...
# I can do this on ONE run repeatedly just fine
# But if I try to just copy files and continue like this, there's something screwy in state somewhere that
# causes it to fail.
# The error has to do with offsets in the HDF5 file?
# Need to figure out what state would be cleared by w_init
# Frankly, this is a really sketchy way of doing this, but it seems to work...
# I remain suspicious that some under-the-hood state isn't being handled correctly,
# but if it works, it's good enough for now.
westpa.rc.sim_manager.segments = None
shutil.copy(f'restart0/run{run_number}/west.h5', self.data_manager.we_h5filename)
self.data_manager.open_backing()
log.debug(f"Sim manager thought n_iter was {westpa.rc.sim_manager.n_iter}")
log.debug(f"Data manager thought current_iteration was {self.data_manager.current_iteration}")
log.debug(f"{self.sim_manager} vs {westpa.rc.sim_manager}")
if run_number == 1:
westpa.rc.sim_manager.max_total_iterations += self.extension_iters
w_run.run_simulation()
return
def generate_plots(self, restart_directory):
model = self.model
plot_directory = f"{restart_directory}/plots"
if not os.path.exists(plot_directory):
log.info(f"{plot_directory} doesn't exist, creating it.")
os.mkdir(plot_directory)
log.info("Producing flux-profile, pseudocommittor, and target flux comparison plots.")
flux_pcoord_fig, flux_pcoord_ax = plt.subplots()
model.plot_flux(ax=flux_pcoord_ax, suppress_validation=True)
flux_pcoord_fig.text(x=0.1, y=-0.05, s='This flux profile should become flatter after restarting', fontsize=12)
flux_pcoord_ax.legend(bbox_to_anchor=(1.01, 1.0), loc="upper left")
flux_pcoord_fig.savefig(f'{plot_directory}/flux_plot.pdf', bbox_inches="tight")
flux_pseudocomm_fig, flux_pseudocomm_ax = plt.subplots()
model.plot_flux_committor(ax=flux_pseudocomm_ax, suppress_validation=True)
flux_pseudocomm_fig.text(
x=0.1,
y=-0.05,
s='This flux profile should become flatter after restarting.'
'\nThe x-axis is a "pseudo"committor, since it may be '
'calculated from WE trajectories in the one-way ensemble.',
fontsize=12,
)
flux_pseudocomm_ax.legend(bbox_to_anchor=(1.01, 1.0), loc="upper left")
flux_pseudocomm_fig.savefig(f'{plot_directory}/pseudocomm-flux_plot.pdf', bbox_inches="tight")
flux_pcomm_color_fig, flux_pcomm_color_ax = plt.subplots()
model.plot_flux_committor_pcoordcolor(ax=flux_pcomm_color_ax)
flux_pcomm_color_fig.text(x=0.1, y=-0.05, s='This flux profile should become flatter after restarting', fontsize=12)
flux_pcomm_color_ax.legend(bbox_to_anchor=(1.01, 1.0), loc="upper left")
flux_pcomm_color_fig.savefig(f'{plot_directory}/pseudocomm-flux_plot_pcoordcolor.pdf', bbox_inches="tight")
flux_comparison_fig, flux_comparison_ax = plt.subplots(figsize=(7, 3))
# Get haMSM flux estimates
models = [model]
models.extend(model.validation_models)
n_validation_models = len(model.validation_models)
flux_estimates = []
for _model in models:
flux_estimates.append(_model.JtargetSS)
hamsm_flux_colors = iter(plt.rcParams['axes.prop_cycle'].by_key()['color'])
direct_flux_colors = iter(plt.cm.cool(np.linspace(0.2, 0.8, len(model.fileList))))
# Get WE direct flux estimate
for _file in model.fileList:
run = analysis.Run(_file)
last_iter = run.num_iterations
recycled = list(run.iteration(last_iter - 1).recycled_walkers)
target_flux = sum(walker.weight for walker in recycled) / model.tau
# TODO: Correct for time!
if len(_file) >= 15:
short_filename = f"....{_file[-12:]}"
else:
short_filename = _file
if target_flux == 0:
continue
flux_comparison_ax.axhline(
target_flux,
color=next(direct_flux_colors),
label=f"Last iter WE direct {target_flux:.2e}" f"\n ({short_filename})",
linestyle='--',
)
flux_comparison_ax.axhline(
flux_estimates[0], label=f"Main model estimate\n {flux_estimates[0]:.2e}", color=next(hamsm_flux_colors)
)
for i in range(1, n_validation_models + 1):
flux_comparison_ax.axhline(
flux_estimates[i],
label=f"Validation model {i - 1} estimate\n {flux_estimates[i]:.2e}",
color=next(hamsm_flux_colors),
)
flux_comparison_ax.legend(bbox_to_anchor=(1.01, 0.9), loc='upper left')
flux_comparison_ax.set_yscale('log')
flux_comparison_ax.set_ylabel('Flux')
flux_comparison_ax.set_xticks([])
flux_comparison_fig.tight_layout()
flux_comparison_fig.savefig(f'{plot_directory}/hamsm_vs_direct_flux_comparison_plot.pdf', bbox_inches="tight")
def prepare_new_we(self):
"""
This function prepares a new WESTPA simulation using haMSM analysis to accelerate convergence.
The marathon functionality does re-implement some of the functionality of w_multi_west.
However, w_multi_west merges independent WE simulations, which may or may not be desirable.
I think for the purposes of this, it's good to keep the runs completely independent until haMSM model building.
Either that, or I'm just justifying not having known about w_multi_west when I wrote this. TBD.
# TODO: Replace all manual path-building with pathlib
The algorithm is as follows:
1. Check to see if we've just completed the final iteration
2. Handle launching multiple runs, if desired
3. Build haMSM
4. Obtain structures for each haMSM bin
5. Make each structure a start-state, with probability set by (MSM-bin SS prob / # structures in bin)
6. Potentially some renormalization?
7. Start new WE simulation
"""
# Do nothing if it's not the final iteration
if not self.is_last_iteration:
print(self.cur_iter)
return
log.debug("Final iteration, preparing restart")
restart_state = {'restarts_completed': 0, 'runs_completed': 0}
# Check for the existence of the extension lockfile here
doing_extension = os.path.exists(EXTENSION_LOCKFILE)
# Look for a restart.dat file to get the current state (how many restarts have been performed already)
if os.path.exists(self.restart_file):
with open(self.restart_file, 'r') as fp:
restart_state = json.load(fp)
# This is the final iteration of a run, so mark this run as completed
restart_state['runs_completed'] += 1
# Make the folder to store data for this marathon
restart_directory = f"restart{restart_state['restarts_completed']}"
run_directory = f"{restart_directory}/run{restart_state['runs_completed']}"
if not os.path.exists(run_directory):
os.makedirs(run_directory)
# Write coordinates to h5
prepare_coordinates(self.plugin_config, self.data_manager.we_h5file, self.data_manager.we_h5filename)
for data_folder in ['traj_segs', 'seg_logs']:
old_path = data_folder
# If you're doing an extension, this will be a symlink. So no need to copy, just unlink it and move on
if doing_extension and os.path.islink(old_path):
log.debug('Unlinking symlink')
os.unlink(old_path)
os.mkdir(old_path)
continue
new_path = f"{run_directory}/{old_path}"
log.debug(f"Moving {old_path} to {new_path}")
if os.path.exists(new_path):
log.info(f"{new_path} already exists. Removing and overwriting.")
shutil.rmtree(new_path)
try:
os.rename(old_path, new_path)
except FileNotFoundError:
log.warning(f"Folder {old_path} was not found." "This may be normal, but check your configuration.")
else:
# Make a new data folder for the next run
os.mkdir(old_path)
last_run = restart_state['runs_completed'] >= self.n_runs
last_restart = restart_state['restarts_completed'] >= self.n_restarts
# We've just finished a run. Let's check if we have to do any more runs in this marathon before doing a restart.
# In the case of n_runs == 1, then we're just doing a single run and restarting it every so often.
# Otherwise, a marathon consists of multiple runs, and restarts are performed between marathons.
if last_run:
log.info(f"All {self.n_runs} runs in this marathon completed.")
if last_restart:
log.info("All restarts completed! Performing final analysis.")
else:
log.info("Proceeding to prepare a restart.")
# Duplicating this is gross, but given the structure here, my options are either put it above these ifs
# entirely, meaning it'll be unnecessarily run at the end of the final restart, or duplicate it below.
log.info("Preparing coordinates for this run.")
# Now, continue on to haMSM calculation below.
# If we have more runs left to do in this marathon, prepare them
elif not last_run:
log.info(f"Run {restart_state['runs_completed']}/{self.n_runs} completed.")
# TODO: Initialize a new run, from the same configuration as this run was
# On the 1st run, I can write bstates/tstates/sstates into restart files, and use those for spawning
# subsequent runs in the marathon. That way, I don't make unnecessary copies of all those.
# Basis and target states are unchanged. Can I get the original parameters passed to w_init?
# Ideally, I should be able to call w_init with the exact same parameters that went to it the first time
initialization_state = {
'tstate_file': None,
'bstate_file': None,
'sstate_file': None,
'tstates': None,
'bstates': None,
'sstates': None,
'segs_per_state': None,
}
# TODO: Implement this, and get rid of the initialization_file usage right below. Placeholder for now.
if restart_state['runs_completed'] == 1:
# Get and write basis, target, start states and segs per state for this marathon to disk
pass
# Save the WESTPA h5 data from this run
self.data_manager.finalize_run()
shutil.copyfile('west.h5', f"{run_directory}/west.h5")
# If this is a regular, fresh run (not an extension)
if not doing_extension:
if os.path.exists(self.initialization_file):
with open(self.initialization_file, 'r') as fp:
initialization_dict = json.load(fp)
initialization_dict = fix_deprecated_initialization(initialization_dict)
initialization_state.update(initialization_dict)
else:
raise Exception(
"No initialization JSON file provided -- " "I don't know how to start new runs in this marathon."
)
westpa.rc.pstatus(
f"\n\n===== Restart {restart_state['restarts_completed']}, "
+ f"Run {restart_state['runs_completed']+1} initializing =====\n"
)
westpa.rc.pstatus(
f"\nRun: \n\t w_init --tstate-file {initialization_state['tstate_file']} "
+ f"--bstate-file {initialization_state['bstate_file']} "
f"--sstate-file {initialization_state['sstate_file']} "
f"--segs-per-state {initialization_state['segs_per_state']}\n"
)
w_init.initialize(
**initialization_state,
shotgun=False,
)
with open(self.restart_file, 'w') as fp:
json.dump(restart_state, fp)
log.info("New WE run ready!")
westpa.rc.pstatus(
f"\n\n===== Restart {restart_state['restarts_completed']}, "
+ f"Run {restart_state['runs_completed']+1} running =====\n"
)
w_run.run_simulation()
return
# If we're doing an extension set
# Instead of w_initting a new iteration, copy the files from restart0/runXX back into ./
elif doing_extension:
self.prepare_extension_run(run_number=restart_state['runs_completed'] + 1, restart_state=restart_state)
return
log.debug(f"{restart_state['restarts_completed']}/{self.n_restarts} restarts completed")
# Build the haMSM
log.debug("Initializing haMSM")
# Need to write the h5 file and close it out, but I need to get the current bstates first.
original_bstates = self.sim_manager.current_iter_bstates
if original_bstates is None:
original_bstates = self.data_manager.get_basis_states(self.sim_manager.n_iter - 1)
assert original_bstates is not None, "Bstates are none in the current iteration"
original_tstates = self.data_manager.get_target_states(self.cur_iter)
# Flush h5 file writes and copy it to the run directory
self.data_manager.finalize_run()
shutil.copyfile(self.data_manager.we_h5filename, f"{run_directory}/west.h5")
# Use all files in all restarts
# Restarts index at 0, because there's a 0th restart before you've... restarted anything.
# Runs index at 1, because Run 1 is the first run.
# TODO: Let the user pick last half or something in the plugin config.
marathon_west_files = []
# When doing the first restart, restarts_completed is 0 (because the first restart isn't complete yet) and
# the data generated during this restart is in /restart0.
# So when doing the Nth restart, restarts_completed is N-1
# If set to -1, use all restarts
if self.restarts_to_use == -1:
last_N_restarts = 1 + restart_state['restarts_completed']
# If this is an integer, use the last N restarts
elif self.restarts_to_use >= 1:
last_N_restarts = self.restarts_to_use
# If it's a decimal between 0 and 1, use it as a fraction
# At restart 1, and a fraction of 0.5, this should just use restart 1
elif 0 < self.restarts_to_use < 1:
last_N_restarts = int(self.restarts_to_use * (1 + restart_state['restarts_completed']))
# If this fraction is <1, use all until it's not
if last_N_restarts < 1:
last_N_restarts = 1 + restart_state['restarts_completed']
log.debug(f"Last N is {last_N_restarts}")
first_restart = max(1 + restart_state['restarts_completed'] - last_N_restarts, 0)
usable_restarts = range(first_restart, 1 + restart_state['restarts_completed'])
log.info(
f"At restart {restart_state['restarts_completed']}, building haMSM using data from restarts {list(usable_restarts)}"
)
for restart_number in usable_restarts:
for run_number in range(1, 1 + restart_state['runs_completed']):
west_file_path = f"restart{restart_number}/run{run_number}/west.h5"
marathon_west_files.append(west_file_path)
log.debug(f"WESTPA datafile for analysis are {marathon_west_files}")
#
# If this is the first restart, check to see if you got any target state flux
if restart_state['restarts_completed'] == 0:
pass
# Check to see if you got any target flux in ANY runs
target_reached = False
for west_file_path in marathon_west_files:
if check_target_reached(west_file_path):
target_reached = True
break
# If you reached the target, clean up from the extensions and then continue as normal
# If extension_iters is set to 0, then don't do extensions.
if target_reached or self.extension_iters == 0:
log.info("All runs reached target!")
# Do some cleanup from the extension run
if doing_extension and not self.extension_iters == 0:
# Remove the doing_extensions.lck lockfile
os.remove(EXTENSION_LOCKFILE)
westpa.rc.sim_manager.max_total_iterations = self.base_total_iterations
# Otherwise, just continue as normal
pass
# If no runs reached the target, then we need to extend them
elif not target_reached:
log.info("Target not reached. Preparing for extensions.")
# Create the doing_extensions.lck "lockfile" to indicate we're in extend mode (or keep if exists)
# and write the initial number of iterations to it.
if not os.path.exists(EXTENSION_LOCKFILE):
with open(EXTENSION_LOCKFILE, 'w') as lockfile:
lockfile.write(str(self.max_total_iterations))
# Reset runs_completed to 0, and rewrite restart.dat accordingly
restart_state['runs_completed'] = 0
self.prepare_extension_run(run_number=1, restart_state=restart_state, first_extension=True)
return
log.debug("Building haMSM and computing steady-state")
log.debug(f"Cur iter is {self.cur_iter}")
ss_dist, ss_flux, model = msmwe_compute_ss(self.plugin_config, marathon_west_files)
self.ss_dist = ss_dist
self.model = model
log.debug(f'Steady-state distribution: {ss_dist}')
log.info(f"Target steady-state flux is {ss_flux}")
# Obtain cluster-structures
log.debug("Obtaining cluster-structures")
model.update_cluster_structures()
# TODO: Do this with pathlib
struct_directory = f"{restart_directory}/structs"
if not os.path.exists(struct_directory):
os.makedirs(struct_directory)
flux_filename = f"{restart_directory}/JtargetSS.txt"
with open(flux_filename, 'w') as fp:
log.debug(f"Writing flux to {flux_filename}")
fp.write(str(model.JtargetSS))
fp.close()
ss_filename = f"{restart_directory}/pSS.txt"
with open(ss_filename, 'w') as fp:
log.debug(f"Writing pSS to {ss_filename}")
np.savetxt(fp, model.pSS)
fp.close()
# If this is the last run of the last restart, do nothing and exit.
# if restart_state['runs_completed'] >= self.n_runs and restart_state['restarts_completed'] >= self.n_restarts:
# log.info("All restarts completed!")
# return
# Construct start-state file with all structures and their weights
# TODO: Don't explicitly write EVERY structure to disk, or this will be a nightmare for large runs.
# However, for now, it's fine...
log.debug("Writing structures")
# TODO: Include start states from previous runs
sstates_filename = f"{restart_directory}/startstates.txt"
with open(sstates_filename, 'w') as fp:
# Track the total number of segments iterated over
seg_idx = 0
log.info(f"Obtaining potential start structures ({len(model.cluster_structures.items())} bins avail)")
# Can use these for sanity checks
total_weight = 0.0
total_bin_weights = []
# Loop over each set of (bin index, all the structures in that bin)
for (msm_bin_idx, structures) in tqdm.tqdm(model.cluster_structures.items()):
total_bin_weights.append(0)
# Don't put structures in the basis or target
if msm_bin_idx in [model.n_clusters, model.n_clusters + 1]:
continue
# The per-segment bin probability.
# Map a cluster number onto a cluster INDEX, because after cleaning the cluster numbers may no longer
# be consecutive.
bin_prob = self.ss_dist[model.cluster_mapping[msm_bin_idx]] # / len(structures)
if bin_prob == 0:
log.info(f"MSM-Bin {msm_bin_idx} has probability 0, so not saving any structs from it.")
continue
# The total amount of WE weight in this MSM microbin
msm_bin_we_weight = sum(model.cluster_structure_weights[msm_bin_idx])
# Write each structure to disk. Loop over each structure within a bin.
msm_bin_we_weight_tracker = 0
for struct_idx, structure in enumerate(structures):
structure_filename = (
f"{struct_directory}/bin{msm_bin_idx}_" f"struct{struct_idx}.{STRUCT_EXTENSIONS[self.struct_filetype]}"
)
with self.struct_filetype(structure_filename, 'w') as struct_file:
# One structure per segment
seg_we_weight = model.cluster_structure_weights[msm_bin_idx][struct_idx]
msm_bin_we_weight_tracker += seg_we_weight
# Structure weights are set according to Algorithm 5.3 in
# <NAME>. & <NAME>. Optimizing Weighted Ensemble Sampling of Steady States.
# Multiscale Model Sim 18, 646–673 (2020).
structure_weight = seg_we_weight * (bin_prob / msm_bin_we_weight)
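# Worked example (hypothetical numbers): a segment carrying WE weight 0.02, in an MSM
# microbin with total WE weight 0.08 and steady-state probability 0.10, gets a
# start-state weight of 0.02 * (0.10 / 0.08) = 0.025.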
total_bin_weights[-1] += structure_weight
total_weight += structure_weight
topology = model.reference_structure.topology
try:
angles = model.reference_structure.unitcell_angles[0]
lengths = model.reference_structure.unitcell_lengths[0] * 10
# This throws typeerror if reference_structure.unitcell_angles is None, or AttributeError
# if reference_structure.unitcell_angles doesn't exist.
except (TypeError, AttributeError):
angles, lengths = None, None
coords = structure * 10 # Correct units
# Write the structure file
if self.struct_filetype is md.formats.PDBTrajectoryFile:
struct_file.write(coords, topology, modelIndex=1, unitcell_angles=angles, unitcell_lengths=lengths)
elif self.struct_filetype is md.formats.AmberRestartFile:
# AmberRestartFile takes slightly differently named keyword args
struct_file.write(coords, time=None, cell_angles=angles, cell_lengths=lengths)
else:
# Otherwise, YOLO just hope all the positional arguments are in the right place
log.warning(
f"This output filetype ({self.struct_filetype}) is probably supported, "
f"but not explicitly handled."
" You should ensure that it takes argument as (coords, topology)"
)
struct_file.write(coords, topology)
raise Exception("Don't know what extension to use for this filetype")
# Add this start-state to the start-states file
# This path is relative to WEST_SIM_ROOT
fp.write(f'b{msm_bin_idx}_s{struct_idx} {structure_weight} {structure_filename}\n')
seg_idx += 1
# log.info(f"WE weight ({msm_bin_we_weight_tracker:.5e} / {msm_bin_we_weight:.5e})")
# TODO: Fix this check. It's never quite worked right, nor has it ever caught an actual problem, so just
# disable for now.
# In equilibrium, all probabilities count, but in steady-state the last 2 are the target/basis
# Subtract off the probabilities of the basis and target states, since those don't have structures
# assigned to them.
# assert np.isclose(total_weight, 1 - sum(model.pSS[model.n_clusters :])), (
# f"Total steady-state structure weights not normalized! (Total: {total_weight}) "
# f"\n\t pSS: {model.pSS}"
# f"\n\t Total bin weights {total_bin_weights}"
# f"\n\t pSS sum: {sum(model.pSS)}"
# f"\n\t pSS -2 sum: {sum(model.pSS[:-2])}"
# f"\n\t pSS (+target, no basis) sum: {sum(model.pSS[:-2]) + model.pSS[-1]}"
# )
### Start the new simulation
bstates_str = ""
for original_bstate in original_bstates:
orig_bstate_prob = original_bstate.probability
orig_bstate_label = original_bstate.label
orig_bstate_aux = original_bstate.auxref
bstate_str = f"{orig_bstate_label} {orig_bstate_prob} {orig_bstate_aux}\n"
bstates_str += bstate_str
bstates_filename = f"{restart_directory}/basisstates.txt"
with open(bstates_filename, 'w') as fp:
fp.write(bstates_str)
tstates_str = ""
for original_tstate in original_tstates:
orig_tstate_label = original_tstate.label
# TODO: Handle multidimensional pcoords
orig_tstate_pcoord = original_tstate.pcoord[0]
tstate_str = f"{orig_tstate_label} {orig_tstate_pcoord}\n"
tstates_str += tstate_str
tstates_filename = f"{restart_directory}/targetstates.txt"
with open(tstates_filename, 'w') as fp:
fp.write(tstates_str)
# Pickle the model
objFile = f"{restart_directory}/hamsm.obj"
with open(objFile, "wb") as objFileHandler:
log.debug("Pickling model")
pickle.dump(model, objFileHandler, protocol=4)
objFileHandler.close()
# Before finishing this restart, make a plot of the flux profile.
# This is made so the user can see whether the flux profile is flattening out as expected.
self.generate_plots(restart_directory)
# At this point, the restart is completed, and the data for the next one is ready (though still need to make the
# initialization file and such).
if last_restart:
log.info("All restarts completed! Finished.")
return
# Update restart_file file
restart_state['restarts_completed'] += 1
# If we're doing a restart, then reset the number of completed runs to 0 for the next marathon.
restart_state['runs_completed'] = 0
with open(self.restart_file, 'w') as fp:
json.dump(restart_state, fp)
log.info("Initializing new run")
# TODO: Read this from config if available
segs_per_state = 1
old_initialization_path = self.initialization_file
new_initialization_path = f"{restart_directory}/{self.initialization_file}"
log.debug(f"Moving initialization file from {old_initialization_path} to {new_initialization_path}.")
shutil.move(old_initialization_path, new_initialization_path)
initialization_state = {
'tstate_file': tstates_filename,
'bstate_file': bstates_filename,
'sstate_file': sstates_filename,
'tstates': None,
'bstates': None,
'sstates': None,
'segs_per_state': segs_per_state,
}
with open(self.initialization_file, 'w') as fp:
json.dump(initialization_state, fp)
westpa.rc.pstatus(
f"\n\n"
f"===== Restart {restart_state['restarts_completed']}, "
+ f"Run {restart_state['runs_completed']+1} initializing =====\n"
)
westpa.rc.pstatus(
f"\nRun: \n\t w_init --tstate-file {tstates_filename} "
+ f"--bstate-file {bstates_filename} --sstate-file {sstates_filename} --segs-per-state {segs_per_state}\n"
)
w_init.initialize(**initialization_state, shotgun=False)
log.info("New WE run ready!")
westpa.rc.pstatus(f"\n\n===== Restart {restart_state['restarts_completed']} running =====\n")
w_run.run_simulation()
```
|
{
"source": "jdrusso/westpa_test",
"score": 2
}
|
#### File: cli/tools/w_postanalysis_reweight.py
```python
from westpa.tools import WESTMasterCommand, WESTParallelTool
from westpa.cli.tools.w_reweight import RWAverage
# Just a shim to make sure everything works and is backwards compatible.
# We're making sure it has the appropriate functions so that it can be called
# as a regular tool, and not a subcommand.
class PAAverage(RWAverage):
subcommand = 'average'
help_text = ''
default_output_file = 'kinrw.h5'
# This isn't strictly necessary, but for the moment, here it is.
# We really need to modify the underlying class so that we don't pull this sort of stuff if it isn't necessary.
# That'll take some case handling, which is fine.
default_kinetics_file = 'flux_matrices.h5'
class WReweight(WESTMasterCommand, WESTParallelTool):
prog = 'w_postanalysis_reweight'
subcommands = [PAAverage]
subparsers_title = 'calculate state-to-state kinetics by tracing trajectories'
description = '''\
A convenience function to run kinetics/probs. Bin assignments,
including macrostate definitions, are required. (See
"w_assign --help" for more information).
For more information on the individual subcommands this subs in for, run
w_reweight {kinetics/probs} --help.
-----------------------------------------------------------------------------
Command-line options
-----------------------------------------------------------------------------
'''
def entry_point():
print('WARNING: {} is being deprecated. Please use w_reweight instead.'.format(WReweight.prog))
# If we're not really supporting subcommands...
import sys
try:
if sys.argv[1] != 'average':
sys.argv.insert(1, 'average')
except Exception:
sys.argv.insert(1, 'average')
WReweight().main()
if __name__ == '__main__':
entry_point()
```
#### File: core/binning/bins.py
```python
import logging
import numpy as np
log = logging.getLogger(__name__)
EPS = np.finfo(np.float64).eps
class Bin(set):
def __init__(self, iterable=None, label=None):
super().__init__(iterable or [])
self.label = label
def __repr__(self):
return '<{classname} at 0x{id:x}, label={label!r}, count={count:d}, weight={weight:g}>'.format(
classname=self.__class__.__name__, id=id(self), label=self.label, count=len(self), weight=self.weight
)
@property
def weight(self):
'Total weight of all walkers in this bin'
weight = 0.0
for particle in self:
weight += particle.weight
return weight
def reweight(self, new_weight):
"""Reweight all walkers in this bin so that the total weight is new_weight"""
if len(self) == 0 and new_weight == 0:
return
if len(self) == 0 and new_weight != 0:
raise ValueError('cannot reweight empty ParticleCollection')
current_weight = self.weight
log.debug('reweighting collection of {:d} particles from {:g} to {:g}'.format(len(self), current_weight, new_weight))
assert (new_weight == 0 and current_weight == 0) or new_weight > 0
wrat = new_weight / current_weight
for p in self:
p.weight *= wrat
log.debug('new weight: {:g}'.format(self.weight))
assert abs(new_weight - self.weight) <= EPS * len(self)
```
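A minimal usage sketch for the `Bin` reweighting above (the `Walker` stand-in class is an assumption and the import path is taken from the file header; `Bin` only needs objects exposing a `weight` attribute):
```python
from westpa.core.binning.bins import Bin


class Walker:
    """Stand-in walker object; Bin only requires a .weight attribute."""

    def __init__(self, weight):
        self.weight = weight


b = Bin([Walker(0.25), Walker(0.75)], label='example')
print(b.weight)   # 1.0

# Rescale every walker so the bin's total weight becomes 0.5
b.reweight(0.5)
print(b.weight)   # 0.5 (individual weights become 0.125 and 0.375)
```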
#### File: oldtools/aframe/trajwalker.py
```python
import logging
log = logging.getLogger(__name__)
import numpy
class TrajWalker:
"""A class to perform analysis by walking the trajectory tree. A stack is used rather than recursion, or else
the highest number of iterations capable of being considered would be the same as the Python recursion limit.
"""
def __init__(self, data_reader, history_chunksize=100):
self.data_reader = data_reader
self.history_chunksize = history_chunksize
self.n_segs_visited = 0
# TrajTree.count_segs_in_range() is now DataReader.total_segs_in_range()
def trace_to_root(self, n_iter, seg_id):
'''Trace the given segment back to its starting point, returning a list of Segment
objects describing the entire trajectory.'''
segments = []
segment = self.data_reader.get_segments_by_id(n_iter, [seg_id])[0]
segments.append(segment)
while segment.p_parent_id >= 0:
segment = self.data_reader.get_segments_by_id(segment.n_iter - 1, [segment.p_parent_id])[0]
segments.append(segment)
return list(reversed(segments))
def get_trajectory_roots(self, first_iter, last_iter, include_pcoords=True):
'''Get segments which start new trajectories. If first_iter or last_iter is specified, restrict the
set of iterations within which the search is conducted.'''
roots = []
for n_iter in range(first_iter, last_iter + 1):
seg_ids = self.data_reader.get_created_seg_ids(n_iter)
segments = self.data_reader.get_segments_by_id(n_iter, seg_ids, include_pcoords=include_pcoords)
roots.extend(segments)
return roots
def get_initial_nodes(self, first_iter, last_iter, include_pcoords=True):
'''Get segments with which to begin a tree walk -- those alive or created within [first_iter,last_iter].'''
root_ids = dict()
# All trajectories alive or newly created in first_iter are initial nodes
root_ids[first_iter] = set(self.data_reader.get_seg_ids(first_iter))
# Find trajectories created in [first_iter, last_iter]
for n_iter in range(first_iter, last_iter + 1):
seg_ids = self.data_reader.get_created_seg_ids(n_iter)
try:
root_ids[n_iter].update(seg_ids)
except KeyError:
root_ids[n_iter] = set(seg_ids)
# Convert to Segment objects
segments = []
for (n_iter, id_set) in root_ids.items():
segments.extend(self.data_reader.get_segments_by_id(n_iter, id_set, include_pcoords=include_pcoords))
return segments
def trace_trajectories(
self, first_iter, last_iter, callable, include_pcoords=True, cargs=None, ckwargs=None, get_state=None, set_state=None
):
"""
Walk the trajectory tree depth-first, calling
``callable(segment, children, history, *cargs, **ckwargs)`` for each segment
visited. ``segment`` is the segment being visited, ``children`` is that
segment's children, ``history`` is the chain of segments leading
to ``segment`` (not including ``segment``). get_state and set_state are
used to record and reset, respectively, any state specific to
``callable`` when a new branch is traversed.
"""
cargs = cargs or tuple()
ckwargs = ckwargs or dict()
# Either both or neither of external state getter/setter required
if (get_state or set_state) and not (get_state and set_state):
raise ValueError('either both or neither of get_state/set_state must be specified')
# This will grow to contain the maximum trajectory length
history = numpy.empty((self.history_chunksize,), numpy.object_)
roots = self.get_initial_nodes(first_iter, last_iter, include_pcoords)
for root in roots:
children = self.data_reader.get_children(root, include_pcoords)
# Visit the root node of each tree unconditionally
callable(root, children, [], *cargs, **ckwargs)
self.n_segs_visited += 1
state_stack = [{'node': root, 'children': children, 'len_history': 0, 'ext': get_state() if get_state else None}]
# Walk the tree, depth-first
while state_stack:
state = state_stack.pop(-1)
node = state['node']
children = state['children']
len_history = state['len_history']
if set_state:
set_state(state['ext'])
# Descend as far as we can
while node.n_iter < last_iter and len(children):
# Save current state before descending
state_stack.append(
{'node': node, 'children': children, 'len_history': len_history, 'ext': get_state() if get_state else None}
)
# Add an item to the historical record
if len_history >= history.shape[0]:
history.resize((history.shape[0] + self.history_chunksize,))
history[len_history] = node
len_history += 1
node = children.pop(-1)
children = self.data_reader.get_children(node, include_pcoords)
# Visit the new node as we descend
callable(node, children, history[:len_history], *cargs, **ckwargs)
self.n_segs_visited += 1
```
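A sketch of a visitor function usable with `trace_trajectories`; the commented call shows the intended wiring, with the `data_reader` construction left out because it depends on the analysis setup (names there are assumptions):
```python
def count_visits(segment, children, history, counts):
    """Visitor: tally how many segments are visited per iteration.

    ``history`` is the chain of segments leading to ``segment`` (excluding it),
    so ``len(history)`` gives this node's depth in the trajectory tree.
    """
    counts[segment.n_iter] = counts.get(segment.n_iter, 0) + 1


# Hypothetical wiring:
# walker = TrajWalker(data_reader)
# counts = {}
# walker.trace_trajectories(first_iter=1, last_iter=50, callable=count_visits, cargs=(counts,))
```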
#### File: oldtools/stats/accumulator.py
```python
import numpy
NAN = float('nan')
class RunningStatsAccumulator:
def __init__(self, shape, dtype=numpy.float64, count_dtype=numpy.uint, weight_dtype=numpy.float64, mask_value=NAN):
self.sum = numpy.zeros(shape, dtype)
self.sqsum = numpy.zeros(shape, dtype)
self.weight = numpy.zeros(shape, weight_dtype)
self.count = numpy.zeros(shape, count_dtype)
self.mask_value = mask_value
def incorporate(self, index, value, weight):
self.count[index] += 1
self.weight[index] += weight
self.sum[index] += weight * value
self.sqsum[index] += weight * value * value
def average(self):
valid = self.count > 0
avg = numpy.empty_like(self.sum)
avg[valid] = self.sum[valid] / self.weight[valid]
avg[~valid] = self.mask_value
return avg
mean = average
def std(self):
valid = self.count > 0
vavg = self.average()[valid]
std = numpy.empty_like(self.sqsum)
std[valid] = (self.sqsum[valid] / self.weight[valid] - vavg * vavg) ** 0.5
std[~valid] = self.mask_value
return std
```
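A short usage sketch for `RunningStatsAccumulator` (the import path is inferred from the file header above):
```python
from westpa.oldtools.stats.accumulator import RunningStatsAccumulator

acc = RunningStatsAccumulator(shape=(3,))

# Accumulate weighted samples into index 0; indices 1 and 2 stay untouched
acc.incorporate(0, 2.0, weight=1.0)
acc.incorporate(0, 4.0, weight=3.0)

print(acc.average())  # [3.5 nan nan]    -> weighted mean (1*2 + 3*4) / (1 + 3)
print(acc.std())      # [~0.866 nan nan] -> sqrt(sqsum/weight - mean**2)
```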
#### File: westpa/tools/data_reader.py
```python
import numpy as np
from numpy import index_exp
from .core import WESTToolComponent
import westpa
from westpa.core.extloader import get_object
from westpa.core.h5io import FnDSSpec, MultiDSSpec, SingleSegmentDSSpec, SingleIterDSSpec
def _get_parent_ids(n_iter, iter_group):
seg_index = iter_group['seg_index']
try:
return seg_index['parent_id'][:]
except ValueError:
# field not found
offsets = seg_index['parents_offset'][:]
all_parents = iter_group['parents'][...]
return np.require(all_parents.take(offsets), dtype=np.int64)
else:
return seg_index['parent_id']
class WESTDataReader(WESTToolComponent):
'''Tool for reading data from WEST-related HDF5 files. Coordinates finding
the main HDF5 file from west.cfg or command line arguments, caching of certain
kinds of data (eventually), and retrieving auxiliary data sets from various
places.'''
def __init__(self):
super().__init__()
self.data_manager = westpa.rc.get_data_manager()
self.we_h5filename = None
self._weight_dsspec = None
self._parent_id_dsspec = None
def add_args(self, parser):
group = parser.add_argument_group('WEST input data options')
group.add_argument(
'-W',
'--west-data',
dest='we_h5filename',
metavar='WEST_H5FILE',
help='''Take WEST data from WEST_H5FILE (default: read from the HDF5 file specified in west.cfg).''',
)
def process_args(self, args):
if args.we_h5filename:
self.data_manager.we_h5filename = self.we_h5filename = args.we_h5filename
else:
self.we_h5filename = self.data_manager.we_h5filename
def open(self, mode='r'):
self.data_manager.open_backing(mode)
def close(self):
self.data_manager.close_backing()
def __getattr__(self, key):
return getattr(self.data_manager, key)
@property
def weight_dsspec(self):
if self._weight_dsspec is None:
assert self.we_h5filename is not None
self._weight_dsspec = SingleIterDSSpec(self.we_h5filename, 'seg_index', slice=index_exp['weight'])
return self._weight_dsspec
@property
def parent_id_dsspec(self):
if self._parent_id_dsspec is None:
assert self.we_h5filename is not None
# self._parent_id_dsspec = SingleIterDSSpec(self.we_h5filename, 'seg_index', slice=index_exp['parent_id'])
self._parent_id_dsspec = FnDSSpec(self.we_h5filename, _get_parent_ids)
return self._parent_id_dsspec
def __enter__(self):
self.open('r')
return self
def __exit__(self, exc_type, exc_val, exc_traceback):
self.close()
return False
class WESTDSSynthesizer(WESTToolComponent):
'''Tool for synthesizing a dataset for analysis from other datasets. This
may be done using a custom function, or a list of "data set specifications".
It is anticipated that if several source datasets are required, then a tool
will have multiple instances of this class.'''
group_name = 'input dataset options'
def __init__(self, default_dsname=None, h5filename=None):
super().__init__()
self.h5filename = h5filename
self.default_dsname = default_dsname
self.dsspec = None
def add_args(self, parser):
igroup = parser.add_argument_group(self.group_name).add_mutually_exclusive_group(required=not bool(self.default_dsname))
igroup.add_argument(
'--construct-dataset',
help='''Use the given function (as in module.function) to extract source data.
This function will be called once per iteration as function(n_iter, iter_group)
to construct data for one iteration. Data returned must be indexable as
[seg_id][timepoint][dimension]''',
)
igroup.add_argument('--dsspecs', nargs='+', metavar='DSSPEC', help='''Construct source data from one or more DSSPECs.''')
def process_args(self, args):
if args.construct_dataset:
self.dsspec = FnDSSpec(self.h5filename, get_object(args.construct_dataset, path=['.']))
elif args.dsspecs:
self.dsspec = MultiDSSpec([SingleSegmentDSSpec.from_string(dsspec, self.h5filename) for dsspec in args.dsspecs])
else:
# we can only get here if a default dataset name was specified
assert self.default_dsname
self.dsspec = SingleSegmentDSSpec(self.h5filename, self.default_dsname)
```
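As a concrete illustration of the `--construct-dataset` contract described in the help text above, here is a minimal sketch; it assumes the iteration group carries the standard `pcoord` dataset, and `my_module.construct_dataset` (how it would be referenced on the command line) is a hypothetical name:
```python
import numpy as np


def construct_dataset(n_iter, iter_group):
    """Called once per iteration; must return data indexable as
    [seg_id][timepoint][dimension]."""
    pcoord = iter_group['pcoord'][...]           # (n_segs, n_timepoints, ndim)
    return pcoord[:, :, :1].astype(np.float64)   # keep only the first pcoord dimension
```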
#### File: westpa/tools/progress.py
```python
import westpa
from westpa.core.progress import ProgressIndicator
from westpa.tools.core import WESTToolComponent
class ProgressIndicatorComponent(WESTToolComponent):
def __init__(self):
super().__init__()
self.indicator = None
def add_args(self, parser):
pass
def process_args(self, args):
self.indicator = ProgressIndicator()
if westpa.rc.quiet_mode or westpa.rc.verbose_mode or westpa.rc.debug_mode:
self.indicator.fancy = False
```
#### File: westext/stringmethod/fourier_fitting.py
```python
import numpy as np
import scipy
import scipy.optimize
class FourierFit:
def __init__(self, P=2, ndims=2, maxiters=100, tol=1.0e-6):
super().__init__()
self.P = P
self.maxiters = maxiters
self.ndims = ndims
self.tol = tol
self.pp = []
self.t0 = None
self.w0 = None
def calc_string(self, w, t, x_meas):
tlen = len(t)
t = np.linspace(0.0, 1.0, tlen)
x_est = x_meas[0, :] + (x_meas[-1, :] - x_meas[0, :]) * t[:, np.newaxis]
for i in range(self.ndims):
for j in range(self.P):
x_est[:, i] += w[i, j] * np.sin((j + 1) * np.pi * t)
return x_est
def _optimize_dist(self, tk, x_meas, w, k):
x_target = x_meas[k, :]
x_est = x_meas[0, :] + (x_meas[-1, :] - x_meas[0, :]) * tk
for i in range(self.ndims):
for j in range(self.P):
x_est[i] += w[i, j] * np.sin((j + 1) * np.pi * tk)
err = x_target - x_est
return err
def _optimize_w(self, w, x_meas, t, k, weight):
x_target = x_meas[:, k]
x_est = x_meas[0, k] + (x_meas[-1, k] - x_meas[0, k]) * t
for j in range(self.P):
x_est += w[j] * np.sin((j + 1) * np.pi * t)
err = weight * (x_target - x_est)
return err
def optimize(self, data, weight, w0, t0):
ncenters = data.shape[0]
self.w0 = w0
self.t0 = t0
if weight is None:
weight = np.ones_like(t0)
for iiter in range(self.maxiters):
self.pp.append(self.calc_string(self.w0, self.t0, data))
if iiter > 0:
err = np.sum((self.pp[-1] - self.pp[-2]) ** 2) / ncenters
print('{} -- {}'.format(iiter, err))
if err < self.tol:
break
else:
print(iiter)
# Optimize tk
for ci in range(ncenters):
self.t0[ci] = scipy.optimize.leastsq(self._optimize_dist, self.t0[ci], args=(data, self.w0, ci))[0]
# Optimize wij
for k in range(self.ndims):
self.w0[k, :] = scipy.optimize.leastsq(self._optimize_w, self.w0[k, :], args=(data, self.t0, k, weight))[0]
```
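A self-contained sketch of driving `FourierFit` on synthetic data (the import path follows the file header above; the array shapes mirror how `optimize` indexes its arguments):
```python
import numpy as np
from westpa.westext.stringmethod.fourier_fitting import FourierFit

ncenters, ndims, P = 10, 2, 2

# Synthetic 2-D "string": image centers along a curved path
t_true = np.linspace(0.0, 1.0, ncenters)
data = np.stack([t_true, np.sin(np.pi * t_true)], axis=1)   # (ncenters, ndims)

fit = FourierFit(P=P, ndims=ndims, maxiters=20)
w0 = np.zeros((ndims, P))   # initial Fourier coefficients, one row per dimension
t0 = t_true.copy()          # initial parametrization of each center

fit.optimize(data, weight=None, w0=w0, t0=t0)
smoothed = fit.pp[-1]       # latest fitted string, shape (ncenters, ndims)
```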
#### File: work_managers/zeromq/work_manager.py
```python
import json
import logging
import multiprocessing
import socket
from .core import ZMQCore, Message, Task, Result, ZMQWorkerMissing, ZMQWMEnvironmentError, IsNode
from .core import PassiveMultiTimer
from .core import randport
from .worker import ZMQWorker
from .node import ZMQNode
import westpa.work_managers as work_managers
from westpa.work_managers import WorkManager, WMFuture
import zmq
from collections import deque
log = logging.getLogger(__name__)
class ZMQWorkManager(ZMQCore, WorkManager, IsNode):
@classmethod
def add_wm_args(cls, parser, wmenv=None):
if wmenv is None:
wmenv = work_managers.environment.default_env
wm_group = parser.add_argument_group('options for ZeroMQ ("zmq") work manager (master or node)')
wm_group.add_argument(
wmenv.arg_flag('zmq_mode'),
metavar='MODE',
choices=('master', 'node', 'server', 'client'),
help='Operate as a master (server) or a node (workers/client). '
+ '"server" is a deprecated synonym for "master" and "client" is a '
+ 'deprecated synonym for "node".',
)
wm_group.add_argument(
wmenv.arg_flag('zmq_comm_mode'),
metavar='COMM_MODE',
choices=('ipc', 'tcp'),
help='Use the given communication mode -- TCP or IPC (Unix-domain) -- sockets '
+ 'for communication within a node. IPC (the default) may be more '
+ 'efficient but is not available on (exceptionally rare) systems '
+ 'without node-local storage (e.g. /tmp); on such systems, TCP may be used instead.',
)
wm_group.add_argument(
wmenv.arg_flag('zmq_write_host_info'),
metavar='INFO_FILE',
help='Store hostname and port information needed to connect to this instance '
+ 'in INFO_FILE. This allows the master and nodes assisting in '
+ 'coordinating the communication of other nodes to choose ports '
+ 'randomly. Downstream nodes read this file with '
+ wmenv.arg_flag('zmq_read_host_info')
+ ' and know how to connect.',
)
wm_group.add_argument(
wmenv.arg_flag('zmq_read_host_info'),
metavar='INFO_FILE',
help='Read hostname and port information needed to connect to the master '
+ '(or other coordinating node) from INFO_FILE. '
+ 'This allows the master and nodes assisting in '
+ 'coordinating the communication of other nodes to choose ports '
+ 'randomly, writing that information with '
+ wmenv.arg_flag('zmq_write_host_info')
+ ' for this instance to read.',
)
wm_group.add_argument(
wmenv.arg_flag('zmq_upstream_rr_endpoint'),
metavar='ENDPOINT',
help='ZeroMQ endpoint to which to send request/response (task and result) ' + 'traffic toward the master.',
)
wm_group.add_argument(
wmenv.arg_flag('zmq_upstream_ann_endpoint'),
metavar='ENDPOINT',
help='ZeroMQ endpoint on which to receive announcement '
+ '(heartbeat and shutdown notification) traffic from the master.',
)
wm_group.add_argument(
wmenv.arg_flag('zmq_downstream_rr_endpoint'),
metavar='ENDPOINT',
help='ZeroMQ endpoint on which to listen for request/response ' + '(task and result) traffic from subsidiary workers.',
)
wm_group.add_argument(
wmenv.arg_flag('zmq_downstream_ann_endpoint'),
metavar='ENDPOINT',
help='ZeroMQ endpoint on which to send announcement ' + '(heartbeat and shutdown notification) traffic toward workers.',
)
wm_group.add_argument(
wmenv.arg_flag('zmq_master_heartbeat'),
metavar='MASTER_HEARTBEAT',
type=float,
help='Every MASTER_HEARTBEAT seconds, the master announces its presence ' + 'to workers.',
)
wm_group.add_argument(
wmenv.arg_flag('zmq_worker_heartbeat'),
metavar='WORKER_HEARTBEAT',
type=float,
help='Every WORKER_HEARTBEAT seconds, workers announce their presence ' + 'to the master.',
)
wm_group.add_argument(
wmenv.arg_flag('zmq_timeout_factor'),
metavar='FACTOR',
type=float,
help='Scaling factor for heartbeat timeouts. '
+ "If the master doesn't hear from a worker in WORKER_HEARTBEAT*FACTOR, "
+ "the worker is assumed to have crashed. If a worker doesn't hear from "
+ "the master in MASTER_HEARTBEAT*FACTOR seconds, the master is assumed "
+ "to have crashed. Both cases result in shutdown. ",
)
wm_group.add_argument(
wmenv.arg_flag('zmq_startup_timeout'),
metavar='STARTUP_TIMEOUT',
type=float,
help='Amount of time (in seconds) to wait for communication between '
+ 'the master and at least one worker. This may need to be changed '
+ 'on very large, heavily-loaded computer systems that start all processes '
+ 'simultaneously. ',
)
wm_group.add_argument(
wmenv.arg_flag('zmq_shutdown_timeout'),
metavar='SHUTDOWN_TIMEOUT',
type=float,
help='Amount of time (in seconds) to wait for workers to shut down.',
)
@classmethod
def from_environ(cls, wmenv=None):
if wmenv is None:
wmenv = work_managers.environment.default_env
# determine mode
mode = wmenv.get_val('zmq_mode', 'master').lower()
if mode in {'master', 'server'}:
mode = 'master'
elif mode in {'node', 'client'}:
mode = 'node'
else:
raise ValueError('invalid ZMQ work manager mode {!r}'.format(mode))
# determine number of workers
# 0 with mode=='master' is a dedicated master
# 0 with mode=='node' is a dedicated communications process (former ZMQRouter)
n_workers = wmenv.get_val('n_workers', multiprocessing.cpu_count(), int)
# We set this at the class level, because outside of testing, a node either
# can support IPC or it can't, and there is no obvious need (currently)
# to support both modes on an instance-by-instance basis
comm_mode = wmenv.get_val('zmq_comm_mode', cls.default_comm_mode)
ZMQWorkManager.internal_transport = comm_mode
ZMQWorker.internal_transport = comm_mode
ZMQNode.internal_transport = comm_mode
write_host_info = wmenv.get_val('zmq_write_host_info')
read_host_info = wmenv.get_val('zmq_read_host_info')
master_heartbeat = wmenv.get_val('zmq_master_heartbeat', cls.default_master_heartbeat, float)
worker_heartbeat = wmenv.get_val('zmq_worker_heartbeat', cls.default_worker_heartbeat, float)
timeout_factor = wmenv.get_val('zmq_timeout_factor', cls.default_timeout_factor, float)
startup_timeout = wmenv.get_val('zmq_startup_timeout', cls.default_startup_timeout, float)
if mode == 'master':
instance = ZMQWorkManager(n_workers)
else: # mode =='node'
upstream_info = {}
if read_host_info:
upstream_info.update(cls.read_host_info(read_host_info))
log.debug('upstream_info: {!r}'.format(upstream_info))
upstream_rr_endpoint = wmenv.get_val('zmq_upstream_rr_endpoint', upstream_info.get('rr_endpoint'))
upstream_ann_endpoint = wmenv.get_val('zmq_upstream_ann_endpoint', upstream_info.get('ann_endpoint'))
if not (upstream_rr_endpoint and upstream_ann_endpoint):
raise ZMQWMEnvironmentError('at least one upstream endpoint unspecified')
# expand hostnames, if present, to IP addresses
# reject wildcard hostnames, which is a logic error (can't connect to a host
# without specifying an address)
upstream_rr_endpoint = cls.canonicalize_endpoint(upstream_rr_endpoint, allow_wildcard_host=False)
upstream_ann_endpoint = cls.canonicalize_endpoint(upstream_ann_endpoint, allow_wildcard_host=False)
log.debug('upstream_rr_endpoint = {}'.format(upstream_rr_endpoint))
log.debug('upstream_ann_endpoint = {}'.format(upstream_ann_endpoint))
instance = ZMQNode(
upstream_ann_endpoint=upstream_ann_endpoint, upstream_rr_endpoint=upstream_rr_endpoint, n_local_workers=n_workers
)
# Both server and node bind downstream endpoints, so that users get fan-out communications
# "for free" when starting up a computational node
downstream_rr_endpoint = cls.canonicalize_endpoint(
wmenv.get_val('zmq_downstream_rr_endpoint', 'tcp://*:{}'.format(randport()))
)
downstream_ann_endpoint = cls.canonicalize_endpoint(
wmenv.get_val('zmq_downstream_ann_endpoint', 'tcp://*:{}'.format(randport()))
)
instance.downstream_rr_endpoint = downstream_rr_endpoint
instance.downstream_ann_endpoint = downstream_ann_endpoint
instance.master_beacon_period = master_heartbeat
instance.worker_beacon_period = worker_heartbeat
instance.timeout_factor = timeout_factor
instance.startup_timeout = startup_timeout
assert isinstance(instance, IsNode)
for worker in instance.local_workers:
worker.master_beacon_period = master_heartbeat
worker.worker_beacon_period = worker_heartbeat
worker.timeout_factor = timeout_factor
worker.startup_timeout = startup_timeout
# We always write host info (since we are always either master or node)
# we choose not to in the special case that write_host_info is '' but not None
# (None implies nothing found on command line or in environment variables, but ''
# implies that it was found somewhere but it is empty)
if write_host_info is not None and write_host_info != '':
instance.write_host_info(write_host_info)
log.debug('prepared {!r} with:'.format(instance))
log.debug('n_workers = {}'.format(n_workers))
for attr in (
'master_beacon_period',
'worker_beacon_period',
'startup_timeout',
'timeout_factor',
'downstream_rr_endpoint',
'downstream_ann_endpoint',
):
log.debug('{} = {!r}'.format(attr, getattr(instance, attr)))
return instance
@classmethod
def read_host_info(cls, filename):
return json.load(open(filename, 'rt'))
@classmethod
def canonicalize_endpoint(cls, endpoint, allow_wildcard_host=True):
if endpoint.startswith('ipc://'):
return endpoint
elif endpoint.startswith('tcp://'):
fields = endpoint[6:].split(':')
# get IP address
if fields[0] != '*':
ipaddr = socket.gethostbyname(fields[0])
else:
if allow_wildcard_host:
ipaddr = '*'
else:
raise ValueError('wildcard host not permitted')
# get/generate port
try:
port = fields[1]
except IndexError:
# no port given; select one
port = randport()
else:
port = int(fields[1])
return 'tcp://{}:{}'.format(ipaddr, port)
else:
raise ValueError('unrecognized/unsupported endpoint: {!r}'.format(endpoint))
def __init__(self, n_local_workers=1):
ZMQCore.__init__(self)
WorkManager.__init__(self)
IsNode.__init__(self, n_local_workers)
# Futures indexed by task ID
self.futures = dict()
# Tasks pending distribution
self.outgoing_tasks = deque()
# Tasks being processed by workers (indexed by worker_id)
self.assigned_tasks = dict()
# Identity information and last contact from workers
self.worker_information = dict() # indexed by worker_id
self.worker_timeouts = PassiveMultiTimer() # indexed by worker_id
# Number of seconds between checks to see which workers have timed out
self.worker_timeout_check = 5.0
# Amount of time to wait for stray requests to arrive so that workers shut down properly
self.shutdown_timeout = 0.5
self.master_id = self.node_id
@property
def n_workers(self):
return len(self.worker_information)
def submit(self, fn, args=None, kwargs=None):
if self.futures is None:
# We are shutting down
raise ZMQWMEnvironmentError('work manager is shutting down')
future = WMFuture()
task = Task(fn, args or (), kwargs or {}, task_id=future.task_id)
self.futures[task.task_id] = future
self.outgoing_tasks.append(task)
# Wake up the communications loop (if necessary) to announce new tasks
self.send_inproc_message(Message.TASKS_AVAILABLE)
return future
def submit_many(self, tasks):
if self.futures is None:
# We are shutting down
raise ZMQWMEnvironmentError('work manager is shutting down')
futures = []
for (fn, args, kwargs) in tasks:
future = WMFuture()
task = Task(fn, args, kwargs, task_id=future.task_id)
self.futures[task.task_id] = future
self.outgoing_tasks.append(task)
futures.append(future)
# Wake up the communications loop (if necessary) to announce new tasks
self.send_inproc_message(Message.TASKS_AVAILABLE)
return futures
def send_message(self, socket, message, payload=None, flags=0):
message = Message(message, payload)
message.master_id = self.node_id
super().send_message(socket, message, payload, flags)
def handle_result(self, socket, msg):
self.send_ack(socket, msg)
with self.message_validation(msg):
assert msg.message == Message.RESULT
assert isinstance(msg.payload, Result)
assert msg.payload.task_id in self.futures
assert self.assigned_tasks[msg.src_id].task_id == msg.payload.task_id
result = msg.payload
future = self.futures.pop(result.task_id)
del self.assigned_tasks[msg.src_id]
if result.exception is not None:
future._set_exception(result.exception, result.traceback)
else:
future._set_result(result.result)
def handle_task_request(self, socket, msg):
if not self.outgoing_tasks:
# No tasks available
self.send_nak(socket, msg)
else:
task = self.outgoing_tasks.popleft()
worker_id = msg.src_id
self.assigned_tasks[worker_id] = task
self.send_message(socket, Message.TASK, task)
def update_worker_information(self, msg):
if msg.message == Message.IDENTIFY:
with self.message_validation(msg):
assert isinstance(msg.payload, dict)
self.worker_information[msg.src_id] = msg.payload
else:
self.worker_information[msg.src_id] = {}
try:
self.worker_timeouts.reset(msg.src_id)
except KeyError:
self.worker_timeouts.add_timer(msg.src_id, self.worker_beacon_period * self.timeout_factor)
def check_workers(self):
expired_worker_ids = self.worker_timeouts.which_expired()
for expired_worker_id in expired_worker_ids:
try:
worker_description = '{!s} ({!s})'.format(
expired_worker_id, self.worker_information[expired_worker_id]['description']
)
except KeyError:
worker_description = str(expired_worker_id)
self.log.error('no contact from worker {}: {}'.format(expired_worker_id, worker_description))
self.remove_worker(expired_worker_id)
def remove_worker(self, worker_id):
try:
expired_task = self.assigned_tasks.pop(worker_id)
except KeyError:
pass
else:
self.log.error('aborting task {!r} running on expired worker {!s}'.format(expired_task, worker_id))
future = self.futures.pop(expired_task.task_id)
future._set_exception(ZMQWorkerMissing('worker running this task disappeared'))
del self.worker_information[worker_id]
def shutdown_clear_tasks(self):
'''Abort pending tasks with error on shutdown.'''
while self.futures:
task_id, future = self.futures.popitem()
future._set_exception(ZMQWMEnvironmentError('work manager shut down during task'))
self.futures = None
def comm_loop(self):
self.context = zmq.Context()
rr_socket = self.context.socket(zmq.REP)
ann_socket = self.context.socket(zmq.PUB)
for endpoint in (self.local_rr_endpoint, self.downstream_rr_endpoint):
if endpoint:
rr_socket.bind(endpoint)
for endpoint in (self.local_ann_endpoint, self.downstream_ann_endpoint):
if endpoint:
ann_socket.bind(endpoint)
inproc_socket = self.context.socket(zmq.SUB)
inproc_socket.setsockopt(zmq.SUBSCRIBE, b'')
inproc_socket.bind(self.inproc_endpoint)
poller = zmq.Poller()
poller.register(inproc_socket, zmq.POLLIN)
poller.register(rr_socket, zmq.POLLIN)
timers = PassiveMultiTimer()
timers.add_timer('tasks_avail', self.master_beacon_period)
timers.add_timer('master_beacon', self.master_beacon_period)
timers.add_timer('worker_timeout_check', self.worker_beacon_period * self.timeout_factor)
timers.add_timer('startup_timeout', self.startup_timeout)
timers.reset()
self.log.debug('master beacon period: {!r}'.format(self.master_beacon_period))
self.log.debug('startup timeout: {!r}'.format(self.startup_timeout))
peer_found = False
try:
# Send a master alive message immediately; it will get discarded if necessary
self.send_message(ann_socket, Message.MASTER_BEACON)
while True:
# If a timer is already expired, next_expiration_in() will return 0, which
# zeromq interprets as infinite wait; so instead we select a 1 ms wait in this
# case.
timeout = (timers.next_expiration_in() or 0.001) * 1000
# Wake up every second to check for signals
timeout = min(timeout, 1000)
poll_results = dict(poller.poll(timeout))
if inproc_socket in poll_results:
msgs = self.recv_all(inproc_socket, validate=False)
# Check for shutdown; do nothing else if shutdown is signalled
if Message.SHUTDOWN in (msg.message for msg in msgs):
self.log.debug('shutdown received')
break
# Check for any other wake-up messages
for msg in msgs:
if msg.message == Message.TASKS_AVAILABLE:
self.send_message(ann_socket, Message.TASKS_AVAILABLE)
if rr_socket in poll_results:
msg = self.recv_message(rr_socket)
self.update_worker_information(msg)
if msg.message == Message.TASK_REQUEST:
self.handle_task_request(rr_socket, msg)
elif msg.message == Message.RESULT:
self.handle_result(rr_socket, msg)
else:
self.send_ack(rr_socket, msg)
if self.worker_information:
peer_found = True
if timers.expired('tasks_avail'):
if self.outgoing_tasks:
self.send_message(ann_socket, Message.TASKS_AVAILABLE)
timers.reset('tasks_avail')
if timers.expired('master_beacon'):
self.send_message(ann_socket, Message.MASTER_BEACON)
timers.reset('master_beacon')
if peer_found and timers.expired('worker_timeout_check'):
self.check_workers()
if not self.worker_information:
self.log.error('all workers disappeared; exiting')
break
timers.reset('worker_timeout_check')
if not peer_found and timers.expired('startup_timeout'):
self.log.error('startup phase elapsed with no contact from workers; shutting down')
while self.futures:
future = self.futures.popitem()[1]
future._set_exception(ZMQWorkerMissing('no workers available'))
break
# Post a shutdown message
self.log.debug('sending shutdown on ann_socket')
self.send_message(ann_socket, Message.SHUTDOWN)
poller.unregister(inproc_socket)
# Clear tasks
self.shutdown_clear_tasks()
# Clear incoming queue of requests, to let clients exit request/reply states gracefully
# (clients will still timeout in these states if necessary)
timers.add_timer('shutdown', self.shutdown_timeout)
while not timers.expired('shutdown'):
poll_results = dict(poller.poll(self.shutdown_timeout / 10 * 1000))
if rr_socket in poll_results:
msg = self.recv_message(rr_socket)
self.send_nak(rr_socket, msg)
finally:
self.context.destroy(linger=1)
self.context = None
self.remove_ipc_endpoints()
def startup(self):
IsNode.startup(self)
super().startup()
def shutdown(self):
self.signal_shutdown()
IsNode.shutdown(self)
self.join()
super().shutdown()
```
|
{
"source": "jdrusso/westpa",
"score": 2
}
|
#### File: westpa/tests/test_sim_manager.py
```python
import argparse
import os
from unittest import TestCase
from unittest.mock import MagicMock
import tempfile
import pytest
import numpy as np
import westpa
from westpa.core.binning.assign import RectilinearBinMapper
from westpa.core.segment import Segment
from westpa.core.states import BasisState
from westpa.core.sim_manager import PropagationError
class TestSimManager(TestCase):
def setUp(self):
parser = argparse.ArgumentParser()
westpa.rc.add_args(parser)
here = os.path.dirname(__file__)
os.environ['WEST_SIM_ROOT'] = os.path.join(here, 'fixtures', 'odld')
config_file_name = os.path.join(here, 'fixtures', 'odld', 'west.cfg')
args = parser.parse_args(['-r={}'.format(config_file_name)])
westpa.rc.process_args(args)
self.sim_manager = westpa.rc.get_sim_manager()
self.test_dir = tempfile.mkdtemp()
self.hdf5 = os.path.join(self.test_dir, "west.h5")
self.basis_states = [BasisState(label="label", probability=1.0)]
self.segments = [self.segment(0.0, 1.5, weight=0.125) for _i in range(4)] + [
self.segment(1.5, 0.5, weight=0.125) for _i in range(4)
]
self.sim_manager.we_driver.new_iteration()
self.sim_manager.we_driver.assign(self.segments)
self.sim_manager.we_driver.construct_next()
self.sim_manager.segments = {segment.seg_id: segment for segment in self.segments}
self.sim_manager.incomplete_segments = self.sim_manager.segments
self.sim_manager.current_iter_istates = self.sim_manager.segments
self.sim_manager.completed_segments = self.sim_manager.segments
self.sim_manager.report_bin_statistics = MagicMock(return_value=True)
data = self.sim_manager.we_driver.rc.get_data_manager()
data.we_h5filename = self.hdf5
data.prepare_backing()
data.create_ibstate_group([])
data.create_initial_states(1)
data.save_target_states([])
data.update_segments = MagicMock(return_value=None)
n_iter = 0
it_name = data.iter_group_name(n_iter)
for group in ["seg_index", "parents", "ibstates", "pcoord"]:
data.we_h5file.create_group(it_name + "/" + group)
data.get_new_weight_data = MagicMock(return_value=None)
data.get_segments = MagicMock(return_value=self.segments)
self.sim_manager.we_driver.rc.get_data_manager = MagicMock(return_value=data)
self.sim_manager.n_iter = n_iter
def tearDown(self):
westpa.rc._sim_manager = None
westpa.rc._system = None
westpa.rc._data_manager = None
del os.environ['WEST_SIM_ROOT']
def dummy_callback_one(self):
system = self.sim_manager.system
bounds = [0.0, 1.0, 2.0, 3.0]
system.bin_mapper = RectilinearBinMapper([bounds])
def dummy_callback_two(self):
system = self.sim_manager.system
bounds = [0.0, 1.0, 2.0, 5.0]
system.bin_mapper = RectilinearBinMapper([bounds])
def segment(self, init_pcoord, final_pcoord, weight=1.0):
segment = Segment(n_iter=1, seg_id=1123, pcoord=self.sim_manager.system.new_pcoord_array(), weight=weight)
segment.pcoord[0] = init_pcoord
segment.pcoord[1] = final_pcoord
segment.parent_id = 1
return segment
def test_sim_manager(self):
        self.assertEqual(self.sim_manager.n_propagated, 0)
        self.assertEqual(len(self.sim_manager._callback_table), 0)
def test_register_callback(self):
hook = self.sim_manager.prepare_new_iteration
self.sim_manager.register_callback(hook, self.dummy_callback_one, 3)
self.sim_manager.register_callback(hook, self.dummy_callback_two, 0)
self.assertTrue(hook in self.sim_manager._callback_table)
callbacks = self.sim_manager._callback_table.get(hook, [])
self.assertTrue((3, self.dummy_callback_one.__name__, self.dummy_callback_one) in callbacks) # noqa
self.assertTrue((0, self.dummy_callback_two.__name__, self.dummy_callback_two) in callbacks) # noqa
def test_invoke_callback(self):
hook = self.sim_manager.prepare_new_iteration
self.sim_manager.register_callback(hook, self.dummy_callback_one, 3)
self.sim_manager.register_callback(hook, self.dummy_callback_two, 0)
self.sim_manager.invoke_callbacks(hook)
system = self.sim_manager.system
self.assertTrue(np.all(system.bin_mapper.boundaries == np.array([0.0, 1.0, 2.0, 3.0]))) # noqa
def test_process_config(self):
self.sim_manager.process_config()
self.assertTrue(self.sim_manager.do_gen_istates)
        self.assertEqual(self.sim_manager.propagator_block_size, 10000)
        self.assertFalse(self.sim_manager.save_transition_matrices)
        self.assertEqual(self.sim_manager.max_run_walltime, 10800)
        self.assertEqual(self.sim_manager.max_total_iterations, 100)
def test_load_plugins(self):
self.sim_manager.load_plugins()
def test_report_bin_statistics(self):
self.sim_manager.report_bin_statistics([0.0, 1.0, 2.0, 5.0])
def test_get_bstate_pcoords(self):
self.sim_manager.get_bstate_pcoords(self.basis_states)
def test_report_basis_states(self):
self.sim_manager.report_basis_states(self.basis_states)
def test_report_target_states(self):
self.sim_manager.report_target_states(self.basis_states)
@pytest.mark.skip(reason="Cannot currently test WESimManager.initialize_simulation()")
def test_initialize_simulation(self):
# TODO: determine how to test self.simulation_manager.initialize_simulation()
pass
def test_prepare_iteration(self):
self.sim_manager.prepare_new_iteration()
self.sim_manager.prepare_iteration()
def test_finalize_iteration(self):
self.sim_manager.finalize_iteration()
def test_get_istate_futures(self):
self.sim_manager.get_istate_futures()
def test_propagate(self):
westpa.core.states.pare_basis_initial_states = MagicMock(return_value=([], []))
        self.sim_manager.propagate()
def test_save_bin_data(self):
self.sim_manager.save_bin_data()
def test_check_propagation(self):
self.assertRaises(PropagationError, self.sim_manager.check_propagation)
def test_run_we(self):
self.sim_manager.run_we()
def test_run(self):
self.sim_manager.run()
def test_prepare_run(self):
self.sim_manager.prepare_run()
def test_finalize_run(self):
self.sim_manager.finalize_run()
def test_pre_propagation(self):
self.sim_manager.pre_propagation()
def test_post_propagation(self):
self.sim_manager.post_propagation()
def test_pre_we(self):
self.sim_manager.pre_we()
def test_post_we(self):
self.sim_manager.post_we()
```
#### File: tests/test_tools/conftest.py
```python
import pytest
import os
import glob
from shutil import copyfile, copy
import tempfile
import westpa
REFERENCE_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'refs')
H5_FILENAME = 'west.h5'
CFG_FILENAME = 'west.cfg'
refs_cfg_file = 'west_ref.cfg'
refs_h5_file = 'west_ref.h5'
# TODO: Is putting this here, outside of any function bad practice?
# Need it here so that clear_state doesn't take an argument...
STARTING_PATH = os.getcwd()
def copy_ref(dest_dir):
for filename in glob.glob(os.path.join(REFERENCE_PATH, '*.*')):
copy(filename, dest_dir)
@pytest.fixture
def ref_3iter(request):
"""
    Fixture that prepares a simulation directory containing a completed
    3-iteration WESTPA run (west.h5) plus the config file west.cfg.
"""
test_dir = tempfile.mkdtemp()
os.chdir(test_dir)
copy_ref(test_dir)
copyfile(os.path.join(REFERENCE_PATH, 'west_3iter.h5'), H5_FILENAME)
copyfile(os.path.join(REFERENCE_PATH, 'west_init_ref.cfg'), CFG_FILENAME)
request.cls.cfg_filepath = CFG_FILENAME
request.cls.h5_filepath = H5_FILENAME
os.environ['WEST_SIM_ROOT'] = test_dir
request.addfinalizer(clear_state)
@pytest.fixture
def ref_cfg(request, tmpdir):
"""
Fixture that prepares a simulation directory with a populated west.cfg file.
"""
test_dir = str(tmpdir)
os.chdir(test_dir)
copy_ref(test_dir)
copyfile(os.path.join(REFERENCE_PATH, 'west_init_ref.cfg'), CFG_FILENAME)
copyfile(os.path.join(REFERENCE_PATH, 'west_init_ref.h5'), "west_init_ref.h5")
request.cls.cfg_filepath = CFG_FILENAME
request.cls.h5_filepath = H5_FILENAME
request.cls.ref_h5_filepath = 'west_init_ref.h5'
os.environ['WEST_SIM_ROOT'] = test_dir
westpa.rc = westpa.core._rc.WESTRC()
request.addfinalizer(clear_state)
@pytest.fixture
def ref_initialized(request, tmpdir):
"""
    Fixture that prepares a simulation directory containing an initialized
    WESTPA system (west.h5) plus the config file west.cfg.
"""
test_dir = str(tmpdir)
os.chdir(test_dir)
copy_ref(test_dir)
copyfile(os.path.join(REFERENCE_PATH, 'west_init_ref.h5'), H5_FILENAME)
copyfile(os.path.join(REFERENCE_PATH, 'west_init_ref.cfg'), CFG_FILENAME)
request.cls.cfg_filepath = CFG_FILENAME
request.cls.h5_filepath = H5_FILENAME
os.environ['WEST_SIM_ROOT'] = test_dir
westpa.rc = westpa.core._rc.WESTRC()
request.addfinalizer(clear_state)
@pytest.fixture
def ref_50iter(request, tmpdir):
"""
    Fixture that prepares a simulation directory containing a completed
    50-iteration WESTPA run (west.h5) plus the config file west.cfg.
"""
test_dir = str(tmpdir)
os.chdir(test_dir)
copy_ref(test_dir)
copyfile(os.path.join(REFERENCE_PATH, 'west_ref.h5'), H5_FILENAME)
copyfile(os.path.join(REFERENCE_PATH, 'west_ref.cfg'), CFG_FILENAME)
request.cls.cfg_filepath = CFG_FILENAME
request.cls.h5_filepath = H5_FILENAME
os.environ['WEST_SIM_ROOT'] = test_dir
westpa.rc = westpa.core._rc.WESTRC()
request.addfinalizer(clear_state)
@pytest.fixture
def ref_multi(request, tmpdir):
"""
Fixture that prepares a simulation directory for w_multi_west, including a master
folder with sub folders 01, 02, 03 containing west_aux_ref.h5 renamed as west.h5.
"""
test_dir = str(tmpdir)
os.chdir(test_dir)
copy_ref(test_dir)
os.mkdir('01')
os.mkdir('02')
os.mkdir('03')
copyfile(os.path.join(REFERENCE_PATH, 'west_aux_ref.h5'), "01/west.h5")
copyfile(os.path.join(REFERENCE_PATH, 'west_aux_ref.h5'), "02/west.h5")
copyfile(os.path.join(REFERENCE_PATH, 'west_aux_ref.h5'), "03/west.h5")
copyfile(os.path.join(REFERENCE_PATH, 'west_ref.cfg'), CFG_FILENAME)
request.cls.cfg_filepath = CFG_FILENAME
request.cls.h5_filepath = H5_FILENAME
os.environ['WEST_SIM_ROOT'] = test_dir
westpa.rc = westpa.core._rc.WESTRC()
request.addfinalizer(clear_state)
def clear_state():
os.chdir(STARTING_PATH)
del os.environ['WEST_SIM_ROOT']
westpa.rc = westpa.core._rc.WESTRC()
```
#### File: tests/test_tools/test_init.py
```python
from h5diff import H5Diff
import westpa.cli.core.w_init
from common import MockArgs
class Test_W_Init:
def test_run_w_init(self, ref_cfg):
'''Tests initialization of a WESTPA simulation system from a prebuilt .cfg'''
# This test is named in such a way so it always runs before test_w_assign.py. It will fail otherwise.
# The argument processing is just looking for an object with these attributes
args = MockArgs(force=True, rcfile=self.cfg_filepath, verbosity='verbose')
westpa.rc.process_args(args)
westpa.cli.core.w_init.initialize(
tstates=None,
tstate_file=None,
bstates=['initial,1.0'],
bstate_file=None,
sstates=None,
sstate_file=None,
segs_per_state=1,
shotgun=False,
)
# h5 files contain some internal information that includes timestamps, so I can't just compare md5 checksums
# to ensure that w_init is producing the same output.
# Instead, use my H5Diff class.
# If the checked contents differ, an AssertionError will be raised.
diff = H5Diff(self.ref_h5_filepath, self.h5_filepath)
diff.check()
```
#### File: tests/test_tools/test_w_direct.py
```python
import argparse
import os
import pytest
import shutil
import unittest
from h5diff import H5Diff
from westpa.cli.tools.w_direct import entry_point, DAll
from unittest import mock
class Test_W_Direct(unittest.TestCase):
test_name = 'W_DIRECT'
def test_run_w_direct(self):
'''Testing if w_direct runs as expected and the direct.h5 file looks good.'''
ref_dir = os.path.join(os.path.dirname(__file__), '../refs')
shutil.copy2(os.path.join(ref_dir, 'west_ref.h5'), './west.h5')
os.system('w_direct all')
assert os.path.isfile('./direct.h5'), "The direct.h5 file was not generated."
diff = H5Diff(os.path.join(ref_dir, 'direct_ref.h5'), './direct.h5')
diff.check()
os.remove('direct.h5')
os.remove('west.h5')
@pytest.mark.skip(reason="work-in-progress test that uses entry point")
class Test_W_Direct_New:
def test_run_w_direct(self, ref_50iter):
with mock.patch(
target='argparse.ArgumentParser.parse_args',
return_value=argparse.Namespace(
verbosity='debug',
rcfile=self.cfg_filepath,
work_manager=None,
max_queue_length=None,
west_subcommand=DAll(1),
we_h5filename=self.h5_filepath,
construct_dataset=None,
dsspecs=None,
output='direct.h5',
kinetics='direct.h5',
first_iter=1,
last_iter=None,
step_iter=None,
assignments='assign_ref.h5',
evolution_mode=None,
subsample=None,
config_from_file=True,
scheme='TEST',
bootstrap=None,
correl=None,
alpha=0.05,
acalpha=None,
nsets=None,
window_frac=1.0,
display_averages=True,
),
):
entry_point()
diff = H5Diff('./direct_ref.h5', './direct.h5')
diff.check()
# clean up
os.remove('direct.h5')
```
#### File: tests/test_tools/test_w_pdist.py
```python
import os
from unittest import mock
from h5diff import H5Diff
from westpa.cli.tools.w_pdist import entry_point
import argparse
class Test_W_PDIST:
'''Class to test w_pdist works to generate a file and that it is the same as the sample pdist.h5 file.'''
def test_run_w_pdist(self, ref_50iter):
'''Testing if w_pdist runs as expected and the pdist.h5 file looks good.'''
with mock.patch(
target='argparse.ArgumentParser.parse_args',
return_value=argparse.Namespace(
verbosity='debug',
rcfile=self.cfg_filepath,
max_queue_length=None,
we_h5filename=self.h5_filepath,
construct_dataset=None,
dsspecs=None,
group_name='pcoord',
first_iter=1,
last_iter=None,
bins='100',
output='pdist.h5',
ignore_out_of_range=False,
compress=False,
work_manager=None,
n_workers=None,
),
):
entry_point()
assert os.path.isfile('./pdist.h5'), "The pdist.h5 file was not generated."
diff = H5Diff('pdist_ref.h5', 'pdist.h5')
diff.check()
# clean up
os.remove('pdist.h5')
```
|
{
"source": "JDRyder/stardust",
"score": 3
}
|
#### File: stardust/leds/rgbLed.py
```python
import board
import neopixel
import time
pixels = neopixel.NeoPixel(board.D21, 1)
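# Note: the color tuples below appear to assume the NeoPixel library's
# default GRB byte order (green, red, blue) rather than RGB, which is why
# GREEN is (255, 0, 0) and RED is (0, 255, 0). This is inferred from the
# values themselves, not stated in the original source.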
GREEN = (255, 0, 0) #
RED = (0,255,0) #
BLUE = (0,0,255) #
YELLOW = (255,255,0) #
CYAN = (255,0,255) #
VIOLET = (0,127,255) #
WHITE = (255,255,255) #
OFF = (0,0,0) #
def off():
pixels[0] = OFF
def startup():
pixels[0] = GREEN
time.sleep(1)
pixels[0] = RED
time.sleep(1)
pixels[0] = BLUE
time.sleep(1)
pixels[0] = YELLOW
time.sleep(1)
pixels[0] = CYAN
time.sleep(1)
pixels[0] = VIOLET
time.sleep(1)
pixels[0] = WHITE
time.sleep(1)
pixels[0] = OFF
def statusOk():
pixels[0] = GREEN
time.sleep(0.5)
pixels[0] = OFF
time.sleep(0.5)
pixels[0] = GREEN
time.sleep(0.5)
pixels[0] = OFF
time.sleep(0.5)
def bmpError():
pixels[0] = BLUE
time.sleep(1)
pixels[0] = OFF
time.sleep(1)
pixels[0] = BLUE
time.sleep(1)
pixels[0] = OFF
time.sleep(1)
def vemlError():
pixels[0] = YELLOW
time.sleep(1)
pixels[0] = OFF
time.sleep(1)
pixels[0] = YELLOW
time.sleep(1)
pixels[0] = OFF
time.sleep(1)
def samError():
pixels[0] = RED
time.sleep(1)
pixels[0] = OFF
time.sleep(1)
pixels[0] = RED
time.sleep(1)
pixels[0] = OFF
time.sleep(1)
def scd30Error():
pixels[0] = CYAN
time.sleep(1)
pixels[0] = OFF
time.sleep(1)
pixels[0] = CYAN
time.sleep(1)
pixels[0] = OFF
time.sleep(1)
```
#### File: stardust/sensors/veml6075.py
```python
import time
import board
import busio
import logging
import adafruit_veml6075
i2c = busio.I2C(board.SCL, board.SDA)
veml = adafruit_veml6075.VEML6075(i2c, integration_time=100)
stardustLogger = logging.getLogger("StardustLogger")
def uv():
print("UV a:", veml.uva)
print("UV b:", veml.uvb)
print("UV index:", veml.uv_index)
def logUv():
stardustLogger.debug("veml6075.logUv()")
stardustLogger.info("UV a:" + str(veml.uva))
stardustLogger.info("UV b:" + str(veml.uvb))
stardustLogger.info("UV index:" + str(veml.uv_index))
```
#### File: JDRyder/stardust/stardust.py
```python
import logging
from logging.handlers import TimedRotatingFileHandler
logFormatter = logging.Formatter("%(asctime)s - %(levelname)s :\t%(message)s")
timedHandler = TimedRotatingFileHandler(filename="./logs/stardust.log", when="m", interval=10, backupCount=0)
timedHandler.setFormatter(logFormatter)
stardustLogger = logging.getLogger("StardustLogger")
stardustLogger.addHandler(timedHandler)
stardustLogger.setLevel(logging.INFO)
try:
import leds.rgbLed as rgb
except:
stardustLogger.error("Error importing led script. Check file structure."
try:
import sensors.veml6075 as veml
except:
rgb.vemlError()
stardustLogger.error("Error importing veml6075.")
try:
import sensors.sam_m8q as sam
except:
rgb.samError()
stardustLogger.error("Error importing sam_m8q.")
try:
import sensors.scd_30 as scd
except:
rgb.scd30Error()
stardustLogger.error("Error importing scd_30.")
try:
import sensors.BMP388 as bmp
except:
rgb.bmpError()
stardustLogger.error("Error importing BMP388.")
import time
import sys
import os
def start():
try:
stardustLogger.debug("start(): Run all sensors once to remove bad reads.")
rgb.startup()
bmp.bmp388()
veml.uv()
try:
sam.gps()
except KeyError:
pass
scd.CO2()
except KeyboardInterrupt:
rgb.off()
raise KeyboardInterrupt
def main():
stardustLogger.debug("Begin startdust.py main")
start()
stardustLogger.debug("Begin mainloop")
while(True):
rgb.statusOk()
try:
stardustLogger.debug("bmp")
bmp.bmp388() #bmp to console
bmp.logPressure() #bmp log pressure
bmp.logTemperature() #bmp log temperature
time.sleep(1)
except KeyboardInterrupt:
raise KeyboardInterrupt
except:
stardustLogger.error("bmp error")
rgb.bmpError()
rgb.statusOk()
try:
stardustLogger.debug("veml")
veml.uv() #veml uv to console
time.sleep(1)
veml.logUv() #veml log uv
time.sleep(1)
except KeyboardInterrupt:
raise KeyboardInterrupt
except:
stardustLogger.error("veml error")
rgb.vemlError()
rgb.statusOk()
try:
stardustLogger.debug("sam")
sam.gps() #sam gps to console
time.sleep(1)
sam.logGps() #sam log gps
except KeyboardInterrupt:
raise KeyboardInterrupt
except:
stardustLogger.error("sam error")
rgb.samError()
rgb.statusOk()
try:
stardustLogger.debug("scd-30")
scd.CO2() #scd CO2 to console
scd.logCO2() #scd log CO2
except KeyboardInterrupt:
raise KeyboardInterrupt
except:
stardustLogger.error("scd-30 error")
rgb.scd30Error()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:#USE THIS BLOCK TO CLEANUP OPEN CONNECTIONS.
print("Detected Keyboard Interrupt")
rgb.off()
try:
print("trying sys.exit")
sys.exit(0)
except SystemExit:
print("doing os.exit")
logging.shutdown()
os._exit(0)
```
|
{
"source": "jdrzaic/distributed-resource-allocation",
"score": 2
}
|
#### File: jdrzaic/distributed-resource-allocation/1-out-of-m-console.py
```python
from threading import Thread, Lock
from mpi4py import MPI
import sys
from time import sleep
import commutils
from enums import CsState, REQUEST, PERMISSION
from comm import BaseCommAdapter
class Process(BaseCommAdapter):
def __init__(self, comm, logging=False, m=1, info_logging=True):
super().__init__(comm, logging=logging)
self._info_logging = info_logging
        self._number_of_processes = comm.Get_size()
self._m = m
self._id = comm.Get_rank()
        self._wait_perm = [0] * self._number_of_processes
        self._perm_delayed = [0] * self._number_of_processes
self._clock = 0
self._lrd = 0
self._cs_state = CsState.OUT
self._nb_perm = 0
self._log_thread = Thread(target=self._logThr)
self._communicate_thread = Thread(target=self._communicate)
self._lock = Lock()
def start(self):
#print(self._id, " started...")
self._communicate_thread.start()
if self._info_logging:
self._log_thread.start()
def _logThr(self):
while True:
if self._cs_state == CsState.IN:
commutils.log('Working with resource')
elif self._cs_state == CsState.TRYING:
commutils.log('Trying ...')
else:
commutils.log('Resting...')
sleep(1)
def _communicate(self):
while True:
if self._comm.Iprobe(source=MPI.ANY_SOURCE, tag=PERMISSION):
with self._lock:
permission = self._comm.recv(source=MPI.ANY_SOURCE, tag=PERMISSION)
#self._log("Dobio PERMISSION od ", permission['id'])
self._wait_perm[permission['id']] -= permission['count']
if (self._cs_state == CsState.TRYING and self._wait_perm[permission['id']] == 0):
self._nb_perm += 1
#print(self._id, ": Got permission from", permission['id'], self._wait_perm, self._nb_perm)
if self._comm.Iprobe(source=MPI.ANY_SOURCE, tag=REQUEST):
with self._lock:
request = self._comm.recv(source=MPI.ANY_SOURCE, tag=REQUEST)
#self._log("Dobio REQUEST od ", request['id'])
self._clock = max(self._clock, request['lrd'])
prio = (self._cs_state == CsState.IN or (self._cs_state == CsState.TRYING and (self._lrd < request['lrd'] or self._lrd == request['lrd'] and self._id < request['id'])))
if prio:
self._perm_delayed[request['id']] += 1
#print(self._id, " :Request from", request['id'], " is delayed")
else:
permission = {'id': self._id, 'count': 1}
#print(self._id, " :Sending permission to ", request['id'])
perm = self._comm.isend(permission, dest=request['id'], tag=PERMISSION)
def release_resource(self):
with self._lock:
self._cs_state = CsState.OUT
self._log("OTPUSTIO sam resurs")
            for i in range(self._number_of_processes):
                if self._perm_delayed[i] != 0:
permission = {'id': self._id, 'count': self._perm_delayed[i]}
self._comm.isend(permission, dest=i, tag=PERMISSION)
#self._log("Saljem PERMISSION procesu ", i)
self._perm_delayed[i] = 0
def acquire_resource(self):
with self._lock:
#print(self._id, " :Acquire_resource...")
self._cs_state = CsState.TRYING
self._lrd = self._clock + 1
self._nb_perm = 0
            indexes_to_check = [ind for ind in range(self._number_of_processes) if ind != self._id]
for i in indexes_to_check:
data = {'id': self._id, 'lrd': self._lrd}
request = self._comm.isend(data, dest=i, tag=REQUEST)
#self._log("Saljem REQUEST procesu ", i)
self._wait_perm[i] += 1
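        # Permission-counting rule (our reading of the algorithm, not stated in
        # the original source): with N processes and M interchangeable copies of
        # the resource, a requester may enter once N - M peers have granted
        # permission, because at most M - 1 of the remaining peers can hold a
        # copy ahead of it. The loop below spins until that threshold is met.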
while True:
with self._lock:
                if self._nb_perm >= (self._number_of_processes - self._m):
#self._log("Dobio sam resurs...")
self._cs_state = CsState.IN
break
def get_id(self):
return self._id
def main(argv):
process = Process(MPI.COMM_WORLD, logging=True, m=int(argv[1]), info_logging=True)
process.start()
# if process.get_id()%3 == 0:
# sleep(1)
# process.acquire_resource()
# sleep(3)
# process.release_resource()
# sleep(1)
# process.acquire_resource()
# sleep(2)
# process.release_resource()
# if process.get_id()%3 == 1:
# sleep(3)
# process.acquire_resource()
# sleep(1)
# process.release_resource()
# sleep(5)
# process.acquire_resource()
# sleep(5)
# process.release_resource()
# if process.get_id()%3 == 2:
# sleep(1)
# process.acquire_resource()
# sleep(2)
# process.release_resource()
# sleep(2)
# process.acquire_resource()
# sleep(1)
# process.release_resource()
process.acquire_resource()
sleep(4)
process.release_resource()
sleep(2)
process.acquire_resource()
sleep(10)
process.release_resource()
if __name__ == "__main__":
main(sys.argv)
```
|
{
"source": "jds106/pregnaware",
"score": 3
}
|
#### File: babynames/namestatsparsers/regionparser.py
```python
import pandas as pd
import math
# Parses names by region (i.e. usual residence of mother):
#
# North East
# North West
# Yorkshire and The Humber
# East Midlands
# West Midlands
# East
# London
# South East
# South West
# Wales
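# Layout assumption (inferred from the slicing in parse() below, not from ONS
# documentation): the worksheet arranges the ten regional top-10 tables as a
# 2 x 5 grid -- rows 0-11 of the trimmed frame hold five regions and rows
# 13-24 the other five -- with each region occupying a three-column block
# (rank, name, count); parse_region() drops the rank column.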
def parse(year: int, gender: str, df: pd.DataFrame) -> pd.DataFrame:
region_df = pd.DataFrame(columns=['Name', 'Count', 'Region'])
df = df.loc[3:31, 'Unnamed: 1': 'Unnamed: 19'].dropna(axis=0, how='all').reset_index()
region_df = region_df.append(parse_region(df, 0, 11, ['Unnamed: 1', 'Unnamed: 2', 'Unnamed: 3']))
region_df = region_df.append(parse_region(df, 0, 11, ['Unnamed: 5', 'Unnamed: 6', 'Unnamed: 7']))
region_df = region_df.append(parse_region(df, 0, 11, ['Unnamed: 9', 'Unnamed: 10', 'Unnamed: 11']))
region_df = region_df.append(parse_region(df, 0, 11, ['Unnamed: 13', 'Unnamed: 14', 'Unnamed: 15']))
region_df = region_df.append(parse_region(df, 0, 11, ['Unnamed: 17', 'Unnamed: 18', 'Unnamed: 19']))
region_df = region_df.append(parse_region(df, 13, 24, ['Unnamed: 1', 'Unnamed: 2', 'Unnamed: 3']))
region_df = region_df.append(parse_region(df, 13, 24, ['Unnamed: 5', 'Unnamed: 6', 'Unnamed: 7']))
region_df = region_df.append(parse_region(df, 13, 24, ['Unnamed: 9', 'Unnamed: 10', 'Unnamed: 11']))
region_df = region_df.append(parse_region(df, 13, 24, ['Unnamed: 13', 'Unnamed: 14', 'Unnamed: 15']))
region_df = region_df.append(parse_region(df, 13, 24, ['Unnamed: 17', 'Unnamed: 18', 'Unnamed: 19']))
region_df['Year'] = year
region_df['Gender'] = gender
return region_df
# Pull the relevant name data from the specified DataFrame slice
def parse_region(df: pd.DataFrame, min_row: int, max_row: int, cols: list) -> pd.DataFrame:
df = df.loc[min_row:max_row, cols]
# Region is in either 0,0 or 0,1 of the sliced DataFrame, with the data starting from the 3rd row
region = df.iloc[0, 0]
if type(region) == float:
region = df.iloc[0, 1]
df = df.dropna(axis=0, how='all').iloc[2:, 1:]
df.columns = ['Name', 'Count']
df['Region'] = region
return df
```
#### File: src/babynames/shredder.py
```python
import pandas as pd
import os.path
import os
import mysql.connector
import namestatsparsers.monthparser as monthparser
import namestatsparsers.countryparser as countryparser
import namestatsparsers.regionparser as regionparser
import namestatsparsers.completelistparser as completelistparser
# Writes the files to the database
def save_to_database(df: pd.DataFrame, table_name: str):
con = mysql.connector.connect(
host='ec2-34-245-208-245.eu-west-1.compute.amazonaws.com',
database='pregnaware',
user='pregnaware',
password=os.environ['DB_PREGNAWARE_PWD'])
df.to_sql(con=con, name=table_name, flavor='mysql', if_exists='append', index=False)
# Parses the data from the specified Excel workbook
def parse(filepath: str, filename: str):
tokens = filename.split('.')
year = int(tokens[1])
gender = tokens[2].lower()
print('Parsing file: {0}'.format(filename))
df_dict = pd.read_excel('{0}/{1}'.format(filepath, filename), sheetname=None)
assert(isinstance(df_dict, dict))
# Top 100 names ranked by country
key_top_100_ew = None
key_top_100_e = None
key_top_100_w = None
key_top_10_region = None
key_top_10_month = None
key_full = None
for key in df_dict.keys():
assert(isinstance(key, str))
if key.lower().endswith('by region'):
key_top_10_region = key
elif key.lower().endswith('by month'):
key_top_10_month = key
# Variants for England & Wales
elif key.lower().endswith('top 100 {0}, e&w'.format(gender)):
key_top_100_ew = key
elif key.lower().endswith('top 100 {0}\' names'.format(gender)):
key_top_100_ew = key
# Variants for England
elif key.lower().endswith('top 100 {0}, england'.format(gender)):
key_top_100_e = key
elif key.lower().endswith('top 100 {0}, eng'.format(gender)):
key_top_100_e = key
elif key.lower().endswith('top 100 {0}, wales'.format(gender)):
key_top_100_w = key
elif key.lower().endswith('{0} names - e&w'.format(gender)):
key_full = key
elif key not in ['Contents', 'Metadata', 'Terms and Conditions', 'Related Publications']:
print('Unknown table: {0}'.format(key))
if key_top_100_ew:
results = countryparser.parse(year, gender, 'England and Wales', df_dict[key_top_100_ew])
save_to_database(results, 'NameStatByCountry')
if key_top_100_e:
results = countryparser.parse(year, gender, 'England', df_dict[key_top_100_e])
save_to_database(results, 'NameStatByCountry')
if key_top_100_w:
results = countryparser.parse(year, gender, 'Wales', df_dict[key_top_100_w])
save_to_database(results, 'NameStatByCountry')
if key_top_10_region:
results = regionparser.parse(year, gender, df_dict[key_top_10_region])
save_to_database(results, 'NameStatByRegion')
if key_top_10_month:
results = monthparser.parse(year, gender, df_dict[key_top_10_month])
# print(results)
save_to_database(results, 'NameStatByMonth')
if key_full:
results = completelistparser.parse(year, gender, df_dict[key_full])
save_to_database(results, 'NameStat')
path = '../../data/ons_babynames'
for file in os.listdir(path):
if file.startswith('babynames.') and file.endswith('.xls'):
parse(path, file)
```
|
{
"source": "JDSalisbury/familiar",
"score": 3
}
|
#### File: familiar/test/test_familiar.py
```python
from familiar_tools.familiar import dice_roll, get_modifier
def test_dice_roll():
roll = dice_roll(2, 6, 0)
print(roll)
assert roll <= 12
def test_get_modifier():
mod = get_modifier(12)
assert mod == 1
def test_get_modifier_big_int():
mod = get_modifier(18)
assert mod == 4
def test_negative_get_modifier():
mod = get_modifier(8)
assert mod == -1
def test_negative_get_modifier_on_big_int():
mod = get_modifier(4)
assert mod == -3
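# Note (assumption, not stated in the source): the expected values above are
# consistent with the usual tabletop ability-score modifier formula
# floor((score - 10) / 2), i.e. 12 -> +1, 18 -> +4, 8 -> -1, 4 -> -3.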
```
|
{
"source": "jdsalmonson/mushroom-rl",
"score": 2
}
|
#### File: value/batch_td/lspi.py
```python
import numpy as np
from mushroom_rl.algorithms.value.batch_td import BatchTD
from mushroom_rl.approximators.parametric import LinearApproximator
from mushroom_rl.features import get_action_features
from mushroom_rl.utils.dataset import parse_dataset
from mushroom_rl.utils.parameters import to_parameter
class LSPI(BatchTD):
"""
Least-Squares Policy Iteration algorithm.
"Least-Squares Policy Iteration". <NAME>. and <NAME>.. 2003.
"""
def __init__(self, mdp_info, policy, approximator_params=None,
epsilon=1e-2, fit_params=None, features=None):
"""
Constructor.
Args:
epsilon ([float, Parameter], 1e-2): termination coefficient.
"""
self._epsilon = to_parameter(epsilon)
self._add_save_attr(_epsilon='mushroom')
super().__init__(mdp_info, policy, LinearApproximator,
approximator_params, fit_params, features)
def fit(self, dataset):
phi_state, action, reward, phi_next_state, absorbing, _ = parse_dataset(
dataset, self.phi)
phi_state_action = get_action_features(phi_state, action,
self.mdp_info.action_space.n)
norm = np.inf
while norm > self._epsilon():
q = self.approximator.predict(phi_next_state)
if np.any(absorbing):
q *= 1 - absorbing.reshape(-1, 1)
next_action = np.argmax(q, axis=1).reshape(-1, 1)
phi_next_state_next_action = get_action_features(
phi_next_state,
next_action,
self.mdp_info.action_space.n
)
tmp = phi_state_action - self.mdp_info.gamma *\
phi_next_state_next_action
A = phi_state_action.T.dot(tmp)
b = (phi_state_action.T.dot(reward)).reshape(-1, 1)
old_w = self.approximator.get_weights()
if np.linalg.matrix_rank(A) == A.shape[1]:
w = np.linalg.solve(A, b).ravel()
else:
w = np.linalg.pinv(A).dot(b).ravel()
self.approximator.set_weights(w)
norm = np.linalg.norm(w - old_w)
```
#### File: mushroom_rl/core/environment.py
```python
import warnings
import numpy as np
from mushroom_rl.core.serialization import Serializable
class MDPInfo(Serializable):
"""
This class is used to store the information of the environment.
"""
def __init__(self, observation_space, action_space, gamma, horizon):
"""
Constructor.
Args:
observation_space ([Box, Discrete]): the state space;
action_space ([Box, Discrete]): the action space;
gamma (float): the discount factor;
horizon (int): the horizon.
"""
self.observation_space = observation_space
self.action_space = action_space
self.gamma = gamma
self.horizon = horizon
self._add_save_attr(
observation_space='mushroom',
action_space='mushroom',
gamma='primitive',
horizon='primitive'
)
@property
def size(self):
"""
Returns:
The sum of the number of discrete states and discrete actions. Only
works for discrete spaces.
"""
return self.observation_space.size + self.action_space.size
@property
def shape(self):
"""
Returns:
The concatenation of the shape tuple of the state and action
spaces.
"""
return self.observation_space.shape + self.action_space.shape
class Environment(object):
"""
Basic interface used by any mushroom environment.
"""
@classmethod
def register(cls):
"""
Register an environment in the environment list.
"""
env_name = cls.__name__
if env_name not in Environment._registered_envs:
Environment._registered_envs[env_name] = cls
@staticmethod
def list_registered():
"""
List registered environments.
Returns:
The list of the registered environments.
"""
return list(Environment._registered_envs.keys())
@staticmethod
def make(env_name, *args, **kwargs):
"""
Generate an environment given an environment name and parameters.
The environment is created using the generate method, if available. Otherwise, the constructor is used.
The generate method has a simpler interface than the constructor, making it easier to generate
a standard version of the environment. If the environment name contains a '.' separator, the string
is splitted, the first element is used to select the environment and the other elements are passed as
positional parameters.
Args:
env_name (str): Name of the environment,
*args: positional arguments to be provided to the environment generator;
**kwargs: keyword arguments to be provided to the environment generator.
Returns:
An instance of the constructed environment.
"""
if '.' in env_name:
env_data = env_name.split('.')
env_name = env_data[0]
args = env_data[1:] + list(args)
env = Environment._registered_envs[env_name]
if hasattr(env, 'generate'):
return env.generate(*args, **kwargs)
else:
return env(*args, **kwargs)
def __init__(self, mdp_info):
"""
Constructor.
Args:
mdp_info (MDPInfo): an object containing the info of the
environment.
"""
self._mdp_info = mdp_info
def seed(self, seed):
"""
Set the seed of the environment.
Args:
seed (float): the value of the seed.
"""
if hasattr(self, 'env') and hasattr(self.env, 'seed'):
self.env.seed(seed)
else:
warnings.warn('This environment has no custom seed. '
'The call will have no effect. '
'You can set the seed manually by setting numpy/torch seed')
def reset(self, state=None):
"""
Reset the current state.
Args:
state (np.ndarray, None): the state to set to the current state.
Returns:
The current state.
"""
raise NotImplementedError
def step(self, action):
"""
Move the agent from its current state according to the action.
Args:
action (np.ndarray): the action to execute.
Returns:
The state reached by the agent executing ``action`` in its current
state, the reward obtained in the transition and a flag to signal
if the next state is absorbing. Also an additional dictionary is
returned (possibly empty).
"""
raise NotImplementedError
def render(self):
raise NotImplementedError
def stop(self):
"""
Method used to stop an mdp. Useful when dealing with real world
environments, simulators, or when using openai-gym rendering
"""
pass
@property
def info(self):
"""
Returns:
An object containing the info of the environment.
"""
return self._mdp_info
@staticmethod
def _bound(x, min_value, max_value):
"""
Method used to bound state and action variables.
Args:
x: the variable to bound;
min_value: the minimum value;
max_value: the maximum value;
Returns:
The bounded variable.
"""
return np.maximum(min_value, np.minimum(x, max_value))
_registered_envs = dict()
```
#### File: tests/utils/test_preprocessors.py
```python
import numpy as np
import torch
import torch.nn.functional as F
from mushroom_rl.utils.parameters import Parameter
from mushroom_rl.policy import EpsGreedy
from mushroom_rl.algorithms.value import DQN
from mushroom_rl.core import Core
from mushroom_rl.approximators.parametric import TorchApproximator
from torch import optim, nn
from mushroom_rl.environments import Gym
from mushroom_rl.utils.preprocessors import MinMaxPreprocessor
def test_normalizing_preprocessor(tmpdir):
np.random.seed(88)
class Network(nn.Module):
def __init__(self, input_shape, output_shape, **kwargs):
super().__init__()
n_input = input_shape[-1]
n_output = output_shape[0]
self._h1 = nn.Linear(n_input, n_output)
nn.init.xavier_uniform_(self._h1.weight,
gain=nn.init.calculate_gain('relu'))
def forward(self, state, action=None):
q = F.relu(self._h1(torch.squeeze(state, 1).float()))
if action is None:
return q
else:
action = action.long()
q_acted = torch.squeeze(q.gather(1, action))
return q_acted
mdp = Gym('CartPole-v0', horizon=500, gamma=.99)
# Policy
epsilon_random = Parameter(value=1.)
pi = EpsGreedy(epsilon=epsilon_random)
# Approximator
input_shape = mdp.info.observation_space.shape
approximator_params = dict(network=Network,
optimizer={'class': optim.Adam,
'params': {'lr': .001}},
loss=F.smooth_l1_loss,
input_shape=input_shape,
output_shape=mdp.info.action_space.size,
n_actions=mdp.info.action_space.n,
n_features=2, use_cuda=False)
alg_params = dict(batch_size=5, initial_replay_size=10,
max_replay_size=500, target_update_frequency=50)
agent = DQN(mdp.info, pi, TorchApproximator,
approximator_params=approximator_params, **alg_params)
norm_box = MinMaxPreprocessor(mdp_info=mdp.info,
clip_obs=5.0, alpha=0.001)
core = Core(agent, mdp, preprocessors=[norm_box])
core.learn(n_steps=100, n_steps_per_fit=1, quiet=True)
# training correctly
assert (core._state.min() >= -norm_box._clip_obs
and core._state.max() <= norm_box._clip_obs)
# loading and setting data correctly
state_dict1 = norm_box.get_state()
norm_box.save(tmpdir / 'norm_box.msh')
core.learn(n_steps=100, n_steps_per_fit=1, quiet=True)
norm_box = MinMaxPreprocessor.load(tmpdir / 'norm_box.msh')
state_dict2 = norm_box.get_state()
assert ((state_dict1["mean"] == state_dict2["mean"]).all()
and (state_dict1["var"] == state_dict2["var"]).all()
and state_dict1["count"] == state_dict2["count"])
core = Core(agent, mdp, preprocessors=[norm_box])
core.learn(n_steps=100, n_steps_per_fit=1, quiet=True)
```
|
{
"source": "jdsalmonson/qnd",
"score": 3
}
|
#### File: qnd/qnd/adict.py
```python
from __future__ import absolute_import
import sys
# from collections import Mapping
PY2 = sys.version_info < (3,)
class ItemsAreAttrs(object):
"""Mix-in class for QArray, QGroup, or QList, and also ADict."""
__slots__ = ()
def __getattr__(self, name):
# Mixing __getattr__ with properties, as we do here, can lead to
# very obscure errors, because the way __getattr__ works is to
# simply try to retrieve the attribute, and call __getattr__ if
# that operation raises an exception, instead of propagating the
# exception as usual. When you have a property or any descriptor,
# the act of retrieving that attribute implicitly calls a method.
# If that property method raises an exception, the __getattr__
# machinery interprets that to mean the attribute does not exist
# and calls __getattr__, removing the actual faulting method from
# the call chain and making debugging difficult.
# Beware of errors with this __getattr__ in their calling chain;
# they may have originated in a different error in a property
# method!
if name.startswith('__') and len(name) > 2 or name == 'getdoc':
# Do not redirect dunder or ipython getdoc calls, as this
# confuses many simple attempts at introspection.
return super(ItemsAreAttrs, self).__getattr__(name)
# Strip single trailing _ as an interactive convenience for the
# problem of attribute names that match reserved words or property
# or method names. This is inspired by the PEP8 advice for dealing
# with this issue.
if name.endswith('_'):
name = name[:-1]
try:
return self[name]
except KeyError as e:
raise AttributeError(*e.args)
def __setattr__(self, name, value):
if name.startswith('__') and len(name) > 2:
            return super(ItemsAreAttrs, self).__setattr__(name, value)
if name.endswith('_'):
name = name[:-1]
self[name] = value
def __delattr__(self, name):
if name.startswith('__') and len(name) > 2:
            return super(ItemsAreAttrs, self).__delattr__(name)
if name.endswith('_'):
name = name[:-1]
del self[name]
def update(self, *args, **kwargs):
"""Multiple __setitem__ from positional arguments or keywords."""
for arg in args:
if hasattr(arg, 'keys'): # dict-like, not list-like
for key in arg:
self[key] = arg[key]
else:
key, value = arg
self[key] = value
for key in kwargs:
self[key] = kwargs[key[:-1] if key.endswith('_') else key]
class ADict(ItemsAreAttrs, dict):
"""Subclass of dict permitting access to items as if they were attributes.
For a ADict ad, ``ad.x`` is equivalent to ``ad['x']`` for getting,
setting, or deleting items. The exceptions are dict method names,
like `keys` or `items`, syntactically illegal names, like `class`
or `yield`, and any name beginning with `__`.
Additionally, as a work around for some of these exceptions, ADict
will remove a single trailing underscore from an attribute name,
so ``ad.x_`` is also equivalent to ``ad['x']``, and you need
``ad.x__`` to get ``ad['x_']`` (a convention inspired by the
similar PEP8 recommendation for syntatically illegal variable
names). The trailing underscore removal does not apply to names
beginning with `__`.
The trailing underscore removal convention applies to keywords
passed to the constructor or to the `update` method as well.
Use subscript syntax when a variable or expression holds an item name;
use attribute syntax when you know the item name at parse time::
ad[variable] = value # value of variable is the item name
ad.fixed = value # 'fixed' is the item name
value = ad.get('fixed', default) # except to avoid KeyError
See Also
--------
redict : recursively toggle between dict and ADict
ItemsAreAttrs : mixin base class to provide this for any class
"""
__slots__ = ()
def __init__(self, *args, **kwargs):
super(ADict, self).__init__(*args)
self.update(kwargs)
def __repr__(self):
return "ADict(" + super(ADict, self).__repr__() + ")"
def redict(d, cls=None):
"""Recursively convert a nested dict to a nested ADict and vice versa.
Parameters
----------
d : dict or ADict instance
A dict, possibly nested, to be converted.
cls : dict or subclass of dict, optional
The dict-like cls to recursively convert `d` and any sub-dicts
into. By default, if `d` is a `ADict`, `cls` is `dict`,
otherwise `cls` is `ADict`, so repeated calls to `redict` toggle
between `dict` and `ADict`.
Returns
-------
dnew : dict or ADict
A copy of `d` whose class is `cls`. Any items which are dict
instances are similarly copied to be `cls` instances. Non-dict
items are not copied unless assignment makes copies.
"""
if cls is None:
cls = dict if isinstance(d, ADict) else ADict
dnew = cls(d)
for key, value in (d.iteritems() if PY2 else d.items()):
if hasattr(value, '__iter__') and hasattr(value, 'keys'):
dnew[key] = redict(value, cls)
return dnew
```
#### File: qnd/qnd/ncf.py
```python
from __future__ import absolute_import
import sys
import weakref
from collections import OrderedDict
from warnings import warn
from numpy import (dtype, prod, fromfile, asarray, array, zeros, concatenate,
ascontiguousarray, int64)
from numpy.core.defchararray import decode as npdecode, encode as npencode
from .frontend import QGroup
from .generic import opener
from .utils import leading_args
__all__ = ['opennc']
PY2 = sys.version_info < (3,)
if PY2:
range = xrange
def itemsof(d): return d.iteritems() # noqa
else:
basestring = str
def itemsof(d): return d.items() # noqa
def opennc(filename, mode='r', auto=1, **kwargs):
"""Open netCDF-3 file returning a QnD QGroup.
A netCDF-3 file differs from other self-describing binary file formats
because no data addresses can be known until every variable to be
stored is declared. Therefore, when writing a netCDF-3 file, you
must declare every variable before you can begin writing anything.
    The qnd API is somewhat at odds with these semantics because it encourages
you to declare and write each variable in a single step. The native
netCDF-3 API forces you to declare everything, then call an `enddef`
method to complete all variable declarations and permit you to begin
writing data. The qnd.ncf backend uses the first call to the ordinary
qnd `flush` method to emulate the netCDF-3 `enddef` mode switch -- thus
nothing will be written to the file until the first call to `flush`.
To minimize the difference between ncf and other qnd backends, if you
do use the usual qnd declare-and-write idiom, the ncf backend will save
the variable value in memory until the first `flush` call, which will
trigger the actual writing of all such saved values.
Note that closing the file flushes it, so that is also a viable way to
finish a netCDF-3 file. Furthermore, when you overwrite any record
variable in `recording` mode, ncf will implicitly `flush` the file,
since no new variables can be declared after that.
Parameters
----------
filename : str
Name of file to open. See notes below for file family.
mode : str
One of 'r' (default, read-only), 'r+' (read-write, must exist),
'a' (read-write, create if does not exist), 'w' (create, clobber if
exists), 'w-' (create, fail if exists).
auto : int
        The initial state of auto-read mode. If the QGroup handle returned
        by opennc is `f`, then ``f.varname`` reads an array variable, but not
a subgroup when auto=1, the default. With auto=0, the variable
reference reads neither (permitting later partial reads in the case
of array variables). With auto=2, a variable reference recursively
reads subgroups, bringing a whole tree into memory.
**kwargs
Other keywords. The maxsize keyword sets the size of files in a
family generated in recording==1 mode; a new file will begin when
the first item in a new record would begin beyond `maxsize`. The
default maxsize is 128 MiB (134 MB). The v64 keyword, if provided
and true, causes new files to be created using the 64-bit netCDF
format; the default is to create 32-bit files. (But a file family
always uses a single format.)
The nextaddr_mode keyword can be used to indicate whether the next
new record in 'a' or 'r+' mode should go into a new file. The
default behavior is that it should, which is the pdbf module default;
this is nextaddr_mode true. Use nextaddr_mode=0 to continue filling
the final existing file until maxsize.
Returns
-------
f : QGroup
A file handle implementing the QnD interface.
Notes
-----
The `filename` may be an iterable, one string per file in order. The
sequence may extend beyond the files which actually exist for 'r+', 'a',
'w', or 'w-' modes.
Alternatively `filename` specifies a family if it contains shell globbing
wildcard characters. Existing matching files are sorted first by length,
then alphabetically (ensuring that 'file100' comes after 'file99', for
example). If there is only a single wildcard group, it also serves to
define a sequence of future family names beyond those currently existing
for 'r+', 'a', 'w', or 'w-' modes. A '?' pattern is treated the same as
a '[0-9]' pattern if all its matches are digits or if the pattern
matches no existing files. Similarly, a '*' acts like the minimum number
of all-digit matches, or three digits if there are no matches.
"""
maxsize = kwargs.pop('maxsize', 134217728)
v64 = kwargs.pop('v64', False)
mode = mode.lower()
if mode.startswith('a') or mode.startswith('r+'):
nextaddr_mode = kwargs.pop('nextaddr_mode', 2) or 1
else:
nextaddr_mode = 1
    kwargs['nextaddr_mode'] = nextaddr_mode
handle, n = opener(filename, mode, **kwargs)
root = NCGroup(handle, maxsize, v64)
for i in range(n):
try:
ncparse(handle, root, i)
except IOError:
# Something went terribly wrong. If this is first file, we die.
name = handle.filename(i)
if not i:
raise IOError("Fatal errors opening netCDF file {}"
"".format(name))
handle.open(i-1)
warn("file family stopped by incompatible {}".format(name))
handle.callbacks(root.flusher, root.initializer)
return QGroup(root, auto=auto)
# https://www.unidata.ucar.edu/software/netcdf/docs/
# file_format_specifications.html
# All numbers are in XDR (big-endian) format.
#
# header = magic numrecs dim_list gatt_list var_list
# magic = 'C' 'D' 'F' version
# version = '\x01' (32-bit offset) | '\x02' (64-bit offset)
# numrecs = NON_NEG | STREAMING
# dim_list = ABSENT | 0x00 00 00 0A NON_NEG dim*
# gatt_list = att_list
# var_list = ABSENT | 0x00 00 00 0B NON_NEG var*
# att_list = ABSENT | 0x00 00 00 0C NON_NEG attr*
# ABSENT = 0x00 00 00 00 0x00 00 00 00
# STREAMING = 0xFF FF FF FF
# dim = name NON_NEG (0 length means record dimension)
# name = NON_NEG namestring (0 padded to 4 byte boundary, _.@+-)
# attr = name nc_type NON_NEG values (0 padded to 4 byte boundary)
# nc_type = 1|2|3|4|5|6 (byte|char|short|int|float|double)
# var = name NON_NEG dimid* att_list nc_type vsize OFFSET
# dimid = 0-origin index into dim_list
# vsize = >i4 number of bytes, or 2**32-1 if more than 4GiB
# write vsize as if padded, but if only 1 record variable of
# nc_type byte, char, or short, do not use padding
# - for record variables, byte size of entire record (as if padded)
# OFFSET = >i4 for version 1, >i8 for version 2
#
# Default fill values:
# char \x00, byte \x81, short \x80 01, int \x80 00 00 01
# float \x7C F0 00 00, double \x47 9E 00 00 00 00 00 00 =9.969209968386869e36
#
# The netCDF-3 header _almost_ has a simple XDR description; the only
# problem is that an attribute attr definition may have a value which is
# a counted array of short (2 byte integers), which XDR does not support.
# (The 64-bit format requires a hack to represent the offset values, and
# its own XDR specification using that hack.)
def ncparse(handle, root, ifile):
i4be = _netcdf_stypes[3]
if ifile:
if not root.nrecs:
raise IOError("first file in apparent family has no record vars")
f = handle.open(ifile - 1)
headsize = root.headsize
f.seek(0)
static0 = f.read(headsize)
f = handle.open(ifile)
magic = f.read(4)
if magic == static0[:4]:
nrecs = int(fromfile(f, i4be, 1)[0])
static1 = f.read(headsize - 8)
else:
static1 = nrecs = None
if static1 != static0[8:]:
raise IOError("static variables do not match previous file")
if nrecs == -1:
f.seek(0, 2)
nrecs = (f.tell() - headsize) // root.recsize
root.nrecs.append(nrecs)
return
f = handle.open(ifile)
magic = fromfile(f, 'S4', 1)[0]
version = magic[3:] # in python3, magic[3] is int(1) != b'\x01'
if magic[:3] != b'CDF' or version not in b'\x01\x02':
raise IOError("bad magic in netCDF-3 header")
v64 = version != b'\x01'
iobe = dtype('>i8') if v64 else i4be
nrecs = int(fromfile(f, i4be, 1)[0]) # -1 indicates STREAMING
tag, count = fromfile(f, i4be, 2)
if tag != 10 and (count or tag):
raise IOError("bad dim_list in netCDF-3 header")
dims, recid = [], None
while count > 0:
count -= 1
name = _get_name(f)
size = int(fromfile(f, i4be, 1)[0])
if not size:
recid = len(dims)
dims.append((name, size))
attrs = [(None, _get_attrs(f))]
tag, count = fromfile(f, i4be, 2)
if tag != 11 and (count or tag):
raise IOError("bad dim_list in netCDF-3 header")
variables, recsize, special_case = OrderedDict(), 0, 0
recaddr = lastaddr = None
nrecvar = 0
while count > 0:
count -= 1
name = _get_name(f)
ndim = int(fromfile(f, i4be, 1)[0])
shape = tuple(fromfile(f, i4be, ndim).astype(int)) if ndim else ()
attrs.append((name, _get_attrs(f)))
nctype = int(fromfile(f, i4be, 1)[0])
if nctype < 1 or nctype > 6:
raise IOError("bad nc_type (not in 1-6) in netCDF-3 header")
stype = _netcdf_stypes[nctype - 1]
fromfile(f, i4be, 1) # ignore vsize
offset = int(fromfile(f, iobe, 1)[0])
# Note: offset is the byte address of the variable in the file
# - byte address of first block of a record variable
if offset < 0:
raise IOError("bad variable offset in netCDF-3 header")
unlim = shape and shape[0] == recid
if unlim:
shape = shape[1:]
try:
sshape = tuple(dims[i][0] for i in shape)
except IndexError:
raise IOError("bad dimension index in netCDF-3 header")
shape = tuple(dims[i][1] for i in shape)
item = NCLeaf(root, len(variables), offset, stype, shape, sshape)
variables[name] = NCList(root, item) if unlim else item
if unlim:
nrecvar += 1
if nrecvar == 1:
nbytes = stype.itemsize
if nbytes & 3:
if shape:
nbytes *= prod(shape) if shape else 1
if nbytes & 3:
special_case = nbytes
recsize += _measure_item(item)
if recaddr is None or offset < recaddr:
recaddr = offset
elif lastaddr is None or offset >= lastaddr:
lastaddr = offset + _measure_item(item)
if nrecvar == 1 and special_case:
# Implement special rule for byte, char, or short single record
# variable; such records are not forced to 4 byte boundaries.
recsize = special_case
headsize = f.tell()
if nrecs == -1 and recsize:
# Handle special streaming record count by using file size.
f.seek(0, 2)
size = f.tell()
f.seek(headsize)
nrecs = (size - recaddr) // recsize
root.variables = variables
root.dims = OrderedDict(dims)
root.attrs = OrderedDict(attrs)
root.headsize = headsize
root.recaddr = recaddr or lastaddr or headsize
root.recsize = recsize
root.nrecs.append(nrecs)
root.v64 = v64
def _get_name(f):
nchar = int(fromfile(f, '>i4', 1)[0])
rem = nchar & 3
ntot = nchar + 4 - rem if rem else nchar
name = fromfile(f, 'S1', ntot)[:nchar].view('S' + str(nchar))
return _bytes_as_str(name)
def _bytes_as_str(text):
if hasattr(text, 'ravel'):
text = text.ravel()[0]
if isinstance(text, bytes):
need_unicode = False
if PY2:
try:
text.decode('ascii')
except UnicodeDecodeError:
need_unicode = True
else:
need_unicode = True
if need_unicode:
try:
text = text.decode('utf8')
except UnicodeDecodeError: # ignore, but violates netCDF-3 spec
text = text.decode('latin1')
return text
def _text_as_bytes(text):
if hasattr(text, 'ravel'):
text = text.ravel()[0]
return text if isinstance(text, bytes) else text.encode('utf8')
def _get_attrs(f):
i4be = _netcdf_stypes[3]
tag, count = fromfile(f, i4be, 2)
if tag != 12 and (count or tag):
raise IOError("bad attr_list in netCDF-3 header")
attrs = []
while count > 0:
count -= 1
name = _get_name(f)
nctype = int(fromfile(f, i4be, 1)[0])
if nctype < 1 or nctype > 6:
raise IOError("bad nc_type (not in 1-6) in netCDF-3 header")
if nctype == 2:
values = _get_name(f)
else:
nvalues = int(fromfile(f, i4be, 1)[0])
stype = _netcdf_stypes[nctype - 1]
values = fromfile(f, stype, nvalues)
rem = values.nbytes & 3
if rem:
fromfile(f, 'u1', 4 - rem)
if values.size == 1:
values = values[0]
if not stype.isnative:
values = values.astype(stype.newbyteorder('='))
attrs.append((name, values))
return OrderedDict(attrs)
class NCGroup(object):
def __init__(self, handle, maxsize=134217728, v64=False):
self.handle = handle # a generic.MultiFile
self.variables = self.dims = self.attrs = None
self.headsize = self.recaddr = self.recsize = 0
self.nrecs = [] # list of record counts in files of family
self.maxsize = maxsize
self.v64 = v64
self.pending = None # holds pre-flush variable values
@staticmethod
def isgroup():
return 1
@staticmethod
def islist():
return 0
isleaf = islist
def root(self):
return self # no such thing as directories in netCDF3
def close(self):
self.handle.close()
def flush(self):
self.handle.flush()
def __len__(self):
return len(self.variables)
def __iter__(self):
return iter(self.variables)
def lookup(self, name):
return self.variables.get(name)
def declare(self, name, dtype, shape, unlim=None):
if self.headsize:
raise RuntimeError("netCDF file defined, no more declarations")
if shape and not all(shape):
raise TypeError("netCDF does not support 0-length dimensions")
stype = _get_stype(dtype)
sshape = tuple('_' + str(s) for s in shape) if shape else None
dims, variables = self.dims, self.variables
if unlim:
dims.setdefault('_0', 0)
        for s, n in zip(sshape or (), shape or ()):
dims.setdefault(s, n)
# Set offset to unlim for now, will be set in initializer.
item = NCLeaf(self, len(variables), unlim, stype, shape, sshape)
if unlim:
item = NCList(self, item)
variables[name] = item
return item
# qnd.QAttribute uses only __iter__, get, items, __len__, __contains__
# In PY2, the dict returned here has an inefficient items() method,
# but it is not worth fixing that here.
def attget(self, vname):
return self.attrs.get(vname if vname else None)
def attset(self, vname, aname, dtype, shape, value):
if self.headsize:
raise RuntimeError("netCDF file defined, no setting attributes")
stype = _get_stype(dtype)
strtype = _netcdf_stypes[1]
if stype == strtype:
if shape:
raise TypeError("netCDF does not support array of strings"
"as an attribute value")
value = _bytes_as_str(value)
else:
value = asarray(value, stype)
if shape:
if len(shape) > 1:
raise TypeError("netCDF does not support "
"multi-dimensional attribute values")
if value.shape != shape:
value = value.reshape(shape)
if not stype.isnative:
value = value.astype(stype.newbyteorder('='))
if not vname:
vname = None
attrs = self.attrs.get(vname)
if not attrs:
self.attrs[vname] = attrs = OrderedDict()
attrs[aname] = value
def record_delta(self, irec):
"""Compute delta to add to record variable offset to reach irec."""
handle, nrecs, recsize = self.handle, self.nrecs, self.recsize
rec0 = array(nrecs).cumsum()
# searchsorted needs strictly monotonic array
# However, because of the 0.5 offset and the fact that irec is
# an integer, this apparently can never cause a problem here.
ifile = (rec0 - 0.5).searchsorted(irec)
if ifile >= rec0.size:
maxsize = self.maxsize
if handle.nextaddr:
# Handle special case of the first record written after a
# family is opened in 'a' or 'r+' mode.
maxsize = 0
# This is a new record. We check if maxsize has been exceeded,
# and force a new file in the family to be created if so.
n = nrecs[-1]
if n and self.recaddr + recsize*n >= maxsize:
f = handle.open(ifile) # implicit flush during open
nrecs.append(0)
self.initializer(f)
handle.nextaddr = int64(0) # special case only triggers once
irec -= rec0[-1]
nrecs[-1] += 1
elif ifile:
irec -= rec0[ifile - 1]
return handle.zero_address(ifile) + recsize * irec
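    # Worked example of the bookkeeping above (illustrative numbers only):
    # with nrecs = [3, 3, 2] and irec = 4 (zero-based), rec0 = [3, 6, 8], so
    # (rec0 - 0.5).searchsorted(4) gives ifile = 1; irec -= rec0[0] leaves
    # irec = 1, and the returned delta is zero_address(1) + recsize*1, that
    # is, record 4 of the family is the second record of the second file.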
def flusher(self, f):
# The only metadata that may need to be written is nrecs.
# The file handle f is the last file in the family.
if self.nrecs:
f.seek(4)
array(self.nrecs[-1], '>i4').tofile(f)
def initializer(self, f):
# The file f positioned at address 0.
i4be = _netcdf_stypes[3]
v64 = self.v64
array(b'CDF' + (b'\x02' if v64 else b'\x01')).tofile(f)
array(0, i4be).tofile(f)
handle = self.handle
ifile = handle.current_file()
if ifile:
# Just copy header and non-record variables to new file.
f = handle.open(0)
f.seek(8)
value = f.read(self.recaddr)
f = handle.open(ifile)
f.seek(8)
f.write(value)
return
# This is first file of family.
dims, variables, attrs = self.dims, self.variables, self.attrs
if not dims:
zeros(2, i4be).tofile(f)
else:
array((10, len(dims)), i4be).tofile(f)
for name, size in itemsof(dims):
_put_name(f, name)
array(size, i4be).tofile(f)
_put_attrs(f, attrs.get(None))
if not variables:
zeros(2, i4be).tofile(f)
else:
array((11, len(variables)), i4be).tofile(f)
headsize = f.tell() # including vars tag and count
if not self.headsize:
# The offsets in the variables array are unknown until the
# symbol table is written, which makes it hard to write for
# the first file in a family. We make a clumsy two passes
# to compute the length of the var_list if the offsets have
# not yet been set.
# add space for name length, ndim, nctype, vsize, and offset
            headsize += (20 + 4*v64) * len(variables)
for name, item in itemsof(variables):
if isinstance(item, NCList):
item = item.leaf
ndim = len(item.shape or ()) # so shape None okay
vattrs = attrs.get(name)
headsize += 4*ndim + _measure_attrs(vattrs)
offset = self.headsize = headsize
# Now we can fill in all the offsets and find recaddr.
recitems = []
for name, item in itemsof(variables):
if isinstance(item, NCList):
item = item.leaf
if item.offset: # This is unlim, see NCGroup.declare.
recitems.append(item)
item.offset = offset
offset += _measure_item(item)
self.recaddr = offset
for item in recitems:
item.offset = offset
offset += _measure_item(item)
self.recsize = offset - self.recaddr
recaddr, recsize = self.recaddr, self.recsize
dimids = {name: i for i, name in enumerate(dims)}
recid = None
for i, (_, n) in enumerate(itemsof(dims)):
if not n:
recid = i
break
recid = [] if recid is None else [recid]
iobe = dtype('>i8') if self.v64 else i4be
rem = recsize & 3
if rem:
recsize += 4 - rem # used only for vsize
if recsize > 0xffffffff:
recsize = 0xffffffff # vsize overflow convention
for name, item in itemsof(variables):
if isinstance(item, NCList):
item = item.leaf
stype, offset = item.stype, item.offset
nctype = _netcdf_stypes.index(stype) + 1
sshape = item.sshape or ()
unlim = offset >= recaddr
_put_name(f, name)
array(len(sshape) + unlim, i4be).tofile(f)
sshape = (recid if unlim else []) + [dimids[s] for s in sshape]
array(sshape, i4be).tofile(f)
            _put_attrs(f, attrs.get(name))
vsize = recsize if unlim else _measure_item(item)
array([nctype, vsize], i4be).tofile(f)
array(offset, iobe).tofile(f)
headsize = f.tell()
if headsize != self.headsize:
            raise IOError("netCDF header size mismatch (BUG?)")
# Header finished, write any pending variables now.
pending = self.pending
self.pending = None
if pending:
byindex = {}
for _, item in itemsof(variables):
if isinstance(item, NCList):
item = item.leaf
byindex[item.index] = item
for index, value in itemsof(pending):
byindex[index].write(value)
def _put_name(f, name):
name = _text_as_bytes(name)
nchar = len(name)
rem = nchar & 3
if rem:
name = name + b'\0'*(4 - rem)
array(nchar, _netcdf_stypes[3]).tofile(f)
f.write(name)
def _put_attrs(f, attrs):
i4be = _netcdf_stypes[3]
if not attrs:
zeros(2, i4be).tofile(f)
return
array((12, len(attrs)), i4be).tofile(f)
for name, value in itemsof(attrs):
if isinstance(value, basestring):
nctype = 2
value = _text_as_bytes(value)
n = len(value)
rem = n & 3
if rem:
value += b'\0' * (4 - rem)
value = array(value)
else:
            value = asarray(value)
dtype = value.dtype
size = dtype.itemsize
if dtype.kind == 'f':
nctype = 5 + (size == 8)
elif size == 1:
nctype = 1
else:
nctype = 3 + (size == 4)
stype = _netcdf_stypes[nctype - 1]
if dtype != stype:
value = value.astype(stype)
n = value.size
if nctype == 3 and (value.size & 1):
value = concatenate((value.ravel(), zeros(1, stype)))
_put_name(f, name)
array((nctype, n), i4be).tofile(f)
value.tofile(f)
def _measure_attrs(attrs):
size = 8
if attrs:
for name, value in itemsof(attrs):
size += 24 # name length, nctype, value count
size += ((len(_text_as_bytes(name)) + 3) >> 2) << 2
if isinstance(value, basestring):
size += len(_text_as_bytes(value))
else:
                size += asarray(value).nbytes
size = ((size + 3) >> 2) << 2
return size
def _measure_item(item):
size = item.shape
size = prod(size) if size else 1
nbytes = item.stype.itemsize * size
return ((nbytes + 3) >> 2) << 2
def _get_stype(dtype):
kind = 'X' if dtype in (dict, list, None) else dtype.kind
stype = None
if kind in 'bui':
size = dtype.itemsize
sizes = (1, 2, 4, 8)
if size in sizes:
stype = _netcdf_stypes[(0, 2, 3, 3)[sizes.index(size)]]
elif kind == 'f':
size = dtype.itemsize
sizes = (2, 4, 8, 12, 16)
if size in sizes:
stype = _netcdf_stypes[(4, 4, 5, 5, 5)[sizes.index(size)]]
elif kind in 'SU':
stype = _netcdf_stypes[1]
if stype is None:
raise TypeError("netCDF-3 does not support this dtype")
return stype
_netcdf_stypes = [dtype('i1'), dtype('S1'), dtype('>i2'), dtype('>i4'),
dtype('>f4'), dtype('>f8')]
class NCLeaf(object):
__slots__ = 'parent', 'index', 'offset', 'stype', 'shape', 'sshape'
def __init__(self, parent, index, offset, stype, shape, sshape, _wrp=None):
self.parent = parent if _wrp else weakref.ref(parent)
self.index = index
self.offset = offset
self.stype = stype
self.shape = shape
self.sshape = sshape
@staticmethod
def isleaf():
return 1
@staticmethod
def isgroup():
return 0
islist = isgroup
def shift_by(self, delta):
state = [getattr(self, nm) for nm in self.__slots__]
state[2] += delta
return NCLeaf(*state, _wrp=1)
def root(self):
return self.parent()
def _dtype(self):
dtype = self.stype
return dtype if dtype.isnative else dtype.newbyteorder('=')
def query(self):
# return dtype, shape, sshape
shape, sshape = self.shape or (), self.sshape
return self._dtype(), shape, sshape if sshape else shape
def read(self, args=()):
stype, shape = self.stype, self.shape
args, shape, offset = leading_args(args, shape)
f = self.parent().handle.seek(self.offset + stype.itemsize * offset)
size = prod(shape) if shape else 1
value = fromfile(f, stype, size).reshape(shape)[args]
if not stype.isnative:
value = value.astype(stype.newbyteorder('='))
if stype == _netcdf_stypes[1]:
# Present this as a str or array of str.
# Note that final netCDF dimension is really length of string.
shape = value.shape
if shape:
shape, strlen = shape[:-1], shape[-1]
value = value.view('S' + str(strlen)).reshape(shape)
if PY2:
try:
npdecode(value, 'ascii')
need_unicode = False
except UnicodeDecodeError:
need_unicode = True
else:
need_unicode = True
if need_unicode:
try:
value = npdecode(value, 'utf8')
except UnicodeDecodeError:
value = npdecode(value, 'latin1')
if not shape:
value = value[()]
return value
def write(self, value, args=()):
parent = self.parent()
if not parent.headsize:
if args:
raise IndexError("no partial writes during declaration")
pending = parent.pending
if pending is None:
pending = parent.pending = {}
pending[self.index] = value
return
offset, stype, shape = self.offset, self.stype, self.shape
args, shape, off = leading_args(args, shape)
if off:
offset += stype.itemsize * off
value = asarray(value)
kind = value.dtype.kind
if kind in 'SU':
if kind == 'U':
value = npencode(value, 'utf8')
shape = value.shape
value = value.reshape(shape + (1,)).view('S1')
f = parent.handle.seek(offset)
if args:
# Must do read-modify-write for potentially non-contiguous write.
addr = f.tell()
v = fromfile(f, stype, prod(shape) if shape else 1).reshape(shape)
v[args] = value
value = v
f.seek(addr)
else:
value = ascontiguousarray(value, stype)
if value.shape != shape:
# Avoid the recent (numpy 1.10) broadcast_to function.
v = zeros(shape, stype)
v[()] = value
value = v
value.tofile(f)
class NCList(object):
"""NCLeaf wrapper for record variables."""
__slots__ = 'parent', 'leaf'
def __init__(self, parent, leaf):
self.parent = weakref.ref(parent)
self.leaf = leaf
@staticmethod
def islist():
return 1
@staticmethod
def isgroup():
return 0
isleaf = isgroup
def root(self):
return self.parent()
# len, iter, index, declare are list methods called by QList
def __len__(self):
return sum(self.parent().nrecs)
def __iter__(self):
for i in range(len(self)):
yield self.index(i)
def index(self, ndx):
nrecs = len(self)
if ndx < 0:
ndx = ndx + nrecs
if ndx < 0 or ndx >= nrecs:
return None # out of range, let caller raise any exception
parent = self.parent()
delta = parent.record_delta(ndx)
return self.leaf.shift_by(delta)
def declare(self, dtype, shape):
        parent = self.parent()
nrecs = len(self)
delta = parent.record_delta(nrecs)
return self.leaf.shift_by(delta)
```
#### File: qnd/qnd/pdbdump.py
```python
from __future__ import absolute_import
from datetime import datetime
import sys
from numpy import array, prod
from .pdbparse import _binary32, _binary64
PY2 = sys.version_info < (3,)
if PY2:
def itemsof(d): return d.iteritems() # noqa
else:
def itemsof(d): return d.items() # noqa
def flusher_for(root):
def _flusher(f):
return flusher(f, root)
return _flusher
def initializer_for(root):
def _initializer(f):
return initializer(f, root)
return _initializer
# header[:13] = b'!<<PDB:II>>!\n'
# header[13] = N count of single byte values (= 24 + sz_float + sz_double)
# header[14:20] = sz_ptr, sz_short, sz_int, sz_long, sz_float, sz_double
# header[20:23] = ord_short, ord_int, ord_long (1 = >, 2 = < order)
# header[23:23+sz_float] = (4, 3, 2, 1) for <f4 byte permutation
# header[... +sz_double] = (8, 7, 6, 5, 4, 3, 2, 1) for <f8
# header[14+N:21+N] = (nbits, e#, s#, -&, e&, s&, 1?) float format
# header[21+N:28+N] = (nbits, e#, s#, -&, e&, s&, 1?) double format
# header[28+N:...] = biasF\001biasD\001\n completes float and double formats
# ( 32, 8, 23, 0, 1, 9, 0, biasF= 127) for f4
# ( 64, 11, 52, 0, 1, 12, 0, biasD= 1023) for f8
# b'127\0011023\001\n' (10 bytes)
# Since numpy assumes these f4 and f8 formats, N = 21, and header has
# a total of 59 bytes to this point.
# header[...] = chart_addr\001symtab_addr\001\n
# Yorick allows an additional 128 unused bytes here to allow for longer
# chart and symtab addresses, but that is clearly overkill. A full 64-bit
# integer has a range of 2**64 = 1.6e19, and can thus be represented in 20
# decimal digits or fewer. Hence, the address fields can occupy at most 43
# bytes, for a 102 byte header. If both float and double were 16 bytes for
# some unfathomable reason, that would add 20 bytes to the header, bringing
# it to 122. Hence, 128 bytes is essentially guaranteed to be enough for
# all possible PDB headers.
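# A quick arithmetic check of the layout described above (a sketch assuming
# the standard IEEE f4/f8 primitives; it is not used by the writer below):
#   13 bytes  b'!<<PDB:II>>!\n' magic token
# + 22 bytes  N byte, six sizes, three byte orders, f4 and f8 permutations
# + 14 bytes  the two 7-value float and double format descriptors
# + 10 bytes  b'127\x011023\x01\n' bias strings
# = 59 bytes, matching the total quoted above.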
def initializer(f, root):
handle, chart = root.handle, root.chart # root is a PDBGroup
ifile = handle.current_file()
if ifile:
# Copy header and non-record vars from file 0 of the family.
f = handle.open(0)
header = f.read(128)
# We will also need to copy all non-record variables, so read
# them all now while we have file 0 open.
nrvars = [(item, item.read()) for item in _iter_nonrec(root)]
f = handle.open(ifile)
# Write wrong chart and symtab addresses, fixed by flusher later.
f.write(header)
handle.declared(0, None, len(header))
# Then write out all the non-record variables we read from file 0.
addr0 = handle.zero_address()
for item, value in nrvars:
item = item.shifted_copy(addr0)
stype, shape, addr = item.tsa
handle.declared(addr, stype[1], prod(shape) if shape else 1)
item.write(value)
return
# Initializing first file of family, so we need to initialize chart.
order, structal = chart.byteorder, chart.structal
if order is None:
# PDB byte order is 1 for >, 2 for <
# Here is a literal way to compute native byte order:
order = chart.byteorder = int(array([1]).view('u1')[0] + 1)
if structal is None:
chart.structal = structal = 0
primitives = chart.primitives
if not primitives:
descs = ((b'char', (1, 0, 0)), (b'short', (2, order, 2)),
(b'integer', (4, order, 4)), (b'long', (8, order, 8)),
(b'float', (4, order, 4, _binary32)),
(b'double', (8, order, 8, _binary64)),
(b'text', (1, 0, 0))) # QnD-specific type for strings
for name, desc in descs:
chart.add_primitive(name, desc)
names = b'short', b'integer', b'long', b'float', b'double'
prims = [primitives.get(name) for name in names]
iords = ((1 if p[0].str[0] == '>' else 2) for p in prims[:3])
ford, dord = [list(range(1, p[0].itemsize+1)) for p in prims[3:]]
if prims[3][0].str[0] == '<':
ford = ford[::-1]
if prims[4][0].str[0] == '<':
dord = dord[::-1]
sizes = [p[0].itemsize for p in [prims[2]] + prims]
header = (b'!<<PDB:II>>!\n' + tobytes((24 + sizes[4] + sizes[5],)) +
tobytes(sizes) + tobytes(iords) + tobytes(ford) + tobytes(dord))
    # p[3] is the primitive desc tuple; its fourth element, if present,
    # is the floating point format description (fpbits).
    fbits, dbits = [(p[3][3] if p[3] and len(p[3]) > 3 else None)
                    for p in prims[3:]]
if fbits is None:
fbits = _binary32
if dbits is None:
dbits = _binary64
header += tobytes(fbits[:7]) + tobytes(dbits[:7])
header += _byt(fbits[7]) + b'\x01' + _byt(dbits[7]) + b'\x01\n'
# Record location of chart and symtab addresses, insert dummy values.
chart.csaddr = len(header)
header += b'128\x01128\x01\n'
header += b'\x00' * (128 - len(header))
f.write(header)
# Set nextaddr to 128, which is where first data should begin.
handle.declared(0, None, len(header))
def _iter_nonrec(root):
for name in root:
item = root.items[name]
if item.isleaf():
yield item
elif item.isgroup() and '__class__' not in item:
# Recurse into groups, but not into lists or objects of any type.
            for leaf in _iter_nonrec(item):
                yield leaf
def flusher(f, root):
handle, chart = root.handle, root.chart
_, chart_addr = handle.next_address(both=1)
blockadds = handle.zero_address(), chart_addr
f.seek(chart_addr)
# Begin by writing just * short integer long float double char to chart,
# reserving any additional primitives to the PrimitiveTypes extra.
# This is what yorick does.
primitives, aligns = chart.primitives, []
for name in (b'*', b'short', b'integer', b'long', b'float', b'double',
b'char'):
if name == b'*':
prim = primitives.get(b'char *', primitives[b'long'])
else:
prim = primitives[name]
aligns.append(prim[2]) # prim = stype, dtype, align, desc or None
size = prim[0].itemsize
f.write(name + b'\x01' + _byt(size) + b'\x01\n')
structs = chart.structs
for name, struct in itemsof(structs):
size = struct[0].itemsize
f.write(name + b'\x01' + _byt(size) + b'\x01')
for tname, shape in itemsof(struct[3]):
if shape:
shape = b'[' + b','.join(_byt(s) for s in shape) + b']'
else:
shape = b''
f.write(tname + b' ' + name + shape + b'\x01')
f.write(b'\n')
f.write(b'\x02\n')
# Next comes the symbol table.
symtab_addr = f.tell()
prefix = b''
for name, item in itemsof(root.items):
if item.isgroup() or item.islist() == 2:
prefix = b'/'
break
blocks = []
_dump_group(f, prefix, False, root, blockadds, blocks)
f.write(b'\n')
# Finally comes the extras section.
f.write(b'Offset:1\n') # Default index origin always 1 to match yorick.
# Alignment: char, *, short, integer, long, float, double, \n
f.write(b'Alignment:' + tobytes([1] + aligns[:6]) + b'\n')
f.write(b'Struct-Alignment:' + _byt(chart.structal) + b'\n')
# Yorick also writes synonym Struct-Align for old yorick bug workaround?
# Date format "Sun Dec 7 06:00:00 1941" exactly 24 characters.
date = datetime.today().ctime()
if not PY2:
date = date.encode()
f.write(b'Version:11|' + date + b'\n') # Yorick writes version 11.
# PDBLib requires Major-Order: extra before Blocks: extra.
# Write array dimensions in Fortran order to match yorick and basis.
f.write(b'Major-Order:102\n')
hasdirs = bool(prefix)
f.write(b'Has-Directories:' + _byt(int(hasdirs)) + b'\n')
f.write(b'Blocks:\n')
for name, addr, nitems, nblocks in blocks:
nitems = b' ' + _byt(nitems)
f.write(name + b'\x01' + _byt(nblocks) + b' ' +
b' '.join(_byt(a) + nitems for a in addr) + b'\n')
f.write(b'\x02\n'
b'Casts:\n\x02\n'
b'Primitive-Types:\n')
if hasdirs:
f.write(b'Directory\x011\x010\x01-1\x01DEFORDER\x01NO-CONV\x01\n')
for name, prim in itemsof(chart.primitives):
if name in (b'*', b'char', b'short', b'integer', b'long',
b'float', b'double', b'Directory'):
continue # skip standard data types, as yorick does
stype, _, align, desc = prim
if desc: # desc = size, order, align, fpbits
size, order, align = desc[:3]
fpbits = desc[3] if len(desc) > 3 else None
if fpbits:
kind = b'FLOAT\x01' + b'\x01'.join(_byt(i) for i in fpbits)
else:
kind = b'FIX'
if not order:
order, kind = b'-1\x01DEFORDER\x01', b'NO-CONV'
elif not hasattr(order, '__getitem__'):
order = _byt(int(order)) + b'\x01DEFORDER\x01'
else:
order = _prim_order(order, size)
else:
size, kind, order = stype.itemsize, stype.kind, stype.str[0]
if kind == 'V':
order, kind = b'-1\x01DEFORDER\x01', b'NO-CONV'
else:
                if kind == 'f':
order = _prim_order(range(1, size+1) if order == '>' else
range(size+1, 0, -1), size)
kind = _FLOAT_KINDS[size]
else:
order = ((b'1' if order == '>' else b'2') +
b'\x01DEFORDER\x01')
kind = b'FIX'
f.write(name + b'\x01' + _byt(size) + b'\x01' + _byt(align) +
b'\x01' + order + kind + b'\x01\n')
f.write(b'\x02\n'
b'\n\n') # PDB extras and file ends with two newlines.
# Finally, poke chart and symtab addresses into file header.
csaddr = chart.csaddr
if csaddr:
f.seek(csaddr)
f.write(_byt(chart_addr) + b'\x01' + _byt(symtab_addr) + b'\x01\n')
def _dump_group(f, prefix, islist, group, blockadds, blocks):
# First dump the group itself as a bogus Directory object.
ignore_first = False
if islist:
group = group.parent()
ignore_first = True
if prefix:
f.write(prefix + b'\x01Directory\x011\x01127\x01\n')
for name in group:
if ignore_first:
# Write a bogus '_' symtab entry to indicate a QList.
f.write(prefix + b'_\x01char\x011\x01127\x01\n')
ignore_first = False
continue
item = group.lookup(name)
islist = item.islist()
name = prefix + (name if PY2 else name.encode('utf8'))
if item.isgroup() or islist == 2:
# dump subdirectory
_dump_group(f, name + b'/', islist, item, blockadds, blocks)
continue
# dump leaf (including block variables)
typename, shape, addr = (item.parent() if islist else item).tsa
typename = typename[3] # dtype, stype, align, typename
if islist:
shape = (1,) + (shape or ())
if shape:
size = prod(shape)
# Set all index origins to 1 to match yorick.
shape = b'\x01'.join(b'1\x01' + _byt(s)
for s in reversed(shape)) + b'\x01'
else:
size = 1
shape = b''
if islist:
a = []
amin, amax = blockadds
for ad in addr:
ad -= amin
if ad >= 0 and ad < amax:
a.append(ad)
blocks.append((name, a, size, len(a)))
addr = a[0]
f.write(name + b'\x01' + typename + b'\x01' + _byt(size) +
b'\x01' + _byt(addr) + b'\x01' + shape + b'\n')
if PY2:
def _byt(number): return str(number) # noqa
def tobytes(seq): return ''.join(chr(i) for i in seq) # noqa
else:
def _byt(number): return str(number).encode() # noqa
tobytes = bytes
def _prim_order(order, size):
    return ((b'1' if order[0] <= (size >> 1) else b'2') + b'\x01ORDER\x01' +
            b'\x01'.join(_byt(i) for i in order) + b'\x01')
# Primitive-Types representations of IEEE 754 binary32 and binary64 formats:
_FLOAT_KINDS = {4: b'FLOAT\x0132\x018\x0123\x010\x011\x019\x010\x01127\x01',
8: b'FLOAT\x0164\x0111\x0152\x010\x011\x0112\x010\x011023\x01'}
# To support qnd attributes, here is the strategy in the flusher:
# 1. Create a separate symbol table for attributes, global attributes
# having names ':aname' and variable attributes 'vname:aname'.
# 2. Write this entire tree of fake symbols beginning at the first address
# after the data.
# 3. The chart address becomes the first address after the attributes.
# 4. Write the fake symbol metadata to the symtab after the real symbols.
# 5. Be sure the nextaddr for the file is set to the beginning address
# of the attribute data, rather than to the chart address.
# On read, we can recognize the first symbol with a : in its name as
# the first attribute, and make sure to reset nextaddr to its address.
# All of the attributes should be read when the file is opened; they are
# part of the metadata like the chart, symtab, and extras.
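# For example, under the scheme above a 'units' attribute on a variable 'x'
# would be written as a fake symbol named 'x:units', and a global 'date'
# attribute as ':date' (the attribute names here are illustrative only).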
```
#### File: qnd/qnd/pdbf.py
```python
from __future__ import absolute_import
import sys
import weakref
from numbers import Integral
from collections import OrderedDict
from warnings import warn
from numpy import (zeros, arange, fromfile, prod, array, ascontiguousarray,
dtype as npdtype)
from .frontend import QGroup, QnDList
from .generic import opener
from .pdbparse import parser, PDBChart
from .pdbdump import flusher_for, initializer_for
from .utils import leading_args
__all__ = ['openpdb']
PY2 = sys.version_info < (3,)
if PY2:
range = xrange
def openpdb(filename, mode='r', auto=1, **kwargs):
"""Open PDB file or family, and wrap it in a QnD QGroup.
Parameters
----------
filename : str
Name of file to open. See notes below for file family.
mode : str
One of 'r' (default, read-only), 'r+' (read-write, must exist),
'a' (read-write, create if does not exist), 'w' (create, clobber if
exists), 'w-' (create, fail if exists).
auto : int
        The initial state of auto-read mode. If the QGroup handle returned
        by openpdb is `f`, then ``f.varname`` reads an array variable, but not
a subgroup when auto=1, the default. With auto=0, the variable
reference reads neither (permitting later partial reads in the case
of array variables). With auto=2, a variable reference recursively
reads subgroups, bringing a whole tree into memory.
**kwargs
Other keywords. The `maxsize` keyword sets the size of files in a
family generated in ``recording==1`` mode; a new file will begin when
the first item in a new record would begin beyond `maxsize`. The
default maxsize is 128 MiB (134 MB). The `order` keyword can be '>'
or '<' to force the byte order in a new file; by default the byte
order is the native order. File families always have the same order
for every file, so `order` is ignored if any files exist.
Returns
-------
f : QGroup
A file handle implementing the QnD interface.
Notes
-----
The `filename` may be an iterable, one string per file in order. The
sequence may extend beyond the files which actually exist for 'r+', 'a',
'w', or 'w-' modes.
Alternatively `filename` specifies a family if it contains shell globbing
wildcard characters. Existing matching files are sorted first by length,
then alphabetically (ensuring that 'file100' comes after 'file99', for
example). If there is only a single wildcard group, it also serves to
define a sequence of future family names beyond those currently existing
for 'r+', 'a', 'w', or 'w-' modes. A '?' pattern is treated the same as
a '[0-9]' pattern if all its matches are digits or if the pattern
matches no existing files. Similarly, a '*' acts like the minimum number
of all-digit matches, or three digits if there are no matches.
"""
maxsize = kwargs.pop('maxsize', 134217728)
order = kwargs.pop('order', None)
if order:
if order not in '<>':
raise ValueError("order must be either > or <")
order = 1 if order == '>' else 2
kwargs['nextaddr_mode'] = 1 # tell opener to initialize nextaddr to 0
handle, n = opener(filename, mode, **kwargs)
root = PDBGroup(handle, maxsize)
for i in range(n):
try:
parser(handle, root, i)
except IOError:
# Something went terribly wrong. If this is first file, we die.
name = handle.filename(i)
if not i:
raise IOError("Fatal errors opening PDB file "
"".format(name))
handle.open(i-1)
warn("file family stopped by incompatible {}".format(name))
if not n and order:
root.chart.byteorder = order
# If file was freshly created, setting initializer calls it.
handle.callbacks(flusher_for(root), initializer_for(root))
# If any files exist, parser has set nextaddr to the chart address
# of the last existing file in the family. If there are no record
# variables, we let this stand. However, if there are record variables,
# and the family is writable, we set nextaddr to the zero address of
# the next file beyond all existing files. This causes any new records
# to be placed in a new file, leaving all existing files in the
# family undisturbed.
mode = mode.lower()
if ((mode.startswith('a') or mode.startswith('r+')) and
handle.state[4] is not None and _has_records(root)):
handle.declared(handle.zero_address(len(handle.state[2])), None, 0)
return QGroup(root, auto=auto)
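# Typical usage (a sketch only; the file pattern and variable name below are
# illustrative, and assume the usual QnD conventions where attribute access
# reads a variable and attribute assignment writes one):
#     f = openpdb('run*.pdb', 'a', maxsize=64*2**20)
#     f.x = [1.0, 2.0, 3.0]
#     value = f.x
#     f.close()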
def _has_records(root):
for name in root:
item = root.items[name]
if item.islist() == 1:
return True
# Recurse into groups, but not into lists or objects of any type.
if item.isgroup() and '__class__' not in item and _has_records(item):
return True
return False
class PDBGroup(object):
"""A directory in a PDB file, or a whole file or file family.
"""
def __init__(self, parent, maxsize=134217728):
if not isinstance(parent, PDBGroup): # this is root group
self.root = weakref.ref(self)
self.maxsize = maxsize
self.maxblocks = 0
self.handle = parent
self.chart = PDBChart(self)
else:
self.root = parent.root
self.items = OrderedDict()
self.attrs = None
@staticmethod
def isgroup():
return 1
@staticmethod
def islist():
return 0
isleaf = islist
def close(self):
self.root().handle.close()
def flush(self):
self.root().handle.flush()
def __len__(self):
return len(self.items)
def __iter__(self):
return iter(self.items)
def lookup(self, name):
item = self.items.get(name)
if isinstance(item, PDBGroup):
item = QnDList.fromgroup(item)
return item
def declare(self, name, dtype, shape, unlim=None, addr=-1):
current = self.items.get(name)
if dtype == dict:
if current is not None:
if current.isgroup():
return current
raise KeyError("already a non-group item {}".format(name))
item = PDBGroup(self)
elif dtype == list:
if current is not None:
if current.islist() == 2:
return current
raise KeyError("already a non-list item {}".format(name))
item = QnDList(PDBGroup(self), 1)
else:
if current is not None:
raise KeyError("attempt to redeclare {}".format(name))
if dtype is None and name == '_':
# Assume we are creating a QList.
dtype = npdtype('u1')
elif isinstance(dtype, npdtype) and dtype.kind == 'S':
# frontend never passes 'U' dtype
shape = shape + (dtype.itemsize,)
dtype = npdtype('S1')
item = PDBLeaf(self, addr, dtype, shape, unlim)
if unlim:
item = QnDList(item, None if hasattr(addr, '__iter__') or
addr != -1 else 1)
self.items[name] = item
return item
# This is used in pdbparse._endparse to declare or check symbols
# against declarations from previous files in a family.
def _leaf_declare(self, name, dtype, shape, addr):
item = self.items.get(name)
unlim = isinstance(addr, list)
if item is not None:
tsa = None
if not isinstance(item, PDBLeaf):
item = None if isinstance(item, PDBGroup) else item.parent()
if item is not None:
tsa = item.tsa
if (unlim != isinstance(tsa[2], list) or
tsa[:2] != (dtype, shape)):
item = None
elif unlim:
tsa[2].extend(addr)
if item is None:
raise IOError("incompatible redeclaration of {}".format(name))
return
self.declare(name, dtype, shape, unlim, addr)
def attget(self, vname):
item = self.lookup(vname) if vname else self
if isinstance(item, QnDList):
item = item.parent()
attrs = item.attrs
if attrs is None:
item.attrs = attrs = PDBAttrs()
return attrs
def attset(self, vname, aname, dtype, shape, value):
item = self.lookup(vname) if vname else self
if isinstance(item, QnDList):
item = item.parent()
attrs = item.attrs
if attrs is None:
item.attrs = attrs = PDBAttrs()
if value.dtype != dtype or value.shape != shape:
v = zeros(shape, dtype)
v[()] = value
value = v
attrs[aname] = value
class PDBLeaf(object):
"""An ndarray in a PDB file.
(Eventual stretch goal is to implement None and zero-length arrays.)
"""
def __init__(self, parent, addr, dtype, shape, unlim):
if dtype is None or (shape and not all(shape)):
raise NotImplementedError("None or zero length array")
root = parent.root()
if not isinstance(dtype, tuple):
# Construct full data type: (dtype, stype, align, typename)
dtype = (dtype,) + root.chart.find_or_create(dtype)
self.parent = weakref.ref(parent)
self.attrs = None
if hasattr(addr, '__iter__'):
unlim = 1
if not isinstance(addr, list):
addr = list(addr)
elif addr == -1:
stype, align = dtype[1:3]
handle = root.handle
addr = _align(handle.next_address(), align)
handle.declared(addr, stype, prod(shape) if shape else 1)
if unlim:
addr = [addr]
elif unlim:
addr = [int(addr)]
else:
addr = int(addr)
self.tsa = dtype, shape, addr
@staticmethod
def isleaf():
return 1
@staticmethod
def isgroup():
return 0
islist = isgroup
def root(self):
return self.parent().root()
def query(self):
# return dtype, shape, sshape
dtype, shape, addr = self.tsa
if isinstance(addr, list):
# Do this for consistency with treatment of h5py chunked data.
shape = (len(addr),) + shape
return dtype[0], shape, shape
def read(self, args=()):
dtype, shape, addr = self.tsa
dtype, stype, _, typename = dtype
istext = typename == b'text'
if isinstance(addr, list):
arg0 = args[0] if args else slice(None)
args = args[1:]
if not isinstance(arg0, Integral):
arg0 = arange(len(addr))[arg0]
if arg0.ndim == 1:
return array([self.read((a,) + args) for a in arg0], dtype)
elif arg0.ndim:
raise TypeError("block variable leading index too complex")
addr = addr[arg0]
root = self.root()
chart = root.chart
nopartial = chart.nopartial(typename)
if nopartial is None:
typename = None
if typename and nopartial:
offset = 0
else:
args, shape, offset = leading_args(args, shape)
if offset:
addr += dtype.itemsize * offset
f = root.handle.seek(addr)
value = fromfile(f, stype, prod(shape) if shape else 1)
if not nopartial:
value = value.reshape(shape)[args]
if typename:
value = chart.read_special(f, typename, value)
stype = dtype = value.dtype
if nopartial:
value = value.reshape(shape)[args]
if istext and value.shape:
return value.view('S' + str(value.shape[-1]))[..., 0]
return value if stype is dtype else value.astype(dtype)
def write(self, value, args=()):
dtype, shape, addr = self.tsa
dtype, stype, align, typename = dtype[:4]
arg0 = args[0] if args else slice(None)
args = args[1:]
root = self.root()
handle = root.handle
if root.chart.nopartial(typename) is not None:
raise TypeError("write to pointer type {} unsupported"
"".format(typename.decode('latin1')))
if isinstance(addr, list):
# This variable has blocks.
if not isinstance(arg0, Integral):
arg0 = arange(len(addr))[arg0]
if arg0.size > 1:
raise TypeError("can only write block variables one "
"block at a time")
arg0 = arg0.reshape(())
newfile = arg0 == len(addr)
if newfile:
# This is a new block for this variable, but not first block.
# TODO: Should prevent partial writes here?
selfaddr = addr
addr, faddr = handle.next_address(both=1)
if addr is None:
pass # TODO: issue warning here and below?
if faddr >= root.maxsize and arg0 >= root.maxblocks:
a = handle.next_address(newfile=1)
if a is not None:
addr = a # Next file in family has been created.
else:
# No next filename, and current file exceeds maxsize.
pass # TODO: issue warning here and above?
addr = _align(addr, align)
selfaddr.append(addr)
handle.declared(addr, stype, prod(shape) if shape else 1)
else:
addr = addr[arg0]
else:
newfile = False
args, shape, offset = leading_args(args, shape)
if offset:
addr += dtype.itemsize * offset
seeker = handle.seek
f = seeker(addr)
if args:
# Must do read-modify-write for potentially non-contiguous write.
v = fromfile(f, stype, prod(shape) if shape else 1).reshape(shape)
v[args] = value
value = v
f = seeker(addr)
else:
if stype.kind == 'S' and shape:
value = value.astype('S' + str(shape[-1]))
value = value.reshape(value.shape + (1,)).view('S1')
else:
value = ascontiguousarray(value, stype)
if value.shape != shape:
# Avoid the recent (numpy 1.10) broadcast_to function.
v = zeros(shape, stype)
v[()] = value
value = v
value.tofile(f)
def shifted_copy(self, delta):
# Special helper for copying non-record variables for first file
# to later files in a family.
dtype, shape, addr = self.tsa
if isinstance(addr, list):
raise TypeError("cannot make shifted copy of record variable")
parent = self.parent()
if array(addr, 'u8') >> array(parent.root().handle.abits, 'u8'):
raise TypeError("expecting non-record vars to be in first file")
return PDBLeaf(self.parent(), addr+delta, dtype, shape, 0)
def _align(addr, align):
if align > 1:
rem = addr & (align - 1)
if rem:
addr += align - rem
return addr
class PDBAttrs(dict):
"""Variable attributes are not a standard feature of PDB.
We implement a poor man's version here as follows: Attributes are
held in memory until the metadata is flushed, at which point they
are written with name 'variable_path:attribute_name' immediately
before the metadata. If the file is extended, new data overwrites
old attributes, which are rewritten just before the metadata once
again.
Hence, in memory, a dict suffices.
"""
__slots__ = ()
# qnd.QAttribute uses only __iter__, get, items, __len__, __contains__
# PDBGroup uses __setitem__
# Only thing that needs fixing is mapping items to iteritems for python2.
if PY2:
def items(self):
return self.iteritems()
else:
pass
```
|
{
"source": "JDSanti/Server_Checker",
"score": 3
}
|
#### File: JDSanti/Server_Checker/server.py
```python
import socket
import datetime
import yagmail
import os
from twilio.rest import Client
#Yagmail SMTP
yag = yagmail.SMTP('email', 'password')
#Function to connect socket
def is_running(site):
"""This function attempts to connect to the given server using a socket.
Returns: Whether or not it was able to connect to the server."""
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((site, 80))
return True
except:
return False
#Function to log into a txt file
def save_log(state):
f = open("server_log.txt", "a")
f.write(state)
f.close()
#Function to send email log
def send_email(site):
contents = [
f'There is a problem with {site} as of {date_time}\n'
]
yag.send('<EMAIL>', 'Server Status', contents)
#Function to send text log
def send_text(site):
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)
message = client.messages \
.create(
                     body=f'There is a problem with {site} as of {date_time}',
from_='+15555555555',
to='+15555555555'
)
#Main Function
if __name__ == "__main__":
#Array holding what servers you want to check to see if they respond
servers_to_check=["server.com"]
#Loop over every server
for x in servers_to_check:
site = (x)
date_time = datetime.datetime.now()
if is_running(f'{site}'):
#If it does respond, log successful connection
state = str(f"{site} is running as of {date_time}\n")
#Log
save_log(state)
else:
#If it doesn't respond, log the time and date of issue and send email and text notification
state = str(f'There is a problem with {site} as of {date_time}\n')
#Log
save_log(state)
# Send Email
send_email(site)
send_text(site)
```
|
{
"source": "JDSanto/frux-chat",
"score": 3
}
|
#### File: frux_chat/services/authorization.py
```python
import os
from functools import wraps
from flask import request
def requires_api_key(method):
    @wraps(method)
    def wrapper(*args, **kwargs):
key = request.headers.get('x-api-key')
if not key:
return {'error': 'Missing x-api-key header'}, 401
if key != os.environ.get('API_KEY'):
return {'error': 'Invalid x-api-key header'}, 401
return method(*args, **kwargs)
return wrapper
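# Example wiring (a sketch; the blueprint, route, and view names are only
# illustrative and not part of this module):
#     from flask import Blueprint
#     bp = Blueprint('health', __name__)
#
#     @bp.route('/ping')
#     @requires_api_key
#     def ping():
#         return {'status': 'ok'}, 200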
```
#### File: frux_chat/services/notifications.py
```python
import logging
from exponent_server_sdk import (
DeviceNotRegisteredError,
PushClient,
PushMessage,
PushServerError,
PushTicketError,
)
from frux_chat.services.database import database
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(
logging.Formatter('[%(asctime)s] [%(levelname)s] %(module)s: "%(message)s"')
)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
def notify_device(token, title='', body='New event!', notification_data=None):
logger.info('Sending new notification: -- %s -- %s -- %s', token, title, body)
if not notification_data:
notification_data = {}
try:
PushClient().publish(
PushMessage(to=token, title=title, body=body, data=notification_data)
)
except (ValueError, DeviceNotRegisteredError, PushServerError, PushTicketError):
pass
def set_tag_and_project(tag, project_id):
'''Mix the tag and the project to store in the database'''
return f'{tag}_{project_id}'
def notify_tag(tag, project_id, params):
# TODO: Bulk notifications/inserts
title, body = TAG_MAPPER[tag](params)
users = database.get_subscriptions_users(set_tag_and_project(tag, project_id))
notification_data = {'project_id': project_id}
for user in users:
notify_device(user['token'], title, body, notification_data)
database.insert_notification(user['_id'], title, body, project_id)
# Notifications spec
# NewSeederNotification -> X started funding your project
# NewStageNotification_noncreator -> the project entered a given stage (similar to the one below)
# NewStageNotification_creator -> the seer released the funds for a given stage
# NewSeer_creator -> a seer was assigned to your project
# NewSeer_seer -> a project was assigned to you to supervise as its seer
# ChangeStateNotification -> the project entered funding / in progress / was completed
def new_seeder(data):
project = data['project']
username = data['username']
return ('New seeder!', f'{username} has started funding {project}!')
def change_state(data):
project = data['project']
state = data['state']
if state == "FUNDING":
body = f'{project} is looking for new seeders!'
if state == "IN_PROGRESS":
body = f'{project} has started development!'
if state == "COMPLETE":
body = f'{project} has finished development!'
return ('New progress!', body)
def new_stage_non_creator(data):
project = data['project']
stage_number = data['stage_number']
return (
'Stage finished!',
f'{project} has started developing their Stage {stage_number}!',
)
def new_stage_creator(data):
project = data['project']
stage_number = data['stage_number']
name = data['username']
return (
'Stage funds released!',
f'{name} has released the funds for Stage {stage_number} of {project}!',
)
def new_seer_creator(data):
project = data['project']
name = data['username']
return ('Seer assigned!', f'{name} has been assigned as the {project} supervisor!')
def new_seer_seer(data):
project = data['project']
return ('Project assigned!', f'You\'ve been assigned to supervise {project}!')
TAG_MAPPER = {
'NewSeederNotification': new_seeder,
'NewStageNotification_noncreator': new_stage_non_creator,
'NewStageNotification_creator': new_stage_creator,
'NewSeer_creator': new_seer_creator,
'NewSeer_seer': new_seer_seer,
'ChangeStateNotification': change_state,
}
# Role spec
# ProjectCreator
# - Who subscribes? the creator of a project, when it is created
# - Which notifications does it receive? NewSeederNotification, NewStageNotification_creator, NewSeer_creator, ChangeStateNotification
# ProjectWatcher
# - Who subscribes? users who liked the project
# - Which notifications does it receive? ChangeStateNotification
# ProjectSeer
# - Who subscribes? the seer (supervisor) of a project
# - Which notifications does it receive? NewSeederNotification, NewSeer_seer, ChangeStateNotification
# ProjectSeeder
# - Who subscribes? users who funded the project
# - Which notifications does it receive? NewStageNotification_noncreator
# The chat is NOT handled through subscriptions
ROLE_MAPPER = {
'ProjectCreator': [
'NewSeederNotification',
'NewStageNotification_creator',
'NewSeer_creator',
'ChangeStateNotification',
],
'ProjectWatcher': ['ChangeStateNotification'],
'ProjectSeer': [
'NewSeederNotification',
'NewSeer_seer',
'ChangeStateNotification',
],
'ProjectSeeder': ['NewStageNotification_noncreator'],
}
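# For example, a user subscribed with the 'ProjectCreator' role for project
# '42' (the id is illustrative only) is looked up under one combined key per
# tag in that role's list, built with set_tag_and_project:
#     [set_tag_and_project(tag, '42') for tag in ROLE_MAPPER['ProjectCreator']]
#     # -> ['NewSeederNotification_42', 'NewStageNotification_creator_42',
#     #     'NewSeer_creator_42', 'ChangeStateNotification_42']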
```
|
{
"source": "jdschleicher/CumulusCI",
"score": 3
}
|
#### File: cumulusci/cli/utils.py
```python
from collections import defaultdict
def group_items(items):
"""Given a list of dicts with 'group' keys,
    returns those items in lists, categorized by group."""
groups = defaultdict(list)
for item in items:
group_name = item["group"] or "Other"
groups[group_name].append([item["name"], item["description"]])
return groups
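# Example (illustrative data only): items keep their group name, and items
# without one fall under "Other"; the result is a defaultdict of lists.
#     group_items([
#         {"group": "Tasks", "name": "run", "description": "Run it"},
#         {"group": None, "name": "misc", "description": "Something else"},
#     ])
#     # -> {"Tasks": [["run", "Run it"]], "Other": [["misc", "Something else"]]}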
```
#### File: core/tests/test_keychain.py
```python
import json
import os
import tempfile
import unittest
from unittest import mock
from pathlib import Path
import pytest
from cumulusci.core.config import BaseConfig
from cumulusci.core.config import UniversalConfig
from cumulusci.core.config import BaseProjectConfig
from cumulusci.core.config import ConnectedAppOAuthConfig
from cumulusci.core.config import OrgConfig
from cumulusci.core.config import ScratchOrgConfig
from cumulusci.core.config import ServiceConfig
from cumulusci.core.keychain import BaseProjectKeychain
from cumulusci.core.keychain import BaseEncryptedProjectKeychain
from cumulusci.core.keychain import EncryptedFileProjectKeychain
from cumulusci.core.keychain import EnvironmentProjectKeychain
from cumulusci.core.keychain.encrypted_file_project_keychain import GlobalOrg
from cumulusci.core.exceptions import ConfigError
from cumulusci.core.exceptions import KeychainKeyNotFound
from cumulusci.core.exceptions import ServiceNotConfigured
from cumulusci.core.exceptions import ServiceNotValid
from cumulusci.core.exceptions import OrgNotFound
from cumulusci.core.tests.utils import EnvironmentVarGuard
__location__ = os.path.dirname(os.path.realpath(__file__))
class ProjectKeychainTestMixin(unittest.TestCase):
keychain_class = BaseProjectKeychain
def setUp(self):
self.universal_config = UniversalConfig()
self.project_config = BaseProjectConfig(
self.universal_config, config={"no_yaml": True}
)
self.project_config.config["services"] = {
"connected_app": {"attributes": {"test": {"required": True}}},
"github": {"attributes": {"name": {"required": True}, "password": {}}},
"not_configured": {"attributes": {"foo": {"required": True}}},
}
self.project_config.project__name = "TestProject"
self.services = {
"connected_app": ServiceConfig({"test": "value"}),
"github": ServiceConfig({"name": "hub"}),
}
self.org_config = OrgConfig({"foo": "bar"}, "test")
self.scratch_org_config = ScratchOrgConfig(
{"foo": "bar", "scratch": True}, "test_scratch"
)
self.key = "0123456789123456"
def test_init(self):
keychain = self.keychain_class(self.project_config, self.key)
self.assertEqual(keychain.project_config, self.project_config)
self.assertEqual(keychain.key, self.key)
def test_set_non_existant_service(self, project=False):
keychain = self.keychain_class(self.project_config, self.key)
with self.assertRaises(ServiceNotValid):
keychain.set_service("doesnotexist", ServiceConfig({"name": ""}), project)
def test_set_invalid_service(self, project=False):
keychain = self.keychain_class(self.project_config, self.key)
with self.assertRaises(ServiceNotValid):
keychain.set_service("github", ServiceConfig({"name": ""}), project)
def test_get_service_not_configured(self):
keychain = self.keychain_class(self.project_config, self.key)
with self.assertRaises(ServiceNotConfigured):
keychain.get_service("not_configured")
def test_change_key(self):
new_key = "9876543210987654"
keychain = self.keychain_class(self.project_config, self.key)
keychain.set_org(self.org_config)
keychain.set_service("connected_app", self.services["connected_app"])
keychain.set_service("github", self.services["github"])
keychain.change_key(new_key)
self.assertEqual(keychain.key, new_key)
self.assertEqual(
keychain.get_service("connected_app").config,
self.services["connected_app"].config,
)
self.assertEqual(
keychain.get_service("github").config, self.services["github"].config
)
self.assertEqual(keychain.get_org("test").config, self.org_config.config)
def test_set_service_github(self, project=False):
keychain = self.keychain_class(self.project_config, self.key)
keychain.set_service("github", self.services["github"], project)
self.assertEqual(
keychain.get_service("github").config, self.services["github"].config
)
def test_set_and_get_org(self, global_org=False):
keychain = self.keychain_class(self.project_config, self.key)
self.org_config.global_org = global_org
keychain.set_org(self.org_config, global_org)
self.assertEqual(list(keychain.orgs.keys()), ["test"])
self.assertEqual(keychain.get_org("test").config, self.org_config.config)
def test_set_and_get_scratch_org(self, global_org=False):
keychain = self.keychain_class(self.project_config, self.key)
keychain.set_org(self.scratch_org_config, global_org)
self.assertEqual(list(keychain.orgs.keys()), ["test_scratch"])
org = keychain.get_org("test_scratch")
self.assertEqual(org.config, self.scratch_org_config.config)
self.assertEqual(org.__class__, ScratchOrgConfig)
def test_load_scratch_orgs_none(self):
keychain = self.keychain_class(self.project_config, self.key)
self.assertEqual(list(keychain.orgs), [])
def test_load_scratch_orgs_create_one(self):
self.project_config.config["orgs"] = {}
self.project_config.config["orgs"]["scratch"] = {}
self.project_config.config["orgs"]["scratch"]["test_scratch_auto"] = {}
keychain = self.keychain_class(self.project_config, self.key)
self.assertEqual(list(keychain.orgs), ["test_scratch_auto"])
def test_load_scratch_orgs_existing_org(self):
self.project_config.config["orgs"] = {}
self.project_config.config["orgs"]["scratch"] = {}
self.project_config.config["orgs"]["scratch"]["test"] = {}
keychain = self.keychain_class(self.project_config, self.key)
keychain.set_org(OrgConfig({}, "test"))
self.assertEqual(list(keychain.orgs), ["test"])
org = keychain.get_org("test")
self.assertEqual(org.scratch, None)
def test_get_org_not_found(self):
keychain = self.keychain_class(self.project_config, self.key)
with self.assertRaises(OrgNotFound):
keychain.get_org("test")
def test_get_default_org(self):
keychain = self.keychain_class(self.project_config, self.key)
org_config = self.org_config.config.copy()
org_config = OrgConfig(org_config, "test", keychain=keychain)
org_config.save()
keychain.set_default_org("test")
org_config.config["default"] = True
self.assertEqual(keychain.get_default_org()[1].config, org_config.config)
def test_get_default_org_no_default(self):
keychain = self.keychain_class(self.project_config, self.key)
self.assertEqual(keychain.get_default_org()[1], None)
@mock.patch("sarge.Command")
def test_set_default_org(self, Command):
keychain = self.keychain_class(self.project_config, self.key)
org_config = self.org_config.config.copy()
org_config = OrgConfig(org_config, "test")
keychain.set_org(org_config)
keychain.set_default_org("test")
expected_org_config = org_config.config.copy()
expected_org_config["default"] = True
self.assertEqual(expected_org_config, keychain.get_default_org()[1].config)
@mock.patch("sarge.Command")
def test_unset_default_org(self, Command):
keychain = self.keychain_class(self.project_config, self.key)
org_config = self.org_config.config.copy()
org_config = OrgConfig(org_config, "test")
org_config.config["default"] = True
keychain.set_org(org_config)
keychain.unset_default_org()
self.assertEqual(keychain.get_default_org()[1], None)
def test_list_orgs(self):
keychain = self.keychain_class(self.project_config, self.key)
keychain.set_org(self.org_config)
self.assertEqual(keychain.list_orgs(), ["test"])
def test_list_orgs_empty(self):
keychain = self.keychain_class(self.project_config, self.key)
self.assertEqual(keychain.list_orgs(), [])
class TestBaseProjectKeychain(ProjectKeychainTestMixin):
def test_convert_connected_app(self):
project_config = BaseProjectConfig(
self.universal_config,
{
"services": {
"connected_app": {
"attributes": {
"callback_url": {},
"client_id": {},
"client_secret": {},
}
}
}
},
)
keychain = self.keychain_class(project_config, self.key)
app_config = {
"callback_url": "http://localhost:8080/callback",
"client_id": "CLIENT",
"client_secret": "SECRET",
}
keychain.config["app"] = BaseConfig(app_config)
keychain._convert_connected_app()
self.assertEqual(app_config, keychain.get_service("connected_app").config)
def test_create_scratch_org(self):
project_config = BaseProjectConfig(
self.universal_config, {"orgs": {"scratch": {"dev": {}}}}
)
keychain = self.keychain_class(project_config, self.key)
keychain.set_org = mock.Mock()
keychain.create_scratch_org("test", "dev", days=3)
org_config = keychain.set_org.call_args[0][0]
self.assertEqual(3, org_config.days)
@mock.patch("cumulusci.core.keychain.base_project_keychain.cleanup_org_cache_dirs")
def test_remove_org(self, cleanup_org_cache_dirs):
keychain = self.keychain_class(self.project_config, self.key)
keychain.set_org(self.org_config)
keychain.remove_org("test")
self.assertNotIn("test", keychain.orgs)
assert cleanup_org_cache_dirs.called_once_with(keychain, self.project_config)
class TestEnvironmentProjectKeychain(ProjectKeychainTestMixin):
keychain_class = EnvironmentProjectKeychain
def setUp(self):
super(TestEnvironmentProjectKeychain, self).setUp()
self.env = EnvironmentVarGuard().__enter__()
self._clean_env(self.env)
self.env.set(
f"{self.keychain_class.org_var_prefix}test",
json.dumps(self.org_config.config),
)
self.env.set(
f"{self.keychain_class.service_var_prefix}connected_app",
json.dumps(self.services["connected_app"].config),
)
self.env.set(
f"{self.keychain_class.service_var_prefix}github",
json.dumps(self.services["github"].config),
)
def tearDown(self):
self.env.__exit__()
def _clean_env(self, env):
for key, value in list(env.items()):
if key.startswith(self.keychain_class.org_var_prefix):
del env[key]
for key, value in list(env.items()):
if key.startswith(self.keychain_class.service_var_prefix):
del env[key]
def test_load_app(self):
self.env["CUMULUSCI_CONNECTED_APP"] = "{}"
keychain = self.keychain_class(self.project_config, self.key)
self.assertIsInstance(keychain.app, ConnectedAppOAuthConfig)
def test_get_org(self):
keychain = self.keychain_class(self.project_config, self.key)
self.assertEqual(list(keychain.orgs.keys()), ["test"])
self.assertEqual(keychain.get_org("test").config, self.org_config.config)
def test_get_org_not_found(self):
self._clean_env(self.env)
super(TestEnvironmentProjectKeychain, self).test_get_org_not_found()
def test_list_orgs(self):
keychain = self.keychain_class(self.project_config, self.key)
self.assertEqual(keychain.list_orgs(), ["test"])
def test_list_orgs_empty(self):
self._clean_env(self.env)
self.env.set(
f"{self.keychain_class.service_var_prefix}connected_app",
json.dumps(self.services["connected_app"].config),
)
super(TestEnvironmentProjectKeychain, self).test_list_orgs_empty()
def test_load_scratch_org_config(self):
self._clean_env(self.env)
self.env.set(
f"{self.keychain_class.org_var_prefix}test",
json.dumps(self.scratch_org_config.config),
)
keychain = self.keychain_class(self.project_config, self.key)
self.assertEqual(keychain.list_orgs(), ["test"])
self.assertEqual(keychain.orgs["test"].__class__, ScratchOrgConfig)
def test_load_scratch_orgs_create_one(self):
self._clean_env(self.env)
super(TestEnvironmentProjectKeychain, self).test_load_scratch_orgs_create_one()
def test_load_scratch_orgs_none(self):
self._clean_env(self.env)
super(TestEnvironmentProjectKeychain, self).test_load_scratch_orgs_none()
def test_get_default_org(self):
org_config = self.org_config.config.copy()
org_config["default"] = True
self.env.set(
f"{self.keychain_class.org_var_prefix}test", json.dumps(org_config)
)
super(TestEnvironmentProjectKeychain, self).test_get_default_org()
def test_set_default_org(self):
""" The EnvironmentProjectKeychain does not persist default org settings """
org_config = self.org_config.config.copy()
self.env.set(
f"{self.keychain_class.org_var_prefix}test", json.dumps(org_config)
)
keychain = self.keychain_class(self.project_config, self.key)
keychain.set_default_org("test")
expected_org_config = self.org_config.config.copy()
expected_org_config["default"] = True
self.assertEqual(None, keychain.get_default_org()[1])
def test_set_and_get_scratch_org(self):
self._clean_env(self.env)
super(TestEnvironmentProjectKeychain, self).test_set_and_get_scratch_org()
class TestBaseEncryptedProjectKeychain(ProjectKeychainTestMixin):
keychain_class = BaseEncryptedProjectKeychain
def test_get_connected_app(self):
keychain = self.keychain_class(self.project_config, self.key)
keychain.app = keychain._encrypt_config(BaseConfig({}))
app = keychain.get_connected_app()
self.assertIsInstance(app, ConnectedAppOAuthConfig)
def test_decrypt_config__no_config(self):
keychain = self.keychain_class(self.project_config, self.key)
config = keychain._decrypt_config(OrgConfig, None, extra=["test", keychain])
self.assertEqual(config.__class__, OrgConfig)
self.assertEqual(config.config, {})
self.assertEqual(config.keychain, keychain)
def test_decrypt_config__no_config_2(self):
keychain = self.keychain_class(self.project_config, self.key)
config = keychain._decrypt_config(BaseConfig, None)
self.assertEqual(config.__class__, BaseConfig)
self.assertEqual(config.config, {})
def test_decrypt_config__wrong_key(self):
keychain = self.keychain_class(self.project_config, self.key)
keychain.set_org(self.org_config, False)
keychain.key = "x" * 16
with pytest.raises(KeychainKeyNotFound):
keychain.get_org("test")
# def test_decrypt_config__py2_bytes(self):
# keychain = self.keychain_class(self.project_config, self.key)
# s =
# config = keychain._decrypt_config(BaseConfig, s)
# assert config["tést"] == "ünicode"
def test_validate_key__not_set(self):
with self.assertRaises(KeychainKeyNotFound):
self.keychain_class(self.project_config, None)
def test_validate_key__wrong_length(self):
with self.assertRaises(ConfigError):
self.keychain_class(self.project_config, "1")
class TestEncryptedFileProjectKeychain(ProjectKeychainTestMixin):
keychain_class = EncryptedFileProjectKeychain
def setUp(self):
self.universal_config = UniversalConfig()
self.project_config = BaseProjectConfig(
self.universal_config, config={"noyaml": True}
)
self.project_config.config["services"] = {
"connected_app": {"attributes": {"test": {"required": True}}},
"github": {"attributes": {"git": {"required": True}, "password": {}}},
"not_configured": {"attributes": {"foo": {"required": True}}},
}
self.project_config.project__name = "TestProject"
self.project_name = "TestProject"
self.org_config = OrgConfig({"foo": "bar"}, "test")
self.scratch_org_config = ScratchOrgConfig(
{"foo": "bar", "scratch": True}, "test_scratch"
)
self.services = {
"connected_app": ServiceConfig({"test": "value"}),
"github": ServiceConfig({"git": "hub"}),
}
self.key = "0123456789123456"
self._mk_temp_home()
self._home_patch = mock.patch(
"pathlib.Path.home", return_value=Path(self.tempdir_home)
)
self._home_patch.__enter__()
self._mk_temp_project()
os.chdir(self.tempdir_project)
def tearDown(self):
self._home_patch.__exit__(None, None, None)
def _mk_temp_home(self):
self.tempdir_home = tempfile.mkdtemp()
global_config_dir = os.path.join(self.tempdir_home, ".cumulusci")
os.makedirs(global_config_dir)
def _mk_temp_project(self):
self.tempdir_project = tempfile.mkdtemp()
git_dir = os.path.join(self.tempdir_project, ".git")
os.makedirs(git_dir)
self._create_git_config()
def _create_git_config(self):
filename = os.path.join(self.tempdir_project, ".git", "config")
content = (
'[remote "origin"]\n'
+ f" url = <EMAIL>@github.com:TestOwner/{self.project_name}"
)
self._write_file(filename, content)
def _write_file(self, filename, content):
with open(filename, "w") as f:
f.write(content)
def test_set_service_github_project(self):
self.test_set_service_github(True)
def test_set_and_get_org_global(self):
self.test_set_and_get_org(True)
def test_set_and_get_org__universal_config(self):
keychain = self.keychain_class(self.universal_config, self.key)
keychain.set_org(self.org_config, False)
self.assertEqual(list(keychain.orgs.keys()), [])
def test_load_files__empty(self):
dummy_keychain = BaseEncryptedProjectKeychain(self.project_config, self.key)
os.makedirs(os.path.join(self.tempdir_home, ".cumulusci", self.project_name))
self._write_file(
os.path.join(self.tempdir_home, "test.org"),
dummy_keychain._encrypt_config(BaseConfig({"foo": "bar"})).decode("utf-8"),
)
keychain = self.keychain_class(self.project_config, self.key)
del keychain.config["orgs"]
with mock.patch.object(
self.keychain_class, "global_config_dir", Path(self.tempdir_home)
):
keychain._load_orgs()
self.assertIn("foo", keychain.get_org("test").config)
self.assertEqual(keychain.get_org("test").keychain, keychain)
def test_load_file(self):
self._write_file(os.path.join(self.tempdir_home, "config"), "foo")
keychain = self.keychain_class(self.project_config, self.key)
keychain._load_file(self.tempdir_home, "config", "from_file")
self.assertEqual("foo", keychain.config["from_file"])
def test_load_file__universal_config(self):
self._write_file(os.path.join(self.tempdir_home, "config"), "foo")
keychain = self.keychain_class(self.project_config, self.key)
keychain._load_file(self.tempdir_home, "config", "from_file")
self.assertEqual("foo", keychain.config["from_file"])
@mock.patch("cumulusci.core.utils.cleanup_org_cache_dirs")
def test_remove_org(self, cleanup_org_cache_dirs):
keychain = self.keychain_class(self.project_config, self.key)
keychain.set_org(self.org_config)
keychain.remove_org("test")
self.assertNotIn("test", keychain.orgs)
        cleanup_org_cache_dirs.assert_called_once_with(keychain, self.project_config)
def test_remove_org__not_found(self):
keychain = self.keychain_class(self.project_config, self.key)
keychain.orgs["test"] = mock.Mock()
with self.assertRaises(OrgNotFound):
keychain.remove_org("test")
def test_remove_org__global__not_found(self):
keychain = self.keychain_class(self.project_config, self.key)
keychain.orgs["test"] = mock.Mock()
with self.assertRaises(OrgNotFound):
keychain.remove_org("test", global_org=True)
def test_set_and_get_org_local_should_not_shadow_global(self):
keychain = self.keychain_class(self.project_config, self.key)
self.org_config.global_org = True
keychain.set_org(self.org_config, global_org=True)
assert ["test"] == list(keychain.orgs.keys())
assert isinstance(keychain.orgs["test"], GlobalOrg), keychain.orgs["test"]
assert self.org_config.config == keychain.get_org("test").config
assert Path(self.tempdir_home, ".cumulusci", "test.org").exists()
# check that it saves to the right place
with mock.patch(
"cumulusci.core.keychain.encrypted_file_project_keychain.open"
) as o:
self.org_config.save()
opened_filename = o.mock_calls[0][1][0]
assert ".cumulusci/test.org" in opened_filename.replace(
os.sep, "/"
), opened_filename
# check that it can be loaded in a fresh keychain
new_keychain = self.keychain_class(self.project_config, self.key)
org_config = new_keychain.get_org("test")
assert org_config.global_org
def test_cache_dir(self):
keychain = self.keychain_class(self.project_config, self.key)
assert keychain.cache_dir.name == ".cci"
def test_get_default_org__with_files(self):
keychain = self.keychain_class(self.project_config, self.key)
org_config = OrgConfig(self.org_config.config.copy(), "test", keychain=keychain)
org_config.save()
with open(self._default_org_path(), "w") as f:
f.write("test")
try:
self.assertEqual(keychain.get_default_org()[1].config, org_config.config)
finally:
self._default_org_path().unlink()
def test_get_default_org__with_files__missing_org(self):
keychain = self.keychain_class(self.project_config, self.key)
with open(self._default_org_path(), "w") as f:
f.write("should_not_exist")
assert self._default_org_path().exists()
assert keychain.get_default_org() == (None, None)
assert not self._default_org_path().exists()
@mock.patch("sarge.Command")
def test_set_default_org__with_files(self, Command):
keychain = self.keychain_class(self.project_config, self.key)
org_config = OrgConfig(self.org_config.config.copy(), "test")
keychain.set_org(org_config)
keychain.set_default_org("test")
with open(self._default_org_path()) as f:
assert f.read() == "test"
self._default_org_path().unlink()
@mock.patch("sarge.Command")
def test_unset_default_org__with_files(self, Command):
keychain = self.keychain_class(self.project_config, self.key)
org_config = self.org_config.config.copy()
org_config = OrgConfig(org_config, "test")
keychain.set_org(org_config)
keychain.set_default_org("test")
keychain.unset_default_org()
self.assertEqual(keychain.get_default_org()[1], None)
assert not self._default_org_path().exists()
def _default_org_path(self):
return Path(self.tempdir_home) / ".cumulusci/TestProject/DEFAULT_ORG.txt"
# old way of finding defaults used contents of the files themselves
    # we should preserve backwards compatibility for a few months
def test_get_default_org__file_missing_fallback(self):
keychain = self.keychain_class(self.project_config, self.key)
org_config = OrgConfig(self.org_config.config.copy(), "test", keychain=keychain)
org_config.config["default"] = True
org_config.save()
self.assertEqual(keychain.get_default_org()[1].config, org_config.config)
def test_get_default_org__outside_project(self):
keychain = self.keychain_class(self.universal_config, self.key)
assert keychain.get_default_org() == (None, None)
```
#### File: core/tests/test_sfdx.py
```python
from unittest import mock
import io
import sys
import pytest
from cumulusci.core.exceptions import SfdxOrgException
from cumulusci.core.sfdx import get_default_devhub_username
from cumulusci.core.sfdx import sfdx
class TestSfdx:
@pytest.mark.skipif(
sys.platform.startswith("win"), reason="This tests quoting on POSIX systems"
)
@mock.patch("sarge.Command")
def test_posix_quoting(self, Command):
sfdx("cmd", args=["a'b"])
cmd = Command.call_args[0][0]
assert cmd == r"sfdx cmd 'a'\''b'"
@pytest.mark.skipif(
not sys.platform.startswith("win"),
reason="This tests quoting on Windows systems",
)
@mock.patch("sarge.Command")
def test_windows_quoting(self, Command):
sfdx("cmd", args=['a"b'], access_token="token")
cmd = Command.call_args[0][0]
assert cmd == r'sfdx cmd "a\"b" -u token'
@mock.patch("sarge.Command")
def test_check_return(self, Command):
Command.return_value.returncode = 1
Command.return_value.stderr = io.BytesIO(b"Egads!")
with pytest.raises(Exception) as exc_info:
sfdx("cmd", check_return=True)
assert str(exc_info.value) == "Command exited with return code 1:\nEgads!"
@mock.patch("sarge.Command")
def test_get_default_devhub_username(Command):
Command.return_value.returncode = 0
Command.return_value.stdout = io.BytesIO(
b'{"result": [{"value": "<EMAIL>"}]}'
)
result = get_default_devhub_username()
assert result == "<EMAIL>"
@mock.patch("sarge.Command")
def test_get_default_devhub_username__no_result(Command):
Command.return_value.returncode = 0
Command.return_value.stdout = io.BytesIO(b"{}")
with pytest.raises(SfdxOrgException):
get_default_devhub_username()
```
#### File: cumulusci/salesforce_api/utils.py
```python
import simple_salesforce
from cumulusci import __version__
from cumulusci.core.exceptions import ServiceNotConfigured, ServiceNotValid
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
CALL_OPTS_HEADER_KEY = "Sforce-Call-Options"
def get_simple_salesforce_connection(
project_config, org_config, api_version=None, base_url: str = None
):
# Retry on long-running metadeploy jobs
retries = Retry(total=5, status_forcelist=(502, 503, 504), backoff_factor=0.3)
adapter = HTTPAdapter(max_retries=retries)
sf = simple_salesforce.Salesforce(
instance_url=org_config.instance_url,
session_id=org_config.access_token,
version=api_version or project_config.project__package__api_version,
)
try:
app = project_config.keychain.get_service("connectedapp")
client_name = app.client_id
except (ServiceNotValid, ServiceNotConfigured):
client_name = "CumulusCI/{}".format(__version__)
sf.headers.setdefault(CALL_OPTS_HEADER_KEY, "client={}".format(client_name))
sf.session.mount("http://", adapter)
sf.session.mount("https://", adapter)
if base_url:
base_url = (
base_url.strip("/") + "/"
        )  # exactly one trailing slash and no leading slashes
sf.base_url += base_url
return sf
```
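A minimal usage sketch for `get_simple_salesforce_connection` above, assuming a CumulusCI install. The org and project config objects are stand-in mocks, and the instance URL, token, and API version are made-up values; in a real task these come from the project keychain.
```python
from unittest import mock

from cumulusci.core.exceptions import ServiceNotConfigured
from cumulusci.salesforce_api.utils import get_simple_salesforce_connection

# Stand-in config objects; a real task would pull these from the keychain.
org_config = mock.Mock(
    instance_url="https://example.my.salesforce.com", access_token="FAKE_TOKEN"
)
project_config = mock.Mock()
project_config.project__package__api_version = "45.0"
# Simulate a project with no connected app service, so the default
# "CumulusCI/<version>" client name ends up in the Sforce-Call-Options header.
project_config.keychain.get_service.side_effect = ServiceNotConfigured

sf = get_simple_salesforce_connection(project_config, org_config, base_url="tooling")
print(sf.base_url)  # .../services/data/v45.0/tooling/
print(sf.headers["Sforce-Call-Options"])  # client=CumulusCI/<version>
```
Because the retry adapter is mounted on both "http://" and "https://", transient 502/503/504 responses from long-running requests are retried with backoff instead of failing immediately.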
#### File: bulkdata/tests/test_generatemapping.py
```python
import json
import unittest
from unittest import mock
import responses
import yaml
from tempfile import TemporaryDirectory
from pathlib import Path
import pytest
from cumulusci.tasks.bulkdata import GenerateMapping
from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.tasks.bulkdata.generate_mapping import FieldData
from cumulusci.utils import temporary_dir
from cumulusci.tasks.bulkdata.tests.utils import _make_task
class TestMappingGenerator(unittest.TestCase):
def test_defaults_options(self):
t = _make_task(GenerateMapping, {"options": {"path": "t"}})
self.assertEqual([], t.options["ignore"])
self.assertEqual("", t.options["namespace_prefix"])
self.assertEqual("ask", t.options["break_cycles"])
self.assertEqual([], t.options["include"])
def test_postfixes_underscores_to_namespace(self):
t = _make_task(
GenerateMapping, {"options": {"namespace_prefix": "t", "path": "t"}}
)
self.assertEqual("t__", t.options["namespace_prefix"])
def test_splits_ignore_string(self):
t = _make_task(
GenerateMapping, {"options": {"ignore": "Account, Contact", "path": "t"}}
)
self.assertEqual(["Account", "Contact"], t.options["ignore"])
def test_accepts_ignore_list(self):
t = _make_task(
GenerateMapping,
{"options": {"ignore": ["Account", "Contact"], "path": "t"}},
)
self.assertEqual(["Account", "Contact"], t.options["ignore"])
def test_accepts_include_list(self):
t = _make_task(
GenerateMapping, {"options": {"include": ["Foo", "Bar"], "path": "t"}}
)
self.assertEqual(["Foo", "Bar"], t.options["include"])
@responses.activate
def test_checks_include_list(self):
t = _make_task(
GenerateMapping, {"options": {"include": ["Foo", "Bar"], "path": "t"}}
)
t.project_config.project__package__api_version = "45.0"
self._prepare_describe_mock(t, {})
t._init_task()
with pytest.raises(TaskOptionsError):
t._collect_objects()
def test_is_any_custom_api_name(self):
t = _make_task(GenerateMapping, {"options": {"path": "t"}})
self.assertTrue(t._is_any_custom_api_name("Custom__c"))
self.assertFalse(t._is_any_custom_api_name("Standard"))
def test_is_our_custom_api_name(self):
t = _make_task(GenerateMapping, {"options": {"path": "t"}})
self.assertTrue(t._is_our_custom_api_name("Custom__c"))
self.assertFalse(t._is_our_custom_api_name("Standard"))
self.assertFalse(t._is_our_custom_api_name("t__Custom__c"))
self.assertFalse(t._is_our_custom_api_name("f__Custom__c"))
t.options["namespace_prefix"] = "t__"
self.assertTrue(t._is_our_custom_api_name("Custom__c"))
self.assertTrue(t._is_our_custom_api_name("t__Custom__c"))
self.assertFalse(t._is_our_custom_api_name("f__Custom__c"))
def test_is_core_field(self):
t = _make_task(GenerateMapping, {"options": {"path": "t"}})
self.assertTrue(t._is_core_field("Name"))
self.assertFalse(t._is_core_field("Custom__c"))
def test_is_object_mappable(self):
t = _make_task(GenerateMapping, {"options": {"ignore": "Account", "path": "t"}})
self.assertTrue(
t._is_object_mappable({"name": "Contact", "customSetting": False})
)
self.assertFalse(
t._is_object_mappable({"name": "Account", "customSetting": False})
)
self.assertFalse(
t._is_object_mappable(
{"name": "Contact__ChangeEvent", "customSetting": False}
)
)
self.assertFalse(
t._is_object_mappable({"name": "Custom__c", "customSetting": True})
)
def test_is_field_mappable(self):
t = _make_task(
GenerateMapping, {"options": {"ignore": "Account.ParentId", "path": "t"}}
)
t.mapping_objects = ["Account", "Contact"]
self.assertTrue(
t._is_field_mappable(
"Account",
{"name": "Name", "type": "string", "label": "Name", "createable": True},
)
)
self.assertFalse(
t._is_field_mappable(
"Account",
{"name": "Name", "type": "base64", "label": "Name", "createable": True},
)
)
self.assertFalse(
t._is_field_mappable(
"Account",
{
"name": "Name",
"type": "string",
"label": "Name (Deprecated)",
"createable": True,
},
)
)
self.assertFalse(
t._is_field_mappable(
"Account",
{
"name": "ParentId",
"type": "reference",
"label": "Parent",
"createable": True,
"referenceTo": ["Account"],
},
)
)
self.assertFalse(
t._is_field_mappable(
"Account",
{
"name": "Name",
"type": "string",
"label": "Name",
"createable": False,
},
)
)
self.assertFalse(
t._is_field_mappable(
"Contact",
{
"name": "OwnerId",
"type": "reference",
"label": "Owner",
"createable": True,
"referenceTo": ["User", "Group"],
},
)
)
def test_has_our_custom_fields(self):
t = _make_task(GenerateMapping, {"options": {"path": "t"}})
self.assertTrue(t._has_our_custom_fields({"fields": [{"name": "Custom__c"}]}))
self.assertTrue(
t._has_our_custom_fields(
{"fields": [{"name": "Custom__c"}, {"name": "Standard"}]}
)
)
self.assertFalse(t._has_our_custom_fields({"fields": [{"name": "Standard"}]}))
self.assertFalse(t._has_our_custom_fields({"fields": []}))
def test_is_lookup_to_included_object(self):
t = _make_task(GenerateMapping, {"options": {"path": "t"}})
t.mapping_objects = ["Account"]
self.assertTrue(
t._is_lookup_to_included_object(
{"type": "reference", "referenceTo": ["Account"]}
)
)
self.assertFalse(
t._is_lookup_to_included_object(
{"type": "reference", "referenceTo": ["Contact"]}
)
)
self.assertFalse(
t._is_lookup_to_included_object(
{"type": "reference", "referenceTo": ["Account", "Contact"]}
)
)
def _prepare_describe_mock(self, task, describe_data):
responses.add(
method="GET",
url=f"{task.org_config.instance_url}/services/data/v45.0/sobjects",
body=json.dumps(
{
"sobjects": [
{"name": s, "customSetting": False} for s in describe_data
]
}
),
status=200,
)
for s in describe_data:
body = {"name": s, "customSetting": False}
body.update(describe_data[s])
responses.add(
method="GET",
url=f"{task.org_config.instance_url}/services/data/v45.0/sobjects/{s}/describe",
body=json.dumps(body),
status=200,
)
def _mock_field(self, name, field_type="string", **kwargs):
field_data = {
"name": name,
"type": field_type,
"createable": True,
"nillable": True,
"label": name,
}
field_data.update(kwargs)
return field_data
@responses.activate
def test_run_task(self):
t = _make_task(GenerateMapping, {"options": {"path": "mapping.yaml"}})
t.project_config.project__package__api_version = "45.0"
describe_data = {
"Parent": {
"fields": [self._mock_field("Id"), self._mock_field("Custom__c")]
},
"Child__c": {
"fields": [
self._mock_field("Id"),
self._mock_field(
"Account__c",
field_type="reference",
referenceTo=["Parent"],
relationshipOrder=None,
),
]
},
}
self._prepare_describe_mock(t, describe_data)
with temporary_dir():
t()
with open("mapping.yaml", "r") as fh:
content = yaml.safe_load(fh)
self.assertEqual(["Insert Parent", "Insert Child__c"], list(content.keys()))
self.assertEqual("Parent", t.mapping["Insert Parent"]["sf_object"])
self.assertEqual(["Custom__c"], t.mapping["Insert Parent"]["fields"])
self.assertEqual("Child__c", t.mapping["Insert Child__c"]["sf_object"])
assert "fields" not in t.mapping["Insert Child__c"]
self.assertEqual(
["Account__c"], list(t.mapping["Insert Child__c"]["lookups"].keys())
)
self.assertEqual(
"Parent", t.mapping["Insert Child__c"]["lookups"]["Account__c"]["table"]
)
@responses.activate
def test_collect_objects__simple_custom_objects(self):
t = _make_task(GenerateMapping, {"options": {"path": "t"}})
t.project_config.project__package__api_version = "45.0"
describe_data = {
"Account": {
"fields": [self._mock_field("Name"), self._mock_field("Custom__c")]
},
"Contact": {"fields": [self._mock_field("Name")]},
"Custom__c": {
"fields": [self._mock_field("Name"), self._mock_field("Custom__c")]
},
"User": {"fields": [self._mock_field("Name")]},
}
self._prepare_describe_mock(t, describe_data)
t._init_task()
t._collect_objects()
self.assertEqual(set(["Account", "Custom__c"]), set(t.mapping_objects))
@responses.activate
def test_collect_objects__force_include_objects(self):
t = _make_task(
GenerateMapping, {"options": {"path": "t", "include": ["Contact", "User"]}}
)
t.project_config.project__package__api_version = "45.0"
describe_data = {
"Account": {
"fields": [self._mock_field("Name"), self._mock_field("Custom__c")]
},
"Contact": {"fields": [self._mock_field("Name")]},
"Custom__c": {
"fields": [self._mock_field("Name"), self._mock_field("Custom__c")]
},
"User": {"fields": [self._mock_field("Name")]},
}
self._prepare_describe_mock(t, describe_data)
t._init_task()
t._collect_objects()
self.assertEqual(
set(["Account", "Custom__c", "Contact", "User"]), set(t.mapping_objects)
)
@responses.activate
def test_collect_objects__force_include_objects__already_included(self):
t = _make_task(
GenerateMapping,
{"options": {"path": "t", "include": ["Contact", "Custom__c"]}},
)
t.project_config.project__package__api_version = "45.0"
describe_data = {
"Account": {
"fields": [self._mock_field("Name"), self._mock_field("Custom__c")]
},
"Contact": {"fields": [self._mock_field("Name")]},
"Custom__c": {
"fields": [self._mock_field("Name"), self._mock_field("Custom__c")]
},
"User": {"fields": [self._mock_field("Name")]},
}
self._prepare_describe_mock(t, describe_data)
t._init_task()
t._collect_objects()
assert len(t.mapping_objects) == 3
self.assertEqual(
set(["Account", "Custom__c", "Contact"]), set(t.mapping_objects)
)
@responses.activate
def test_collect_objects__custom_lookup_fields(self):
t = _make_task(GenerateMapping, {"options": {"path": "t"}})
t.project_config.project__package__api_version = "45.0"
describe_data = {
"Account": {
"fields": [self._mock_field("Name"), self._mock_field("Custom__c")]
},
"Contact": {"fields": [self._mock_field("Name")]},
"Custom__c": {
"fields": [
self._mock_field("Name"),
self._mock_field("Custom__c"),
self._mock_field(
"Lookup__c",
field_type="reference",
relationshipOrder=None,
referenceTo=["Contact"],
),
]
},
}
self._prepare_describe_mock(t, describe_data)
t._init_task()
t._collect_objects()
self.assertEqual(
set(["Account", "Custom__c", "Contact"]), set(t.mapping_objects)
)
@responses.activate
def test_collect_objects__master_detail_fields(self):
t = _make_task(GenerateMapping, {"options": {"path": "t"}})
t.project_config.project__package__api_version = "45.0"
describe_data = {
"Account": {
"fields": [self._mock_field("Name"), self._mock_field("Custom__c")]
},
"Opportunity": {"fields": [self._mock_field("Name")]},
"OpportunityLineItem": {
"fields": [
self._mock_field("Name"),
self._mock_field("Custom__c"),
self._mock_field(
"OpportunityId",
field_type="reference",
relationshipOrder=1,
referenceTo=["Opportunity"],
),
]
},
}
self._prepare_describe_mock(t, describe_data)
t._init_task()
t._collect_objects()
self.assertEqual(
set(["Account", "OpportunityLineItem", "Opportunity"]),
set(t.mapping_objects),
)
@responses.activate
def test_collect_objects__duplicate_references(self):
t = _make_task(GenerateMapping, {"options": {"path": "t"}})
t.project_config.project__package__api_version = "45.0"
describe_data = {
"Account": {
"fields": [self._mock_field("Name"), self._mock_field("Custom__c")]
},
"Opportunity": {"fields": [self._mock_field("Name")]},
"OpportunityLineItem": {
"fields": [
self._mock_field("Name"),
self._mock_field("Custom__c"),
self._mock_field(
"OpportunityId",
field_type="reference",
relationshipOrder=1,
referenceTo=["Opportunity"],
),
self._mock_field(
"CustomLookup__c",
field_type="reference",
relationshipOrder=None,
referenceTo=["Opportunity"],
),
]
},
}
self._prepare_describe_mock(t, describe_data)
t._init_task()
t._collect_objects()
self.assertEqual(
set(["Account", "OpportunityLineItem", "Opportunity"]),
set(t.mapping_objects),
)
def test_build_schema(self):
t = _make_task(GenerateMapping, {"options": {"path": "t"}})
t.mapping_objects = ["Account", "Opportunity", "Child__c"]
stage_name = self._mock_field("StageName")
stage_name["nillable"] = False
t.describes = {
"Account": {
"fields": [self._mock_field("Name"), self._mock_field("Industry")]
},
"Opportunity": {"fields": [self._mock_field("Name"), stage_name]},
"Child__c": {
"fields": [
self._mock_field("Name"),
self._mock_field("Test__c"),
self._mock_field("Attachment__c", field_type="base64"),
]
},
}
t._build_schema()
self.assertEqual(
{
"Account": {"Name": self._mock_field("Name")},
"Opportunity": {
"Name": self._mock_field("Name"),
"StageName": stage_name,
},
"Child__c": {
"Name": self._mock_field("Name"),
"Test__c": self._mock_field("Test__c"),
},
},
t.schema,
)
def test_build_schema__tracks_references(self):
t = _make_task(GenerateMapping, {"options": {"path": "t"}})
t.mapping_objects = ["Account", "Opportunity"]
t.describes = {
"Account": {"fields": [self._mock_field("Name")]},
"Opportunity": {
"fields": [
self._mock_field("Name"),
self._mock_field(
"AccountId",
field_type="reference",
referenceTo=["Account"],
relationshipOrder=1,
),
]
},
}
t._build_schema()
self.assertEqual(
{"Opportunity": {"Account": {"AccountId": FieldData({"nillable": True})}}},
dict(t.refs),
)
def test_build_schema__includes_recordtypeid(self):
t = _make_task(GenerateMapping, {"options": {"path": "t"}})
t.mapping_objects = ["Account", "Opportunity"]
t.describes = {
"Account": {"fields": [self._mock_field("Name")]},
"Opportunity": {
"fields": [
self._mock_field("Name"),
self._mock_field(
"AccountId",
field_type="reference",
referenceTo=["Account"],
relationshipOrder=1,
),
self._mock_field("RecordTypeId"),
],
"recordTypeInfos": [{"Name": "Master"}, {"Name": "Donation"}],
},
}
t._build_schema()
self.assertIn("RecordTypeId", t.schema["Opportunity"])
self.assertNotIn("RecordTypeId", t.schema["Account"])
@mock.patch("click.prompt")
def test_build_mapping(self, prompt):
t = _make_task(GenerateMapping, {"options": {"path": "t"}})
prompt.return_value = "Account"
t.schema = {
"Account": {
"Name": self._mock_field("Name"),
"Dependent__c": self._mock_field(
"Dependent__c", field_type="reference", referenceTo=["Child__c"]
),
},
"Child__c": {
"Name": self._mock_field("Name"),
"Account__c": self._mock_field(
"Account__c", field_type="reference", referenceTo=["Account"]
),
"Self__c": self._mock_field(
"Self__c", field_type="reference", referenceTo=["Child__c"]
),
},
}
t.refs = {
"Child__c": {"Account": {"Account__c": FieldData({"nillable": True})}},
"Account": {"Child__c": {"Dependent__c": FieldData({"nillable": True})}},
}
t._build_mapping()
self.assertEqual(["Insert Account", "Insert Child__c"], list(t.mapping.keys()))
self.assertEqual("Account", t.mapping["Insert Account"]["sf_object"])
self.assertEqual(["Name"], t.mapping["Insert Account"]["fields"])
self.assertEqual(
["Dependent__c"], list(t.mapping["Insert Account"]["lookups"].keys())
)
self.assertEqual(
"Child__c", t.mapping["Insert Account"]["lookups"]["Dependent__c"]["table"]
)
self.assertEqual("Child__c", t.mapping["Insert Child__c"]["sf_object"])
self.assertEqual(["Name"], t.mapping["Insert Child__c"]["fields"])
self.assertEqual(
["Account__c", "Self__c"],
list(t.mapping["Insert Child__c"]["lookups"].keys()),
)
self.assertEqual(
"Account", t.mapping["Insert Child__c"]["lookups"]["Account__c"]["table"]
)
self.assertEqual(
"Child__c", t.mapping["Insert Child__c"]["lookups"]["Self__c"]["table"]
)
@mock.patch("click.prompt")
def test_build_mapping__strip_namespace(self, prompt):
t = _make_task(GenerateMapping, {"options": {"path": "t"}})
t.project_config.project__package__namespace = "ns"
prompt.return_value = "ns__Parent__c"
t.schema = {
"ns__Parent__c": {
"Name": self._mock_field("Name"),
"ns__Dependent__c": self._mock_field(
"ns__Dependent__c",
field_type="reference",
referenceTo=["ns__Child__c"],
),
},
"ns__Child__c": {
"Name": self._mock_field("Name"),
"ns__Parent__c": self._mock_field(
"ns__Parent__c",
field_type="reference",
referenceTo=["ns__Parent__c"],
),
"ns__Self__c": self._mock_field(
"ns__Self__c", field_type="reference", referenceTo=["ns__Child__c"]
),
},
}
t.refs = {
"ns__Child__c": {
"ns__Parent__c": {"ns__Parent__c": FieldData({"nillable": False})}
},
"ns__Parent__c": {
"ns__Child__c": {"ns__Dependent__c": FieldData({"nillable": True})}
},
}
t._build_mapping()
self.assertEqual(
["Insert Parent__c", "Insert Child__c"], list(t.mapping.keys())
)
self.assertEqual("Parent__c", t.mapping["Insert Parent__c"]["sf_object"])
self.assertEqual(["Name"], t.mapping["Insert Parent__c"]["fields"])
self.assertEqual(
["Dependent__c"], list(t.mapping["Insert Parent__c"]["lookups"].keys())
)
self.assertEqual(
"Child__c",
t.mapping["Insert Parent__c"]["lookups"]["Dependent__c"]["table"],
)
self.assertEqual("Child__c", t.mapping["Insert Child__c"]["sf_object"])
self.assertEqual(["Name"], t.mapping["Insert Child__c"]["fields"])
self.assertEqual(
["Parent__c", "Self__c"],
list(t.mapping["Insert Child__c"]["lookups"].keys()),
)
self.assertEqual(
"Parent__c", t.mapping["Insert Child__c"]["lookups"]["Parent__c"]["table"]
)
self.assertEqual(
"Child__c", t.mapping["Insert Child__c"]["lookups"]["Self__c"]["table"]
)
@mock.patch("click.prompt")
def test_build_mapping__no_strip_namespace_if_dup_component(self, prompt):
t = _make_task(GenerateMapping, {"options": {"path": "t"}})
t.project_config.project__package__namespace = "ns"
prompt.return_value = "ns__Parent__c"
t.schema = {
"ns__Parent__c": {"Name": self._mock_field("Name")},
"ns__Child__c": {
"Name": self._mock_field("Name"),
"Test__c": self._mock_field("Test__c"),
"ns__Test__c": self._mock_field("ns__Test__c"),
"ns__Parent__c": self._mock_field(
"ns__Parent__c",
field_type="reference",
referenceTo=["ns__Parent__c"],
),
"Parent__c": self._mock_field(
"Parent__c", field_type="reference", referenceTo=["ns__Child__c"]
),
},
"Child__c": {"Name": self._mock_field("Name")},
}
t.refs = {"ns__Child__c": {"ns__Parent__c": {"ns__Parent__c": FieldData({})}}}
t._build_mapping()
self.assertEqual(
set(["Insert Parent__c", "Insert ns__Child__c", "Insert Child__c"]),
set(t.mapping.keys()),
)
self.assertEqual("ns__Child__c", t.mapping["Insert ns__Child__c"]["sf_object"])
self.assertEqual(
["Name", "Test__c", "ns__Test__c"],
t.mapping["Insert ns__Child__c"]["fields"],
)
self.assertEqual(
set(["ns__Parent__c", "Parent__c"]),
set(t.mapping["Insert ns__Child__c"]["lookups"].keys()),
)
self.assertEqual(
"Parent__c",
t.mapping["Insert ns__Child__c"]["lookups"]["ns__Parent__c"]["table"],
)
self.assertEqual(
"ns__Child__c",
t.mapping["Insert ns__Child__c"]["lookups"]["Parent__c"]["table"],
)
self.assertEqual("Child__c", t.mapping["Insert Child__c"]["sf_object"])
self.assertEqual(["Name"], t.mapping["Insert Child__c"]["fields"])
def test_build_mapping__warns_polymorphic_lookups(self):
t = _make_task(GenerateMapping, {"options": {"path": "t"}})
t.mapping_objects = ["Account", "Contact", "Custom__c"]
t.schema = {
"Account": {"Name": self._mock_field("Name")},
"Contact": {"Name": self._mock_field("Name")},
"Custom__c": {
"Name": self._mock_field("Name"),
"PolyLookup__c": self._mock_field(
"PolyLookup__c",
field_type="reference",
referenceTo=["Account", "Contact"],
),
},
}
t.refs = {
"Custom__c": {
"Account": {"PolyLookup__c": FieldData({})},
"Contact": {"PolyLookup__c": FieldData({})},
}
}
t.logger = mock.Mock()
t._build_mapping()
t.logger.warning.assert_called_once_with(
"Field Custom__c.PolyLookup__c is a polymorphic lookup, which is not supported"
)
def test_split_dependencies__no_cycles(self):
t = _make_task(GenerateMapping, {"options": {"path": "t"}})
stack = t._split_dependencies(
["Account", "Contact", "Opportunity", "Custom__c"],
{
"Contact": {"Account": {"AccountId": FieldData({})}},
"Opportunity": {
"Account": {"AccountId": FieldData({})},
"Contact": {"Primary_Contact__c": FieldData({})},
},
"Custom__c": {
"Account": {"Account__c": FieldData({})},
"Contact": {"Contact__c": FieldData({})},
"Opportunity": {"Opp__c": FieldData({})},
},
},
)
self.assertEqual(["Account", "Contact", "Opportunity", "Custom__c"], stack)
@mock.patch("click.prompt")
def test_split_dependencies__interviews_for_cycles(self, prompt):
t = _make_task(GenerateMapping, {"options": {"path": "t"}})
prompt.return_value = "Account"
self.assertEqual(
["Custom__c", "Account", "Contact", "Opportunity"],
t._split_dependencies(
["Account", "Contact", "Opportunity", "Custom__c"],
{
"Account": {
"Contact": {"Primary_Contact__c": FieldData({"nillable": True})}
},
"Contact": {
"Account": {"AccountId": FieldData({"nillable": True})}
},
"Opportunity": {
"Account": {"AccountId": FieldData({"nillable": True})},
"Contact": {
"Primary_Contact__c": FieldData({"nillable": True})
},
},
},
),
)
@mock.patch("click.prompt")
@mock.patch("random.choice")
def test_split_dependencies__auto_pick_cycles_priortize_Account(
self, choice, prompt
):
t = _make_task(
GenerateMapping, {"options": {"path": "t", "break_cycles": "auto"}}
)
prompt.side_effect = AssertionError("Shouldn't be called")
choice.side_effect = AssertionError("Shouldn't be called")
split_dependencies = t._split_dependencies(
["Account", "Contact", "Opportunity", "Custom__c"],
{
"Account": {
"Contact": {"Primary_Contact__c": FieldData({"nillable": False})}
},
"Contact": {"Account": {"AccountId": FieldData({"nillable": False})}},
"Opportunity": {
"Account": {"AccountId": FieldData({"nillable": False})},
"Contact": {"Primary_Contact__c": FieldData({"nillable": False})},
},
},
)
self.assertEqual(
["Custom__c", "Account", "Contact", "Opportunity"], split_dependencies
)
assert not choice.mock_calls
@mock.patch("click.prompt")
def test_split_dependencies__auto_pick_cycles_randomly(self, prompt):
t = _make_task(
GenerateMapping, {"options": {"path": "t", "break_cycles": "auto"}}
)
prompt.side_effect = AssertionError("Shouldn't be called")
split_dependencies = t._split_dependencies(
["Account", "Contact", "Opportunity", "Custom__c"],
{
"Account": {
"Custom__c": {
"Non_Nillable_Custom__c": FieldData({"nillable": False})
}
},
"Custom__c": {"Account": {"AccountId": FieldData({"nillable": False})}},
},
)
self.assertEqual(
["Contact", "Opportunity", "Account", "Custom__c"], split_dependencies
)
@mock.patch("click.prompt")
@mock.patch("random.choice")
def test_split_dependencies__auto_pick_cycles_by_relationship_type(
self, random_choice, prompt
):
t = _make_task(
GenerateMapping, {"options": {"path": "t", "break_cycles": "auto"}}
)
prompt.side_effect = AssertionError("Shouldn't be called")
random_choice.side_effect = AssertionError("Shouldn't be called")
split_dependencies = t._split_dependencies(
["AccountLike__c", "ContactLike__c", "OpportunityLike__c", "Custom__c"],
{
# Primary_Contact__c is not nillable, so ContactLike__c must be loaded before
# AccountLike__c despite the cycle
"AccountLike__c": {
"ContactLike__c": {
"Primary_Contact__c": FieldData({"nillable": False})
}
},
"ContactLike__c": {
"AccountLike__c": {"AccountId": FieldData({"nillable": True})}
},
"OpportunityLike__c": {
"AccountLike__c": {"AccountId": FieldData({"nillable": True})},
"ContactLike__c": {
"Primary_Contact__c": FieldData({"nillable": True})
},
},
},
)
self.assertEqual(
["Custom__c", "ContactLike__c", "AccountLike__c", "OpportunityLike__c"],
split_dependencies,
)
random_choice.assert_not_called()
@mock.patch("click.prompt")
def test_split_dependencies__auto_pick_cycles(self, prompt):
t = _make_task(
GenerateMapping, {"options": {"path": "t", "break_cycles": "auto"}}
)
        prompt.side_effect = AssertionError("Shouldn't be called")
self.assertEqual(
set(["Custom__c", "Account", "Contact", "Opportunity"]),
set(
t._split_dependencies(
["Account", "Contact", "Opportunity", "Custom__c"],
{
"Account": {
"Contact": {
"Primary_Contact__c": FieldData({"nillable": True})
}
},
"Contact": {
"Account": {"AccountId": FieldData({"nillable": True})}
},
"Opportunity": {
"Account": {"AccountId": FieldData({"nillable": True})},
"Contact": {
"Primary_Contact__c": FieldData({"nillable": True})
},
},
},
)
),
)
@mock.patch("click.prompt")
def test_split_dependencies__ask_pick_cycles(self, prompt):
t = _make_task(
GenerateMapping, {"options": {"path": "t", "break_cycles": "ask"}}
)
prompt.return_value = "Custom__c"
self.assertEqual(
set(["Custom__c", "Account", "Contact", "Opportunity"]),
set(
t._split_dependencies(
["Account", "Contact", "Opportunity", "Custom__c"],
{
"Account": {
"Custom__c": {"Custom__c": FieldData({"nillable": False})}
},
"Custom__c": {
"Account": {"Account__c": FieldData({"nillable": False})}
},
},
)
),
)
prompt.assert_called_once()
assert prompt.mock_calls
def test_options_error(self):
with pytest.raises(TaskOptionsError):
_make_task(
GenerateMapping, {"options": {"path": "t", "break_cycles": "foo"}}
)
@pytest.mark.integration_test()
class TestIntegrationGenerateMapping:
@pytest.mark.vcr()
def test_simple_generate(self, create_task):
"Generate a mapping against a provided org."
with TemporaryDirectory() as t:
tempfile = Path(t) / "tempfile.mapping.yml"
task = create_task(GenerateMapping, {"path": tempfile})
assert not Path(tempfile).exists()
task()
assert Path(tempfile).exists()
@pytest.mark.vcr()
def test_generate_with_cycles(self, create_task):
"Generate a mapping that necessarily includes some reference cycles"
with TemporaryDirectory() as t:
tempfile = Path(t) / "tempfile.mapping.yml"
task = create_task(
GenerateMapping,
{
"path": tempfile,
"include": [
"Account",
"Contact",
"Opportunity",
"OpportunityContactRole",
],
},
)
assert not Path(tempfile).exists()
task()
assert Path(tempfile).exists()
@pytest.mark.vcr()
def test_big_generate(self, create_task, sf):
"Generate a large mapping that includes every reachable object"
with TemporaryDirectory() as t:
tempfile = Path(t) / "tempfile.mapping.yml"
every_obj = [obj["name"] for obj in sf.describe()["sobjects"]]
task = create_task(
GenerateMapping, {"path": tempfile, "include": every_obj}
)
assert not Path(tempfile).exists()
task()
assert Path(tempfile).exists()
```
#### File: push/tests/test_push_api.py
```python
import datetime
import json
from unittest import mock
import pytest
from simple_salesforce import SalesforceMalformedRequest
from cumulusci.tasks.push.push_api import (
BasePushApiObject,
MetadataPackage,
MetadataPackageVersion,
PackagePushError,
PackagePushJob,
PackagePushRequest,
PackageSubscriber,
SalesforcePushApi,
batch_list,
)
NAME = "Chewbacca"
SF_ID = "033xxxxxxxxx"
PUSH_API = "push_api"
NAMESPACE = "namespace"
ORG_KEY = "bar"
@pytest.fixture
def sf_push_api():
return SalesforcePushApi(sf=mock.Mock(), logger=mock.Mock())
@pytest.fixture
def metadata_package():
return MetadataPackage(
push_api=mock.MagicMock(), name=NAME, sf_id=SF_ID, namespace=NAMESPACE
)
@pytest.fixture
def metadata_package_version(metadata_package):
return MetadataPackageVersion(
push_api=mock.MagicMock(),
package=metadata_package,
name=NAME,
sf_id=SF_ID,
state="Beta",
major="1",
minor="2",
patch="3",
build="4",
)
@pytest.fixture
def package_push_job():
return PackagePushJob(
push_api=mock.MagicMock(),
request="",
org="00DS0000003TJJ6MAO",
status="Succeeded",
sf_id=SF_ID,
)
@pytest.fixture
def package_subscriber():
return PackageSubscriber(
push_api=mock.MagicMock(),
version="1.2.3",
status="Succeeded",
org_name="foo",
org_key="bar",
org_status="Succeeded",
org_type="Sandbox",
sf_id=SF_ID,
)
@pytest.fixture
def package_push_error():
return PackagePushError(
push_api="foo",
sf_id=SF_ID,
job="Foo",
severity="high",
error_type="bar",
title="foo_bar",
message="The foo hit the bar",
details="foo bar, foo, foo bar",
)
@pytest.fixture
def package_push_request():
return PackagePushRequest(
push_api=mock.MagicMock(),
version="1.2.3",
start_time="12:03",
status="Succeeded",
sf_id=SF_ID,
)
def test_base_push_format_where():
base_obj = BasePushApiObject()
field_name = "id_field"
sf_id = "006000000XXX000"
where_clause = "id=001000000XXX000"
base_obj.sf_id = sf_id
returned = base_obj.format_where(field_name, where_clause)
assert "{} = '{}' AND ({})".format(field_name, sf_id, where_clause) == returned
returned = base_obj.format_where(field_name, None)
assert "{} = '{}'".format(field_name, sf_id) == returned
def test_metadata_package_init():
package = MetadataPackage(PUSH_API, NAME)
assert package.push_api == PUSH_API
assert package.sf_id is None
assert package.name == NAME
assert package.namespace is None
package = MetadataPackage(PUSH_API, NAME, SF_ID, NAMESPACE)
assert package.push_api == PUSH_API
assert package.sf_id == SF_ID
assert package.name == NAME
assert package.namespace == NAMESPACE
def test_metadata_package_get_versions(metadata_package):
expected = f"MetadataPackageId = '{SF_ID}'"
metadata_package.get_package_versions()
metadata_package.push_api.get_package_versions.assert_called_once_with(
expected, None
)
def test_metadata_package_get_version_objs(metadata_package):
expected = f"MetadataPackageId = '{SF_ID}'"
metadata_package.get_package_version_objs()
metadata_package.push_api.get_package_version_objs.assert_called_once_with(
expected, None
)
def test_metadata_package_get_versions_by_id(metadata_package):
expected = f"MetadataPackageId = '{SF_ID}'"
metadata_package.get_package_versions_by_id()
metadata_package.push_api.get_package_versions_by_id.assert_called_once_with(
expected, None
)
def test_metadata_package_version_version_number(metadata_package_version):
expected = "1.2.3 (Beta 4)"
actual = metadata_package_version.version_number
assert expected == actual
def test_sf_push_return_query_records(sf_push_api):
query = "SELECT Id FROM Account"
records = ["record 1", "record 2", "record 3"]
results = {"totalSize": 10, "records": records}
sf_push_api.sf.query_all.return_value = results
returned = sf_push_api.return_query_records(query)
assert len(records) == len(returned)
results["totalSize"] = 0
sf_push_api.sf.query_all.return_value = results
returned = sf_push_api.return_query_records(query)
assert [] == returned
def test_sf_push_format_where(sf_push_api):
returned = sf_push_api.format_where_clause(None)
assert "" == returned
default_where = "Id='001000000XXX000'"
sf_push_api.default_where = {"Account": default_where}
returned = sf_push_api.format_where_clause(None, "Object__c")
assert "" == returned
returned = sf_push_api.format_where_clause(None, "Account")
assert " WHERE ({})".format(default_where) == returned
where = "IsDeleted=False"
returned = sf_push_api.format_where_clause(where)
assert " WHERE {}".format(where) == returned
    # No default where for Object__c
returned = sf_push_api.format_where_clause(where, "Object__c")
assert " WHERE {}".format(where) == returned
returned = sf_push_api.format_where_clause(where, "Account")
assert " WHERE ({}) AND ({})".format(default_where, where) == returned
def test_sf_push_add_query_limit(sf_push_api):
query = "SELECT Id FROM Account"
limit = 100
returned = sf_push_api.add_query_limit(query, limit)
assert "{} LIMIT {}".format(query, limit) == returned
def test_sf_push_add_query_no_limit(sf_push_api):
query = "SELECT Id FROM Account"
returned = sf_push_api.add_query_limit(query, None)
assert f"{query}" == returned
def test_sf_push_get_packages(sf_push_api):
query = "SELECT id, name, namespaceprefix FROM MetadataPackage WHERE Name='foo'"
sf_push_api.return_query_records = mock.MagicMock()
sf_push_api.get_packages("Name='foo'", None)
sf_push_api.return_query_records.assert_called_once_with(query)
def test_sf_push_get_package_objs(sf_push_api, metadata_package):
sf_push_api.get_packages = mock.MagicMock()
packages = {
"Id": metadata_package.sf_id,
"Name": metadata_package.name,
"NamespacePrefix": metadata_package.namespace,
}
sf_push_api.get_packages.return_value = [packages]
actual_result_list = sf_push_api.get_package_objs("Name='foo'", None)
assert len(actual_result_list) == 1
actual_result = actual_result_list[0]
assert packages["Id"] == actual_result.sf_id
assert packages["Name"] == actual_result.name
assert packages["NamespacePrefix"] == actual_result.namespace
def test_sf_push_get_packages_by_id(sf_push_api, metadata_package):
sf_push_api.get_package_objs = mock.MagicMock()
sf_push_api.get_package_objs.return_value = [metadata_package]
package_expected = {metadata_package.sf_id: metadata_package}
package_result = sf_push_api.get_packages_by_id("Name='foo'", None)
sf_push_api.get_package_objs.assert_called_with("Name='foo'", None)
assert package_expected == package_result
def test_sf_push_get_package_versions(sf_push_api):
query = "SELECT Id, Name, MetadataPackageId, ReleaseState, MajorVersion, MinorVersion, PatchVersion, BuildNumber FROM MetadataPackageVersion WHERE Name='foo' ORDER BY MajorVersion DESC, MinorVersion DESC, PatchVersion, BuildNumber DESC"
sf_push_api.return_query_records = mock.MagicMock()
sf_push_api.get_package_versions("Name='foo'", None)
sf_push_api.return_query_records.assert_called_once_with(query)
def test_sf_push_get_package_version_objs(sf_push_api):
query = "SELECT Id, Name, MetadataPackageId, ReleaseState, MajorVersion, MinorVersion, PatchVersion, BuildNumber FROM MetadataPackageVersion WHERE Name='foo' ORDER BY MajorVersion DESC, MinorVersion DESC, PatchVersion, BuildNumber DESC"
sf_push_api.return_query_records = mock.MagicMock()
sf_push_api.get_package_version_objs("Name='foo'", None)
sf_push_api.return_query_records.assert_called_with(query)
def test_sf_push_get_package_version_by_id(sf_push_api, metadata_package_version):
sf_push_api.get_package_version_objs = mock.MagicMock()
sf_push_api.get_package_version_objs.return_value = [metadata_package_version]
package_expected = {metadata_package_version.sf_id: metadata_package_version}
package_result = sf_push_api.get_package_versions_by_id("Name='foo'", None)
sf_push_api.get_package_version_objs.assert_called_with("Name='foo'", None)
assert package_expected == package_result
def test_sf_push_get_subscribers(sf_push_api):
query = "SELECT Id, MetadataPackageVersionId, InstalledStatus, OrgName, OrgKey, OrgStatus, OrgType from PackageSubscriber WHERE Name='foo'"
sf_push_api.return_query_records = mock.MagicMock()
sf_push_api.get_subscribers("Name='foo'", None)
sf_push_api.return_query_records.assert_called_with(query)
def test_sf_push_get_subscriber_objs(sf_push_api):
query = "SELECT Id, MetadataPackageVersionId, InstalledStatus, OrgName, OrgKey, OrgStatus, OrgType from PackageSubscriber WHERE Name='foo'"
sf_push_api.return_query_records = mock.MagicMock()
sf_push_api.get_subscriber_objs("Name='foo'", None)
sf_push_api.return_query_records.assert_called_with(query)
def test_sf_push_get_subscribers_by_org_key(sf_push_api, package_subscriber):
sf_push_api.get_subscriber_objs = mock.MagicMock()
sf_push_api.get_subscriber_objs.return_value = [package_subscriber]
package_expected = {package_subscriber.org_key: package_subscriber}
package_result = sf_push_api.get_subscribers_by_org_key("Name='foo'", None)
sf_push_api.get_subscriber_objs.assert_called_with("Name='foo'", None)
assert package_expected == package_result
def test_sf_push_get_push_requests(sf_push_api):
query = "SELECT Id, PackageVersionId, ScheduledStartTime, Status FROM PackagePushRequest WHERE Name='foo' ORDER BY ScheduledStartTime DESC"
sf_push_api.return_query_records = mock.MagicMock()
sf_push_api.get_push_requests("Name='foo'", None)
sf_push_api.return_query_records.assert_called_with(query)
def test_sf_push_get_push_request_objs(sf_push_api):
query = "SELECT Id, PackageVersionId, ScheduledStartTime, Status FROM PackagePushRequest WHERE Name='foo' ORDER BY ScheduledStartTime DESC"
sf_push_api.return_query_records = mock.MagicMock()
sf_push_api.get_push_request_objs("Name='foo'", None)
sf_push_api.return_query_records.assert_called_with(query)
def test_sf_push_get_push_requests_by_id(sf_push_api, package_push_request):
sf_push_api.get_push_request_objs = mock.MagicMock()
sf_push_api.get_push_request_objs.return_value = [package_push_request]
package_expected = {package_push_request.sf_id: package_push_request}
package_result = sf_push_api.get_push_requests_by_id("Name='foo'", None)
sf_push_api.get_push_request_objs.assert_called_with("Name='foo'", None)
assert package_expected == package_result
def test_sf_push_get_push_jobs(sf_push_api):
query = "SELECT Id, PackagePushRequestId, SubscriberOrganizationKey, Status FROM PackagePushJob WHERE Name='foo'"
sf_push_api.return_query_records = mock.MagicMock()
sf_push_api.get_push_jobs("Name='foo'", None)
sf_push_api.return_query_records.assert_called_with(query)
def test_sf_push_get_push_job_objs(sf_push_api):
query = "SELECT Id, PackagePushRequestId, SubscriberOrganizationKey, Status FROM PackagePushJob WHERE Name='foo'"
sf_push_api.return_query_records = mock.MagicMock()
sf_push_api.get_push_job_objs("Name='foo'", None)
sf_push_api.return_query_records.assert_called_with(query)
def test_sf_push_get_push_jobs_by_id(sf_push_api, package_push_job):
sf_push_api.get_push_job_objs = mock.MagicMock()
sf_push_api.get_push_job_objs.return_value = [package_push_job]
package_expected = {package_push_job.sf_id: package_push_job}
package_result = sf_push_api.get_push_jobs_by_id("Name='foo'", None)
sf_push_api.get_push_job_objs.assert_called_with("Name='foo'", None)
assert package_expected == package_result
def test_sf_push_get_push_errors(sf_push_api):
query = "SELECT Id, PackagePushJobId, ErrorSeverity, ErrorType, ErrorTitle, ErrorMessage, ErrorDetails FROM PackagePushError WHERE Name='foo'"
sf_push_api.return_query_records = mock.MagicMock()
sf_push_api.get_push_errors("Name='foo'", None)
sf_push_api.return_query_records.assert_called_with(query)
def test_sf_push_get_push_error_objs(sf_push_api, package_push_job, package_push_error):
sf_push_api.get_push_job_objs = mock.MagicMock()
sf_push_api.get_push_job_objs.return_value = [package_push_job]
sf_push_api.lazy = ["jobs"]
sf_push_api.get_push_errors = mock.MagicMock()
record = {
"ErrorSeverity": "high",
"ErrorType": "bar",
"ErrorTitle": "foo_bar",
"ErrorMessage": "The foo hit the bar",
"ErrorDetails": "foo bar, foo, foo bar",
"Id": SF_ID,
"PackagePushJobId": "pkg_push_id",
}
sf_push_api.get_push_errors.return_value = [record]
actual_result_list = sf_push_api.get_push_error_objs("Name='foo'", None)
sf_push_api.get_push_job_objs.assert_called_once_with(where="Id = 'pkg_push_id'")
assert len(actual_result_list) == 1
actual_result = actual_result_list[0]
assert record["ErrorMessage"] == actual_result.message
assert record["ErrorDetails"] == actual_result.details
assert record["Id"] == actual_result.sf_id
assert actual_result.job == package_push_job
def test_sf_push_get_push_errors_by_id(sf_push_api, package_push_error):
sf_push_api.get_push_error_objs = mock.MagicMock()
sf_push_api.get_push_error_objs.return_value = [package_push_error]
push_error_expected = {package_push_error.sf_id: package_push_error}
push_error_result = sf_push_api.get_push_errors_by_id("Name='foo'", None)
sf_push_api.get_push_error_objs.assert_called_with("Name='foo'", None)
assert push_error_expected == push_error_result
def test_sf_push_cancel_push_request(sf_push_api):
ref_id = "12"
sf_push_api.cancel_push_request(ref_id)
sf_push_api.sf.PackagePushRequest.update.assert_called_once_with(
ref_id, {"Status": "Canceled"}
)
def test_sf_push_run_push_request(sf_push_api):
ref_id = "12"
sf_push_api.run_push_request(ref_id)
sf_push_api.sf.PackagePushRequest.update.assert_called_once_with(
ref_id, {"Status": "Pending"}
)
def test_sf_push_create_push_request(sf_push_api, metadata_package_version):
sf_push_api.batch_size = 1
push_request_id = "0DV?xxxxxx?"
version_id = metadata_package_version.sf_id = "0KM?xxxxx?"
orgs = ["00D000000001", "00D000000002"]
batch_0, batch_1 = [orgs[0]], [orgs[1]]
start_time = datetime.datetime.now()
sf_push_api.sf.PackagePushRequest.create.return_value = {"id": push_request_id}
sf_push_api.sf.base_url = "url"
sf_push_api._add_batch = mock.MagicMock(side_effect=[batch_0, batch_1])
actual_id, actual_org_count = sf_push_api.create_push_request(
metadata_package_version, orgs, start_time
)
sf_push_api.sf.PackagePushRequest.create.assert_called_once_with(
{"PackageVersionId": version_id, "ScheduledStartTime": start_time.isoformat()}
)
assert mock.call(batch_0, push_request_id) in sf_push_api._add_batch.call_args_list
assert mock.call(batch_1, push_request_id) in sf_push_api._add_batch.call_args_list
assert push_request_id == actual_id
assert 2 == actual_org_count
def test_sf_push_add_push_batch(sf_push_api, metadata_package_version):
push_request_id = "0DV?xxxxxx?"
metadata_package_version.sf_id = "0KM?xxxxx?"
orgs = ["00D000000001", "00D000000002"]
expected_records_json = json.dumps(
{
"records": [
{
"attributes": {"type": "PackagePushJob", "referenceId": orgs[0]},
"PackagePushRequestId": push_request_id,
"SubscriberOrganizationKey": orgs[0],
},
{
"attributes": {"type": "PackagePushJob", "referenceId": orgs[1]},
"PackagePushRequestId": push_request_id,
"SubscriberOrganizationKey": orgs[1],
},
]
}
)
sf_push_api.sf.base_url = "base_url/"
returned_batch = sf_push_api._add_batch(orgs, push_request_id)
sf_push_api.sf._call_salesforce.assert_called_once_with(
"POST", "base_url/composite/tree/PackagePushJob", data=expected_records_json
)
assert ["00D000000001", "00D000000002"] == returned_batch
def test_sf_push_add_push_batch_retry(sf_push_api, metadata_package_version):
push_request_id = "0DV?xxxxxx?"
orgs = ["00D000000001", "00D000000002", "00D000000003"]
retry_response = {
"results": [
{
"referenceId": orgs[0],
"errors": [
{"message": "Something bad has happened! Whatever could it be?"}
],
}
]
}
duplicate_response = {
"results": [
{
"referenceId": orgs[1],
"errors": [{"message": "", "statusCode": "DUPLICATE_VALUE"}],
}
]
}
invalid_response = {
"results": [
{
"referenceId": orgs[2],
"errors": [{"message": "", "statusCode": "INVALID_OPERATION"}],
}
]
}
sf_push_api.sf.base_url = "base_url/"
sf_push_api.sf._call_salesforce.side_effect = [
SalesforceMalformedRequest(
"base_url/composite/tree/PackagePushJob",
400,
"resource_name",
retry_response,
),
SalesforceMalformedRequest(
"base_url/composite/tree/PackagePushJob",
400,
"resource_name",
duplicate_response,
),
SalesforceMalformedRequest(
"base_url/composite/tree/PackagePushJob",
400,
"resource_name",
invalid_response,
),
[],
]
returned_batch = sf_push_api._add_batch(orgs, push_request_id)
assert [orgs[0]] == returned_batch # only remaining org should be retry-able
assert 4 == sf_push_api.sf._call_salesforce.call_count
def test_push_batch_list():
data = ["zero", "one", "two", "three"]
actual_batch_list = batch_list(data, 1)
expected_batch_list = [["zero"], ["one"], ["two"], ["three"]]
assert expected_batch_list == actual_batch_list
actual_batch_list = batch_list(data, 2)
expected_batch_list = [["zero", "one"], ["two", "three"]]
assert expected_batch_list == actual_batch_list
actual_batch_list = batch_list(data, 3)
expected_batch_list = [["zero", "one", "two"], ["three"]]
assert expected_batch_list == actual_batch_list
actual_batch_list = batch_list(data, 4)
expected_batch_list = [["zero", "one", "two", "three"]]
assert expected_batch_list == actual_batch_list
actual_batch_list = batch_list(data, 5)
assert expected_batch_list == actual_batch_list
actual_batch_list = batch_list([], 2)
expected_batch_list = []
assert expected_batch_list == actual_batch_list
def test_version_init(metadata_package):
package = MetadataPackageVersion(
push_api=PUSH_API,
package=metadata_package,
name=NAME,
sf_id=SF_ID,
state="Beta",
major="1",
minor="2",
patch="3",
build="4",
)
assert package.push_api == PUSH_API
assert package.package == metadata_package
assert package.name == NAME
assert package.sf_id == SF_ID
assert package.state == "Beta"
assert package.major == "1"
assert package.minor == "2"
assert package.patch == "3"
assert package.build == "4"
def test_version_number(metadata_package_version):
actual = metadata_package_version.version_number
expected = "1.2.3 (Beta 4)"
assert actual == expected
def test_metadata_package_get_subscribers(metadata_package_version):
expected = f"MetadataPackageVersionId = '{SF_ID}'"
metadata_package_version.get_subscribers()
metadata_package_version.push_api.get_subscribers.assert_called_once_with(
expected, None
)
def test_metadata_package_get_subscriber_objects(metadata_package_version):
expected = f"MetadataPackageVersionId = '{SF_ID}'"
metadata_package_version.get_subscriber_objs()
metadata_package_version.push_api.get_subscriber_objs.assert_called_once_with(
expected, None
)
def test_metadata_package_get_subscribers_by_org_key(metadata_package_version):
expected = f"MetadataPackageVersionId = '{SF_ID}'"
metadata_package_version.get_subscribers_by_org_key()
metadata_package_version.push_api.get_subscribers_by_org_key.assert_called_once_with(
expected, None
)
def test_metadata_package_push_requests(metadata_package_version):
expected = f"PackageVersionId = '{SF_ID}'"
metadata_package_version.get_push_requests()
metadata_package_version.push_api.get_push_requests.assert_called_once_with(
expected, None
)
def test_metadata_package_push_request_objs(metadata_package_version):
expected = f"PackageVersionId = '{SF_ID}'"
metadata_package_version.get_push_request_objs()
metadata_package_version.push_api.get_push_request_objs.assert_called_once_with(
expected, None
)
def test_metadata_package_push_requests_by_id(metadata_package_version):
expected = f"PackageVersionId = '{SF_ID}'"
metadata_package_version.get_push_requests_by_id()
metadata_package_version.push_api.get_push_requests_by_id.assert_called_once_with(
expected, None
)
def test_version_get_newer_query(metadata_package_version):
expected = "MetadataPackageId = '033xxxxxxxxx' AND (MetadataPackageId = '033xxxxxxxxx' AND ReleaseState = 'Released' AND (MajorVersion > 1 OR (MajorVersion = 1 AND MinorVersion > 2) OR (MajorVersion = 1 AND MinorVersion = 2 AND PatchVersion > 3)))"
metadata_package_version.get_newer_released_version_objs(None)
metadata_package_version.package.push_api.get_package_version_objs.assert_called_once_with(
expected, None
)
def test_version_get_older_query(metadata_package_version):
expected = "MetadataPackageId = '033xxxxxxxxx' AND (MetadataPackageId = '033xxxxxxxxx' AND ReleaseState = 'Released' AND (MajorVersion < 1 OR (MajorVersion = 1 AND MinorVersion < 2) OR (MajorVersion = 1 AND MinorVersion = 2 AND PatchVersion < 3)))"
metadata_package_version.get_older_released_version_objs()
metadata_package_version.package.push_api.get_package_version_objs.assert_called_once_with(
expected, None
)
def test_version_less_than_query(metadata_package_version, metadata_package):
less_than = MetadataPackageVersion(
push_api=PUSH_API,
package=metadata_package,
name=NAME,
sf_id=SF_ID,
state="Beta",
major="2",
minor="2",
patch="2",
build="1",
)
metadata_package_version.get_newer_released_version_objs(less_than)
expected_where = "MetadataPackageId = '033xxxxxxxxx' AND (MetadataPackageId = '033xxxxxxxxx' AND ReleaseState = 'Released' AND (MajorVersion > 1 OR (MajorVersion = 1 AND MinorVersion > 2) OR (MajorVersion = 1 AND MinorVersion = 2 AND PatchVersion > 3)) AND (MajorVersion < 2 OR (MajorVersion = 2 AND MinorVersion < 2) OR (MajorVersion = 2 AND MinorVersion = 2 AND PatchVersion < 2)))"
metadata_package_version.package.push_api.get_package_version_objs.assert_called_once_with(
expected_where, None
)
def test_version_greater_than_query(metadata_package_version, metadata_package):
expected = "MetadataPackageId = '033xxxxxxxxx' AND (MetadataPackageId = '033xxxxxxxxx' AND ReleaseState = 'Released' AND (MajorVersion < 1 OR (MajorVersion = 1 AND MinorVersion < 2) OR (MajorVersion = 1 AND MinorVersion = 2 AND PatchVersion < 3)) AND (MajorVersion > 2 OR (MajorVersion = 2 AND MinorVersion > 2) OR (MajorVersion = 2 AND MinorVersion = 2 AND PatchVersion > 2)))"
greater_than = MetadataPackageVersion(
push_api=PUSH_API,
package=metadata_package,
name=NAME,
sf_id=SF_ID,
state="Beta",
major="2",
minor="2",
patch="2",
build="1",
)
metadata_package_version.get_older_released_version_objs(greater_than)
metadata_package_version.package.push_api.get_package_version_objs.assert_called_once_with(
expected, None
)
def test_version_get_newer(metadata_package_version):
expected = "MetadataPackageId = '033xxxxxxxxx' AND ReleaseState = 'Released' AND (MajorVersion > 1 OR (MajorVersion = 1 AND MinorVersion > 2) OR (MajorVersion = 1 AND MinorVersion = 2 AND PatchVersion > 3))"
metadata_package_version.package.get_package_version_objs = mock.MagicMock()
metadata_package_version.get_newer_released_version_objs(None)
metadata_package_version.package.get_package_version_objs.assert_called_once_with(
expected
)
def test_version_get_older(metadata_package_version):
expected = "MetadataPackageId = '033xxxxxxxxx' AND ReleaseState = 'Released' AND (MajorVersion < 1 OR (MajorVersion = 1 AND MinorVersion < 2) OR (MajorVersion = 1 AND MinorVersion = 2 AND PatchVersion < 3))"
metadata_package_version.package.get_package_version_objs = mock.MagicMock()
metadata_package_version.get_older_released_version_objs()
metadata_package_version.package.get_package_version_objs.assert_called_once_with(
expected
)
def test_package_push_job_get_push_errors(package_push_job):
expected = f"PackagePushJobId = '{SF_ID}'"
package_push_job.get_push_errors()
package_push_job.push_api.get_push_errors.assert_called_once_with(expected, None)
def test_package_push_job_get_push_error_objects(package_push_job):
expected = f"PackagePushJobId = '{SF_ID}'"
package_push_job.get_push_error_objs()
package_push_job.push_api.get_push_error_objs.assert_called_once_with(
expected, None
)
def test_package_push_job_get_push_errors_by_id(package_push_job):
expected = f"PackagePushJobId = '{SF_ID}'"
package_push_job.get_push_errors_by_id()
package_push_job.push_api.get_push_errors_by_id.assert_called_once_with(
expected, None
)
def test_package_push_errors(package_push_error):
assert package_push_error.push_api == "foo"
assert package_push_error.sf_id == SF_ID
assert package_push_error.job == "Foo"
assert package_push_error.severity == "high"
assert package_push_error.error_type == "bar"
assert package_push_error.title == "foo_bar"
assert package_push_error.message == "The foo hit the bar"
assert package_push_error.details == "foo bar, foo, foo bar"
def test_package_push_request_get_push_jobs(package_push_request):
expected = f"PackagePushRequestId = '{SF_ID}'"
package_push_request.get_push_jobs()
package_push_request.push_api.get_push_jobs.assert_called_once_with(expected, None)
def test_package_push_request_get_push_job_objects(package_push_request):
expected = f"PackagePushRequestId = '{SF_ID}'"
package_push_request.get_push_job_objs()
package_push_request.push_api.get_push_job_objs.assert_called_once_with(
expected, None
)
def test_package_push_request_get_push_jobs_by_id(package_push_request):
expected = f"PackagePushRequestId = '{SF_ID}'"
package_push_request.get_push_jobs_by_id()
package_push_request.push_api.get_push_jobs_by_id.assert_called_once_with(
expected, None
)
def test_format_where(package_subscriber):
assert package_subscriber.format_where("foo") == "foo = 'bar'"
assert (
package_subscriber.format_where("foo", "foobar") == "foo = 'bar' AND (foobar)"
)
def test_package_subscriber_get_push_jobs(package_subscriber):
expected = f"SubscriberOrganizationKey = '{ORG_KEY}'"
package_subscriber.get_push_jobs()
package_subscriber.push_api.get_push_jobs.assert_called_once_with(expected, None)
def test_package_subscriber_get_push_job_objects(package_subscriber):
expected = f"SubscriberOrganizationKey = '{ORG_KEY}'"
package_subscriber.get_push_job_objs()
package_subscriber.push_api.get_push_job_objs.assert_called_once_with(
expected, None
)
def test_package_subscriber_get_push_jobs_by_id(package_subscriber):
expected = f"SubscriberOrganizationKey = '{ORG_KEY}'"
package_subscriber.get_push_jobs_by_id()
package_subscriber.push_api.get_push_jobs_by_id.assert_called_once_with(
expected, None
)
```
#### File: tasks/salesforce/update_dependencies.py
```python
from distutils.version import LooseVersion
from cumulusci.core.utils import process_bool_arg
from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.salesforce_api.metadata import ApiDeploy
from cumulusci.salesforce_api.metadata import ApiRetrieveInstalledPackages
from cumulusci.salesforce_api.package_install import install_package_version
from cumulusci.salesforce_api.package_zip import InstallPackageZipBuilder
from cumulusci.salesforce_api.package_zip import MetadataPackageZipBuilder
from cumulusci.salesforce_api.package_zip import UninstallPackageZipBuilder
from cumulusci.tasks.salesforce.BaseSalesforceMetadataApiTask import (
BaseSalesforceMetadataApiTask,
)
from cumulusci.utils import download_extract_zip
from cumulusci.utils import download_extract_github
class UpdateDependencies(BaseSalesforceMetadataApiTask):
api_class = ApiDeploy
name = "UpdateDependencies"
task_options = {
"dependencies": {
"description": "List of dependencies to update. Defaults to project__dependencies. "
"Each dependency is a dict with either 'github' set to a github repository URL "
"or 'namespace' set to a Salesforce package namespace. "
"Github dependencies may include 'tag' to install a particular git ref. "
"Package dependencies may include 'version' to install a particular version."
},
"ignore_dependencies": {
"description": "List of dependencies to be ignored, including if they are present as transitive "
"dependencies. Dependencies can be specified using the 'github' or 'namespace' keys (all other keys "
"are not used). Note that this can cause installations to fail if required prerequisites are not available."
},
"purge_on_delete": {
"description": "Sets the purgeOnDelete option for the deployment. Defaults to True"
},
"include_beta": {
"description": "Install the most recent release, even if beta. Defaults to False. "
"This option is only supported for scratch orgs, "
"to avoid installing a package that can't be upgraded in persistent orgs."
},
"allow_newer": {
"description": "If the org already has a newer release, use it. Defaults to True."
},
"allow_uninstalls": {
"description": "Allow uninstalling a beta release or newer final release "
"in order to install the requested version. Defaults to False. "
"Warning: Enabling this may destroy data."
},
"security_type": {
"description": "Which users to install packages for (FULL = all users, NONE = admins only)"
},
"prefer_2gp_from_release_branch": {
"description": "If True and this build is on a release branch (feature/NNN, where NNN is an integer), "
"or a child branch of a release branch, resolve GitHub managed package dependencies to 2GP builds present on "
"a matching release branch on the dependency."
},
}
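    # Hypothetical option values for illustration only (repo URL, tag and
    # version are made up); they follow the formats described above:
    #   dependencies:
    #     - github: https://github.com/example/ExampleRepo
    #       tag: release/1.2
    #     - namespace: examplens
    #       version: "1.10"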
def _init_options(self, kwargs):
super(UpdateDependencies, self)._init_options(kwargs)
self.options["purge_on_delete"] = process_bool_arg(
self.options.get("purge_on_delete", True)
)
self.options["include_beta"] = process_bool_arg(
self.options.get("include_beta", False)
)
self.options["dependencies"] = (
self.options.get("dependencies")
or self.project_config.project__dependencies
)
self.options["allow_newer"] = process_bool_arg(
self.options.get("allow_newer", True)
)
self.options["allow_uninstalls"] = process_bool_arg(
self.options.get("allow_uninstalls", False)
)
self.options["security_type"] = self.options.get("security_type", "FULL")
if self.options["security_type"] not in ("FULL", "NONE", "PUSH"):
raise TaskOptionsError(
f"Unsupported value for security_type: {self.options['security_type']}"
)
self.options["prefer_2gp_from_release_branch"] = process_bool_arg(
self.options.get("prefer_2gp_from_release_branch", False)
)
if "ignore_dependencies" in self.options:
if any(
"github" not in dep and "namespace" not in dep
for dep in self.options["ignore_dependencies"]
):
raise TaskOptionsError(
"An invalid dependency was specified for ignore_dependencies."
)
if (
self.org_config
and self.options["include_beta"]
and not self.org_config.scratch
):
self.logger.warning(
"The `include_beta` option is enabled but this not a scratch org.\n"
"Setting `include_beta` to False to avoid installing beta package versions in a persistent org."
)
self.options["include_beta"] = False
def _run_task(self):
if not self.options["dependencies"]:
self.logger.info("Project has no dependencies, doing nothing")
return
self.logger.info("Preparing static dependencies map")
dependencies = self.project_config.get_static_dependencies(
self.options["dependencies"],
include_beta=self.options["include_beta"],
ignore_deps=self.options.get("ignore_dependencies"),
match_release_branch=self.options["prefer_2gp_from_release_branch"],
)
self.installed = None
self.uninstall_queue = []
self.install_queue = []
self.logger.info("Dependencies:")
for line in self.project_config.pretty_dependencies(dependencies):
self.logger.info(line)
self._process_dependencies(dependencies)
installs = []
for dep in self.install_queue:
if dep not in installs:
installs.append(dep)
self.install_queue = installs
# Reverse the uninstall queue
self.uninstall_queue.reverse()
self._uninstall_dependencies()
self._install_dependencies()
self.org_config.reset_installed_packages()
def _process_dependencies(self, dependencies):
for dependency in dependencies:
# Process child dependencies
dependency_uninstalled = False
subdependencies = dependency.get("dependencies")
if subdependencies:
count_uninstall = len(self.uninstall_queue)
self._process_dependencies(subdependencies)
if count_uninstall != len(self.uninstall_queue):
dependency_uninstalled = True
# Process namespace dependencies (managed packages)
if "namespace" in dependency:
self._process_namespace_dependency(dependency, dependency_uninstalled)
else:
# zip_url or repo dependency
self.install_queue.append(dependency)
if self.uninstall_queue and not self.options["allow_uninstalls"]:
raise TaskOptionsError(
"Updating dependencies would require uninstalling these packages "
"but uninstalls are not enabled: {}".format(
", ".join(dep["namespace"] for dep in self.uninstall_queue)
)
)
def _process_namespace_dependency(self, dependency, dependency_uninstalled=None):
dependency_version = str(dependency["version"])
if self.installed is None:
self.installed = self._get_installed()
if dependency["namespace"] in self.installed:
# Some version is installed, check what to do
installed_version = self.installed[dependency["namespace"]]
required_version = LooseVersion(dependency_version)
installed_version = LooseVersion(installed_version)
if installed_version > required_version and self.options["allow_newer"]:
# Avoid downgrading if allow_newer = True
required_version = installed_version
if required_version == installed_version and not dependency_uninstalled:
self.logger.info(
" {}: version {} already installed".format(
dependency["namespace"], dependency_version
)
)
return
if "Beta" in installed_version.vstring:
# Always uninstall Beta versions if required is different
self.uninstall_queue.append(dependency)
self.logger.info(
" {}: Uninstall {} to upgrade to {}".format(
dependency["namespace"],
installed_version,
dependency["version"],
)
)
elif dependency_uninstalled:
# If a dependency of this one needs to be uninstalled, always uninstall the package
self.uninstall_queue.append(dependency)
self.logger.info(
" {}: Uninstall and Reinstall to allow downgrade of dependency".format(
dependency["namespace"]
)
)
elif required_version < installed_version:
# Uninstall to downgrade
self.uninstall_queue.append(dependency)
self.logger.info(
" {}: Downgrade from {} to {} (requires uninstall/install)".format(
dependency["namespace"],
installed_version,
dependency["version"],
)
)
else:
self.logger.info(
" {}: Upgrade from {} to {}".format(
dependency["namespace"],
installed_version,
dependency["version"],
)
)
self.install_queue.append(dependency)
else:
# Just a regular install
self.logger.info(
" {}: Install version {}".format(
dependency["namespace"], dependency["version"]
)
)
self.install_queue.append(dependency)
def _get_installed(self):
# @@@ use org_config.installed_packages instead
self.logger.info("Retrieving list of packages from target org")
api = ApiRetrieveInstalledPackages(self)
return api()
def _uninstall_dependencies(self):
for dependency in self.uninstall_queue:
self._uninstall_dependency(dependency)
def _install_dependencies(self):
for dependency in self.install_queue:
self._install_dependency(dependency)
# hooks for tests
_download_extract_github = staticmethod(download_extract_github)
_download_extract_zip = staticmethod(download_extract_zip)
def _install_dependency(self, dependency):
package_zip = None
zip_src = None
if "zip_url" in dependency:
self.logger.info(
"Deploying unmanaged metadata from /{} of {}".format(
dependency.get("subfolder") or "", dependency["zip_url"]
)
)
zip_src = self._download_extract_zip(
dependency["zip_url"], subfolder=dependency.get("subfolder")
)
elif "repo_name" in dependency:
self.logger.info(
"Deploying unmanaged metadata from /{} of {}/{}".format(
dependency["subfolder"],
dependency["repo_owner"],
dependency["repo_name"],
)
)
gh_for_repo = self.project_config.get_github_api(
dependency["repo_owner"], dependency["repo_name"]
)
zip_src = self._download_extract_github(
gh_for_repo,
dependency["repo_owner"],
dependency["repo_name"],
dependency["subfolder"],
ref=dependency.get("ref"),
)
if zip_src:
# determine whether to inject namespace prefixes or not
options = dependency.copy()
if "unmanaged" not in options:
namespace = options.get("namespace_inject")
options["unmanaged"] = (
not namespace
) or namespace not in self.org_config.installed_packages
package_zip = MetadataPackageZipBuilder.from_zipfile(
zip_src, options=options, logger=self.logger
).as_base64()
elif "namespace" in dependency:
self.logger.info(
"Installing {} version {}".format(
dependency["namespace"], dependency["version"]
)
)
package_zip = InstallPackageZipBuilder(
dependency["namespace"],
dependency["version"],
securityType=self.options["security_type"],
)()
if package_zip:
api = self.api_class(
self, package_zip, purge_on_delete=self.options["purge_on_delete"]
)
return api()
elif "version_id" in dependency:
self.logger.info(f"Installing {dependency['version_id']}")
install_package_version(self.project_config, self.org_config, dependency)
else:
raise TaskOptionsError(f"Could not find package for {dependency}")
def _uninstall_dependency(self, dependency):
self.logger.info("Uninstalling {}".format(dependency["namespace"]))
package_zip = UninstallPackageZipBuilder(
dependency["namespace"], self.project_config.project__package__api_version
)
api = self.api_class(
self, package_zip(), purge_on_delete=self.options["purge_on_delete"]
)
return api()
def freeze(self, step):
ui_options = self.task_config.config.get("ui_options", {})
dependencies = self.project_config.get_static_dependencies(
self.options["dependencies"],
include_beta=self.options["include_beta"],
ignore_deps=self.options.get("ignore_dependencies"),
)
steps = []
for i, dependency in enumerate(self._flatten(dependencies), start=1):
name = dependency.pop("name", None)
if "namespace" in dependency:
kind = "managed"
name = name or "Install {} {}".format(
dependency["namespace"], dependency["version"]
)
else:
kind = "metadata"
name = name or "Deploy {}".format(dependency["subfolder"])
task_config = {
"options": self.options.copy(),
"checks": self.task_config.checks or [],
}
task_config["options"]["dependencies"] = [dependency]
ui_step = {"name": name, "kind": kind, "is_required": True}
ui_step.update(ui_options.get(i, {}))
ui_step.update(
{
"path": "{}.{}".format(step.path, i),
"step_num": "{}.{}".format(step.step_num, i),
"task_class": self.task_config.class_path,
"task_config": task_config,
"source": step.project_config.source.frozenspec,
}
)
steps.append(ui_step)
return steps
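    # _flatten (below) turns the nested dependency dicts produced by
    # get_static_dependencies into a flat, ordered list, children first.
    # Illustrative behaviour with hypothetical dicts:
    #   _flatten([{"namespace": "a", "dependencies": [{"namespace": "b"}]}])
    #   -> [{"namespace": "b"}, {"namespace": "a"}]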
def _flatten(self, dependencies):
result = []
for dependency in dependencies:
subdeps = dependency.pop("dependencies", [])
for subdep in self._flatten(subdeps):
if subdep not in result:
result.append(subdep)
if dependency not in result:
result.append(dependency)
return result
```
#### File: users/tests/test_permsets.py
```python
import responses
import pytest
from cumulusci.core.exceptions import CumulusCIException
from cumulusci.tasks.salesforce.users.permsets import (
AssignPermissionSets,
AssignPermissionSetLicenses,
AssignPermissionSetGroups,
)
from cumulusci.tasks.salesforce.tests.util import create_task
class TestCreatePermissionSet:
@responses.activate
def test_create_permset(self):
task = create_task(
AssignPermissionSets,
{
"api_names": "PermSet1,PermSet2",
},
)
responses.add(
method="GET",
url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2C%28SELECT+PermissionSetId+FROM+PermissionSetAssignments%29+FROM+User+WHERE+Username+%3D+%27test-cci%40example.com%27",
status=200,
json={
"done": True,
"totalSize": 1,
"records": [
{
"Id": "005000000000000",
"PermissionSetAssignments": {
"done": True,
"totalSize": 1,
"records": [{"PermissionSetId": "0PS000000000000"}],
},
}
],
},
)
responses.add(
method="GET",
url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2CName+FROM+PermissionSet+WHERE+Name+IN+%28%27PermSet1%27%2C+%27PermSet2%27%29",
status=200,
json={
"done": True,
"totalSize": 1,
"records": [
{
"Id": "0PS000000000000",
"Name": "PermSet1",
},
{
"Id": "0PS000000000001",
"Name": "PermSet2",
},
],
},
)
responses.add(
method="POST",
url=f"{task.org_config.instance_url}/services/data/v50.0/sobjects/PermissionSetAssignment/",
status=200,
json={"id": "0Pa000000000001", "success": True, "errors": []},
)
task()
assert len(responses.calls) == 3
assert "0PS000000000001" in responses.calls[2].request.body
@responses.activate
def test_create_permset__alias(self):
task = create_task(
AssignPermissionSets,
{
"api_names": "PermSet1,PermSet2",
"user_alias": "test",
},
)
responses.add(
method="GET",
url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2C%28SELECT+PermissionSetId+FROM+PermissionSetAssignments%29+FROM+User+WHERE+Alias+%3D+%27test%27",
status=200,
json={
"done": True,
"totalSize": 1,
"records": [
{
"Id": "005000000000000",
"PermissionSetAssignments": {
"done": True,
"totalSize": 1,
"records": [{"PermissionSetId": "0PS000000000000"}],
},
}
],
},
)
responses.add(
method="GET",
url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2CName+FROM+PermissionSet+WHERE+Name+IN+%28%27PermSet1%27%2C+%27PermSet2%27%29",
status=200,
json={
"done": True,
"totalSize": 1,
"records": [
{
"Id": "0PS000000000000",
"Name": "PermSet1",
},
{
"Id": "0PS000000000001",
"Name": "PermSet2",
},
],
},
)
responses.add(
method="POST",
url=f"{task.org_config.instance_url}/services/data/v50.0/sobjects/PermissionSetAssignment/",
status=200,
json={"id": "0Pa000000000001", "success": True, "errors": []},
)
task()
assert len(responses.calls) == 3
assert "0PS000000000001" in responses.calls[2].request.body
@responses.activate
def test_create_permset__alias_raises(self):
task = create_task(
AssignPermissionSets,
{
"api_names": "PermSet1,PermSet2",
"user_alias": "test",
},
)
responses.add(
method="GET",
url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2C%28SELECT+PermissionSetId+FROM+PermissionSetAssignments%29+FROM+User+WHERE+Alias+%3D+%27test%27",
status=200,
json={
"done": True,
"totalSize": 0,
"records": [],
},
)
with pytest.raises(CumulusCIException):
task()
@responses.activate
def test_create_permset_raises(self):
task = create_task(
AssignPermissionSets,
{
"api_names": "PermSet1,PermSet2,PermSet3",
},
)
responses.add(
method="GET",
url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2C%28SELECT+PermissionSetId+FROM+PermissionSetAssignments%29+FROM+User+WHERE+Username+%3D+%27test-cci%40example.com%27",
status=200,
json={
"done": True,
"totalSize": 1,
"records": [
{
"Id": "005000000000000",
"PermissionSetAssignments": {
"done": True,
"totalSize": 1,
"records": [{"PermissionSetId": "0PS000000000000"}],
},
}
],
},
)
responses.add(
method="GET",
url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2CName+FROM+PermissionSet+WHERE+Name+IN+%28%27PermSet1%27%2C+%27PermSet2%27%2C+%27PermSet3%27%29",
status=200,
json={
"done": True,
"totalSize": 1,
"records": [
{
"Id": "0PS000000000000",
"Name": "PermSet1",
},
{
"Id": "0PS000000000001",
"Name": "PermSet2",
},
],
},
)
with pytest.raises(CumulusCIException):
task()
class TestCreatePermissionSetLicense:
@responses.activate
def test_create_permsetlicense(self):
task = create_task(
AssignPermissionSetLicenses,
{
"api_names": "PermSetLicense1,PermSetLicense2",
},
)
responses.add(
method="GET",
url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2C%28SELECT+PermissionSetLicenseId+FROM+PermissionSetLicenseAssignments%29+FROM+User+WHERE+Username+%3D+%27test-cci%40example.com%27",
status=200,
json={
"done": True,
"totalSize": 1,
"records": [
{
"Id": "005000000000000",
"PermissionSetLicenseAssignments": {
"done": True,
"totalSize": 1,
"records": [{"PermissionSetLicenseId": "0PL000000000000"}],
},
}
],
},
)
responses.add(
method="GET",
url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2CDeveloperName+FROM+PermissionSetLicense+WHERE+DeveloperName+IN+%28%27PermSetLicense1%27%2C+%27PermSetLicense2%27%29",
status=200,
json={
"done": True,
"totalSize": 1,
"records": [
{
"Id": "0PL000000000000",
"DeveloperName": "PermSetLicense1",
},
{
"Id": "0PL000000000001",
"DeveloperName": "PermSetLicense2",
},
],
},
)
responses.add(
method="POST",
url=f"{task.org_config.instance_url}/services/data/v50.0/sobjects/PermissionSetLicenseAssign/",
status=200,
json={"id": "0Pa000000000001", "success": True, "errors": []},
)
task()
assert len(responses.calls) == 3
assert "0PL000000000001" in responses.calls[2].request.body
@responses.activate
def test_create_permsetlicense__no_assignments(self):
task = create_task(
AssignPermissionSetLicenses,
{
"api_names": "PermSetLicense1,PermSetLicense2",
},
)
responses.add(
method="GET",
url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2C%28SELECT+PermissionSetLicenseId+FROM+PermissionSetLicenseAssignments%29+FROM+User+WHERE+Username+%3D+%27test-cci%40example.com%27",
status=200,
json={
"done": True,
"totalSize": 1,
"records": [
{
"Id": "005000000000000",
# This seems like a bug: the PermissionSetLicenseAssignments sub-query returns None if no PSLs are already assigned instead of returning an "empty list".
"PermissionSetLicenseAssignments": None,
}
],
},
)
responses.add(
method="GET",
url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2CDeveloperName+FROM+PermissionSetLicense+WHERE+DeveloperName+IN+%28%27PermSetLicense1%27%2C+%27PermSetLicense2%27%29",
status=200,
json={
"done": True,
"totalSize": 1,
"records": [
{
"Id": "0PL000000000000",
"DeveloperName": "PermSetLicense1",
},
{
"Id": "0PL000000000001",
"DeveloperName": "PermSetLicense2",
},
],
},
)
responses.add(
method="POST",
url=f"{task.org_config.instance_url}/services/data/v50.0/sobjects/PermissionSetLicenseAssign/",
status=200,
json={"id": "0Pa000000000000", "success": True, "errors": []},
)
responses.add(
method="POST",
url=f"{task.org_config.instance_url}/services/data/v50.0/sobjects/PermissionSetLicenseAssign/",
status=200,
json={"id": "0Pa000000000001", "success": True, "errors": []},
)
task()
assert len(responses.calls) == 4
assert "0PL000000000000" in responses.calls[2].request.body
assert "0PL000000000001" in responses.calls[3].request.body
@responses.activate
def test_create_permsetlicense__alias(self):
task = create_task(
AssignPermissionSetLicenses,
{
"api_names": "PermSetLicense1,PermSetLicense2",
"user_alias": "test",
},
)
responses.add(
method="GET",
url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2C%28SELECT+PermissionSetLicenseId+FROM+PermissionSetLicenseAssignments%29+FROM+User+WHERE+Alias+%3D+%27test%27",
status=200,
json={
"done": True,
"totalSize": 1,
"records": [
{
"Id": "005000000000000",
"PermissionSetLicenseAssignments": {
"done": True,
"totalSize": 1,
"records": [{"PermissionSetLicenseId": "0PL000000000000"}],
},
}
],
},
)
responses.add(
method="GET",
url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2CDeveloperName+FROM+PermissionSetLicense+WHERE+DeveloperName+IN+%28%27PermSetLicense1%27%2C+%27PermSetLicense2%27%29",
status=200,
json={
"done": True,
"totalSize": 1,
"records": [
{
"Id": "0PL000000000000",
"DeveloperName": "PermSetLicense1",
},
{
"Id": "0PL000000000001",
"DeveloperName": "PermSetLicense2",
},
],
},
)
responses.add(
method="POST",
url=f"{task.org_config.instance_url}/services/data/v50.0/sobjects/PermissionSetLicenseAssign/",
status=200,
json={"id": "0Pa000000000001", "success": True, "errors": []},
)
task()
assert len(responses.calls) == 3
assert "0PL000000000001" in responses.calls[2].request.body
@responses.activate
def test_create_permsetlicense__alias_raises(self):
task = create_task(
AssignPermissionSetLicenses,
{
"api_names": "PermSetLicense1,PermSetLicense2",
"user_alias": "test",
},
)
responses.add(
method="GET",
url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2C%28SELECT+PermissionSetLicenseId+FROM+PermissionSetLicenseAssignments%29+FROM+User+WHERE+Alias+%3D+%27test%27",
status=200,
json={
"done": True,
"totalSize": 0,
"records": [],
},
)
with pytest.raises(CumulusCIException):
task()
@responses.activate
def test_create_permsetlicense_raises(self):
task = create_task(
AssignPermissionSetLicenses,
{
"api_names": "PermSetLicense1,PermSetLicense2,PermSetLicense3",
},
)
responses.add(
method="GET",
url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2C%28SELECT+PermissionSetLicenseId+FROM+PermissionSetLicenseAssignments%29+FROM+User+WHERE+Username+%3D+%27test-cci%40example.com%27",
status=200,
json={
"done": True,
"totalSize": 1,
"records": [
{
"Id": "005000000000000",
"PermissionSetLicenseAssignments": {
"done": True,
"totalSize": 1,
"records": [{"PermissionSetLicenseId": "0PL000000000000"}],
},
}
],
},
)
responses.add(
method="GET",
url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2CDeveloperName+FROM+PermissionSetLicense+WHERE+DeveloperName+IN+%28%27PermSetLicense1%27%2C+%27PermSetLicense2%27%2C+%27PermSetLicense3%27%29",
status=200,
json={
"done": True,
"totalSize": 1,
"records": [
{
"Id": "0PL000000000000",
"DeveloperName": "PermSetLicense1",
},
{
"Id": "0PL000000000001",
"DeveloperName": "PermSetLicense2",
},
],
},
)
with pytest.raises(CumulusCIException):
task()
class TestCreatePermissionSetGroup:
@responses.activate
def test_create_permsetgroup(self):
task = create_task(
AssignPermissionSetGroups,
{
"api_names": "PermSetGroup1,PermSetGroup2",
},
)
responses.add(
method="GET",
url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2C%28SELECT+PermissionSetGroupId+FROM+PermissionSetAssignments%29+FROM+User+WHERE+Username+%3D+%27test-cci%40example.com%27",
status=200,
json={
"done": True,
"totalSize": 1,
"records": [
{
"Id": "005000000000000",
"PermissionSetAssignments": {
"done": True,
"totalSize": 1,
"records": [{"PermissionSetGroupId": "0PG000000000000"}],
},
}
],
},
)
responses.add(
method="GET",
url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2CDeveloperName+FROM+PermissionSetGroup+WHERE+DeveloperName+IN+%28%27PermSetGroup1%27%2C+%27PermSetGroup2%27%29",
status=200,
json={
"done": True,
"totalSize": 1,
"records": [
{
"Id": "0PG000000000000",
"DeveloperName": "PermSetGroup1",
},
{
"Id": "0PG000000000001",
"DeveloperName": "PermSetGroup2",
},
],
},
)
responses.add(
method="POST",
url=f"{task.org_config.instance_url}/services/data/v50.0/sobjects/PermissionSetAssignment/",
status=200,
json={"id": "0Pa000000000001", "success": True, "errors": []},
)
task()
assert len(responses.calls) == 3
assert "0PG000000000001" in responses.calls[2].request.body
@responses.activate
def test_create_permsetgroup__alias(self):
task = create_task(
AssignPermissionSetGroups,
{
"api_names": "PermSetGroup1,PermSetGroup2",
"user_alias": "test",
},
)
responses.add(
method="GET",
url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2C%28SELECT+PermissionSetGroupId+FROM+PermissionSetAssignments%29+FROM+User+WHERE+Alias+%3D+%27test%27",
status=200,
json={
"done": True,
"totalSize": 1,
"records": [
{
"Id": "005000000000000",
"PermissionSetAssignments": {
"done": True,
"totalSize": 1,
"records": [{"PermissionSetGroupId": "0PG000000000000"}],
},
}
],
},
)
responses.add(
method="GET",
url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2CDeveloperName+FROM+PermissionSetGroup+WHERE+DeveloperName+IN+%28%27PermSetGroup1%27%2C+%27PermSetGroup2%27%29",
status=200,
json={
"done": True,
"totalSize": 1,
"records": [
{
"Id": "0PG000000000000",
"DeveloperName": "PermSetGroup1",
},
{
"Id": "0PG000000000001",
"DeveloperName": "PermSetGroup2",
},
],
},
)
responses.add(
method="POST",
url=f"{task.org_config.instance_url}/services/data/v50.0/sobjects/PermissionSetAssignment/",
status=200,
json={"id": "0Pa000000000001", "success": True, "errors": []},
)
task()
assert len(responses.calls) == 3
assert "0PG000000000001" in responses.calls[2].request.body
@responses.activate
def test_create_permsetgroup__alias_raises(self):
task = create_task(
AssignPermissionSetGroups,
{
"api_names": "PermSetGroup1,PermSetGroup2",
"user_alias": "test",
},
)
responses.add(
method="GET",
url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2C%28SELECT+PermissionSetGroupId+FROM+PermissionSetAssignments%29+FROM+User+WHERE+Alias+%3D+%27test%27",
status=200,
json={
"done": True,
"totalSize": 0,
"records": [],
},
)
with pytest.raises(CumulusCIException):
task()
@responses.activate
def test_create_permsetgroup_raises(self):
task = create_task(
AssignPermissionSetGroups,
{
"api_names": "PermSetGroup1,PermSetGroup2,PermSetGroup3",
},
)
responses.add(
method="GET",
url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2C%28SELECT+PermissionSetGroupId+FROM+PermissionSetAssignments%29+FROM+User+WHERE+Username+%3D+%27test-cci%40example.com%27",
status=200,
json={
"done": True,
"totalSize": 1,
"records": [
{
"Id": "005000000000000",
"PermissionSetAssignments": {
"done": True,
"totalSize": 1,
"records": [{"PermissionSetGroupId": "0PG000000000000"}],
},
}
],
},
)
responses.add(
method="GET",
url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2CDeveloperName+FROM+PermissionSetGroup+WHERE+DeveloperName+IN+%28%27PermSetGroup1%27%2C+%27PermSetGroup2%27%2C+%27PermSetGroup3%27%29",
status=200,
json={
"done": True,
"totalSize": 1,
"records": [
{
"Id": "0PG000000000000",
"DeveloperName": "PermSetGroup1",
},
{
"Id": "0PG000000000001",
"DeveloperName": "PermSetGroup2",
},
],
},
)
with pytest.raises(CumulusCIException):
task()
```
#### File: CumulusCI/integration_tests/conftest.py
```python
from pytest import fixture
@fixture(scope="session")
def fallback_orgconfig(request):
def fallback_orgconfig():
raise AssertionError("--org orgname is required for integration tests.")
return fallback_orgconfig
```
|
{
"source": "jdschoneman/c19_plotting",
"score": 2
}
|
#### File: c19_plotting/plotting/plot_state_data.py
```python
import os
import numpy as np
from scipy.integrate import solve_ivp
from scipy.optimize import fsolve
import matplotlib.pyplot as plt
from datetime import date
from scipy.signal import medfilt
from read_data import get_data_ctrack, get_data_ihme, format_date_ihme
def intfun(s):
try:
return int(s)
except ValueError:
return 0
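# e.g. intfun('12') -> 12, intfun('') -> 0 (non-numeric strings fall back to 0)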
# Select states and set data dates for display
#state = 'NY'
#state_long = 'New York'
#state = 'GA'
#state_long = 'Georgia'
#state = 'KY'
#state_long = 'Kentucky'
#state = 'CA'
#state_long = 'California'
#state = 'WI'
#state_long = 'Wisconsin'
#ylpct = [0., 15.]
#state = 'IA'
#state_long = 'Iowa'
state = 'AL'
state_long = 'Alabama'
#state = 'OR'
#state_long = 'Oregon'
#state = 'FL'
#state_long = 'Florida'
#ylpct = [0.,25.]
#state = 'MI'
#state_long = 'Michigan'
#state = 'WA'
#state_long = 'Washington'
#state = 'DC'
#state_long = 'District of Columbia'
#state = 'NJ'
#state_long = 'New Jersey'
#state = 'OK'
#state_long = 'Oklahoma'
#state = 'SD'
#state_long = 'South Dakota'
# TODO: Have to add all state data together for the covid tracker data
#state = 'US'
#state_long = 'US'
#state = 'TX'
#state_long = 'Texas'
#state = 'GA'
#state_long = 'Georgia'
#state = 'MN'
#state_long = 'Minnesota'
#state = 'CO'
#state_long = 'Colorado'
ylpct = [0., 30.]
# Set files which we're loading from and set data dates for display
data_filename = r'..\data\covid19_tracker\states-daily_20200504.csv'
data_date = '04 May'
#model_fname = r'..\data\ihme\2020_03_31.1\Hospitalization_all_locs.csv'
#project_date = '31 March'
#model_fname = r'..\data\ihme\2020_04_12.02\Hospitalization_all_locs.csv'
#project_date = '13 April'
model_fname = r'..\data\ihme\2020_04_16.05\Hospitalization_all_locs.csv'
project_date = '17 April'
# When to stop the plotting
start_date = '20200401'
stop_date = '20200510'
# Which plots to make
plot_testing = True
plot_hosp_death = True
today = date.today()
# Load data and format
data = get_data_ctrack(state, data_filename)
dates = data['date']
start_date_ind = list(dates).index(start_date)
dates = dates[start_date_ind:]
pos = data['positive']
neg = data['negative']
hosp = data['hospitalizedCurrently']
icu = data['inIcuCurrently']
vent = data['onVentilatorCurrently']
death = data['death']
date_inds = range(len(dates))
dpos = np.diff(pos, prepend = 0)
dneg = np.diff(neg, prepend = 0)
dhosp = np.diff(hosp, prepend = 0.)
ddhosp = np.diff(dhosp, prepend = 0)
ddeath = np.diff(death, prepend = 0)
pos = pos[start_date_ind:]
neg = neg[start_date_ind:]
hosp = hosp[start_date_ind:]
death = death[start_date_ind:]
dpos = dpos[start_date_ind:]
dneg = dneg[start_date_ind:]
dhosp = dhosp[start_date_ind:]
ddeath = ddeath[start_date_ind:]
xticks = date_inds[::4]
xticklabels = ['%s/%s' % (s[-3], s[-2:]) for s in dates[::4]]
# Load ihme data
data_ihme = get_data_ihme(model_fname)[state_long]
dates_ihme = [format_date_ihme(s) for s in data_ihme['date']]
# Trim to desired range
start_ihme = dates_ihme.index(start_date)
stop_ihme = dates_ihme.index(stop_date)
dates_ihme = dates_ihme[start_ihme:stop_ihme]
date_inds_ihme = range(len(dates_ihme))
dhosp_ihme_m, dhosp_ihme_l, dhosp_ihme_u = (data_ihme['admis_mean'][start_ihme:stop_ihme],
data_ihme['admis_lower'][start_ihme:stop_ihme],
data_ihme['admis_upper'][start_ihme:stop_ihme])
hosp_ihme_m, hosp_ihme_l, hosp_ihme_u = (data_ihme['allbed_mean'][start_ihme:stop_ihme],
data_ihme['allbed_lower'][start_ihme:stop_ihme],
data_ihme['allbed_upper'][start_ihme:stop_ihme])
death_ihme_m, death_ihme_l, death_ihme_u = (data_ihme['totdea_mean'][start_ihme:stop_ihme],
data_ihme['totdea_lower'][start_ihme:stop_ihme],
data_ihme['totdea_upper'][start_ihme:stop_ihme])
ddeath_ihme_m, ddeath_ihme_l, ddeath_ihme_u = (data_ihme['deaths_mean'][start_ihme:stop_ihme],
data_ihme['deaths_lower'][start_ihme:stop_ihme],
data_ihme['deaths_upper'][start_ihme:stop_ihme])
xticks = date_inds_ihme[::4]
xticklabels = ['%s/%s' % (s[-3], s[-2:]) for s in dates_ihme[::4]]
#%% Data on tests
if plot_testing:
fig, ax = plt.subplots(1, 3, figsize = (17, 5))
gray = 0.3*np.array([1, 1, 1])
lightblue = [0.3, 0.3, 0.8]
darkblue = [0.2, 0.2, 0.6]
red = [0.6, 0.2, 0.2]
lightred = [0.8, 0.4, 0.4]
dtotal = dpos + dneg
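    # Note: scipy.signal.medfilt computes a running *median* over a 7-sample
    # window; the plot legends below label this curve as a "7 Day Moving Average".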
avg_7 = medfilt(dtotal, 7)
ax[0].plot(dates, dtotal, 'o', label = 'Total Tests',
color = darkblue, markerfacecolor = lightblue)
ax[0].plot(dates, avg_7, 'k--', label = '7 Day Moving Average')
ax[0].set_xticks(xticks)
ax[0].set_xticklabels(xticklabels)
ax[0].set_ylabel('Number of Tests', fontsize = 12, fontweight = 'bold')
ax[0].set_xlabel('Date', fontsize = 12, fontweight = 'bold')
ax[1].plot(dates, dpos, 'o', label = 'Positive Tests',
color = red, markerfacecolor = lightred)
avg_3 = medfilt(dpos, 3)
avg_7 = medfilt(dpos, 7)
# ax[1].plot(dates, avg_3, 'b--', label = '3 Day Moving Average')
ax[1].plot(dates, avg_7, 'k--', label = '7 Day Moving Average')
ax[1].set_xticks(xticks)
ax[1].set_xticklabels(xticklabels)
ax[1].set_ylabel('Number of Positives', fontsize = 12, fontweight = 'bold')
ax[1].set_xlabel('Date', fontsize = 12, fontweight = 'bold')
avg_7 = medfilt(100*dpos/dtotal, 7)
ax[2].plot(dates, avg_7, 'k--', label = '7 Day Moving Average')
ax[2].plot(dates, 100*dpos/dtotal, 'o', color = 'k',
markerfacecolor = gray)
ax[2].set_xticks(xticks)
ax[2].set_xticklabels(xticklabels)
ax[2].set_xlabel('Date', fontweight = 'bold', fontsize = 12)
ax[2].set_ylabel('Percentage of Positive Tests',
fontweight = 'bold', fontsize = 12)
ax[0].set_title('All Tests', fontsize = 12, fontweight = 'bold')
ax[1].set_title('Positive Tests', fontsize = 12, fontweight = 'bold')
ax[2].set_title('Percentage of Tests Positive', fontsize = 12, fontweight = 'bold')
yl0 = ax[0].get_ylim()
yl1 = ax[1].get_ylim()
yl2 = ax[2].get_ylim()
ax[0].set_ylim([-5, yl0[1]])
ax[0].set_xlim([0, len(dates)])
ax[1].set_ylim([-5, yl1[1]])
ax[1].set_xlim([0, len(dates)])
ax[1].legend()
if ylpct is None:
ax[2].set_ylim([-5, yl2[1]])
else:
ax[2].set_ylim(ylpct)
ax[2].set_xlim([0, len(dates)])
fig.suptitle('%s: All Tests, Positive Tests, and Positive Test Percentages' %
state_long, fontsize = 14, fontweight = 'bold')
impath = '../images/test_data'
imname = '%s_data%s_%s.png' % (state_long, data_date, str(today))
plt.savefig(os.path.join(impath, imname), bbox_inches = 'tight')
#%% Show info on hospitalizations and deaths
if plot_hosp_death:
impath = '../images/ihme_compare'
imname = '%s_data%s_project%s_%s.png' % (state_long, data_date, project_date, str(today))
lightblue = [0.3, 0.3, 0.8]
darkblue = [0.2, 0.2, 0.6]
fig, ax = plt.subplots(2, 2, figsize = (12, 6))
ax = ax.flatten()
ax[0].plot(dates, hosp, 'o', label = 'Reported',
color = darkblue, markerfacecolor = lightblue)
ax[0].plot(dates_ihme, hosp_ihme_m, 'k-', label = 'IHME Projected [Mean]')
ax[0].plot(dates_ihme, hosp_ihme_l, 'r--', label = 'IHME Projected [Lower CI]')
ax[0].plot(dates_ihme, hosp_ihme_u, 'r--', label = 'IHME Projected [Upper CI]')
ax[0].set_xlim(0, date_inds_ihme[-1])
ax[0].set_xticks(xticks)
ax[0].set_xticklabels(xticklabels)
ax[0].legend()
ax[0].set_ylabel('Total Hospitalized', fontsize = 12, fontweight = 'bold')
ax[0].set_title('Hospitalizations', fontsize = 12, fontweight = 'bold')
ax[2].plot(dates, dhosp, 'o',
color = darkblue, markerfacecolor = lightblue)
ax[2].plot(dates_ihme, dhosp_ihme_m, 'k-')
ax[2].plot(dates_ihme, dhosp_ihme_l, 'r--')
ax[2].plot(dates_ihme, dhosp_ihme_u, 'r--')
ax[2].set_xlim(0, date_inds_ihme[-1])
ax[2].set_xticks(xticks)
ax[2].set_xticklabels(xticklabels)
ax[2].set_ylabel('New Hospitalized', fontsize = 12, fontweight = 'bold')
ax[2].set_xlabel('Date', fontsize = 12, fontweight = 'bold')
ax[1].plot(dates, death, 'o', label = 'Reported',
color = darkblue, markerfacecolor = lightblue)
ax[1].plot(dates_ihme, death_ihme_m, 'k-', label = 'IHME Projected [Mean]')
ax[1].plot(dates_ihme, death_ihme_l, 'r--', label = 'IHME Projected [Lower CI]')
ax[1].plot(dates_ihme, death_ihme_u, 'r--', label = 'IHME Projected [Upper CI]')
ax[1].set_xlim(0, date_inds_ihme[-1])
ax[1].set_xticks(xticks)
ax[1].set_xticklabels(xticklabels)
ax[1].legend()
ax[1].set_ylabel('Total Deaths', fontsize = 12, fontweight = 'bold')
ax[1].set_title('Deaths', fontsize = 12, fontweight = 'bold')
ax[3].plot(dates, ddeath, 'o',
color = darkblue, markerfacecolor = lightblue)
ax[3].plot(dates_ihme, ddeath_ihme_m, 'k-')
ax[3].plot(dates_ihme, ddeath_ihme_l, 'r--')
ax[3].plot(dates_ihme, ddeath_ihme_u, 'r--')
ax[3].set_xlim(0, date_inds_ihme[-1])
ax[3].set_xticks(xticks)
ax[3].set_xticklabels(xticklabels)
ax[3].set_ylabel('New Deaths', fontsize = 12, fontweight = 'bold')
ax[3].set_xlabel('Date', fontsize = 12, fontweight = 'bold')
# plt.tight_layout()
fig.suptitle('%s: Reported Data [%s] vs IHME Projections [%s]' %
(state_long, data_date, project_date), fontsize = 14, fontweight = 'bold')
plt.savefig(os.path.join(impath, imname), bbox_inches = 'tight')
```
#### File: c19_plotting/plotting/sweden_comparisons.py
```python
import os
import numpy as np
from scipy.integrate import solve_ivp
from scipy.optimize import fsolve
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from datetime import date
from read_data import get_data_c19, format_date_c19, get_data_ctrack
# Class-based method for reading/tracking/formatting data associated with a country
class country_data:
def __init__(self, name, population, datafile):
"""
        Create a country and load its data from the JHU CSSE COVID-19 time series. Enter
population in millions.
"""
self.name = name
self.population = population
self.datafile = datafile
self.death, dates = get_data_c19(name, datafile)
self.dates = [format_date_c19(s) for s in dates]
self.n_death = None
self.trim_death = None
self.trim_dates = None
self.trim_days = None
def trim_to_first(self, n_death):
"""
Trims dataset to start from day with first N deaths. Saves results
to trim_death, trim_dates, and trim_days properties.
"""
self.n_death = n_death
try:
start_ind = np.where(self.death >= n_death)[0][0]
except IndexError:
start_ind = -1
self.trim_death = self.death[start_ind:]
self.trim_dates = self.dates[start_ind:]
self.trim_days = np.arange(len(self.trim_death))
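    # e.g. with n_death = 10 and a death series [0, 3, 12, 20], trim_death
    # becomes [12, 20] and trim_days becomes [0, 1]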
def plot_trim(self, ax = None, dark_lines = False, label = False, **kwargs):
"""
Plots trimmed and normalized (per million) on an optionally supplied
axis.
"""
if ax is None:
fig, ax = plt.subplots(1, 1)
if label is False:
# display_date = '%s-%s' % (self.trim_dates[0][5], self.trim_dates[0][6:])
label = '%s [Pop %.1fM]' % (self.name, self.population)
g = ax.plot(self.trim_days, self.trim_death/self.population,
label = label, **kwargs)
if dark_lines:
color = colors.to_rgb(g[0].get_color())
print(color)
g[0].set_color([0.5*c for c in color])
g[0].set_markerfacecolor(color)
def plot_dtrim(self, ax = None, dark_lines = False, label = False, **kwargs):
"""
Plots trimmed and normalized new deaths per day (per million) on an
optionally supplied axis.
"""
if ax is None:
fig, ax = plt.subplots(1, 1)
if label is False:
label = '%s [%s]' % (self.name, self.trim_dates[0])
g = ax.plot(self.trim_days, np.diff(self.trim_death, prepend = 0)/self.population,
label = label, **kwargs)
if dark_lines:
color = colors.to_rgb(g[0].get_color())
print(color)
g[0].set_color([0.5*c for c in color])
g[0].set_markerfacecolor(color)
class state_data(country_data):
def __init__(self, name, population, datafile):
"""
        Create a state and load its data from the COVID Tracking Project daily file. Enter
population in millions.
"""
self.name = name
self.population = population
self.datafile = datafile
data = get_data_ctrack(name, datafile)
self.death = data['death']
self.dates = data['date']
self.n_death = None
self.trim_death = None
self.trim_dates = None
self.trim_days = None
country_pops = {'Denmark': 5.6,
'Norway': 5.4,
'Netherlands': 17.3,
'United Kingdom': 66.7,
'France': 67.,
'Italy': 60.4,
'Spain': 47.,
'Sweden': 10.2,
'Germany': 83.02,
'Finland': 5.52,
'US': 328.2,
'Belgium': 11.46,
'Canada': 37.59}
state_pops = {'CA': 39.51,
'TX': 28.99,
'FL': 21.48,
'NY': 20.2,
'PA': 12.8,
'IL': 12.67,
'OH': 11.69,
'GA': 10.62,
'NC': 10.49,
'MI': 9.9,
'NJ': 8.88,
'VA': 8.54,
'WA': 7.61,
'AZ': 7.28,
'MA': 6.95,
'TN': 6.83,
'IN': 6.72,
'MO': 6.14,
'MD': 6.05,
'WI': 5.82,
'CO': 5.76,
'MN': 5.64,
'SC': 5.45,
'AL': 4.90,
'LA': 4.65,
'KY': 4.47,
'OR': 4.22,
'OK': 3.96,
'CT': 3.57,
'UT': 3.2,
'IA': 3.16,
'NV': 3.08,
'AR': 3.02,
'MS': 2.98,
'KS': 2.91,
'NM': 2.10,
'NE': 1.93,
'WV': 1.76,
'ID': 1.79,
'HI': 1.42,
'NH': 1.36,
'ME': 1.34,
'MT': 1.01,
'RI': 1.06,
'DE': 0.97,
'SD': 0.88,
'ND': 0.76,
'AK': 0.73,
'DC': 0.71,
'VT': 0.62,
'WY': 0.78}
# Load data for Sweden
datapath = r'..\data\COVID-19\csse_covid_19_data\csse_covid_19_time_series'
dataname = 'time_series_covid19_deaths_global.csv'
country_filename = os.path.join(datapath, dataname)
state_filename = r'..\data\covid19_tracker\states-daily_20200429.csv'
state_data_date = '29 April'
country_data_date = '1 May'
n_death = 10
sweden = country_data('Sweden', country_pops['Sweden'], country_filename)
sweden.trim_to_first(n_death)
# Make lists of countries, populations, and plot styles
countries = ['Denmark', 'United Kingdom', 'Spain', 'Italy', 'Germany', 'Sweden']
highlight_countries = {'Canada': {'color': 'r', 'linestyle': '-'},
'United Kingdom': {'color': 'b', 'linestyle': '-'},
'US': {'color': 'k', 'linestyle': '-.'},
'Italy': {'color': 'r', 'linestyle': '--'},
'Netherlands': {'color': 'b', 'linestyle': '--'},
'Sweden': {'color': 'k', 'linestyle': '--'}}
# States to plot and their styles
highlight_states = {'NY': {'color': 'r', 'linestyle': '-'},
'WA': {'color': 'b', 'linestyle': '-'},
'CA': {'color': 'k', 'linestyle': '-'},
'WI': {'color': 'r', 'linestyle': '--'},
'GA': {'color': 'b', 'linestyle': '--'},
'FL': {'color': 'k', 'linestyle': '--'}}
state_objs = list()
for state in highlight_states.keys():
cdata = state_data(state, state_pops[state], state_filename)
cdata.trim_to_first(n_death)
state_objs.append(cdata)
state_obj_dict = {s.name: s for s in state_objs}
country_objs = list()
for country in highlight_countries.keys():
cdata = country_data(country, country_pops[country], country_filename)
cdata.trim_to_first(n_death)
country_objs.append(cdata)
country_obj_dict = {s.name: s for s in country_objs}
#%%
xl = [0, 70]
yl = [0, 1000]
fig, ax = plt.subplots(1, 2, figsize = (12, 6))
for cdata in state_objs:
cdata.plot_trim(ax[0], linewidth = 2, **highlight_states[cdata.name])
ax[0].legend()
for cdata in country_objs:
cdata.plot_trim(ax[1], **highlight_countries[cdata.name])
ax[1].legend()
ax[0].set_xlim(xl)
ax[1].set_xlim(xl)
ax[0].set_ylim(yl)
ax[1].set_ylim(yl)
ax[0].grid()
ax[1].grid()
ax[0].set_ylabel('Covid-19 Attributed Deaths Per Million', fontsize = 12, fontweight = 'bold')
ax[0].set_xlabel('Days Since 10 Deaths', fontsize = 12, fontweight = 'bold')
ax[1].set_xlabel('Days Since 10 Deaths', fontsize = 12, fontweight = 'bold')
plt.suptitle('Population-Adjusted Covid-19 Deaths vs. Days Since 10 Deaths\n' +
'US Data per Covid Tracking Project [%s]; European Data per COVID-19 Github [%s]' % (state_data_date, country_data_date),
fontsize = 12, fontweight = 'bold')
figname = '../images/pop_comparisons_us%s_europe%s.png' % (state_data_date, country_data_date)
plt.savefig(figname, bbox_inches = 'tight')
#ax.set_title('Sweden vs. US States; Population-Adjusted Fatalities [State Data %s; Swedish Data %s]\n%i of 50 States Have Exceed %i Deaths'
# % (state_data_date, country_data_date, states_over_n, n_death),
# fontsize = 13,
# fontweight = 'bold')
#
#plt.tight_layout()
```
|
{
"source": "jdsdba/alta3research-python-cert",
"score": 2
}
|
#### File: jdsdba/alta3research-python-cert/CallCthulhuChaseData.py
```python
import CallCthulhuChaseObjects as cobj
def createdata():
headstart = 3
    # add stats for both pursuers and the pursued
creatures = []
creatures.append(cobj.Creature(name = 'Scooby', mov = 9, con = 50, dex = 75, luck = 90, jump = 60, climb = 20, ispursuer = False, symbol = 's'))
creatures.append(cobj.Creature(name = 'Shaggy', mov = 8, con = 40, dex = 65, luck = 80, jump = 40, climb = 30, ispursuer = False, symbol = 'h'))
creatures.append(cobj.Creature(name = 'Velma', mov = 7, con = 40, dex = 45, luck = 70, jump = 30, climb = 35, ispursuer = False, symbol = 'v'))
creatures.append(cobj.Creature(name = 'Daphne', mov = 8, con = 70, dex = 75, luck = 60, jump = 65, climb = 75, ispursuer = False, symbol = 'd'))
creatures.append(cobj.Creature(name = 'Fred', mov = 8, con = 90, dex = 65, luck = 60, jump = 60, climb = 65, ispursuer = False, symbol = 'f'))
creatures.append(cobj.Creature(name = 'Zombie', mov = 6, con = 80, dex = 35, luck = 40, jump = 10, climb = 20, ispursuer = True, symbol = 'Z'))
creatures.append(cobj.Creature(name = 'Mummy', mov = 6, con = 80, dex = 35, luck = 40, jump = 10, climb = 20, ispursuer = True, symbol = 'M'))
creatures.append(cobj.Creature(name = 'Ghoul', mov = 9, con = 65, dex = 65, luck = 40, jump = 32, climb = 32, ispursuer = True, symbol = 'G'))
creatures.append(cobj.Creature(name = 'Deep One', mov = 8, con = 50, dex = 55, luck = 30, jump = 22, climb = 22, ispursuer = True, symbol = 'D'))
creatures.append(cobj.Creature(name = 'Dark Young', mov = 8, con = 80, dex = 85, luck = 30, jump = 40, climb = 40, ispursuer = True, symbol = 'Y'))
# defines the one dimensional chase map
mapitems = []
mapitems.append(cobj.MapContents(name = 'Grassland', contenttype = 'empty', skill = 'none'))
mapitems.append(cobj.MapContents(name = 'Grassland', contenttype = 'empty', skill = 'none'))
mapitems.append(cobj.MapContents(name = 'Grassland', contenttype = 'empty', skill = 'none'))
mapitems.append(cobj.MapContents(name = 'Grassland', contenttype = 'empty', skill = 'none'))
mapitems.append(cobj.MapContents(name = 'Grassland', contenttype = 'empty', skill = 'none'))
mapitems.append(cobj.MapContents(name = 'Grassland', contenttype = 'empty', skill = 'none'))
mapitems.append(cobj.MapContents(name = 'Grassland', contenttype = 'empty', skill = 'none'))
mapitems.append(cobj.MapContents(name = 'Grassland', contenttype = 'empty', skill = 'none'))
mapitems.append(cobj.MapContents(name = 'Grassland', contenttype = 'empty', skill = 'none'))
mapitems.append(cobj.MapContents(name = 'Grassland', contenttype = 'empty', skill = 'none'))
mapitems.append(cobj.MapContents(name = 'Grassland', contenttype = 'empty', skill = 'none'))
mapitems.append(cobj.MapContents(name = 'Grassland', contenttype = 'empty', skill = 'none'))
mapitems.append(cobj.MapContents(name = 'Grassland', contenttype = 'empty', skill = 'none'))
mapitems.append(cobj.MapContents(name = 'Jungle', contenttype = 'empty', skill = 'none'))
mapitems.append(cobj.MapContents(name = 'Jungle', contenttype = 'empty', skill = 'none'))
mapitems.append(cobj.MapContents(name = 'Jungle', contenttype = 'empty', skill = 'none'))
mapitems.append(cobj.MapContents(name = 'Craig', contenttype = 'hill', skill = 'con'))
mapitems.append(cobj.MapContents(name = 'Jungle', contenttype = 'empty', skill = 'none'))
mapitems.append(cobj.MapContents(name = 'Jungle', contenttype = 'empty', skill = 'none'))
mapitems.append(cobj.MapContents(name = 'Jungle', contenttype = 'empty', skill = 'none'))
mapitems.append(cobj.MapContents(name = 'Craig', contenttype = 'cliff', skill = 'climb'))
mapitems.append(cobj.MapContents(name = 'Jungle', contenttype = 'empty', skill = 'none'))
mapitems.append(cobj.MapContents(name = 'Jungle', contenttype = 'empty', skill = 'none'))
mapitems.append(cobj.MapContents(name = 'Jungle', contenttype = 'pit', skill = 'jump'))
mapitems.append(cobj.MapContents(name = 'Jungle', contenttype = 'empty', skill = 'none'))
mapitems.append(cobj.MapContents(name = 'Jungle', contenttype = 'pit', skill = 'jump'))
return(creatures, mapitems, headstart) # returns data to main
```
|
{
"source": "jd-s/DocClustering",
"score": 3
}
|
#### File: src/DocClustering/testClu.py
```python
import random
def clustering(run):
newcluster=[]
for i in range(0,run):
rand=random.randint(0, len(cluster.G.nodes()))
if str(rand) in cluster.docList:
if cluster.docList[str(rand)].externalid.rstrip() in list1.list:
colorDoc = dict((key, value) for key, value in cluster.docList.items() if value.color == cluster.docList[str(rand)].color)
for doc in colorDoc.values():
if not doc.externalid.rstrip() in newcluster:
newcluster.append (doc.externalid.rstrip())
else:
colorDoc = dict((key, value) for key, value in cluster.docList.items() if value.color == cluster.docList[str(rand)].color)
for doc in colorDoc.values():
if doc.externalid.rstrip() in newcluster:
newcluster.remove (doc.externalid.rstrip())
rand=random.randint(0, len(cluster2.G.nodes()))
if str(rand) in cluster2.docList:
if cluster2.docList[str(rand)].externalid.rstrip() in list1.list:
colorDoc = dict((key, value) for key, value in cluster2.docList.items() if value.color == cluster2.docList[str(rand)].color)
for doc in colorDoc.values():
if not doc.externalid.rstrip() in newcluster:
newcluster.append (doc.externalid.rstrip())
else:
colorDoc = dict((key, value) for key, value in cluster2.docList.items() if value.color == cluster2.docList[str(rand)].color)
for doc in colorDoc.values():
if doc.externalid.rstrip() in newcluster:
newcluster.remove (doc.externalid.rstrip())
return newcluster
# CALL
for i in range(1,60):
count = 0
newcluster = clustering(i)
for doc in newcluster:
if doc in list1.list:
count += 1
#else:
#print ("??")
ratio = 0
if len(newcluster)>0:
ratio = count/len(newcluster)
ratio2 = 0
if count > 0:
ratio2 = count/len(list1.list)
print (str(i)+" & "+ str(len(newcluster))+ " & "+str(count) + " &" + str(ratio) + " & "+ str(len(list1.list)) +" & "+str(ratio2))
```
|
{
"source": "jdsecurity/binjitsu",
"score": 2
}
|
#### File: pwnlib/elf/corefile.py
```python
import collections
import ctypes
import elftools
from elftools.common.utils import roundup, struct_parse
from elftools.common.py3compat import bytes2str
from elftools.construct import CString
from ..context import context
from ..log import getLogger
from .datatypes import *
from .elf import ELF
from ..tubes.tube import tube
log = getLogger(__name__)
types = {
'i386': elf_prstatus_i386,
'amd64': elf_prstatus_amd64,
}
# Slightly modified copy of the pyelftools version of the same function,
# until they fix this issue:
# https://github.com/eliben/pyelftools/issues/93
def iter_notes(self):
""" Iterates the list of notes in the segment.
"""
offset = self['p_offset']
end = self['p_offset'] + self['p_filesz']
while offset < end:
note = struct_parse(
self._elfstructs.Elf_Nhdr,
self.stream,
stream_pos=offset)
note['n_offset'] = offset
offset += self._elfstructs.Elf_Nhdr.sizeof()
self.stream.seek(offset)
# n_namesz is 4-byte aligned.
disk_namesz = roundup(note['n_namesz'], 2)
note['n_name'] = bytes2str(
CString('').parse(self.stream.read(disk_namesz)))
offset += disk_namesz
desc_data = bytes2str(self.stream.read(note['n_descsz']))
note['n_desc'] = desc_data
offset += roundup(note['n_descsz'], 2)
note['n_size'] = offset - note['n_offset']
yield note
class Mapping(object):
def __init__(self, name, start, stop, flags):
self.name=name
self.start=start
self.stop=stop
self.size=stop-start
self.flags=flags
@property
def permstr(self):
flags = self.flags
return ''.join(['r' if flags & 4 else '-',
'w' if flags & 2 else '-',
'x' if flags & 1 else '-',
'p'])
def __str__(self):
return '%x-%x %s %x %s' % (self.start,self.stop,self.permstr,self.size,self.name)
def __repr__(self):
return '%s(%r, %#x, %#x, %#x, %#x)' % (self.__class__.__name__,
self.name,
self.start,
self.stop,
self.size,
self.flags)
def __int__(self):
return self.start
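# Minimal illustration of Mapping formatting (hypothetical addresses):
#   m = Mapping('libc.so.6', 0x7f0000000000, 0x7f0000001000, 5)  # flags 5 = r-x
#   str(m) -> '7f0000000000-7f0000001000 r-xp 1000 libc.so.6'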
class Core(ELF):
"""Core(*a, **kw) -> Core
Enhances the inforation available about a corefile (which is an extension
of the ELF format) by permitting extraction of information about the mapped
data segments, and register state.
Registers can be accessed directly, e.g. via ``core_obj.eax``.
Mappings can be iterated in order via ``core_obj.mappings``.
"""
def __init__(self, *a, **kw):
self.prstatus = None
self.files = {}
self.mappings = []
self.stack = None
self.env = {}
try:
super(Core, self).__init__(*a, **kw)
except IOError:
log.warning("No corefile. Have you set /proc/sys/kernel/core_pattern?")
raise
self.load_addr = 0
self._address = 0
if not self.elftype == 'CORE':
log.error("%s is not a valid corefile" % e.file.name)
if not self.arch in ('i386','amd64'):
log.error("%s does not use a supported corefile architecture" % e.file.name)
prstatus_type = types[self.arch]
with log.waitfor("Parsing corefile...") as w:
self._load_mappings()
for segment in self.segments:
if not isinstance(segment, elftools.elf.segments.NoteSegment):
continue
for note in iter_notes(segment):
# Try to find NT_PRSTATUS. Note that pyelftools currently
# mis-identifies the enum name as 'NT_GNU_ABI_TAG'.
if note.n_descsz == ctypes.sizeof(prstatus_type) and \
note.n_type == 'NT_GNU_ABI_TAG':
self.NT_PRSTATUS = note
self.prstatus = prstatus_type.from_buffer_copy(note.n_desc)
# Try to find the list of mapped files
if note.n_type == constants.NT_FILE:
with context.local(bytes=self.bytes):
self._parse_nt_file(note)
# Try to find the auxiliary vector, which will tell us
# where the top of the stack is.
if note.n_type == constants.NT_AUXV:
with context.local(bytes=self.bytes):
self._parse_auxv(note)
if self.stack and self.mappings:
for mapping in self.mappings:
if mapping.stop == self.stack:
mapping.name = '[stack]'
self.stack = mapping
with context.local(bytes=self.bytes, log_level='error'):
try:
self._parse_stack()
except ValueError:
# If there are no environment variables, we die by running
# off the end of the stack.
pass
def _parse_nt_file(self, note):
t = tube()
t.unrecv(note.n_desc)
count = t.unpack()
page_size = t.unpack()
starts = []
addresses = {}
for i in range(count):
start = t.unpack()
end = t.unpack()
ofs = t.unpack()
starts.append(start)
for i in range(count):
filename = t.recvuntil('\x00', drop=True)
start = starts[i]
for mapping in self.mappings:
if mapping.start == start:
mapping.name = filename
self.mappings = sorted(self.mappings, key=lambda m: m.start)
def _load_mappings(self):
for s in self.segments:
if s.header.p_type != 'PT_LOAD':
continue
mapping = Mapping(None,
s.header.p_vaddr,
s.header.p_vaddr + s.header.p_memsz,
s.header.p_flags)
self.mappings.append(mapping)
def _parse_auxv(self, note):
t = tube()
t.unrecv(note.n_desc)
for i in range(0, note.n_descsz, context.bytes * 2):
key = t.unpack()
value = t.unpack()
# The AT_EXECFN entry is a pointer to the executable's filename
# at the very top of the stack, followed by a word's with of
# NULL bytes. For example, on a 64-bit system...
#
# 0x7fffffffefe8 53 3d 31 34 33 00 2f 62 69 6e 2f 62 61 73 68 00 |S=14|3./b|in/b|ash.|
# 0x7fffffffeff8 00 00 00 00 00 00 00 00 |....|....| | |
if key == constants.AT_EXECFN:
self.at_execfn = value
value = value & ~0xfff
value += 0x1000
self.stack = value
def _parse_stack(self):
# AT_EXECFN is the start of the filename, e.g. '/bin/sh'
# Immediately preceding is a NULL-terminated environment variable string.
# We want to find the beginning of it
address = self.at_execfn-1
# Sanity check!
try:
assert self.u8(address) == 0
except AssertionError:
# Something weird is happening. Just don't touch it.
return
except ValueError:
# If the stack is not actually present in the coredump, we can't
# read from the stack. This will fail as:
# ValueError: 'seek out of range'
return
# Find the next NULL, which is 1 byte past the environment variable.
while self.u8(address-1) != 0:
address -= 1
# We've found the beginning of the last environment variable.
# We should be able to search up the stack for the envp[] array to
# find a pointer to this address, followed by a NULL.
last_env_addr = address
address &= ~(context.bytes-1)
while self.unpack(address) != last_env_addr:
address -= context.bytes
assert self.unpack(address+context.bytes) == 0
# We've successfully located the end of the envp[] array.
# It comes immediately after the argv[] array, which itself
# is NULL-terminated.
end_of_envp = address+context.bytes
while self.unpack(address - context.bytes) != 0:
address -= context.bytes
start_of_envp = address
        # Now we can fill in the environment more easily.
for env in range(start_of_envp, end_of_envp, context.bytes):
envaddr = self.unpack(env)
value = self.string(envaddr)
name, value = value.split('=', 1)
self.env[name] = envaddr + len(name) + 1
@property
def maps(self):
"""A printable string which is similar to /proc/xx/maps."""
return '\n'.join(map(str, self.mappings))
def getenv(self, name):
"""getenv(name) -> int
Read an environment variable off the stack, and return its address.
Arguments:
name(str): Name of the environment variable to read.
Returns:
The address of the environment variable.
"""
if name not in self.env:
log.error("Environment variable %r not set" % name)
return self.string(self.env[name]).split('=',1)[-1]
def __getattr__(self, attribute):
if self.prstatus:
if hasattr(self.prstatus, attribute):
return getattr(self.prstatus, attribute)
if hasattr(self.prstatus.pr_reg, attribute):
return getattr(self.prstatus.pr_reg, attribute)
return super(Core, self).__getattribute__(attribute)
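# Minimal usage sketch (assuming this class is exposed as `Core` and is constructed
# from a corefile path; names outside this file are illustrative only):
#
#     core = Core('./core')
#     print(core.maps)            # /proc/<pid>/maps-style listing of the mappings
#     home = core.getenv('HOME')  # looked up via the env dict parsed off the stack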
```
|
{
"source": "jdseel/myapp",
"score": 3
}
|
#### File: myapp/myapp/myapp.py
```python
from flask import Flask, render_template, request
import random
app = Flask(__name__)
greeting_list = ['Ciao', 'Hei', 'Salut', 'Hola', 'Nihao']
@app.route('/')
@app.route('/index')
def index():
return render_template('index.html')
@app.route('/user')
def user():
return render_template('user.html')
@app.route('/user/<string:username>')
def users(username):
#return "<h1>Hello %s</h1>"% username
return render_template('user.html', uname=username)
@app.route('/form')
def form():
return render_template('form-basics.html')
@app.route('/form-demo', methods=['GET','POST'])
def form_demo():
    if request.method == 'GET':
        return render_template('success.html')
    # POST: fall through to the same template so the view always returns a response
    return render_template('success.html')
if __name__ == '__main__':
app.run(debug=True, port=4500)
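# Quick manual check (assuming the app is started with `python myapp.py`):
#   curl http://127.0.0.1:4500/user/Alice    # renders user.html with uname='Alice'
#   curl http://127.0.0.1:4500/form          # renders form-basics.html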
```
|
{
"source": "jdselsor/limitaize-image-color-palette",
"score": 4
}
|
#### File: jdselsor/limitaize-image-color-palette/image_convert.py
```python
from PIL import Image
from math import pi, sqrt
import glob
import time
"""
Loads the color palette
Parameter
palette: The palette file to load.
Return
    A dict mapping color names to 3 element (r, g, b) tuples
"""
def load_color_palette (palette):
pal = {}
with open("./palettes/" + palette + ".pal") as file:
for line in file:
l = line.split()
pal[l[0]] = (int(l[len(l)-3].strip(',')), int(l[len(l)-2].strip(',')), int(l[len(l)-1].strip(',')))
# color = (int(l[len(l)-3].strip(',')), int(l[len(l)-2].strip(',')), int(l[len(l)-1].strip(',')))
# pal.append(color)
return pal
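# Expected ./palettes/<name>.pal line format, inferred from the parser above (a sketch,
# real palette files may differ): a colour name followed by comma-separated R, G, B values,
# e.g.
#   black        0,   0,   0
#   light_grey 200, 200, 200
# The name token becomes the dict key and the last three tokens become the RGB tuple.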
"""
Loads all the images in the images folder.
Parameter
extension: the file extension for the image. Default parameter is the jpg
Return
Returns a list of PIL Images.
"""
def load_images (extension = "jpg"):
images = []
for filename in glob.glob('./images/*.*'):
if filename[-3:] == extension:
img = Image.open(filename)
images.append(img)
return images
"""
Gets the color distance between color1 and color2.
Uses the formula from: https://en.wikipedia.org/wiki/Color_difference
Parameter
color1: A 3 element tuple where each element is a value representing a r, g, b value.
color2: A 3 element tuple where each element is a value representing a r, g, b value.
Return
The distance between the colors.
"""
def get_color_distance_rgb (color1, color2):
dR = color1[0] - color2[0]
dG = color1[1] - color2[1]
dB = color1[2] - color2[2]
    avg_red = (color1[0] + color2[0]) / 2  # part of the "redmean" variant, unused in this plain Euclidean distance
distance = sqrt (dR ** 2 + dG ** 2 + dB ** 2)
return distance
"""
Gets the distance between two colors in the Lab format.
Parameters
color1: a 3 element tuple in the lab color format.
color2: a 3 element tuple in the lab color format.
Return
    Returns the distance between the colors.
"""
def get_color_distance_lab (color1, color2):
da = color2[1] - color1[1]
db = color2[2] - color1[2]
dL = color2[0] - color1[0]
return sqrt(da ** 2 + db ** 2) + abs(dL)
"""
Converts a color to the closest color in the palette.
Parameter
    color: the color to convert to the closest color in the palette.
    palette: the list of colors in the palette.
Return
    The color in the palette that is closest to the given color.
"""
def convert_color (color, palette):
res = (-1, -1, -1)
dist = 999999999999
# threshold Testing
dRG = abs(color[0] - color[1])
dGB = abs(color[1] - color[2])
dRB = abs(color[0] - color[2])
threshold = 5
greys = []
nongreys = []
pal = []
for c in palette.keys():
if "grey" in c.lower():
greys.append(palette[c])
else:
nongreys.append(palette[c])
pal.append(palette[c])
for c in pal:
c1 = rgb2lab(color)
c2 = rgb2lab(c)
d = get_color_distance_lab(c1, c2)
if d < dist:
res = c
dist = d
# if dRG < threshold and dGB < threshold and dRB < threshold:
# for c in greys:
# c1 = rgb2lab(color)
# c2 = rgb2lab(c)
# d = get_color_distance_lab(c1, c2)
# if d < dist:
# res = c
# dist = d
# else:
# for c in nongreys:
# c1 = rgb2lab(color)
# c2 = rgb2lab(c)
# d = get_color_distance_lab(c1, c2)
# if d < dist:
# res = c
# dist = d
return res
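# Example: with a two-colour palette {'black': (0, 0, 0), 'white': (255, 255, 255)},
# convert_color((200, 200, 200), palette) returns (255, 255, 255), since light grey is
# closer to white than to black in Lab space.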
"""
Converts a color from the rgb format to the lab format.
From: https://stackoverflow.com/questions/13405956/convert-an-image-rgb-lab-with-python
Parameter
    color: an RGB color represented as a 3 element tuple.
Returns
    A color in Lab format as a 3 element tuple.
"""
def rgb2lab (color):
num = 0
RGB = [0, 0, 0]
for value in color:
value = float(value) / 255
if value > 0.04045:
value = ((value + 0.055) / 1.055) ** 2.4
else:
value = value / 12.92
RGB[num] = value * 100
num = num + 1
XYZ = [0, 0, 0]
X = RGB[0] * 0.4124 + RGB[1] * 0.3576 + RGB[2] * 0.1805
Y = RGB[0] * 0.2126 + RGB[1] * 0.7152 + RGB[2] * 0.0722
Z = RGB[0] * 0.0193 + RGB[1] * 0.1192 + RGB[2] * 0.9505
XYZ[0] = round(X, 4)
XYZ[1] = round(Y, 4)
XYZ[2] = round(Z, 4)
XYZ[0] = float(XYZ[0]) / 95.047
XYZ[1] = float(XYZ[1]) / 100.0
XYZ[2] = float(XYZ[2]) / 108.883
num = 0
for value in XYZ:
        if value > 0.008856:  # CIE epsilon threshold
value = value ** (0.3333333333333333)
else:
value = (7.787 * value) + (16 / 116)
XYZ[num] = value
num = num + 1
Lab = [0, 0, 0]
L = (116 * XYZ[1]) - 16
a = 500 * (XYZ[0] - XYZ[1])
b = 200 * (XYZ[1] - XYZ[2])
Lab[0] = round(L, 4)
Lab[1] = round(a, 4)
Lab[2] = round(b, 4)
return (Lab[0], Lab[1], Lab[2])
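# Sanity check: pure white maps to approximately L=100, a=0, b=0,
#   rgb2lab((255, 255, 255))  ->  (~100.0, ~0.0, ~0.0)
# and pure black maps to approximately (0, 0, 0).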
def get_adjacent_colors (color, range_val):
colors = []
colors.append(color)
for r in range(-range_val, range_val):
colors.append((color[0] + r, color[1], color[2]))
colors.append((color[0], color[1] + r, color[2]))
colors.append((color[0], color[1], color[2] + r))
colors.append((color[0] + r, color[1] + r, color[2]))
colors.append((color[0], color[1] + r, color[2] + r))
colors.append((color[0] + r, color[1], color[2] + r))
colors.append((color[0] + r, color[1] + r, color[2] + r))
return colors
"""
Launch point function.
"""
def main ():
palette_name = 'c64'
palette = load_color_palette(palette_name)
images = load_images()
saved_pixels = {}
iteration = 1
for img in images:
pixels = img.load()
st = time.time()
for x in range(img.size[0]):
for y in range(img.size[1]):
if pixels[x, y] in saved_pixels:
pixels[x, y] = saved_pixels[pixels[x, y]]
else:
colors = get_adjacent_colors(pixels[x, y], 35)
color = convert_color(colors[0], palette)
for c in colors:
saved_pixels[c] = color
pixels[x, y] = color
et = time.time()
img.save('./results/' + palette_name + '_image' + str(iteration) + '.jpg')
iteration = iteration + 1
print(str(et-st) + " seconds")
if __name__ == "__main__":
main()
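# Note: as written, main() expects ./palettes/<name>.pal, input pictures in ./images/,
# and an existing ./results/ directory for the converted output files.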
```
|
{
"source": "jdsgomes/ClassyVision-1",
"score": 2
}
|
#### File: classy_vision/dataset/classy_synthetic_image_streaming.py
```python
import torchvision.transforms as transforms
from classy_vision.dataset import register_dataset
from classy_vision.dataset.classy_dataset import ClassyDataset
from classy_vision.dataset.core import RandomImageBinaryClassDataset
from classy_vision.dataset.dataloader_async_gpu_wrapper import DataloaderAsyncGPUWrapper
from classy_vision.dataset.dataloader_limit_wrapper import DataloaderLimitWrapper
from classy_vision.dataset.transforms.util import (
ImagenetConstants,
build_field_transform_default_imagenet,
)
@register_dataset("synthetic_image_streaming")
class SyntheticImageStreamingDataset(ClassyDataset):
"""
Synthetic image dataset that behaves like a streaming dataset.
Requires a "num_samples" argument which decides the number of samples in the
phase. Also takes an optional "length" input which sets the length of the
dataset.
"""
def __init__(
self,
batchsize_per_replica,
shuffle,
transform,
num_samples,
crop_size,
class_ratio,
seed,
length=None,
async_gpu_copy: bool = False,
):
if length is None:
# If length not provided, set to be same as num_samples
length = num_samples
dataset = RandomImageBinaryClassDataset(crop_size, class_ratio, length, seed)
super().__init__(
dataset, batchsize_per_replica, shuffle, transform, num_samples
)
self.async_gpu_copy = async_gpu_copy
@classmethod
def from_config(cls, config):
# Parse the config
assert all(key in config for key in ["crop_size", "class_ratio", "seed"])
length = config.get("length")
crop_size = config["crop_size"]
class_ratio = config["class_ratio"]
seed = config["seed"]
(
transform_config,
batchsize_per_replica,
shuffle,
num_samples,
) = cls.parse_config(config)
async_gpu_copy = config.get("async_gpu_copy", False)
# Build the transforms
default_transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize(
mean=ImagenetConstants.MEAN, std=ImagenetConstants.STD
),
]
)
transform = build_field_transform_default_imagenet(
transform_config, default_transform=default_transform
)
return cls(
batchsize_per_replica,
shuffle,
transform,
num_samples,
crop_size,
class_ratio,
seed,
length=length,
async_gpu_copy=async_gpu_copy,
)
def iterator(self, *args, **kwargs):
dataloader = DataloaderLimitWrapper(
super().iterator(*args, **kwargs),
self.num_samples // self.get_global_batchsize(),
)
if self.async_gpu_copy:
dataloader = DataloaderAsyncGPUWrapper(dataloader)
return dataloader
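# Example config sketch: from_config above requires "crop_size", "class_ratio" and "seed",
# and optionally reads "length" and "async_gpu_copy"; the batch size, shuffle, transform and
# num_samples settings are extracted by the inherited parse_config helper.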
```
#### File: dataset/core/random_video_datasets.py
```python
import torch
from ...generic.util import torch_seed
class RandomVideoDataset:
def __init__(
self,
num_classes,
split,
num_samples,
frames_per_clip,
video_width,
video_height,
audio_samples,
clips_per_video,
seed=10,
):
self.num_classes = num_classes
self.split = split
# video config
self.video_channels = 3
self.num_samples = num_samples
self.frames_per_clip = frames_per_clip
self.video_width = video_width
self.video_height = video_height
# audio config
self.audio_samples = audio_samples
self.clips_per_video = clips_per_video
# misc config
self.seed = seed
def __getitem__(self, idx):
if self.split == "train":
# assume we only sample 1 clip from each training video
target_seed_offset = idx
else:
# for video model testing, clips from the same video share the same
# target label
target_seed_offset = idx // self.clips_per_video
with torch_seed(self.seed + target_seed_offset):
target = torch.randint(0, self.num_classes, (1,)).item()
with torch_seed(self.seed + idx):
return {
"input": {
"video": torch.randint(
0,
256,
(
self.frames_per_clip,
self.video_height,
self.video_width,
self.video_channels,
),
dtype=torch.uint8,
),
"audio": torch.rand((self.audio_samples, 1), dtype=torch.float),
},
"target": target,
}
def __len__(self):
return self.num_samples
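# Usage sketch:
#   ds = RandomVideoDataset(num_classes=10, split="train", num_samples=4,
#                           frames_per_clip=8, video_width=64, video_height=64,
#                           audio_samples=16000, clips_per_video=1)
#   sample = ds[0]   # dict with "input" (uint8 video tensor + float audio tensor) and "target"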
```
#### File: classy_vision/heads/identity_head.py
```python
from typing import Any, Dict
from classy_vision.heads import ClassyHead, register_head
@register_head("identity")
class IdentityHead(ClassyHead):
"""This head returns the input without changing it. This can
be attached to a model, if the output of the model is the
desired result.
"""
def forward(self, x):
return x
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "IdentityHead":
"""Instantiates a IdentityHead from a configuration.
Args:
config: A configuration for a IdentityHead.
See :func:`__init__` for parameters expected in the config.
Returns:
A IdentityHead instance.
"""
return cls(config["unique_id"])
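# Example config sketch (assuming the standard ClassyVision "name"-keyed registry lookup):
#   {"name": "identity", "unique_id": "default_head"}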
```
#### File: classy_vision/optim/sgd.py
```python
from typing import Any, Dict
import torch.optim
from . import ClassyOptimizer, register_optimizer
@register_optimizer("sgd")
class SGD(ClassyOptimizer):
def __init__(
self,
larc_config: Dict[str, Any] = None,
lr: float = 0.1,
momentum: float = 0.0,
weight_decay: float = 0.0,
nesterov: bool = False,
use_larc: bool = False,
):
super().__init__()
self._lr = lr
self._momentum = momentum
self._weight_decay = weight_decay
self._nesterov = nesterov
self._use_larc = use_larc
self._larc_config = larc_config
def prepare(self, param_groups):
self.optimizer = torch.optim.SGD(
param_groups,
lr=self._lr,
nesterov=self._nesterov,
momentum=self._momentum,
weight_decay=self._weight_decay,
)
if self._use_larc:
try:
from apex.parallel.LARC import LARC
except ImportError:
raise RuntimeError("Apex needed for LARC")
self.optimizer = LARC(optimizer=self.optimizer, **self._larc_config)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "SGD":
"""Instantiates a SGD from a configuration.
Args:
config: A configuration for a SGD.
See :func:`__init__` for parameters expected in the config.
Returns:
A SGD instance.
"""
# Default params
config.setdefault("lr", 0.1)
config.setdefault("momentum", 0.0)
config.setdefault("weight_decay", 0.0)
config.setdefault("nesterov", False)
config.setdefault("use_larc", False)
config.setdefault(
"larc_config", {"clip": True, "eps": 1e-08, "trust_coefficient": 0.02}
)
assert (
config["momentum"] >= 0.0
and config["momentum"] < 1.0
and type(config["momentum"]) == float
), "Config must contain a 'momentum' in [0, 1) for SGD optimizer"
assert isinstance(
config["nesterov"], bool
), "Config must contain a boolean 'nesterov' param for SGD optimizer"
assert isinstance(
config["use_larc"], bool
), "Config must contain a boolean 'use_larc' param for SGD optimizer"
return cls(
larc_config=config["larc_config"],
lr=config["lr"],
momentum=config["momentum"],
weight_decay=config["weight_decay"],
nesterov=config["nesterov"],
use_larc=config["use_larc"],
)
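# Example config accepted by from_config above (the "name" key selects the registered
# optimizer; "momentum" must be a float in [0, 1)):
#   {"name": "sgd", "lr": 0.1, "momentum": 0.9, "weight_decay": 1e-4,
#    "nesterov": False, "use_larc": False}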
```
#### File: hydra_plugins/classy_vision_path/classy_vision_path.py
```python
from hydra.core.config_search_path import ConfigSearchPath
from hydra.plugins.search_path_plugin import SearchPathPlugin
class ClassyVisionPathPlugin(SearchPathPlugin):
def manipulate_search_path(self, search_path: ConfigSearchPath) -> None:
search_path.append(
provider="classy_vision", path="pkg://classy_vision.hydra.conf"
)
```
#### File: ClassyVision-1/test/dataset_transforms_lighting_transform_test.py
```python
import unittest
from classy_vision.dataset.core.random_image_datasets import (
RandomImageBinaryClassDataset,
)
from classy_vision.dataset.transforms.util import build_field_transform_default_imagenet
class LightingTransformTest(unittest.TestCase):
def get_test_image_dataset(self):
return RandomImageBinaryClassDataset(
crop_size=224, class_ratio=0.5, num_samples=100, seed=0
)
def test_lighting_transform_no_errors(self):
"""
Tests that the lighting transform runs without any errors.
"""
dataset = self.get_test_image_dataset()
config = [{"name": "ToTensor"}, {"name": "lighting"}]
transform = build_field_transform_default_imagenet(config)
sample = dataset[0]
try:
# test that lighting has been registered and runs without errors
transform(sample)
except Exception:
self.fail("LightingTransform raised an exception")
return
```
#### File: ClassyVision-1/test/losses_multi_output_sum_loss_test.py
```python
import unittest
import torch
from classy_vision.losses import (
ClassyLoss,
MultiOutputSumLoss,
build_loss,
register_loss,
)
@register_loss("mock_1")
class MockLoss1(ClassyLoss):
def forward(self, pred, target):
return torch.tensor(1.0)
@classmethod
def from_config(cls, config):
return cls()
class TestMultiOutputSumLoss(unittest.TestCase):
def test_multi_output_sum_loss(self):
config = {"name": "multi_output_sum_loss", "loss": {"name": "mock_1"}}
crit = build_loss(config)
self.assertTrue(isinstance(crit, MultiOutputSumLoss))
# test with a single output
output = torch.tensor([1.0, 2.3])
target = torch.tensor(1.0)
self.assertAlmostEqual(crit(output, target).item(), 1.0)
# test with a list of outputs
output = [torch.tensor([1.2, 3.2])] * 5
target = torch.tensor(2.3)
self.assertAlmostEqual(crit(output, target).item(), 5.0)
```
#### File: ClassyVision-1/test/losses_test.py
```python
import unittest
import torch
from classy_vision.losses import build_loss
class CriterionsTest(unittest.TestCase):
"""
    Test that build_loss is able to build torch losses correctly.
"""
def _test_loss(self, config, output, target, expected_loss):
# test that we are able to build losses from torch.nn.modules.loss
# and that they work correctly
crit = build_loss(config)
# test that the weights are set correctly
self.assertAlmostEqual(crit.weight.numpy().tolist(), [1.0, 1.0])
# test that the loss is computed correctly
self.assertAlmostEqual(crit(output, target).item(), expected_loss)
# verify ignore index works
if "ignore_index" in config:
self.assertAlmostEqual(crit(output, torch.tensor([-1])).item(), 0.0)
def test_cross_entropy_loss(self):
"""
Test CrossEntropyLoss
"""
config = {
"name": "CrossEntropyLoss",
"weight": [1.0, 1.0],
"ignore_index": -1,
"reduction": "mean",
}
output = torch.tensor([[9.0, 1.0]])
target = torch.tensor([1])
expected_loss = 8.000335693359375
self._test_loss(config, output, target, expected_loss)
def test_bce_with_logits_loss(self):
"""
Test BCEWithLogitsLoss
"""
config = {
"name": "BCEWithLogitsLoss",
"weight": [1.0, 1.0],
"reduction": "mean",
}
output = torch.tensor([0.999, 0.999])
target = torch.tensor([1.0, 1.0])
expected_loss = 0.313530727260701
self._test_loss(config, output, target, expected_loss)
```
#### File: ClassyVision-1/test/meters_video_accuracy_meter_test.py
```python
import torch
from classy_vision import meters
from classy_vision.meters import VideoAccuracyMeter
from test.generic.meter_test_utils import ClassificationMeterTest
class TestVideoAccuracyMeter(ClassificationMeterTest):
def test_accuracy_meter_registry(self):
accuracy_meter = meters.build_meter(
{
"name": "video_accuracy",
"topk": [1, 2],
"clips_per_video_train": 1,
"clips_per_video_test": 2,
}
)
self.assertTrue(isinstance(accuracy_meter, VideoAccuracyMeter))
def test_single_meter_update_and_reset(self):
"""
This test verifies that the meter works as expected on a single
update + reset + same single update.
"""
meter = VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
)
# Batchsize = 3, num classes = 3, clips_per_video is 2,
# score is a value in {1, 2, 3}
model_output = torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
)
        # Class 0 is the correct class for video 1, class 1 for video 2, and
        # class 2 for video 3
target = torch.tensor([0, 0, 1, 1, 2, 2])
        # Only the first video has the top class correct; all three videos
        # have the correct class in the top 2
expected_value = {"top_1": 1 / 3.0, "top_2": 3 / 3.0}
self.meter_update_and_reset_test(
meter, model_output, target, expected_value, is_train=False
)
def test_double_meter_update_and_reset(self):
meter = VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
)
# Batchsize = 3, num classes = 3, clips_per_video is 2,
# score is a value in {1, 2, 3}.
# Data of two batch is provided
model_outputs = [
torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
),
torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
),
]
        # Class 0 is the correct class for video 1, class 1 for video 2, and
        # class 2 for video 3, in both batches
targets = [torch.tensor([0, 0, 1, 1, 2, 2]), torch.tensor([0, 0, 1, 1, 2, 2])]
        # Each batch has top-1 accuracy of 1/3.0 and top-2 accuracy of 3/3.0,
        # so the accumulated values are 2/6.0 and 6/6.0
expected_value = {"top_1": 2 / 6.0, "top_2": 6 / 6.0}
self.meter_update_and_reset_test(
meter, model_outputs, targets, expected_value, is_train=False
)
def test_meter_invalid_model_output(self):
meter = VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
)
# This model output has 3 dimensions instead of expected 2
model_output = torch.tensor(
[[[3, 2, 1], [1, 2, 3]], [[-1, -3, -4], [-10, -90, -100]]],
dtype=torch.float,
)
target = torch.tensor([0, 1, 2])
self.meter_invalid_meter_input_test(meter, model_output, target)
def test_meter_invalid_target(self):
meter = VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
)
model_output = torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
)
# Target has 3 dimensions instead of expected 1 or 2
target = torch.tensor([[[0, 1, 2], [0, 1, 2]]])
self.meter_invalid_meter_input_test(meter, model_output, target)
# Target of clips from the same video is not consistent
target = torch.tensor([0, 2, 1, 1, 2, 2])
self.meter_invalid_update_test(meter, model_output, target, is_train=False)
def test_meter_invalid_topk(self):
meter = VideoAccuracyMeter(
topk=[1, 5], clips_per_video_train=1, clips_per_video_test=2
)
model_output = torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
)
target = torch.tensor([0, 1, 2])
self.meter_invalid_meter_input_test(meter, model_output, target)
def test_meter_get_set_classy_state_test(self):
# In this test we update meter0 with model_output0 & target0
# and we update meter1 with model_output1 & target1 then
# transfer the state from meter1 to meter0 and validate they
# give same expected value.
# Expected value is the expected value of meter1
meters = [
VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
),
VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
),
]
# Batchsize = 3, num classes = 3, score is a value in {1, 2,
# 3}...3 is the highest score
model_outputs = [
torch.tensor(
[[1, 2, 3], [1, 1, 3], [2, 2, 1], [3, 2, 1], [2, 2, 2], [2, 3, 1]],
dtype=torch.float,
),
torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
),
]
        # Class 0 is the correct class for video 1, class 1 for video 2, and class 2 for video 3
targets = [torch.tensor([0, 0, 1, 1, 2, 2]), torch.tensor([0, 0, 1, 1, 2, 2])]
# Value for second update
expected_value = {"top_1": 1 / 3.0, "top_2": 3 / 3.0}
self.meter_get_set_classy_state_test(
meters, model_outputs, targets, expected_value, is_train=False
)
def test_meter_distributed(self):
# Meter0 will execute on one process, Meter1 on the other
meters = [
VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
),
VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
),
]
# Batchsize = 3, num classes = 3, score is a value in {1, 2,
# 3}...3 is the highest score
model_outputs = [
torch.tensor(
[[1, 2, 3], [1, 1, 3], [2, 2, 1], [3, 2, 1], [2, 2, 2], [2, 3, 1]],
dtype=torch.float,
), # Meter 0
torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
), # Meter 1
torch.tensor(
[[1, 2, 3], [1, 1, 3], [2, 2, 1], [3, 2, 1], [2, 2, 2], [2, 3, 1]],
dtype=torch.float,
), # Meter 0
torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
), # Meter 1
]
        # For both meters, class 0 is the correct class for video 1, class 1 for video 2,
        # and class 2 for video 3
targets = [
torch.tensor([0, 0, 1, 1, 2, 2]), # Meter 0
torch.tensor([0, 0, 1, 1, 2, 2]), # Meter 1
torch.tensor([0, 0, 1, 1, 2, 2]), # Meter 0
torch.tensor([0, 0, 1, 1, 2, 2]), # Meter 1
]
        # In the first update to each meter there is 1 correct video in top-1 and
        # 4 correct in top-2 (out of 6); the second updates repeat this and add to the first
expected_values = [
{"top_1": 1 / 6.0, "top_2": 4 / 6.0}, # After one update to each meter
{"top_1": 2 / 12.0, "top_2": 8 / 12.0}, # After two updates to each meter
]
self.meter_distributed_test(
meters, model_outputs, targets, expected_values, is_train=False
)
```
#### File: ClassyVision-1/test/optim_param_scheduler_step_test.py
```python
import copy
import unittest
from classy_vision.optim.param_scheduler import (
build_param_scheduler,
StepParamScheduler,
)
class TestStepScheduler(unittest.TestCase):
_num_epochs = 12
def _get_valid_config(self):
return {
"name": "step",
"num_epochs": self._num_epochs,
"values": [0.1, 0.01, 0.001, 0.0001],
}
def test_invalid_config(self):
# Invalid num epochs
config = self._get_valid_config()
bad_config = copy.deepcopy(config)
bad_config["num_epochs"] = -1
with self.assertRaises(ValueError):
StepParamScheduler.from_config(bad_config)
# Invalid Values
bad_config["num_epochs"] = config["num_epochs"]
del bad_config["values"]
with self.assertRaises(TypeError):
StepParamScheduler.from_config(bad_config)
bad_config["values"] = {"a": "b"}
with self.assertRaises(ValueError):
StepParamScheduler.from_config(bad_config)
bad_config["values"] = []
with self.assertRaises(ValueError):
StepParamScheduler.from_config(bad_config)
def test_scheduler(self):
config = self._get_valid_config()
scheduler = StepParamScheduler.from_config(config)
schedule = [
scheduler(epoch_num / self._num_epochs)
for epoch_num in range(self._num_epochs)
]
expected_schedule = [
0.1,
0.1,
0.1,
0.01,
0.01,
0.01,
0.001,
0.001,
0.001,
0.0001,
0.0001,
0.0001,
]
self.assertEqual(schedule, expected_schedule)
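        # 12 epochs spread over 4 values gives 3 epochs per value, matching expected_schedule.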
def test_build_step_scheduler(self):
config = self._get_valid_config()
scheduler = build_param_scheduler(config)
self.assertTrue(isinstance(scheduler, StepParamScheduler))
```
|
{
"source": "jdsheehan/cristata",
"score": 3
}
|
#### File: cristata/openwhisk/devices.py
```python
import json
import os
import requests
import csv
from db2 import DB2
SQL_TEMPLATE = ''
SQL_TEMPLATE += 'CALL {database}(); '
def build_query(database):
sql_template = SQL_TEMPLATE.format(database=database)
return sql_template, True
def process_response(text):
t = []
rows = text.split('\n')
if len(rows) > 0:
reader = csv.reader(rows, delimiter=',', lineterminator='\\n')
for row in reader:
if len(row) > 0:
t.append(row[0])
return { 'devices': t, 'deviceCount': len(t), 'deviceMetadata': ["Device Id"] }
def main(args):
print('Args %r' % args)
result = {}
try:
db2 = DB2(args, ['database', 'database_userid', 'database_password', 'database_rest_url'])
sql, r = build_query(args['database'])
r = db2.execute(sql)
if r.status_code != 200:
raise Exception(r.json())
response = process_response(r.text)
result = db2.success(response)
except Exception as e:
result = {'status': 400, 'state': 'Failed', 'result': str(e)}
print('Error: %r' % result)
dbg = args.get('debug', False)
if dbg:
print('%r' % result)
return result
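# Example action parameters (a sketch; the keys are those passed to DB2(...) above,
# and the values are illustrative only):
#   {"database": "MY_STORED_PROC", "database_userid": "db2user", "database_password": "...",
#    "database_rest_url": "https://db2-host/rest", "debug": True}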
```
|
{
"source": "jdsheehan/pycloudmessenger",
"score": 2
}
|
#### File: pycloudmessenger/pycloudmessenger/rabbitmq.py
```python
import os
import ssl
import logging
import json
from abc import ABC, abstractmethod
import pika
import pycloudmessenger.utils as utils
__rabbit_helper_version_info__ = ('0', '1', '2')
LOGGER = logging.getLogger(__package__)
class RabbitContext():
"""
Holds connection details for a RabbitMQ service
"""
def __init__(self, args: dict, user: str = None, password: str = None):
self.cert_file = None
self.args = args.copy()
#First set up some defaults
if 'broker_timeout' not in self.args:
self.args['broker_timeout'] = 60.0
self.args['broker_user'] = user if user else self.arg_value(self.args, ['broker_user', 'broker_guest_user', 'client_user'])
self.args['broker_password'] = password if password else self.arg_value(self.args, ['broker_password', 'broker_guest_password', 'client_pwd'])
if 'broker_cert_b64' in self.args:
self.cert_file = utils.Certificate(args['broker_cert_b64'])
self.args['broker_pem_path'] = self.cert_file.filename
self.args['broker_tls'] = True
else:
self.args['broker_tls'] = False
#Now check that all required fields are present
cfg = ['broker_host', 'broker_port', 'broker_vhost',
'broker_user', 'broker_password']
for key in cfg:
if not self.args.get(key):
raise Exception(f'{key} is missing from RabbitContext initialisation.')
def __str__(self):
return json.dumps(self.args)
def arg_value(self, args, possibilities):
for p in possibilities:
val = args.get(p)
if val:
return val
raise Exception(f'{possibilities} missing from arguments.')
@classmethod
def from_credentials_file(self, cred_file: str, user: str = None, password: str = None):
with open(cred_file) as creds:
args = json.load(creds)
#First, we need to support legacy credential formats
if 'broker' in args:
args['broker_host'] = args.pop('broker')
args['broker_port'] = args.pop('port')
args['broker_vhost'] = args.pop('vhost')
args['broker_user'] = args.pop('client_user')
args['broker_password'] = args.pop('client_pwd')
args['broker_cert_b64'] = args.pop('cert_b64')
return RabbitContext(args, user, password)
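    # Example credentials file (a sketch; keys mirror the required fields checked in __init__,
    # broker_cert_b64 is optional):
    #   {"broker_host": "rabbit.example.com", "broker_port": 5671, "broker_vhost": "vh",
    #    "broker_user": "user", "broker_password": "pwd", "broker_cert_b64": "<base64 PEM>"}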
def get(self, key: str):
try:
return self.args[key]
except:
return None
def user(self):
return self.get('broker_user')
def pwd(self):
return self.get('broker_password')
def host(self):
return self.get('broker_host')
def port(self):
return self.get('broker_port')
def vhost(self):
return self.get('broker_vhost')
def cert(self):
return self.get('broker_pem_path')
def ssl(self):
return self.get('broker_tls')
def feeds(self):
return self.get('broker_request_queue')
def replies(self):
return self.get('broker_response_queue')
def timeout(self):
return self.get('broker_timeout')
class RabbitQueue():
"""
Holds configuration details for a RabbitMQ Queue
"""
def __init__(self, queue: str = None, auto_delete: bool = False, durable: bool = False, purge: bool = False, prefetch: int = 1):
self.durable = durable
self.auto_delete = auto_delete
self.purge = purge
self.prefetch = prefetch
#If no queue specified, create a temporary, exclusive queue
#This will force a server generated queue name like 'amq.gen....'
if queue:
self.name = queue
self.exclusive = False
else:
self.name = ''
self.exclusive = True
self.name = self.name.strip()
class AbstractRabbitMessenger(ABC):
"""
Communicates with a RabbitMQ service
"""
def __init__(self, context: RabbitContext):
self.context = context
self.pub_queue = None
self.sub_queue = None
self.inbound = 0
self.outbound = 0
self.connection = None
self.channel = None
self.cancel_on_close = False
self.credentials = pika.PlainCredentials(self.context.user(), self.context.pwd())
self.ssl_options = {}
if self.context.ssl():
self.ssl_options['ssl_version'] = ssl.PROTOCOL_TLSv1_2
if self.context.cert():
self.ssl_options['ca_certs'] = self.context.cert()
self.ssl_options['cert_reqs'] = ssl.CERT_REQUIRED
def __enter__(self):
return self
def __exit__(self, *args):
self.stop()
def declare_queue(self, queue: RabbitQueue) -> RabbitQueue:
"""
Declare a queue, creating if required
Throws:
An exception if connection attempt is not successful
Returns:
            The queue, with any server-assigned name filled in
"""
if queue.exclusive or queue.durable:
#Will not raise an exception if access rights insufficient on the queue
#Exception only raised when channel consume takes place
result = self.channel.queue_declare(
queue=queue.name,
exclusive=queue.exclusive,
auto_delete=queue.auto_delete,
durable=queue.durable)
queue.name = result.method.queue
#Useful when testing - clear the queue
if queue.purge:
self.channel.queue_purge(queue=queue.name)
return queue
def establish_connection(self, parameters: pika.ConnectionParameters):
"""
Connect to RabbitMQ service
Throws:
An exception if connection attempt is not successful
Returns:
None
"""
self.connection = pika.BlockingConnection(parameters)
self.channel = self.connection.channel()
def connect(self, connection_attempts: int, retry_delay: int):
"""
Setup connection settings to RabbitMQ service
Throws:
An exception if connection attempt is not successful
Returns:
None
"""
parameters = pika.ConnectionParameters(
self.context.host(), self.context.port(), self.context.vhost(),
self.credentials, ssl=self.context.ssl(), ssl_options=self.ssl_options,
connection_attempts=connection_attempts,
retry_delay=retry_delay)
self.establish_connection(parameters)
def publish(self, message, queue: str, exchange: str = '', mode: int = 1):
"""
Publish a message to a queue
Throws:
Exception - maybe access rights are insufficient on the queue
Returns:
None
"""
self.channel.basic_publish(
exchange=exchange, routing_key=queue, body=message,
properties=pika.BasicProperties(delivery_mode=mode)
)
self.outbound += 1
def stop(self):
"""
Closes open channels and connections
Throws:
Nothing
Returns:
None
"""
try:
if self.channel:
if self.cancel_on_close:
self.channel.cancel()
self.channel.close()
if self.connection:
self.connection.close()
except:
pass
@abstractmethod
def receive(self, handler, timeout: int, max_messages: int):
pass
@abstractmethod
def start(self, publish: RabbitQueue = None, subscribe: RabbitQueue = None, connection_attempts: int = 10, retry_delay: int = 1):
pass
class RabbitTimedOutException(Exception):
pass
class RabbitConsumerException(Exception):
pass
class RabbitClient(AbstractRabbitMessenger):
"""
Communicates with a RabbitMQ service
"""
def start(self, publish: RabbitQueue = None, subscribe: RabbitQueue = None, connection_attempts: int = 10, retry_delay: int = 1):
if publish:
self.pub_queue = publish
if subscribe:
self.sub_queue = subscribe
self.connect(connection_attempts, retry_delay)
def get_subscribe_queue(self):
return self.sub_queue.name if self.sub_queue else None
def establish_connection(self, parameters: pika.ConnectionParameters):
super(RabbitClient, self).establish_connection(parameters)
if self.pub_queue:
self.declare_queue(self.pub_queue)
if self.sub_queue:
self.declare_queue(self.sub_queue)
        #Ensure the consumer only gets 'prefetch' unacknowledged messages
self.channel.basic_qos(prefetch_count=self.sub_queue.prefetch)
def publish(self, message, queue: RabbitQueue = None, exchange: str = '', mode: int = 1):
if not queue:
queue = self.pub_queue
super(RabbitClient, self).publish(message, queue.name, exchange, mode)
def receive(self, handler=None, timeout: int = 30, max_messages: int = 0) -> str:
"""
Start receiving messages, up to max_messages
Throws:
Exception if consume fails
Returns:
The last message received
"""
msgs = 0
body = None
try:
for msg in self.channel.consume(
self.sub_queue.name,
exclusive=self.sub_queue.exclusive,
inactivity_timeout=timeout):
method_frame, properties, body = msg
if not method_frame and not properties and not body:
raise RabbitTimedOutException("Operation timeout reached.")
msgs += 1
self.inbound += 1
self.channel.basic_ack(method_frame.delivery_tag)
if handler:
#body is of type 'bytes' in Python 3+
handler(body)
elif not max_messages:
break
#Stop consuming if message limit reached
if msgs == max_messages:
break
except pika.exceptions.AMQPError as exc:
LOGGER.error(exc)
finally:
self.channel.cancel()
if not msgs:
raise RabbitConsumerException('Consumer cancelled prior to timeout.')
return body
class RabbitDualClient():
"""
Communicates with a RabbitMQ service
"""
def __init__(self, context):
"""
Class initializer
"""
self.context = context
self.subscriber = None
self.publisher = None
self.last_recv_msg = None
def start_subscriber(self, queue: RabbitQueue, client=RabbitClient):
"""
Connect to Castor service and create a queue
Throws:
An exception if connection attempt is not successful
Returns:
Nothing
"""
self.subscriber = client(self.context)
self.subscriber.start(subscribe=queue)
def get_subscribe_queue(self):
return self.subscriber.get_subscribe_queue()
def start_publisher(self, queue: RabbitQueue, client=RabbitClient):
"""
Connect to Castor service and create a queue
Throws:
An exception if connection attempt is not successful
Returns:
Nothing
"""
self.publisher = client(self.context)
self.publisher.start(publish=queue)
def send_message(self, message, queue: RabbitQueue = None):
"""
Publish a message to Castor service
Throws:
An exception if publish is not successful
Returns:
Nothing
"""
self.publisher.publish(message, queue)
def receive_message(self, handler, timeout: int, max_messages: int):
"""
Receive messages from Castor service
Throws:
An exception if receive is not successful
Returns:
Nothing
"""
self.subscriber.receive(handler, timeout, max_messages)
def internal_handler(self, message):
"""
Handler for invoke_service method
Throws:
Nothing
Returns:
Nothing
"""
self.last_recv_msg = message
def invoke_service(self, message, timeout: int = 30):
"""
Publish a message and receive a reply
Throws:
An exception if not successful or timedout
Returns:
The reply dictionary
"""
self.last_recv_msg = None
LOGGER.debug(f"Sending message: {message}")
self.send_message(message)
LOGGER.debug("Waiting for reply...")
#Now wait for the reply
self.subscriber.receive(self.internal_handler, timeout, 1)
LOGGER.debug(f"Received: {self.last_recv_msg}")
return self.last_recv_msg
def stop(self):
"""
Close connection to service
Throws:
An exception if not successful
Returns:
Nothing
"""
self.subscriber.stop()
self.publisher.stop()
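# Usage sketch (queue names are illustrative):
#   context = RabbitContext.from_credentials_file('credentials.json')
#   client = RabbitDualClient(context)
#   client.start_publisher(RabbitQueue('request_queue'))
#   client.start_subscriber(RabbitQueue('reply_queue'))
#   reply = client.invoke_service(json.dumps({'op': 'ping'}), timeout=30)
#   client.stop()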
```
|
{
"source": "jdsika/TUM_HOly",
"score": 2
}
|
#### File: finder/doxygen/__init__.py
```python
from breathe.finder.doxygen import index as indexfinder
from breathe.finder.doxygen import compound as compoundfinder
from breathe.parser.doxygen import index, compound
from breathe.finder import Matcher
class ItemMatcher(Matcher):
def __init__(self, name, type_):
self.name = name
self.type_ = type_
def match(self, data_object):
return self.name == data_object.name and self.type_ == data_object.kind
def __repr__(self):
return "<ItemMatcher - name:%s, type_:%s>" % (self.name, self.type_)
class RefMatcher(Matcher):
def __init__(self, refid):
self.refid = refid
def match(self, data_object):
return self.refid == data_object.refid
class ItemMatcherFactory(Matcher):
def create_name_type_matcher(self, name, type_):
return ItemMatcher(name, type_)
def create_ref_matcher(self, ref):
return RefMatcher(ref)
class CreateCompoundTypeSubFinder(object):
def __init__(self, parser_factory, matcher_factory):
self.parser_factory = parser_factory
self.matcher_factory = matcher_factory
def __call__(self, project_info, *args):
compound_parser = self.parser_factory.create_compound_parser(project_info)
return indexfinder.CompoundTypeSubItemFinder(self.matcher_factory, compound_parser, project_info, *args)
class DoxygenItemFinderFactory(object):
def __init__(self, finders, project_info):
self.finders = finders
self.project_info = project_info
def create_finder(self, data_object):
return self.finders[data_object.__class__](self.project_info, data_object, self)
class DoxygenItemFinderFactoryCreator(object):
def __init__(self, parser_factory, matcher_factory):
self.parser_factory = parser_factory
self.matcher_factory = matcher_factory
def create_factory(self, project_info):
finders = {
index.DoxygenTypeSub : indexfinder.DoxygenTypeSubItemFinder,
index.CompoundTypeSub : CreateCompoundTypeSubFinder(self.parser_factory, self.matcher_factory),
index.MemberTypeSub : indexfinder.MemberTypeSubItemFinder,
compound.DoxygenTypeSub : compoundfinder.DoxygenTypeSubItemFinder,
compound.compounddefTypeSub : compoundfinder.CompoundDefTypeSubItemFinder,
compound.sectiondefTypeSub : compoundfinder.SectionDefTypeSubItemFinder,
compound.memberdefTypeSub : compoundfinder.MemberDefTypeSubItemFinder,
}
return DoxygenItemFinderFactory(finders, project_info)
```
#### File: TUM_HOly/openrave/myrelpath.py
```python
try:
from os.path import relpath
except ImportError:
from posixpath import curdir, sep, pardir, join, abspath, commonprefix
def relpath(path, start=curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
# Work out how much of the filepath is shared by start and path.
i = len(commonprefix([start_list, path_list]))
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
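# Example (POSIX-style paths):
#   relpath('/a/b/c/d', start='/a/b/x')  ->  '../c/d'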
```
#### File: python/databases/inversekinematics.py
```python
from __future__ import with_statement # for python 2.5
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2009-2012 <NAME> <<EMAIL>>'
__license__ = 'Apache License, Version 2.0'
if not __openravepy_build_doc__:
from numpy import *
else:
from numpy import array
from ..openravepy_ext import openrave_exception, RobotStateSaver
from ..openravepy_int import RaveCreateModule, RaveCreateIkSolver, IkParameterization, IkParameterizationType, RaveFindDatabaseFile, RaveDestroy, Environment, openravepyCompilerVersion, IkFilterOptions, KinBody, normalizeAxisRotation, quatFromRotationMatrix
from . import DatabaseGenerator
from ..misc import relpath, TSP
import time,platform,shutil,sys
import os.path
from os import getcwd, remove
import distutils
from distutils import ccompiler
from optparse import OptionParser
try:
import cPickle as pickle
except:
import pickle
import logging
log = logging.getLogger('openravepy.'+__name__.split('.',2)[-1])
class InverseKinematicsError(Exception):
def __init__(self,parameter=u''):
self.parameter = unicode(parameter)
def __unicode__(self):
s = u'Inverse Kinematics Error\n%s'%self.parameter
return s
def __str__(self):
return unicode(self).encode('utf-8')
def __repr__(self):
return '<openravepy.databases.inversekinematics.InverseKinematicsError(%r)>'%(self.parameter)
def __eq__(self, r):
return self.parameter == r.parameter
def __ne__(self, r):
return self.parameter != r.parameter
class InverseKinematicsModel(DatabaseGenerator):
"""Generates analytical inverse-kinematics solutions, compiles them into a shared object/DLL, and sets the robot's iksolver. Only generates the models for the robot's active manipulator. To generate IK models for each manipulator in the robot, mulitple InverseKinematicsModel classes have to be created.
"""
class ArmVisibility:
"""When 'entered' will hide all the non-arm links in order to facilitate visiblity of the gripper"""
def __init__(self,manip,transparency=1):
self.manip = manip
self.robot = self.manip.GetRobot()
self.hiddengeoms = []
self.transparency = transparency
def __enter__(self):
self.hiddengeoms = []
with self.robot.GetEnv():
childlinks = self.robot.GetChain(self.manip.GetBase().GetIndex(),self.manip.GetEndEffector().GetIndex(),returnjoints=False)
for link in self.robot.GetLinks():
if link not in childlinks:
for geom in link.GetGeometries():
self.hiddengeoms.append((geom,geom.IsDraw(),geom.GetTransparency()))
if self.transparency >= 1:
geom.SetDraw(False)
else:
geom.SetDraw(True)
geom.SetTransparency(self.transparency)
def __exit__(self,type,value,traceback):
with self.robot.GetEnv():
for geom,isdraw,tr in self.hiddengeoms:
geom.SetDraw(isdraw)
geom.SetTransparency(tr)
_cachedKinematicsHash = None # manip.GetInverseKinematicsStructureHash() when the ik was built with
def __init__(self,robot=None,iktype=None,forceikfast=False,freeindices=None,freejoints=None,manip=None):
"""
:param robot: if not None, will use the robot's active manipulator
:param manip: if not None, will the manipulator, takes precedence over robot
:param forceikfast: if set will always force the ikfast solver
:param freeindices: force the following freeindices on the ik solver
"""
self.ikfastproblem = None
if manip is not None:
robot = manip.GetRobot()
else:
manip = robot.GetActiveManipulator()
DatabaseGenerator.__init__(self,robot=robot)
self.manip = manip
# check if robot manipulator has no static links (except the base)
for link in robot.GetChain(manip.GetBase().GetIndex(),manip.GetEndEffector().GetIndex(),returnjoints=False)[1:]:
for rigidlyattached in link.GetRigidlyAttachedLinks():
if rigidlyattached.IsStatic():
raise InverseKinematicsError(u'link %s part of IK chain cannot be declared static'%link)
try:
self.ikfast = __import__('openravepy.ikfast',fromlist=['openravepy'])
except ImportError,e:
log.warn('failed to import ikfast, so reverting to older version: %s',e)
self.ikfast = __import__('openravepy.ikfast_sympy0_6',fromlist=['openravepy'])
for handler in log.handlers:
self.ikfast.log.addHandler(handler)
self.ikfastproblem = RaveCreateModule(self.env,'ikfast')
if self.ikfastproblem is not None:
self.env.Add(self.ikfastproblem)
self.iktype = iktype
self.iksolver = None
self.freeinc = None
if freeindices is not None:
self.freeindices = freeindices
elif freejoints is not None:
self.freeindices = self.getIndicesFromJointNames(freejoints)
else:
self.freeindices = None
if self.freeindices is None:
self.solveindices = None
else:
if not all([ifree in manip.GetArmIndices() for ifree in self.freeindices]):
raise InverseKinematicsError(u'not all free indices %r are part of the manipulator indices %r'%(self.freeindices, manip.GetArmIndices()))
self.solveindices = [i for i in manip.GetArmIndices() if not i in self.freeindices]
self.forceikfast = forceikfast
self.ikfeasibility = None # if not None, ik is NOT feasibile and contains the error message
self.statistics = dict()
def __del__(self):
if self.ikfastproblem is not None:
# need to lock the environment since Remove locks it
if self.env.Lock(1.0):
try:
self.env.Remove(self.ikfastproblem)
finally:
self.env.Unlock()
else:
log.warn('failed to lock environment for InverseKinematicsModel.__del__!')
DatabaseGenerator.__del__(self)
def clone(self,envother):
clone = DatabaseGenerator.clone(self,envother)
clone.ikfastproblem = RaveCreateModule(envother,'ikfast')
if clone.ikfastproblem is not None:
envother.Add(clone.ikfastproblem)
if self.has():
clone.setrobot(self.freeinc)
return clone
def has(self):
return self.iksolver is not None and self.manip.GetIkSolver() is not None and self.manip.GetIkSolver().Supports(self.iktype)
def save(self):
statsfilename=self.getstatsfilename(False)
try:
os.makedirs(os.path.split(statsfilename)[0])
except OSError:
pass
pickle.dump((self.getversion(),self.statistics,self.ikfeasibility,self.solveindices,self.freeindices,self.freeinc), open(statsfilename, 'w'))
log.info('inversekinematics generation is done, compiled shared object: %s',self.getfilename(False))
def load(self,freeinc=None,checkforloaded=True,*args,**kwargs):
try:
filename = self.getstatsfilename(True)
if len(filename) == 0:
return checkforloaded and self.manip.GetIkSolver() is not None and self.manip.GetIkSolver().Supports(self.iktype) # might have ik already loaded
modelversion,self.statistics,self.ikfeasibility,self.solveindices,self.freeindices,self.freeinc = pickle.load(open(filename, 'r'))
if modelversion != self.getversion():
log.warn('version is wrong %s!=%s',modelversion,self.getversion())
return checkforloaded and self.manip.GetIkSolver() is not None and self.manip.GetIkSolver().Supports(self.iktype) # might have ik already loaded
except Exception,e:
log.warn(e)
return checkforloaded and self.manip.GetIkSolver() is not None and self.manip.GetIkSolver().Supports(self.iktype) # might have ik already loaded
if self.ikfeasibility is not None:
# ik is infeasible, but load successfully completed, so return success
return True
return self.setrobot(freeinc,*args,**kwargs)
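    # Typical usage pattern for this generator (a sketch mirroring other OpenRAVE databases):
    #   ikmodel = InverseKinematicsModel(robot, iktype=IkParameterizationType.Transform6D)
    #   if not ikmodel.load():
    #       ikmodel.autogenerate()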
def getversion(self):
return int(self.ikfast.__version__, 16)
def getikname(self):
return 'ikfast ikfast.%s.%s.%s'%(self.manip.GetInverseKinematicsStructureHash(self.iktype),str(self.iktype),self.manip.GetName())
def setrobot(self,freeinc=None):
"""Sets the ik solver on the robot.
freeinc is a list of the delta increments of the freejoint values that can override the default values.
"""
if freeinc is not None:
self.freeinc=freeinc
if self.freeinc is not None:
try:
iksuffix = ' ' + ' '.join(str(f) for f in self.freeinc)
except TypeError:
# possibly just a float
iksuffix = ' %f'%self.freeinc
else:
iksuffix = ' ' + ' '.join(str(f) for f in self.getDefaultFreeIncrements(0.1, 0.01))
# if self.manip.GetIkSolver() is not None:
# self.iksolver = RaveCreateIkSolver(self.env,self.manip.GetIKSolverName()+iksuffix)
if self.iksolver is None:
with self.env:
ikname = self.getikname()
iktype = self.ikfastproblem.SendCommand('AddIkLibrary %s %s'%(ikname.split()[1],self.getfilename(True)))
if iktype is None:
if self.forceikfast:
return False
self.iksolver = RaveCreateIkSolver(self.env,self.manip.GetIkSolver().GetXMLId().split(' ',1)[0]+iksuffix) if self.manip.GetIkSolver() is not None else None
else:
if int(self.iktype) != int(iktype):
raise InverseKinematicsError('ik does not match types %s!=%s'%(self.iktype,iktype))
self.iksolver = RaveCreateIkSolver(self.env,ikname+iksuffix)
if self.iksolver is not None and self.iksolver.Supports(self.iktype):
success = self.manip.SetIKSolver(self.iksolver)
if success and self.manip.GetIkSolver() is not None and self.freeinc is not None:
freeincvalue = 0.01
try:
if len(self.freeinc) > 0:
freeincvalue = self.freeinc[0]
except TypeError:
freeincvalue = float(self.freeinc)
self.manip.GetIkSolver().SendCommand('SetDefaultIncrements %f 100 %f 10'%(freeincvalue,pi/8))
return success
return self.has()
def getDefaultFreeIncrements(self,freeincrot, freeinctrans):
"""Returns a list of delta increments appropriate for each free index
"""
with self.env:
values = []
eetrans = self.manip.GetEndEffectorTransform()[0:3,3]
armlength = 0
orderedarmindices = [j for j in self.robot.GetDependencyOrderedJoints() if j.GetJointIndex() in self.manip.GetArmIndices()]
for j in orderedarmindices[::-1]:
armlength += sqrt(sum((eetrans-j.GetAnchor())**2))
eetrans = j.GetAnchor()
freeinc = []
for index in self.freeindices:
joint = self.robot.GetJointFromDOFIndex(index)
if joint.IsRevolute(index-joint.GetDOFIndex()):
freeinc.append(freeincrot)
elif joint.IsPrismatic(index-joint.GetDOFIndex()):
freeinc.append(freeinctrans*armlength)
else:
log.warn('cannot set increment for joint type %s'%joint.GetType())
return freeinc
def GetDefaultIndices(self,avoidPrismaticAsFree=False):
"""Returns a default set of free indices if the robot has more joints than required by the IK.
        In the future, this function will contain heuristics in order to select the best index candidates.
:param avoidPrismaticAsFree: if True for redundant manipulators, will attempt to avoid setting prismatic joints as free joints unless the IK gets really complicated (and usually cannot be solved)
"""
if self.iktype is None:
raise InverseKinematicsError(u'ik type is not set')
freeindices = []
dofexpected = IkParameterization.GetDOFFromType(self.iktype)
remainingindices = list(self.manip.GetArmIndices())
if len(remainingindices) > dofexpected:
N = len(remainingindices)
# need to choose a free index so that
# 1. the IK can be computed
# 2. the IK has the most success rate (ie choose joint with least impact on performance)
#
            # the compatibility of the IK depends a lot on what axes are intersecting, and whether they are orthogonal with each other
# In general, take from the top or the bottom depending on the complexity of the arm.
robot=self.manip.GetRobot()
jointanchors = []
jointaxes = []
jointtypes = []
for i,index in enumerate(self.manip.GetArmIndices()):
joint=robot.GetJointFromDOFIndex(index)
jointanchors.append(joint.GetAnchor())
jointaxes.append(joint.GetAxis(index-joint.GetDOFIndex()))
jointtypes.append(joint.GetType())
intersectingaxes = eye(N)
for i in range(N):
for j in range(i+1,N):
norm = cross(jointaxes[j], jointaxes[i])
diff = jointanchors[j]-jointanchors[i]
if sum(norm**2) > 1e-7:
# axes are not parallel
if abs(dot(norm, diff)) <= 1e-5:
                            # axes intersect
intersectingaxes[i,j] = intersectingaxes[j,i] = 1
else:
# axes are parallel
if sum(cross(jointaxes[i],diff)**2) <= 1e-10:
intersectingaxes[i,j] = intersectingaxes[j,i] = 1
# adjacent intersecting revolute joints
intersecting3axes = [0]*N
num3intersecting = 0
for i in range(1,N-1):
if jointtypes[i-1] == KinBody.JointType.Revolute and jointtypes[i] == KinBody.JointType.Revolute and jointtypes[i+1] == KinBody.JointType.Revolute:
if intersectingaxes[i-1,i] and intersectingaxes[i,i+1] and intersectingaxes[i-1,i+1]:
# have to check if they intersect at a common point
intersection = jointanchors[i] + jointaxes[i] * dot(jointaxes[i], jointanchors[i-1]-jointanchors[i])
distfromintersection = sum(cross(jointaxes[i+1],intersection - jointanchors[i+1])**2)
if distfromintersection < 1e-10:
intersecting3axes[i-1] |= 1 << num3intersecting
intersecting3axes[i] |= 1 << num3intersecting
intersecting3axes[i+1] |= 1 << num3intersecting
log.info('found 3-intersection centered on index %d', remainingindices[i])
num3intersecting += 1
for i in range(N - dofexpected):
# by default always choose first
indextopop = 0
if self.iktype == IkParameterizationType.Transform6D:
if num3intersecting > 0:
# try to preserve the intersecting axes
# only choose wrist if wrist isn't intersecting and [2] is
if intersecting3axes[2] > 0 and intersecting3axes[-1] == 0:
indextopop = len(intersecting3axes)-1
else:
hasother = False
# prioritize 2 by checking if there exists other intersecting axes
for j in range(len(intersecting3axes)-1,-1,-1):
if (intersecting3axes[j] & ~intersecting3axes[2]) > 0:
hasother = True
if hasother:
indextopop = 2
else:
# prioritize the first index that is not in intersecting
for j in range(len(intersecting3axes)-1,-1,-1):
if intersecting3axes[j] == 0:
indextopop = j
break
else:
# already complicated enough, so take from the bottom in order to avoid variables coming inside the kinematics
indextopop = 0
if avoidPrismaticAsFree and jointtypes[indextopop] == KinBody.JointType.Prismatic:
# it's either one or the other
indextopop = len(remainingindices)-1
elif self.iktype == IkParameterizationType.Lookat3D:
# usually head (rotation joints) are at the end
#freeindices = remainingindices[len(remainingindices)-2:]
#remainingindices=remainingindices[:-2]
#len(remainingindices)
indextopop = len(remainingindices)-1
#avoidPrismaticAsFree?
elif self.iktype == IkParameterizationType.TranslationDirection5D:
# check if ray aligns with furthest axis
dirfromanchor = self.manip.GetTransform()[0:3,3]-jointanchors[-1]
if abs(dot(jointaxes[-1],dot(self.manip.GetTransform()[0:3,0:3],self.manip.GetLocalToolDirection()))) > 0.99999 and linalg.norm(cross(jointaxes[-1],dirfromanchor)) <= 1e-5:
# have to take the last index since last axis aligns and is useless anyway
indextopop = len(remainingindices)-1
else:
for indextopop in range(len(remainingindices)):
if not avoidPrismaticAsFree or jointtypes[indextopop] != KinBody.JointType.Prismatic:
# done
break
else:
# self.iktype == IkParameterizationType.Translation3D or self.iktype == IkParameterizationType.TranslationLocalGlobal6D
# if not 6D, then don't need to worry about intersecting joints
# so remove the least important joints
for indextopop in range(len(remainingindices)-1,-1,-1):
if not avoidPrismaticAsFree or jointtypes[indextopop] != KinBody.JointType.Prismatic:
# done
break
freeindices.append(remainingindices.pop(indextopop))
jointanchors.pop(indextopop)
jointaxes.pop(indextopop)
jointtypes.pop(indextopop)
# have to clear any intersecting axes
mask = intersecting3axes.pop(indextopop)
for j in range(len(intersecting3axes)):
intersecting3axes[j] &= ~mask
solveindices = [i for i in self.manip.GetArmIndices() if not i in freeindices]
return solveindices,freeindices
def getfilename(self,read=False):
if self.iktype is None:
raise InverseKinematicsError(u'ik type is not set')
if self.solveindices is None or self.freeindices is None:
solveindices, freeindices = self.GetDefaultIndices()
else:
solveindices, freeindices = self.solveindices, self.freeindices
index = -1
allfreeindices = None
while True:
basename = 'ikfast%s.%s.%s.'%(self.ikfast.__version__,self.iktype,platform.machine()) + '_'.join(str(ind) for ind in sorted(solveindices))
if len(freeindices)>0:
basename += '_f'+'_'.join(str(ind) for ind in sorted(freeindices))
filename = RaveFindDatabaseFile(os.path.join('kinematics.'+self.manip.GetInverseKinematicsStructureHash(self.iktype),ccompiler.new_compiler().shared_object_filename(basename=basename)),read)
if not read or len(filename) > 0 or self.freeindices is not None:
break
# user did not specify a set of freeindices, so the expected behavior is to search for the next loadable one
index += 1
dofexpected = IkParameterization.GetDOFFromType(self.iktype)
if allfreeindices is None:
allfreeindices = [f for f in self.ikfast.permutations(self.manip.GetArmIndices(),len(self.manip.GetArmIndices())-dofexpected)]
if index >= len(allfreeindices):
break
freeindices = allfreeindices[index]
solveindices = [i for i in self.manip.GetArmIndices() if not i in freeindices]
return filename
def getsourcefilename(self,read=False,outputlang='cpp'):
if self.iktype is None:
raise InverseKinematicsError(u'ik type is not set')
if self.solveindices is None or self.freeindices is None:
solveindices, freeindices = self.GetDefaultIndices()
else:
solveindices, freeindices = self.solveindices, self.freeindices
basename = 'ikfast%s.%s.'%(self.ikfast.__version__,self.iktype)
basename += '_'.join(str(ind) for ind in sorted(solveindices))
if len(freeindices)>0:
basename += '_f'+'_'.join(str(ind) for ind in sorted(freeindices))
basename += '.' + outputlang
return RaveFindDatabaseFile(os.path.join('kinematics.'+self.manip.GetInverseKinematicsStructureHash(self.iktype),basename),read)
def getstatsfilename(self,read=False):
if self.iktype is None:
raise InverseKinematicsError(u'ik type is not set')
if self.solveindices is None or self.freeindices is None:
solveindices, freeindices = self.GetDefaultIndices()
else:
solveindices, freeindices = self.solveindices, self.freeindices
index = -1
while True:
freeindicesstrings = []
if len(freeindices)>0:
for _freeindices in self.ikfast.permutations(freeindices):
freeindicesstrings.append(['_f'+'_'.join(str(ind) for ind in sorted(_freeindices)),_freeindices])
else:
freeindicesstrings.append(['',[]])
for freeindicesstring, fi in freeindicesstrings:
basename = 'ikfast%s.%s.'%(self.ikfast.__version__,self.iktype)
basename += '_'.join(str(ind) for ind in sorted(solveindices))
basename += freeindicesstring
basename += '.pp'
filename = RaveFindDatabaseFile(os.path.join('kinematics.'+self.manip.GetInverseKinematicsStructureHash(self.iktype),basename),read)
if not read or len(filename) > 0 or self.freeindices is not None:
self.freeindices = fi
return filename
# user did not specify a set of freeindices, so the expected behavior is to search for the next loadable one
index += 1
dofexpected = IkParameterization.GetDOFFromType(self.iktype)
allfreeindices = [f for f in self.ikfast.combinations(self.manip.GetArmIndices(),len(self.manip.GetArmIndices())-dofexpected)]
if index >= len(allfreeindices):
break
freeindices = allfreeindices[index]
solveindices = [i for i in self.manip.GetArmIndices() if not i in freeindices]
return filename
def autogenerate(self,options=None):
freejoints = None
iktype = self.iktype
precision = None
forceikbuild = True
outputlang = None
ipython = None
freeinc = None
ikfastmaxcasedepth = 3
if options is not None:
forceikbuild=options.force
precision=options.precision
if options.freejoints is not None:
freejoints=options.freejoints
outputlang=options.outputlang
ipython=options.ipython
if options.freeinc is not None:
freeinc = [float64(s) for s in options.freeinc]
ikfastmaxcasedepth = options.maxcasedepth
if self.manip.GetKinematicsStructureHash() == 'f17f58ee53cc9d185c2634e721af7cd3': # wam 4dof
if iktype is None:
iktype=IkParameterizationType.Translation3D
if iktype == IkParameterizationType.Translation3D and freejoints is None:
freejoints = ['Shoulder_Roll']
elif self.manip.GetKinematicsStructureHash() == 'bfc61bd497e9993b85f1ab511ee7bdbc': # stage
if iktype is None:
iktype=IkParameterizationType.Rotation3D
elif self.manip.GetKinematicsStructureHash() == 'c363859a2d7a151a22dc1e251d6d8669' or self.manip.GetKinematicsStructureHash() == '12ceb0aaa06143fe305efa6e48faae0b': # pr2
if iktype == None:
iktype=IkParameterizationType.Transform6D
if iktype == IkParameterizationType.Transform6D and freejoints is None:
# take the torso and roll joint
freejoints=[self.robot.GetJoints()[self.manip.GetArmIndices()[ind]].GetName() for ind in [0,3]]
elif self.manip.GetKinematicsStructureHash()=='a1e9aea0dc0fda631ca376c03d500927' or self.manip.GetKinematicsStructureHash()=='ceb6be51bd14f345e22997cc0bca9f2f': # pr2 cameras
if iktype is None:
iktype=IkParameterizationType.Ray4D
if freejoints is None:
# take the torso joint
freejoints=[self.robot.GetJoints()[self.manip.GetArmIndices()[0]].GetName()]
elif self.manip.GetKinematicsStructureHash()=='2640ae411e0c87b03f56bf289296f9d8' and iktype == IkParameterizationType.Lookat3D: # pr2 head_torso
if freejoints is None:
freejoints=[self.robot.GetJoints()[self.manip.GetArmIndices()[0]].GetName()]
elif self.manip.GetKinematicsStructureHash()=='ab9d03903279e44bc692e896791bcd05' or self.manip.GetKinematicsStructureHash()=='afe50514bf09aff5f2a84beb078bafbd': # katana
if iktype==IkParameterizationType.Translation3D or (iktype==None and self.iktype==IkParameterizationType.Translation3D):
freejoints = [self.robot.GetJoints()[ind].GetName() for ind in self.manip.GetArmIndices()[3:]]
if iktype==None:
                iktype = IkParameterizationType.TranslationDirection5D
self.generate(iktype=iktype,freejoints=freejoints,precision=precision,forceikbuild=forceikbuild,outputlang=outputlang,ipython=ipython,ikfastmaxcasedepth=ikfastmaxcasedepth)
self.save()
def getIndicesFromJointNames(self,freejoints):
freeindices = []
for jointname in freejoints:
if type(jointname) == int:
freeindices.append(jointname)
else:
# find the correct joint index
dofindices = [joint.GetDOFIndex() for joint in self.robot.GetJoints() if joint.GetName()==jointname]
if len(dofindices) == 0:
raise LookupError("cannot find '%s' joint in %s robot"%(jointname,self.robot.GetName()))
if not dofindices[0] in self.manip.GetArmIndices():
raise LookupError("cannot find joint '%s(%d)' in solve joints: %s"%(jointname,dofindices[0],self.manip.GetArmIndices()))
freeindices.append(dofindices[0])
print 'getIndicesFromJointNames',freeindices,freejoints
return freeindices
def generate(self,iktype=None, freejoints=None, freeinc=None, freeindices=None, precision=None, forceikbuild=True, outputlang=None, avoidPrismaticAsFree=False, ipython=False, ikfastoptions=0, ikfastmaxcasedepth=3):
"""
:param ikfastoptions: see IKFastSolver.generateIkSolver
:param ikfastmaxcasedepth: the max level of degenerate cases to solve for
:param avoidPrismaticAsFree: if True for redundant manipulators, will attempt to avoid setting prismatic joints as free joints.
"""
self.iksolver = None
if iktype is not None:
self.iktype = iktype
if self.iktype is None:
self.iktype = iktype = IkParameterizationType.Transform6D
if self.iktype == IkParameterizationType.Rotation3D:
Rbaseraw=self.manip.GetLocalToolTransform()[0:3,0:3]
def solveFullIK_Rotation3D(*args,**kwargs):
kwargs['Rbaseraw'] = Rbaseraw
return self.ikfast.IKFastSolver.solveFullIK_Rotation3D(*args,**kwargs)
solvefn=solveFullIK_Rotation3D
elif self.iktype == IkParameterizationType.Direction3D:
rawbasedir=dot(self.manip.GetLocalToolTransform()[0:3,0:3],self.manip.GetDirection())
def solveFullIK_Direction3D(*args,**kwargs):
kwargs['rawbasedir'] = rawbasedir
return self.ikfast.IKFastSolver.solveFullIK_Direction3D(*args,**kwargs)
solvefn=solveFullIK_Direction3D
elif self.iktype == IkParameterizationType.Ray4D:
rawbasedir=dot(self.manip.GetLocalToolTransform()[0:3,0:3],self.manip.GetDirection())
rawbasepos=self.manip.GetLocalToolTransform()[0:3,3]
def solveFullIK_Ray4D(*args,**kwargs):
kwargs['rawbasedir'] = rawbasedir
kwargs['rawbasepos'] = rawbasepos
return self.ikfast.IKFastSolver.solveFullIK_Ray4D(*args,**kwargs)
solvefn=solveFullIK_Ray4D
elif self.iktype == IkParameterizationType.TranslationDirection5D:
rawbasedir=dot(self.manip.GetLocalToolTransform()[0:3,0:3],self.manip.GetDirection())
rawbasepos=self.manip.GetLocalToolTransform()[0:3,3]
def solveFullIK_TranslationDirection5D(*args,**kwargs):
kwargs['rawbasedir'] = rawbasedir
kwargs['rawbasepos'] = rawbasepos
return self.ikfast.IKFastSolver.solveFullIK_TranslationDirection5D(*args,**kwargs)
solvefn=solveFullIK_TranslationDirection5D
elif self.iktype == IkParameterizationType.Translation3D:
rawbasepos=self.manip.GetLocalToolTransform()[0:3,3]
def solveFullIK_Translation3D(*args,**kwargs):
kwargs['rawbasepos'] = rawbasepos
return self.ikfast.IKFastSolver.solveFullIK_Translation3D(*args,**kwargs)
solvefn=solveFullIK_Translation3D
elif self.iktype == IkParameterizationType.TranslationXY2D:
rawbasepos=self.manip.GetLocalToolTransform()[0:2,3]
def solveFullIK_TranslationXY2D(*args,**kwargs):
kwargs['rawbasepos'] = rawbasepos
return self.ikfast.IKFastSolver.solveFullIK_TranslationXY2D(*args,**kwargs)
solvefn=solveFullIK_TranslationXY2D
elif self.iktype == IkParameterizationType.TranslationXYOrientation3D:
rawbasedir=dot(self.manip.GetLocalToolTransform()[0:3,0:3],self.manip.GetDirection())
rawbasepos=self.manip.GetLocalToolTransform()[0:3,3]
def solveFullIK_TranslationXAxisAngleZNorm4D(*args,**kwargs):
kwargs['rawbasedir'] = rawbasedir
kwargs['rawbasepos'] = rawbasepos
kwargs['rawglobaldir'] = [1.0,0.0,0.0]
kwargs['rawnormaldir'] = [0.0,0.0,1.0]
kwargs['ignoreaxis'] = 2
return self.ikfast.IKFastSolver.solveFullIK_TranslationAxisAngle4D(*args,**kwargs)
solvefn=solveFullIK_TranslationXAxisAngleZNorm4D
# rawbasepos=self.manip.GetLocalToolTransform()[0:3,3]
# rawbasedir=dot(self.manip.GetLocalToolTransform()[0:3,0:3],self.manip.GetDirection())
# def solveFullIK_TranslationXYOrientation3D(*args,**kwargs):
# kwargs['rawbasepos'] = rawbasepos
# kwargs['rawbasedir'] = rawbasedir
# return self.ikfast.IKFastSolver.solveFullIK_TranslationXYOrientation3D(*args,**kwargs)
# solvefn=solveFullIK_TranslationXYOrientation3D
elif self.iktype == IkParameterizationType.Transform6D:
Tgripperraw = eye(4) # newer ikfast versions don't compile with self.manip.GetLocalToolTransform() in order to re-use the same 6D IK for multiple local transforms
def solveFullIK_6D(*args,**kwargs):
kwargs['Tgripperraw'] = Tgripperraw
return self.ikfast.IKFastSolver.solveFullIK_6D(*args,**kwargs)
solvefn=solveFullIK_6D
elif self.iktype == IkParameterizationType.Lookat3D:
rawbasedir=dot(self.manip.GetLocalToolTransform()[0:3,0:3],self.manip.GetDirection())
rawbasepos=self.manip.GetLocalToolTransform()[0:3,3]
def solveFullIK_Lookat3D(*args,**kwargs):
kwargs['rawbasedir'] = rawbasedir
kwargs['rawbasepos'] = rawbasepos
return self.ikfast.IKFastSolver.solveFullIK_Lookat3D(*args,**kwargs)
solvefn=solveFullIK_Lookat3D
elif self.iktype == IkParameterizationType.TranslationLocalGlobal6D:
Tgripperraw=self.manip.GetLocalToolTransform()
def solveFullIK_TranslationLocalGlobal6D(*args,**kwargs):
kwargs['Tgripperraw'] = Tgripperraw
return self.ikfast.IKFastSolver.solveFullIK_TranslationLocalGlobal6D(*args,**kwargs)
solvefn=solveFullIK_TranslationLocalGlobal6D
elif self.iktype == IkParameterizationType.TranslationXAxisAngle4D:
rawbasedir=dot(self.manip.GetLocalToolTransform()[0:3,0:3],self.manip.GetDirection())
rawbasepos=self.manip.GetLocalToolTransform()[0:3,3]
def solveFullIK_TranslationXAxisAngle4D(*args,**kwargs):
kwargs['rawbasedir'] = rawbasedir
kwargs['rawbasepos'] = rawbasepos
kwargs['rawglobaldir'] = [1.0,0.0,0.0]
return self.ikfast.IKFastSolver.solveFullIK_TranslationAxisAngle4D(*args,**kwargs)
solvefn=solveFullIK_TranslationXAxisAngle4D
elif self.iktype == IkParameterizationType.TranslationYAxisAngle4D:
rawbasedir=dot(self.manip.GetLocalToolTransform()[0:3,0:3],self.manip.GetDirection())
rawbasepos=self.manip.GetLocalToolTransform()[0:3,3]
def solveFullIK_TranslationYAxisAngle4D(*args,**kwargs):
kwargs['rawbasedir'] = rawbasedir
kwargs['rawbasepos'] = rawbasepos
kwargs['rawglobaldir'] = [0.0,1.0,0.0]
return self.ikfast.IKFastSolver.solveFullIK_TranslationAxisAngle4D(*args,**kwargs)
solvefn=solveFullIK_TranslationYAxisAngle4D
elif self.iktype == IkParameterizationType.TranslationZAxisAngle4D:
rawbasedir=dot(self.manip.GetLocalToolTransform()[0:3,0:3],self.manip.GetDirection())
rawbasepos=self.manip.GetLocalToolTransform()[0:3,3]
def solveFullIK_TranslationZAxisAngle4D(*args,**kwargs):
kwargs['rawbasedir'] = rawbasedir
kwargs['rawbasepos'] = rawbasepos
kwargs['rawglobaldir'] = [0.0,0.0,1.0]
return self.ikfast.IKFastSolver.solveFullIK_TranslationAxisAngle4D(*args,**kwargs)
solvefn=solveFullIK_TranslationZAxisAngle4D
elif self.iktype == IkParameterizationType.TranslationXAxisAngleZNorm4D:
rawbasedir=dot(self.manip.GetLocalToolTransform()[0:3,0:3],self.manip.GetDirection())
rawbasepos=self.manip.GetLocalToolTransform()[0:3,3]
def solveFullIK_TranslationXAxisAngleZNorm4D(*args,**kwargs):
kwargs['rawbasedir'] = rawbasedir
kwargs['rawbasepos'] = rawbasepos
kwargs['rawglobaldir'] = [1.0,0.0,0.0]
kwargs['rawnormaldir'] = [0.0,0.0,1.0]
return self.ikfast.IKFastSolver.solveFullIK_TranslationAxisAngle4D(*args,**kwargs)
solvefn=solveFullIK_TranslationXAxisAngleZNorm4D
elif self.iktype == IkParameterizationType.TranslationYAxisAngleXNorm4D:
rawbasedir=dot(self.manip.GetLocalToolTransform()[0:3,0:3],self.manip.GetDirection())
rawbasepos=self.manip.GetLocalToolTransform()[0:3,3]
def solveFullIK_TranslationYAxisAngleXNorm4D(*args,**kwargs):
kwargs['rawbasedir'] = rawbasedir
kwargs['rawbasepos'] = rawbasepos
kwargs['rawglobaldir'] = [0.0,1.0,0.0]
kwargs['rawnormaldir'] = [1.0,0.0,0.0]
return self.ikfast.IKFastSolver.solveFullIK_TranslationAxisAngle4D(*args,**kwargs)
solvefn=solveFullIK_TranslationYAxisAngleXNorm4D
elif self.iktype == IkParameterizationType.TranslationZAxisAngleYNorm4D:
rawbasedir=dot(self.manip.GetLocalToolTransform()[0:3,0:3],self.manip.GetDirection())
rawbasepos=self.manip.GetLocalToolTransform()[0:3,3]
def solveFullIK_TranslationZAxisAngleYNorm4D(*args,**kwargs):
kwargs['rawbasedir'] = rawbasedir
kwargs['rawbasepos'] = rawbasepos
kwargs['rawglobaldir'] = [0.0,0.0,1.0]
kwargs['rawnormaldir'] = [0.0,1.0,0.0]
return self.ikfast.IKFastSolver.solveFullIK_TranslationAxisAngle4D(*args,**kwargs)
solvefn=solveFullIK_TranslationZAxisAngleYNorm4D
else:
raise InverseKinematicsError(u'bad type')
dofexpected = IkParameterization.GetDOFFromType(self.iktype)
if freeindices is not None:
self.freeindices = freeindices
if self.freeindices is None:
if freejoints is not None:
self.freeindices = self.getIndicesFromJointNames(freejoints)
else:
self.solveindices,self.freeindices = self.GetDefaultIndices(avoidPrismaticAsFree=avoidPrismaticAsFree)
self.solveindices = [i for i in self.manip.GetArmIndices() if not i in self.freeindices]
if len(self.solveindices) != dofexpected:
raise InverseKinematicsError(u'Manipulator %(manip)s (indices=%(manipindices)r) joint indices to solve for (%(solveindices)r) is not equal to number of expected joints (%(dofexpected)d) for IK type %(iktype)s'%{'manip':self.manip.GetName(),'manipindices':list(self.manip.GetArmIndices()), 'solveindices':list(self.solveindices), 'dofexpected':dofexpected, 'iktype':self.iktype.name})
if freeinc is not None:
self.freeinc = freeinc
if self.freeinc is None:
self.freeinc = self.getDefaultFreeIncrements(0.1,0.01)
log.info('Generating inverse kinematics for manip %s: %s %s, precision=%s, maxcasedepth=%d (this might take up to 10 min)',self.manip.GetName(),self.iktype,self.solveindices, precision, ikfastmaxcasedepth)
if outputlang is None:
outputlang = 'cpp'
sourcefilename = self.getsourcefilename(False,outputlang)
statsfilename = self.getstatsfilename(False)
output_filename = self.getfilename(False)
sourcedir = os.path.split(sourcefilename)[0]
if forceikbuild or not os.path.isfile(sourcefilename):
log.info('creating ik file %s',sourcefilename)
try:
os.makedirs(sourcedir)
except OSError:
pass
solver = self.ikfast.IKFastSolver(kinbody=self.robot,kinematicshash=self.manip.GetInverseKinematicsStructureHash(self.iktype),precision=precision)
solver.maxcasedepth = ikfastmaxcasedepth
if self.iktype == IkParameterizationType.TranslationXAxisAngle4D or self.iktype == IkParameterizationType.TranslationYAxisAngle4D or self.iktype == IkParameterizationType.TranslationZAxisAngle4D or self.iktype == IkParameterizationType.TranslationXAxisAngleZNorm4D or self.iktype == IkParameterizationType.TranslationYAxisAngleXNorm4D or self.iktype == IkParameterizationType.TranslationZAxisAngleYNorm4D or self.iktype == IkParameterizationType.TranslationXYOrientation3D:
solver.useleftmultiply = False
baselink=self.manip.GetBase().GetIndex()
eelink=self.manip.GetEndEffector().GetIndex()
if ipython:
# requires ipython v0.11+
IPython = __import__('IPython')
if IPython.__version__.startswith("0.10"):
ipshell = IPython.Shell.IPShellEmbed(argv='',banner = 'inversekinematics dropping into ipython',exit_msg = 'Leaving Interpreter and continuing solver.')
ipshell(local_ns=locals())
else:
from IPython.terminal import embed; ipshell=embed.InteractiveShellEmbed(config=embed.load_default_config())(local_ns=locals())
# m=__import__('IPython.config.loader',fromlist=['Config'])
# Config = getattr(m,'Config')
# cfg = Config()
# cfg.InteractiveShellEmbed.local_ns = locals()
# cfg.InteractiveShellEmbed.global_ns = globals()
# IPython.embed(config=cfg, banner2 = 'inversekinematics dropping into ipython')
# from IPython.frontend.terminal.embed import InteractiveShellEmbed
# ipshell = InteractiveShellEmbed(config=cfg)
reload(self.ikfast) # in case changes occurred
try:
generationstart = time.time()
chaintree = solver.generateIkSolver(baselink=baselink,eelink=eelink,freeindices=self.freeindices,solvefn=solvefn)
self.ikfeasibility = None
code = solver.writeIkSolver(chaintree,lang=outputlang)
if len(code) == 0:
raise InverseKinematicsError(u'failed to generate ik solver for robot %s:%s'%(self.robot.GetName(),self.manip.GetName()))
self.statistics['generationtime'] = time.time()-generationstart
self.statistics['usinglapack'] = solver.usinglapack
open(sourcefilename,'w').write(code)
try:
from pkg_resources import resource_filename
shutil.copyfile(resource_filename('openravepy','ikfast.h'), os.path.join(sourcedir,'ikfast.h'))
except ImportError,e:
log.warn(e)
log.info(u'successfully generated c++ ik in %fs, file=%s', self.statistics['generationtime'], sourcefilename)
except self.ikfast.IKFastSolver.IKFeasibilityError, e:
self.ikfeasibility = str(e)
log.warn(e)
if self.ikfeasibility is None:
log.info('compiling ik file to %s',output_filename)
if outputlang == 'cpp':
# compile the code and create the shared object
compiler,compile_flags = self.getcompiler()
try:
output_dir = os.path.relpath('/',getcwd())
except AttributeError: # python 2.5 does not have os.path.relpath
output_dir = relpath('/',getcwd())
platformsourcefilename = os.path.splitext(output_filename)[0]+'.cpp' # needed in order to prevent interference with machines with different architectures
shutil.copyfile(sourcefilename, platformsourcefilename)
objectfiles=[]
try:
objectfiles = compiler.compile(sources=[platformsourcefilename],macros=[('IKFAST_CLIBRARY',1),('IKFAST_NO_MAIN',1)],extra_postargs=compile_flags,output_dir=output_dir)
# because some parts of ikfast require lapack, always try to link with it
try:
iswindows = sys.platform.startswith('win') or platform.system().lower() == 'windows'
libraries = None
if self.statistics.get('usinglapack',False) or not iswindows:
libraries = ['lapack']
compiler.link_shared_object(objectfiles,output_filename=output_filename, libraries=libraries)
except distutils.errors.LinkError,e:
log.warn(e)
if libraries is not None and 'lapack' in libraries:
libraries.remove('lapack')
if len(libraries) == 0:
libraries = None
log.info('linking again with %r... (MSVC bug?)',libraries)
compiler.link_shared_object(objectfiles,output_filename=output_filename, libraries=libraries)
if not self.setrobot():
                        raise ValueError('failed to generate ik solver')
finally:
# cleanup intermediate files
if os.path.isfile(platformsourcefilename):
remove(platformsourcefilename)
for objectfile in objectfiles:
try:
remove(objectfile)
except:
pass
else:
log.warn('cannot continue further if outputlang %s is not cpp',outputlang)
self._cachedKinematicsHash = self.manip.GetInverseKinematicsStructureHash(self.iktype)
def perftiming(self,num):
with self.env:
results = self.ikfastproblem.SendCommand('PerfTiming num %d %s'%(num,self.getfilename(True)))
return [double(s)*1e-9 for s in results.split()]
def testik(self,iktests,jacobianthreshold=None):
"""Tests the iksolver.
:param iktests: the number of tests, or a filename that describes the tests
:param jacobianthreshold: When testing configurations, the eigenvalues of the jacobian all have to be greater than this value
"""
if self.ikfeasibility is not None:
raise InverseKinematicsError(u'ik is infeasible')
with self.robot:
self.robot.SetActiveManipulator(self.manip)
# set base to identity to avoid complications when reporting errors
self.robot.SetTransform(dot(linalg.inv(self.manip.GetBase().GetTransform()),self.robot.GetTransform()))
cmd = 'DebugIK sampledegeneratecases 0.2 robot %s '%self.robot.GetName()
if iktests.isdigit():
assert(int(iktests) > 0)
cmd += 'numtests %d '%int(iktests)
else:
cmd += 'readfile %s '%iktests
if jacobianthreshold is not None:
cmd += 'jacobianthreshold %s '%jacobianthreshold
res = self.ikfastproblem.SendCommand(cmd).split()
numtested = float(res[0])
successrate = float(res[1])/numtested
solutionresults = []
index = 2
numvalues=1+IkParameterization.GetNumberOfValuesFromType(self.iktype)+self.manip.GetIkSolver().GetNumFreeParameters()
for iresults in range(3):
num = int(res[index])
index += 1
samples = reshape(array([float64(s) for s in res[index:(index+num*numvalues)]]),(num,numvalues))
solutionresults.append(samples)
index += num*numvalues
wrongrate = len(solutionresults[0])/numtested
log.info('success rate: %f, wrong solutions: %f, no solutions: %f, missing solution: %f',float(res[1])/numtested,wrongrate,len(solutionresults[1])/numtested,len(solutionresults[2])/numtested)
return successrate, wrongrate
def show(self,delay=0.1,options=None,forceclosure=True):
if self.env.GetViewer() is None:
self.env.SetViewer('qtcoin')
time.sleep(0.4) # give time for viewer to initialize
with RobotStateSaver(self.robot):
with self.ArmVisibility(self.manip,0.95):
time.sleep(3) # let viewer load
self.setrobot(0.05)
while True:
with self.env:
lower,upper = self.robot.GetDOFLimits(self.manip.GetArmIndices())
self.robot.SetDOFValues(lower+random.rand(len(lower))*(upper-lower),self.manip.GetArmIndices())
ikparam = self.manip.GetIkParameterization(self.iktype)
sols = self.manip.FindIKSolutions(ikparam,IkFilterOptions.CheckEnvCollisions)
weights = self.robot.GetDOFWeights(self.manip.GetArmIndices())
log.info('found %d solutions'%len(sols))
sols = TSP(sols,lambda x,y: sum(weights*(x-y)**2))
# find shortest route
for sol in sols:
self.robot.SetDOFValues(sol,self.manip.GetArmIndices())
self.env.UpdatePublishedBodies()
time.sleep(delay)
@staticmethod
def getcompiler():
compiler = ccompiler.new_compiler()
compile_flags = []
if compiler.compiler_type == 'msvc':
compile_flags.append('/Ox')
try:
# make sure it is correct version!
cname,cver = openravepyCompilerVersion().split()
if cname == 'msvc':
majorVersion = int(cver)/100-6
minorVersion = mod(int(cver),100)/10.0
                    if abs(compiler._MSVCCompiler__version - (majorVersion+minorVersion)) > 0.001:
                        log.warn('default compiler v%s is not the same version as openrave compiler v%f, looking for a different compiler',compiler._MSVCCompiler__version, majorVersion+minorVersion)
distutils.msvc9compiler.VERSION = majorVersion + minorVersion
newcompiler = ccompiler.new_compiler()
if newcompiler is not None:
compiler = newcompiler
except Exception, e:
log.warn(e)
else:
compiler.add_library('stdc++')
if compiler.compiler_type == 'unix':
compile_flags.append('-O3')
compile_flags.append('-fPIC')
return compiler,compile_flags
@staticmethod
def CreateOptionParser():
parser = DatabaseGenerator.CreateOptionParser()
parser.description='Uses ikfast to compute the closed-form inverse kinematics equations of a robot manipulator, generates a C++ file, and compiles this file into a shared object which can then be loaded by OpenRAVE.'
parser.usage='openrave.py --database inversekinematics [options]'
parser.add_option('--freejoint', action='append', type='string', dest='freejoints',default=None,
                          help='Optional joint name specifying a free parameter of the manipulator. The value of a free joint is known at runtime, but not known at IK generation time. If nothing is specified, all joints that are not being solved for are assumed to be free parameters. Can be specified multiple times for multiple free parameters.')
parser.add_option('--precision', action='store', type='int', dest='precision',default=8,
help='The precision to compute the inverse kinematics in, (default=%default).')
parser.add_option('--maxcasedepth', action='store', type='int', dest='maxcasedepth',default=3,
help='The max depth to go into degenerate cases. If ikfast file is too big, try reducing this, (default=%default).')
parser.add_option('--usecached', action='store_false', dest='force',default=True,
help='If set, will always try to use the cached ik c++ file, instead of generating a new one.')
parser.add_option('--freeinc', action='append', type='float', dest='freeinc',default=None,
help='The discretization value of freejoints.')
parser.add_option('--numiktests','--iktests',action='store',type='string',dest='iktests',default=None,
                          help='Will test the ik solver and return the success rate. IKTESTS can be an integer to specify the number of random tests, or a filename specifying the joint values of the manipulator to test. The format of the file is: #numiktests [dof values]*')
parser.add_option('--iktestjthresh',action='store',type='float',dest='iktestjthresh',default=None,
help='When testing configurations, the eigenvalues of the jacobian all have to be greater than this value')
parser.add_option('--perftiming', action='store',type='int',dest='perftiming',default=None,
help='Number of IK calls for measuring the internal ikfast solver.')
parser.add_option('--outputlang', action='store',type='string',dest='outputlang',default=None,
help='If specified, will output the generated code in that language (ie --outputlang=cpp).')
parser.add_option('--ipython', '-i',action="store_true",dest='ipython',default=False,
help='if true will drop into the ipython interpreter right before ikfast is called')
parser.add_option('--iktype', action='store',type='string',dest='iktype',default=None,
help='The ik type to build the solver current types are: %s'%(', '.join(iktype.name for iktype in IkParameterizationType.values.values() if not int(iktype) & IkParameterizationType.VelocityDataBit )))
return parser
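    # Typical command-line invocation, following parser.usage above (robot file and ik type are illustrative):
    #   openrave.py --database inversekinematics --robot=robots/barrettwam.robot.xml --iktype=transform6d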
@staticmethod
def RunFromParser(Model=None,parser=None,args=None,**kwargs):
if parser is None:
parser = InverseKinematicsModel.CreateOptionParser()
(options, leftargs) = parser.parse_args(args=args)
if options.iktype is not None:
# cannot use .names due to python 2.5 (or is it boost version?)
for value,type in IkParameterizationType.values.iteritems():
if type.name.lower() == options.iktype.lower():
iktype = type
break
else:
iktype = IkParameterizationType.Transform6D
Model = lambda robot: InverseKinematicsModel(robot=robot,iktype=iktype,forceikfast=True)
robotatts={}
if not options.show:
robotatts = {'skipgeometry':'1'}
model = DatabaseGenerator.RunFromParser(Model=Model,parser=parser,robotatts=robotatts,args=args,**kwargs)
if options.iktests is not None or options.perftiming is not None:
log.info('testing the success rate of robot %s ',options.robot)
env = Environment()
try:
#robot = env.ReadRobotXMLFile(options.robot,{'skipgeometry':'1'})
#env.Add(robot)
env.Load(options.robot,{'skipgeometry':'1'})
manip = None
if options.manipname is None:
robot = env.GetRobots()[0]
else:
for robot in env.GetRobots():
manip = robot.GetManipulator(options.manipname)
if manip is not None:
break
ikmodel = InverseKinematicsModel(robot,iktype=model.iktype,forceikfast=True,freeindices=model.freeindices,manip=manip)
if not ikmodel.load(freeinc=options.freeinc):
raise InverseKinematicsError(u'failed to load ik')
if options.iktests is not None:
successrate, wrongrate = ikmodel.testik(iktests=options.iktests,jacobianthreshold=options.iktestjthresh)
if wrongrate > 0:
raise InverseKinematicsError(u'wrong rate %f > 0!'%wrongrate)
elif options.perftiming:
results = array(ikmodel.perftiming(num=options.perftiming))
log.info('running time mean: %fs, median: %fs, min: %fs, max: %fs', mean(results),median(results),min(results),max(results))
finally:
env.Destroy()
RaveDestroy()
def run(*args,**kwargs):
"""Command-line execution of the example. ``args`` specifies a list of the arguments to the script.
"""
InverseKinematicsModel.RunFromParser(*args,**kwargs)
```
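A minimal usage sketch for the database generator above; the robot file and IK type here are illustrative assumptions, not values taken from this module.
```python
from openravepy import Environment, IkParameterization, IkFilterOptions, databases

env = Environment()
env.Load('robots/barrettwam.robot.xml')  # assumed robot file; substitute your own
robot = env.GetRobots()[0]

# Load a cached ikfast solver for the active manipulator, generating and compiling one if needed.
ikmodel = databases.inversekinematics.InverseKinematicsModel(robot=robot, iktype=IkParameterization.Type.Transform6D)
if not ikmodel.load():
    ikmodel.autogenerate()

# Query a collision-free solution for the manipulator's current end-effector pose.
manip = robot.GetActiveManipulator()
ikparam = manip.GetIkParameterization(IkParameterization.Type.Transform6D)
solution = manip.FindIKSolution(ikparam, IkFilterOptions.CheckEnvCollisions)
```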
#### File: python/databases/visibilitymodel.py
```python
from __future__ import with_statement # for python 2.5
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2009-2010 <NAME> (<EMAIL>)'
__license__ = 'Apache License, Version 2.0'
import time
import os.path
if not __openravepy_build_doc__:
from ..openravepy_int import *
from ..openravepy_ext import *
from numpy import *
else:
from numpy import array
from . import DatabaseGenerator
import inversekinematics, kinematicreachability
from .. import interfaces
import logging
log = logging.getLogger('openravepy.'+__name__.split('.',2)[-1])
class VisibilityModel(DatabaseGenerator):
class GripperVisibility:
"""Used to hide links not beloning to gripper.
When 'entered' will hide all the non-gripper links in order to facilitate visiblity of the gripper
"""
def __init__(self,manip):
self.manip = manip
self.robot = self.manip.GetRobot()
self.hiddengeoms = []
def __enter__(self):
self.hiddengeoms = []
with self.robot.GetEnv():
# stop rendering the non-gripper links
childlinkids = [link.GetIndex() for link in self.manip.GetChildLinks()]
for link in self.robot.GetLinks():
if link.GetIndex() not in childlinkids:
for geom in link.GetGeometries():
self.hiddengeoms.append((geom,geom.IsDraw()))
geom.SetDraw(False)
def __exit__(self,type,value,traceback):
with self.robot.GetEnv():
for geom,isdraw in self.hiddengeoms:
geom.SetDraw(isdraw)
def __init__(self,robot,target,sensorrobot=None,sensorname=None,maxvelmult=None, ignoresensorcollision=None):
"""Starts a visibility model using a robot, a sensor, and a target
The minimum needed to be specified is the robot and a sensorname. Supports sensors that do
not belong to the current robot in the case that a robot is holding the target with its
manipulator. Providing the target allows visibility information to be computed.
"""
DatabaseGenerator.__init__(self,robot=robot)
self.sensorrobot = sensorrobot if sensorrobot is not None else robot
self.target = target
self.visualprob = interfaces.VisualFeedback(self.robot,maxvelmult=maxvelmult,ignoresensorcollision=ignoresensorcollision)
self.basemanip = interfaces.BaseManipulation(self.robot,maxvelmult=maxvelmult)
self.convexhull = None
self.sensorname = sensorname
if self.sensorname is None:
possiblesensors = [s.GetName() for s in self.sensorrobot.GetAttachedSensors() if s.GetSensor() is not None and s.GetSensor().Supports(Sensor.Type.Camera)]
if len(possiblesensors) > 0:
self.sensorname = possiblesensors[0]
self.manip = robot.GetActiveManipulator()
self.manipname = None if self.manip is None else self.manip.GetName()
self.visibilitytransforms = None
self.rmodel = self.ikmodel = None
self.preshapes = None
self.preprocess()
def clone(self,envother):
clone = DatabaseGenerator.clone(self,envother)
clone.rmodel = self.rmodel.clone(envother) if not self.rmodel is None else None
clone.preshapes = array(self.preshapes) if not self.preshapes is None else None
clone.ikmodel = self.ikmodel.clone(envother) if not self.ikmodel is None else None
clone.visualprob = self.visualprob.clone(envother)
clone.basemanip = self.basemanip.clone(envother)
clone.preprocess()
return clone
def has(self):
return self.visibilitytransforms is not None and len(self.visibilitytransforms) > 0
def getversion(self):
return 2
def getfilename(self,read=False):
return RaveFindDatabaseFile(os.path.join('robot.'+self.robot.GetKinematicsGeometryHash(), 'visibility.' + self.manip.GetStructureHash() + '.' + self.attachedsensor.GetStructureHash() + '.' + self.target.GetKinematicsGeometryHash()+'.pp'),read)
def load(self):
try:
params = DatabaseGenerator.load(self)
if params is None:
return False
self.visibilitytransforms,self.convexhull,self.KK,self.dims,self.preshapes = params
self.preprocess()
return self.has()
        except Exception:
return False
def save(self):
DatabaseGenerator.save(self,(self.visibilitytransforms,self.convexhull,self.KK,self.dims,self.preshapes))
def preprocess(self):
with self.env:
manipname = self.visualprob.SetCameraAndTarget(sensorname=self.sensorname,sensorrobot=self.sensorrobot,manipname=self.manipname,target=self.target)
assert(self.manipname is None or self.manipname==manipname)
self.manip = self.robot.SetActiveManipulator(manipname)
self.attachedsensor = [s for s in self.sensorrobot.GetAttachedSensors() if s.GetName() == self.sensorname][0]
self.ikmodel = inversekinematics.InverseKinematicsModel(robot=self.robot,iktype=IkParameterization.Type.Transform6D)
if not self.ikmodel.load():
self.ikmodel.autogenerate()
if self.visibilitytransforms is not None:
self.visualprob.SetCameraTransforms(transforms=self.visibilitytransforms)
def autogenerate(self,options=None,gmodel=None):
preshapes = None
sphere =None
conedirangles = None
if options is not None:
if options.preshapes is not None:
preshapes = zeros((0,len(self.manip.GetGripperIndices())))
for preshape in options.preshapes:
preshapes = r_[preshapes,[array([float(s) for s in preshape.split()])]]
if options.sphere is not None:
sphere = [float(s) for s in options.sphere.split()]
if options.conedirangles is not None:
conedirangles = []
for conediranglestring in options.conedirangles:
conedirangles.append([float(s) for s in conediranglestring.split()])
if not gmodel is None:
preshapes = array([gmodel.grasps[0][gmodel.graspindices['igrasppreshape']]])
if len(self.manip.GetGripperIndices()) > 0:
if preshapes is None:
with self.target:
self.target.Enable(False)
taskmanip = interfaces.TaskManipulation(self.robot)
final,traj = taskmanip.ReleaseFingers(execute=False,outputfinal=True)
preshapes = array([final])
else:
preshapes = array(())
self.generate(preshapes=preshapes,sphere=sphere,conedirangles=conedirangles)
self.save()
def generate(self,preshapes,sphere=None,conedirangles=None,localtransforms=None):
self.preshapes=preshapes
self.preprocess()
self.sensorname = self.attachedsensor.GetName()
self.manipname = self.manip.GetName()
bodies = [(b,b.IsEnabled()) for b in self.env.GetBodies() if b != self.robot and b != self.target]
for b in bodies:
b[0].Enable(False)
try:
with self.env:
sensor = self.attachedsensor.GetSensor()
if sensor is not None: # set power to 0?
sensorgeom = sensor.GetSensorGeometry(Sensor.Type.Camera)
sensordata = sensor.GetSensorData(Sensor.Type.Camera)
self.KK = sensorgeom.KK.K
self.dims = sensordata.imagedata.shape
with RobotStateSaver(self.robot):
# find better way of handling multiple grasps
if len(self.preshapes) > 0:
self.robot.SetDOFValues(self.preshapes[0],self.manip.GetGripperIndices())
extentsfile = os.path.join(RaveGetHomeDirectory(),'kinbody.'+self.target.GetKinematicsGeometryHash(),'visibility.txt')
if sphere is None and os.path.isfile(extentsfile):
self.visibilitytransforms = self.visualprob.ProcessVisibilityExtents(extents=loadtxt(extentsfile,float),conedirangles=conedirangles)
elif localtransforms is not None:
self.visibilitytransforms = self.visualprob.ProcessVisibilityExtents(transforms=localtransforms)
else:
if sphere is None:
sphere = [3,0.1,0.15,0.2,0.25,0.3]
self.visibilitytransforms = self.visualprob.ProcessVisibilityExtents(sphere=sphere,conedirangles=conedirangles)
print 'total transforms: ',len(self.visibilitytransforms)
self.visualprob.SetCameraTransforms(transforms=self.visibilitytransforms)
finally:
for b,enable in bodies:
b.Enable(enable)
def SetCameraTransforms(self,transforms):
"""Sets the camera transforms to the visual feedback problem"""
self.visualprob.SetCameraTransforms(transforms=transforms)
def showtransforms(self,options=None):
if self.robot != self.sensorrobot:
pts = poseMultArrayT(self.sensorrobot.GetTransformPose(), InvertPoses(self.visibilitytransforms))[:,4:7]
else:
pts = poseMultArrayT(self.target.GetTransformPose(), self.visibilitytransforms)[:,4:7]
h=self.env.plot3(pts,5,colors=array([0.5,0.5,1,0.2]))
try:
with RobotStateSaver(self.robot):
# disable all non-child links
for link in self.robot.GetLinks():
link.Enable(link in self.manip.GetChildLinks())
with self.GripperVisibility(self.manip):
for i,pose in enumerate(self.visibilitytransforms):
with self.env:
if len(self.preshapes) > 0:
self.robot.SetDOFValues(self.preshapes[0],self.manip.GetGripperIndices())
if self.robot != self.sensorrobot:
# sensor is not attached to robot
                                # robot should be grabbing the target
assert(self.robot.IsGrabbing(self.target) is not None)
relativepose = poseMult(poseMult(self.attachedsensor.GetTransformPose(),InvertPose(pose)), InvertPose(self.target.GetTransformPose()))
for link in self.manip.GetChildLinks():
link.SetTransform(poseMult(relativepose, link.GetTransformPose()))
else:
                                # robot should not be grabbing the target
assert(self.robot.IsGrabbing(self.target) is None)
relativepose = poseMult(InvertPose(self.attachedsensor.GetTransformPose()),self.manip.GetTransformPose())
globalCameraPose = poseMult(self.target.GetTransformPose(), pose)
grasppose = poseMult(globalCameraPose,relativepose)
deltapose = poseMult(grasppose,InvertPose(self.manip.GetTransformPose()))
for link in self.manip.GetChildLinks():
link.SetTransform(poseMult(deltapose,link.GetTransformPose()))
visibility = self.visualprob.ComputeVisibility()
self.env.UpdatePublishedBodies()
msg='%d/%d visibility=%d, press any key to continue: '%(i,len(self.visibilitytransforms),visibility)
if options is not None and options.showimage:
pilutil=__import__('scipy.misc',fromlist=['pilutil'])
I=self.getCameraImage()
print(msg)
pilutil.imshow(I)
else:
raw_input(msg)
finally:
# have to destroy the plot handle
h = None
def ShowTransform(self, relativepose, options=None):
"""moves the robot links temporarily to show a transform
"""
if self.robot != self.sensorrobot:
pts = poseMult(self.sensorrobot.GetTransformPose(), InvertPose(relativepose))[4:7]
else:
pts = poseMult(self.target.GetTransformPose(), relativepose)[4:7]
h=self.env.plot3(pts,5,colors=array([0.5,0.5,1,0.2]))
try:
with RobotStateSaver(self.robot):
# disable all non-child links
for link in self.robot.GetLinks():
link.Enable(link in self.manip.GetChildLinks())
with self.GripperVisibility(self.manip):
with self.env:
if len(self.preshapes) > 0:
self.robot.SetDOFValues(self.preshapes[0],self.manip.GetGripperIndices())
if self.robot != self.sensorrobot:
# sensor is not attached to robot
                            # robot should be grabbing the target
assert(self.robot.IsGrabbing(self.target) is not None)
linkrelativepose = poseMult(poseMult(self.attachedsensor.GetTransformPose(),InvertPose(relativepose)), InvertPose(self.target.GetTransformPose()))
for link in self.manip.GetChildLinks():
link.SetTransform(poseMult(linkrelativepose, link.GetTransformPose()))
else:
                            # robot should not be grabbing the target
assert(self.robot.IsGrabbing(self.target) is None)
linkrelativepose = poseMult(InvertPose(self.attachedsensor.GetTransformPose()),self.manip.GetTransformPose())
globalCameraPose = poseMult(self.target.GetTransformPose(), relativepose)
grasppose = poseMult(globalCameraPose, linkrelativepose)
deltapose = poseMult(grasppose,InvertPose(self.manip.GetTransformPose()))
for link in self.manip.GetChildLinks():
link.SetTransform(poseMult(deltapose,link.GetTransformPose()))
visibility = self.visualprob.ComputeVisibility()
self.env.UpdatePublishedBodies()
msg='visibility=%d, press any key to continue: '%(visibility)
if options is not None and options.showimage:
pilutil=__import__('scipy.misc',fromlist=['pilutil'])
I=self.getCameraImage()
print(msg)
pilutil.imshow(I)
else:
raw_input(msg)
finally:
# have to destroy the plot handle
h = None
def show(self,options=None):
if self.env.GetViewer() is None:
self.env.SetViewer('qtcoin')
time.sleep(0.4) # give time for viewer to initialize
self.attachedsensor.GetSensor().Configure(Sensor.ConfigureCommand.PowerOn)
self.attachedsensor.GetSensor().Configure(Sensor.ConfigureCommand.RenderDataOn)
return self.showtransforms(options)
def moveToPreshape(self):
"""uses a planner to safely move the hand to the preshape and returns the trajectory"""
if len(self.preshapes) > 0:
preshape=self.preshapes[0]
with self.robot:
self.robot.SetActiveDOFs(self.manip.GetArmIndices())
self.basemanip.MoveUnsyncJoints(jointvalues=preshape,jointinds=self.manip.GetGripperIndices())
while not self.robot.GetController().IsDone(): # busy wait
time.sleep(0.01)
with self.robot:
self.robot.SetActiveDOFs(self.manip.GetGripperIndices())
self.basemanip.MoveActiveJoints(goal=preshape)
while not self.robot.GetController().IsDone(): # busy wait
time.sleep(0.01)
def computeValidTransform(self,returnall=False,checkcollision=True,computevisibility=True,randomize=False):
with self.robot:
if self.manip.CheckIndependentCollision():
                raise planning_error('robot independent links are initially in collision')
validjoints = []
if randomize:
order = random.permutation(len(self.visibilitytransforms))
else:
order = xrange(len(self.visibilitytransforms))
for i in order:
pose = self.visibilitytransforms[i]
Trelative = dot(linalg.inv(self.attachedsensor.GetTransform()),self.manip.GetEndEffectorTransform())
Tcamera = dot(self.target.GetTransform(),matrixFromPose(pose))
Tgrasp = dot(Tcamera,Trelative)
s = self.manip.FindIKSolution(Tgrasp,checkcollision)
if s is not None:
self.robot.SetDOFValues(s,self.manip.GetArmIndices())
if computevisibility and not self.visualprob.ComputeVisibility():
continue
validjoints.append((s,i))
if not returnall:
return validjoints
print 'found',len(validjoints)
return validjoints
def pruneTransformations(self,thresh=0.04,numminneighs=10,maxdist=None,translationonly=True):
if self.rmodel is None:
self.rmodel = kinematicreachability.ReachabilityModel(robot=self.robot)
if not self.rmodel.load():
# do not autogenerate since that would force this model to depend on the reachability
self.rmodel = None
                return array(self.visibilitytransforms)
kdtree=self.rmodel.ComputeNN(translationonly)
if maxdist is not None:
            visibilitytransforms = self.visibilitytransforms[InvertPoses(self.visibilitytransforms)[:,6]<maxdist]
else:
visibilitytransforms = self.visibilitytransforms
newtrans = poseMultArrayT(poseFromMatrix(dot(linalg.inv(self.manip.GetBase().GetTransform()),self.target.GetTransform())),visibilitytransforms)
if translationonly:
transdensity = kdtree.kFRSearchArray(newtrans[:,4:7],thresh**2,0,thresh*0.01)[2]
I=flatnonzero(transdensity>numminneighs)
return visibilitytransforms[I[argsort(-transdensity[I])]]
raise ValueError('not supported')
# Imask = GetCameraRobotMask(orenv,options.robotfile,sensorindex=options.sensorindex,gripperjoints=gripperjoints,robotjoints=robotjoints,robotjointinds=robotjointinds,rayoffset=options.rayoffset)
# # save as a ascii matfile
# numpy.savetxt(options.savefile,Imask,'%d')
# print 'mask saved to ' + options.savefile
# try:
# scipy.misc.pilutil.imshow(array(Imask*255,'uint8'))
# except:
# pass
# def GetCameraRobotMask(self,rayoffset=0):
# with self.env:
# inds = array(range(self.width*self.height))
# imagepoints = array((mod(inds,self.width),floor(inds/self.width)))
# camerapoints = dot(linalg.inv(self.KK), r_[imagepoints,ones((1,imagepoints.shape[1]))])
# Tcamera = self.attached.GetSensor().GetTransform()
# raydirs = dot(Tcamera[0:3,0:3], camerapoints / tile(sqrt(sum(camerapoints**2,0)),(3,1)))
# rays = r_[tile(Tcamera[0:3,3:4],(1,raydirs.shape[1]))+rayoffset*raydirs,100.0*raydirs]
# hitindices,hitpositions = self.prob.GetEnv().CheckCollisionRays(rays,self.robot,False)
# # gather all the rays that hit and form an image
# return reshape(array(hitindices,'float'),(height,width))
def getCameraImage(self,delay=1.0):
sensor=self.attachedsensor.GetSensor()
sensor.Configure(Sensor.ConfigureCommand.PowerOn)
try:
time.sleep(delay)
return sensor.GetSensorData().imagedata
finally:
sensor.Configure(Sensor.ConfigureCommand.PowerOff)
@staticmethod
def CreateOptionParser():
parser = DatabaseGenerator.CreateOptionParser()
parser.description='Computes and manages the visibility transforms for a manipulator/target.'
parser.add_option('--target',action="store",type='string',dest='target',
help='OpenRAVE kinbody target filename')
parser.add_option('--sensorname',action="store",type='string',dest='sensorname',default=None,
                          help='Name of the sensor to build the visibility model for (has to be a camera). If none, takes the first possible sensor.')
parser.add_option('--preshape', action='append', type='string',dest='preshapes',default=None,
help='Add a preshape for the manipulator gripper joints')
parser.add_option('--sphere', action='store', type='string',dest='sphere',default=None,
help='Force detectability extents to be distributed around a sphere. Parameter is a string with the first value being density (3 is default) and the rest being distances')
parser.add_option('--conedirangle', action='append', type='string',dest='conedirangles',default=None,
help='The direction of the cone multiplied with the half-angle (radian) that the detectability extents are constrained to. Multiple cones can be provided.')
parser.add_option('--rayoffset',action="store",type='float',dest='rayoffset',default=0.03,
help='The offset to move the ray origin (prevents meaningless collisions), default is 0.03')
parser.add_option('--showimage',action="store_true",dest='showimage',default=False,
help='If set, will show the camera image when showing the models')
return parser
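    # Assumed command-line invocation (database name inferred from the module path, not stated in this file):
    #   openrave.py --database visibilitymodel --robot=<robotfile> --target=<targetfile> --sensorname=<camera>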
@staticmethod
def RunFromParser(Model=None,parser=None,args=None,**kwargs):
if parser is None:
parser = VisibilityModel.CreateOptionParser()
(options, leftargs) = parser.parse_args(args=args)
env = Environment()
try:
target = None
with env:
target = env.ReadKinBodyXMLFile(options.target)
target.SetTransform(eye(4))
env.Add(target)
if Model is None:
Model = lambda robot: VisibilityModel(robot=robot,target=target,sensorname=options.sensorname)
DatabaseGenerator.RunFromParser(env=env,Model=Model,parser=parser,args=args,**kwargs)
finally:
env.Destroy()
RaveDestroy()
def run(*args,**kwargs):
"""Command-line execution of the example. ``args`` specifies a list of the arguments to the script.
"""
VisibilityModel.RunFromParser(*args,**kwargs)
```
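A minimal sketch of driving the visibility model above; the scene file and target name are illustrative assumptions.
```python
from openravepy import Environment, databases

env = Environment()
env.Load('data/pa10grasp.env.xml')  # assumed scene with a camera-equipped robot and a target body
robot = env.GetRobots()[0]
target = env.GetKinBody('mug1')     # assumed target name

# Build (or load) the set of camera poses from which the target is detectable.
vmodel = databases.visibilitymodel.VisibilityModel(robot=robot, target=target, sensorname=None)
if not vmodel.load():
    vmodel.autogenerate()

# Search for an arm configuration that keeps the target visible and collision-free.
validjoints = vmodel.computeValidTransform(returnall=False, checkcollision=True, computevisibility=True)
```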
#### File: python/examples/graspplanning.py
```python
from __future__ import with_statement # for python 2.5
__author__ = '<NAME>'
import time
from itertools import izip
import openravepy
if not __openravepy_build_doc__:
from openravepy import *
from numpy import *
try:
from multiprocessing import cpu_count
except:
def cpu_count(): return 1
class GraspPlanning(openravepy.metaclass.AutoReloader):
def __init__(self,robot,randomize=True,dests=None,nodestinations=False,switchpatterns=None,plannername=None,minimumgoalpaths=1):
self.envreal = robot.GetEnv()
self.robot = robot
self.plannername=plannername
self.nodestinations = nodestinations
self.minimumgoalpaths=minimumgoalpaths
try:
self.ikmodel = databases.inversekinematics.InverseKinematicsModel(robot=robot,iktype=IkParameterization.Type.Transform6D)
if not self.ikmodel.load():
self.ikmodel.autogenerate()
except ValueError:
print '6D IK failed, trying 5D IK'
self.ikmodel = databases.inversekinematics.InverseKinematicsModel(robot=robot,iktype=IkParameterization.Type.TranslationDirection5D)
if not self.ikmodel.load():
self.ikmodel.autogenerate()
self.lmodel = databases.linkstatistics.LinkStatisticsModel(self.robot)
if not self.lmodel.load():
self.lmodel.autogenerate()
self.lmodel.setRobotWeights()
self.lmodel.setRobotResolutions(xyzdelta=0.005)
print 'robot resolutions: ',robot.GetDOFResolutions()
print 'robot weights: ',robot.GetDOFWeights()
# could possibly affect generated grasp sets?
# self.cdmodel = databases.convexdecomposition.ConvexDecompositionModel(self.robot)
# if not self.cdmodel.load():
# self.cdmodel.autogenerate()
self.switchpatterns = switchpatterns
with self.envreal:
self.basemanip = interfaces.BaseManipulation(self.robot,plannername=plannername)
self.basemanip.prob.SendCommand('SetMinimumGoalPaths %d'%self.minimumgoalpaths)
self.taskmanip = None
self.updir = array((0,0,1))
# find all the bodies to manipulate
self.graspables = self.getGraspables(dests=dests)
if len(self.graspables) == 0:
print 'attempting to auto-generate a grasp table'
targets=[t for t in self.envreal.GetBodies() if t.GetName().find('mug')>=0 or t.GetName().find('target')>=0]
if len(targets) > 0:
gmodel = databases.grasping.GraspingModel(robot=self.robot,target=targets[0])
if not gmodel.load():
gmodel.numthreads = cpu_count()
gmodel.autogenerate()
self.graspables = self.getGraspables(dests=dests)
self.randomize=randomize
if self.randomize:
self.randomizeObjects()
if dests is None and not self.nodestinations:
tablename = 'table'
table = self.envreal.GetKinBody(tablename)
if table is not None:
alltargets = [graspable[0].target for graspable in self.graspables]
for target in alltargets:
target.Enable(False)
try:
needdests_graspables = [graspable for graspable in self.graspables if graspable[1] is None]
curdests = [graspable[0].target.GetTransform() for graspable in needdests_graspables]
alldests = self.setRandomDestinations([graspable[0].target for graspable in needdests_graspables],table)
for graspable,dests in izip(needdests_graspables,alldests):
graspable[1] = dests+curdests
finally:
for target in alltargets:
target.Enable(True)
else:
print 'could not find %s'%tablename
def getGraspables(self,dests=None):
graspables = []
print 'searching for graspable objects (robot=%s)...'%(self.robot.GetRobotStructureHash())
for target in self.envreal.GetBodies():
if not target.IsRobot():
gmodel = databases.grasping.GraspingModel(robot=self.robot,target=target)
if gmodel.load():
print '%s is graspable'%target.GetName()
graspables.append([gmodel,dests])
return graspables
def GetGraspable(self,name):
for graspable in self.graspables:
if graspable[0].target.GetName() == name:
return graspable
return None
def randomizeObjects(self):
for graspable in self.graspables:
target = graspable[0].target
Tbody = target.GetTransform()
for iter in range(5):
Tnew = array(Tbody)
Tnew[0,3] += -0.1 + 0.2 * random.rand()
Tnew[1,3] += -0.1 + 0.2 * random.rand()
target.SetTransform(Tnew)
if not self.envreal.CheckCollision(target):
Tbody = Tnew
break
target.SetTransform(Tbody)
# randomize the robot
Trobot = self.robot.GetTransform()
for iter in range(5):
Tnew = array(Trobot)
Tnew[0,3] += -0.1 + 0.2 * random.rand()
Tnew[1,3] += -0.1 + 0.2 * random.rand()
self.robot.SetTransform(Tnew)
if not self.envreal.CheckCollision(self.robot):
Trobot = Tnew
break
self.robot.SetTransform(Trobot)
@staticmethod
def setRandomDestinations(targets, table,transdelta=0.1,zoffset=0.01,Trolls=None,randomize=False,preserverotation=True):
with table.GetEnv():
print 'searching for destinations on %s...'%table.GetName()
Ttable = table.GetTransform()
table.SetTransform(eye(4))
ab = table.ComputeAABB()
table.SetTransform(Ttable)
p = ab.pos()
e = ab.extents()
Nx = floor(2*e[0]/transdelta)
Ny = floor(2*e[1]/transdelta)
X = []
Y = []
if randomize:
for x in arange(Nx):
X = r_[X, random.rand(Ny)*0.5/(Nx+1) + (x+1)/(Nx+1)]
Y = r_[Y, random.rand(Ny)*0.5/(Ny+1) + arange(0.5,Ny,1.0)/(Ny+1)]
else:
for x in arange(Nx):
X = r_[X, tile((x+1)/(Nx+1),Ny)]
Y = r_[Y, arange(0.5,Ny,1.0)/(Ny+1)]
translations = c_[p[0]-e[0]+2*e[0]*X,p[1]-e[1]+2*e[1]*Y,tile(p[2]+e[2]+zoffset,len(X))]
if Trolls is None:
Trolls = [matrixFromAxisAngle(array((0,0,1)),roll) for roll in arange(0,2*pi,pi/2)] + [matrixFromAxisAngle(array((1,0,0)),roll) for roll in [pi/2,pi,1.5*pi]]
for target in targets:
target.Enable(False)
try:
alldests = []
for target in targets:
Torg = eye(4)
if preserverotation:
Torg[0:3,0:3] = target.GetTransform()[0:3,0:3]
with target.CreateKinBodyStateSaver():
target.Enable(True)
dests = []
for translation in translations:
for Troll in Trolls:
Troll = array(Troll)
Troll[0:3,3] = translation
target.SetTransform(dot(Ttable, dot(Troll, Torg)))
if not table.GetEnv().CheckCollision(target):
dests.append(target.GetTransform())
alldests.append(dests)
return alldests
finally:
for target in targets:
target.Enable(True)
def viewDestinations(self,gmodel,Tdests,delay=0.5):
with gmodel.target:
for i,T in enumerate(Tdests):
print 'target %s dest %d/%d'%(gmodel.target.GetName(),i,len(Tdests))
gmodel.target.SetTransform(T)
validgrasps, indices = gmodel.computeValidGrasps(returnnum=1)
gmodel.target.GetEnv().UpdatePublishedBodies()
gmodel.showgrasp(validgrasps[0],useik=True,collisionfree=True,delay=delay)
def waitrobot(self,robot=None):
"""busy wait for robot completion"""
if robot is None:
robot = self.robot
while not robot.GetController().IsDone():
time.sleep(0.01)
def graspAndPlaceObject(self,gmodel,dests,waitforkey=False,movehanddown=True,**kwargs):
"""grasps an object and places it in one of the destinations. If no destination is specified, will just grasp it"""
env = self.envreal#.CloneSelf(CloningOptions.Bodies)
robot = self.robot
with env:
self.taskmanip = interfaces.TaskManipulation(self.robot,graspername=gmodel.grasper.plannername,plannername=self.plannername)
self.taskmanip.prob.SendCommand('SetMinimumGoalPaths %d'%self.minimumgoalpaths)
if self.switchpatterns is not None:
self.taskmanip.SwitchModels(switchpatterns=self.switchpatterns)
robot.SetActiveManipulator(gmodel.manip)
robot.SetActiveDOFs(gmodel.manip.GetArmIndices())
istartgrasp = 0
approachoffset = 0.02 if self.ikmodel.iktype == IkParameterization.Type.Transform6D else 0.0
target = gmodel.target
stepsize = 0.001
while istartgrasp < len(gmodel.grasps):
goals,graspindex,searchtime,trajdata = self.taskmanip.GraspPlanning(gmodel=gmodel,grasps=gmodel.grasps[istartgrasp:], approachoffset=approachoffset,destposes=dests, seedgrasps = 3,seeddests=8,seedik=1,maxiter=1000, randomgrasps=self.randomize,randomdests=self.randomize)
istartgrasp = graspindex+1
grasp = gmodel.grasps[graspindex]
Tglobalgrasp = gmodel.getGlobalGraspTransform(grasp,collisionfree=True)
self.waitrobot(robot)
print 'grasp %d initial planning time: %f'%(graspindex,searchtime)
if approachoffset != 0:
print 'moving hand'
expectedsteps = floor(approachoffset/stepsize)
try:
# should not allow any error since destination goal depends on accurate relative placement
# of the gripper with respect to the object
with gmodel.target:
print 'current robot', repr(robot.GetDOFValues())
print 'global direction',repr(dot(gmodel.manip.GetTransform()[0:3,0:3],gmodel.manip.GetDirection())), gmodel.getGlobalApproachDir(grasp)
print 'local direction',grasp[gmodel.graspindices.get('igraspdir')]
gmodel.target.Enable(False)
res = self.basemanip.MoveHandStraight(direction=gmodel.getGlobalApproachDir(grasp), ignorefirstcollision=0,stepsize=stepsize,minsteps=expectedsteps,maxsteps=expectedsteps)
except planning_error:
print 'use a planner to move the rest of the way'
try:
self.basemanip.MoveToHandPosition(matrices=[Tglobalgrasp],maxiter=1000,maxtries=1,seedik=4)
except planning_error,e:
print 'failed to reach grasp',e
continue
self.waitrobot(robot)
self.taskmanip.CloseFingers(translationstepmult=gmodel.translationstepmult,finestep=gmodel.finestep)
self.waitrobot(robot)
with env:
robot.Grab(target)
if waitforkey:
raw_input('press any key to continue grasp')
success = graspindex
if movehanddown:
try:
print 'move hand up'
self.basemanip.MoveHandStraight(direction=self.updir,stepsize=0.003,minsteps=1,maxsteps=60)
except:
print 'failed to move hand up'
self.waitrobot(robot)
if len(goals) > 0:
print 'planning to destination'
try:
self.basemanip.MoveToHandPosition(ikparams=goals,maxiter=2000,maxtries=2,seedik=8)
self.waitrobot(robot)
except planning_error,e:
print 'failed to reach a goal, trying to move goal a little up',e
if goals[0].GetType() == IkParameterizationType.Transform6D:
Tgoal = goals[0].GetTransform6D()
Tgoal[0:3,3] += self.updir*0.015
try:
self.basemanip.MoveToHandPosition(matrices=[Tgoal],maxiter=3000,maxtries=2,seedik=8)
self.waitrobot(robot)
self.basemanip.MoveToHandPosition(ikparams=goals,maxiter=2000,maxtries=2,seedik=8)
self.waitrobot(robot)
except planning_error,e:
print e
success = -1
if movehanddown:
print 'moving hand down'
try:
res = self.basemanip.MoveHandStraight(direction=-self.updir,stepsize=0.003,minsteps=1,maxsteps=100)
except:
print 'failed to move hand down'
self.waitrobot(robot)
try:
res = self.taskmanip.ReleaseFingers(target=target,translationstepmult=gmodel.translationstepmult,finestep=gmodel.finestep)
except planning_error:
res = None
if res is None:
print 'problems releasing, releasing target first'
with env:
robot.ReleaseAllGrabbed()
try:
res = self.taskmanip.ReleaseFingers(target=target,translationstepmult=gmodel.translationstepmult,finestep=gmodel.finestep)
except planning_error:
res = None
if res is None:
print 'forcing fingers'
with env:
robot.SetDOFValues(gmodel.grasps[graspindex][gmodel.graspindices['igrasppreshape']],gmodel.manip.GetGripperIndices())
self.waitrobot(robot)
with env:
robot.ReleaseAllGrabbed()
with CollisionOptionsStateSaver(env.GetCollisionChecker(),CollisionOptions.ActiveDOFs):
if env.CheckCollision(robot):
print 'robot in collision, moving back a little'
try:
self.basemanip.MoveHandStraight(direction=-dot(gmodel.manip.GetTransform()[0:3,0:3],gmodel.manip.GetDirection()), stepsize=stepsize,minsteps=1,maxsteps=10)
self.waitrobot(robot)
except planning_error,e:
pass
if env.CheckCollision(robot):
try:
self.taskmanip.ReleaseFingers(target=target,translationstepmult=gmodel.translationstepmult,finestep=gmodel.finestep)
except planning_error:
res = None
#raise ValueError('robot still in collision?')
if success >= 0:
return success # return successful grasp index
# exhausted all grasps
return -1
def performGraspPlanning(self,withreplacement=True,**kwargs):
print 'starting to pick and place random objects'
graspables = self.graspables[:]
failures = 0
while True:
if len(graspables) == 0 or failures > len(graspables)+1:
if withreplacement:
time.sleep(4)
self.randomizeObjects()
graspables = self.graspables[:]
else:
break
if self.randomize:
i=random.randint(len(graspables))
else:
i = 0
try:
print 'grasping object %s'%graspables[i][0].target.GetName()
with self.envreal:
self.robot.ReleaseAllGrabbed()
success = self.graspAndPlaceObject(graspables[i][0],graspables[i][1],**kwargs)
print 'success: ',success
graspables.pop(i)
failures = 0
except planning_error, e:
print 'failed to grasp object %s'%graspables[i][0].target.GetName()
failures += 1
graspables.append(graspables.pop(0)) # push front to back
print e
def main(env,options):
"Main example code."
env.Load(options.scene)
robot = env.GetRobots()[0]
env.UpdatePublishedBodies()
time.sleep(0.1) # give time for environment to update
self = GraspPlanning(robot,randomize=options.randomize,nodestinations=options.nodestinations,plannername=options.planner)
self.performGraspPlanning(withreplacement=not options.testmode)
from optparse import OptionParser
from openravepy.misc import OpenRAVEGlobalArguments
@openravepy.with_destroy
def run(args=None):
"""Command-line execution of the example.
:param args: arguments for script to parse, if not specified will use sys.argv
"""
parser = OptionParser(description='Autonomous grasp and manipulation planning example.')
OpenRAVEGlobalArguments.addOptions(parser)
parser.add_option('--scene',
action="store",type='string',dest='scene',default='data/lab1.env.xml',
help='Scene file to load (default=%default)')
parser.add_option('--nodestinations', action='store_true',dest='nodestinations',default=False,
help='If set, will plan without destinations.')
parser.add_option('--norandomize', action='store_false',dest='randomize',default=True,
help='If set, will not randomize the bodies and robot position in the scene.')
parser.add_option('--planner',action="store",type='string',dest='planner',default=None,
help='the planner to use')
(options, leftargs) = parser.parse_args(args=args)
OpenRAVEGlobalArguments.parseAndCreateThreadedUser(options,main,defaultviewer=True)
if __name__ == "__main__":
run()
def test():
import graspplanning
self=graspplanning.GraspPlanning(robot,randomize=False,nodestinations=False)
success = self.graspAndPlaceObject(self.graspables[2][0],self.graspables[2][1])
```
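For context, the sketch below shows one way to drive the grasp planner above directly, bypassing the option parser. The `openravepy.examples.graspplanning` import path, the viewer choice, and the scene file are assumptions for illustration, not something this file prescribes.

```python
# Hedged usage sketch: run GraspPlanning without the command-line wrapper.
# Assumes openravepy is installed and ships this example as openravepy.examples.graspplanning.
from openravepy import Environment, RaveDestroy
from openravepy.examples.graspplanning import GraspPlanning

env = Environment()
try:
    env.SetViewer('qtcoin')             # optional; drop for headless runs
    env.Load('data/lab1.env.xml')       # scene bundled with OpenRAVE
    robot = env.GetRobots()[0]
    planner = GraspPlanning(robot, randomize=False, nodestinations=False)
    planner.performGraspPlanning(withreplacement=False)
finally:
    RaveDestroy()                       # always tear down the OpenRAVE runtime
```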
#### File: python/examples/hanoi.py
```python
from __future__ import with_statement # for python 2.5
__author__ = '<NAME>'
import time
import openravepy
if not __openravepy_build_doc__:
from openravepy import *
from numpy import *
class HanoiPuzzle:
def __init__(self,env,robot,plannername=None):
self.env = env
self.robot = robot
# load the IK solver
self.ikmodel = databases.inversekinematics.InverseKinematicsModel(robot=robot,iktype=IkParameterization.Type.Transform6D)
if not self.ikmodel.load():
self.ikmodel.autogenerate() # autogenerate if one doesn't exist
self.lmodel = databases.linkstatistics.LinkStatisticsModel(self.robot)
if not self.lmodel.load():
self.lmodel.autogenerate()
self.lmodel.setRobotWeights()
self.lmodel.setRobotResolutions(xyzdelta=0.002) # the pegs are really thin
print 'robot resolutions: ',robot.GetDOFResolutions()
print 'robot weights: ',robot.GetDOFWeights()
with self.env: # lock the environment
self.basemanip = interfaces.BaseManipulation(self.robot,plannername=plannername)
self.taskmanip = interfaces.TaskManipulation(self.robot,plannername=plannername)
disknames = ['disk0','disk1','disk2']
self.heights = array([0.021,0.062,0.103])+0.01
disks = []
diskradius = []
for name in disknames:
disk = env.GetKinBody(name)
ab = disk.ComputeAABB()
disk.radius = ab.extents()[1]-0.02
disks.append(disk)
self.srcpeg = env.GetKinBody('srcpeg')
self.destpeg = env.GetKinBody('destpeg')
self.peg = env.GetKinBody('peg')
self.srcpeg.disks = disks
self.destpeg.disks = []
self.peg.disks = []
def waitrobot(self):
"""busy wait for robot completion"""
while not self.robot.GetController().IsDone():
time.sleep(0.01)
def MoveToPosition(self, values,indices):
"""uses a planner to safely move the hand to the preshape and returns the trajectory.
move the robot out of the way so it can complete a preshape
"""
with self.robot:
self.robot.SetActiveDOFs(indices)
self.basemanip.MoveUnsyncJoints(jointvalues=values,jointinds=indices)
self.waitrobot()
with self.robot:
# move the hand to the preshape
self.robot.SetActiveDOFs(indices)
self.basemanip.MoveActiveJoints(goal=values)
self.waitrobot()
def putblock(self, disk, srcpeg, destpeg, height):
with self.env:
srcpegbox = srcpeg.ComputeAABB()
destpegbox = destpeg.ComputeAABB()
# get all the transformations
Thand = self.robot.GetActiveManipulator().GetTransform()
Tdisk = disk.GetTransform()
Tsrcpeg = srcpeg.GetTransform()
Tpeg = destpeg.GetTransform()
src_upvec = Tsrcpeg[0:3,2:3]
dest_upvec = Tpeg[0:3,2:3]
Tdiff = dot(linalg.inv(Tdisk), Thand)
# iterate across all possible orientations the destination peg can be in
for ang in arange(-pi,pi,0.3):
# find the dest position
p = Tpeg[0:3,3:4] + height * dest_upvec
R = dot(Tpeg[0:3,0:3], array(((cos(ang),-sin(ang),0),(sin(ang),cos(ang),0),(0,0,1))))
T = dot(r_[c_[R,p], [[0,0,0,1]]], Tdiff)
with self.env:
# check the IK of the destination
if self.robot.GetActiveManipulator().FindIKSolution(T,True) is None:
continue
# add two intermediate positions, one right above the source peg
# and one right above the destination peg
Tnewhand = array(Thand)
Tnewhand[0:3,3:4] += src_upvec*(max(srcpegbox.extents())*2.5-0.02)
# check the IK of the destination
if self.robot.GetActiveManipulator().FindIKSolution(Tnewhand,True) is None:
print('Tnewhand invalid')
continue
Tnewhand2 = array(T)
Tnewhand2[0:3,3:4] += dest_upvec*(max(destpegbox.extents())*2.5-height)
# check the IK of the destination
if self.robot.GetActiveManipulator().FindIKSolution(Tnewhand2,True) is None:
print('Tnewhand2 invalid')
continue
try:
self.basemanip.MoveToHandPosition(matrices=[Tnewhand],maxtries=1)
raveLogInfo('move to position above source peg')
self.waitrobot() # wait for robot to complete all trajectories
self.basemanip.MoveToHandPosition(matrices=[Tnewhand2],maxtries=1)
raveLogInfo('move to position above dest peg')
self.waitrobot() # wait for robot to complete all trajectories
self.basemanip.MoveToHandPosition(matrices=[T],maxtries=1)
raveLogInfo('move to dest peg')
self.waitrobot() # wait for robot to complete all trajectories
return True
except planning_error, e:
raveLogWarn(str(e))
raise planning_error('failed to put block')
def GetGrasp(self, Tdisk, radius, angles):
""" returns the transform of the grasp given its orientation and the location/size of the disk"""
zdir = -dot(Tdisk[0:3,0:3],vstack([cos(angles[0])*cos(angles[1]),-cos(angles[0])*sin(angles[1]),-sin(angles[0])]))
pos = Tdisk[0:3,3:4] + radius*dot(Tdisk[0:3,0:3],vstack([cos(angles[1]),-sin(angles[1]),0]))
xdir = cross(Tdisk[0:3,1:2],zdir,axis=0)
xdir = xdir / linalg.norm(xdir)
ydir = cross(zdir,xdir,axis=0)
Tgrasp = r_[c_[xdir,ydir,zdir,pos],[[0,0,0,1]]]
return [Tgrasp,dot(Tgrasp, array([[-1,0,0,0],[0,1,0,0],[0,0,-1,0],[0,0,0,1]]))]
def hanoimove(self, disk, srcpeg, destpeg, height):
"""Moves the arm and manipulator to grasp a peg and place it on a different peg"""
openhandfn = lambda: self.MoveToPosition([-0.7],self.robot.GetActiveManipulator().GetGripperIndices())
Tdisk = disk.GetTransform()
for ang2 in arange(-pi/2,1.5*pi,0.4):
for ang1 in arange(-0.6,0,0.2):
Tgrasps = self.GetGrasp(Tdisk, disk.radius, [ang1,ang2]) # get the grasp transform given the two angles
for Tgrasp in Tgrasps: # for each of the grasps
try:
raveLogInfo('opening hand')
openhandfn()
raveLogInfo('moving hand to location')
self.basemanip.MoveToHandPosition(matrices=[Tgrasp],maxtries=1)
self.waitrobot()
raveLogInfo('succeeded so grab the disk')
self.taskmanip.CloseFingers()
self.waitrobot()
with self.env:
self.robot.Grab(disk)
raveLogInfo('try to put the disk in the destination peg')
self.putblock(disk, srcpeg, destpeg, height)
raveLogInfo('wait for robot to complete all trajectories')
self.waitrobot()
self.taskmanip.ReleaseFingers(target=disk)
self.waitrobot()
raveLogInfo('done with one disk')
return True
except planning_error,e:
raveLogWarn(str(e))
with self.env:
self.robot.ReleaseAllGrabbed()
disk.Enable(False)
openhandfn()
with self.env:
disk.Enable(True)
return False
def hanoisolve(self, n, pegfrom, pegto, pegby):
if n == 1:
# move the disk
disk = pegfrom.disks[-1]
print('hanoimove %s from %s to %s'%(disk.GetName(), pegfrom.GetName(), pegto.GetName()))
if not self.hanoimove(disk, pegfrom, pegto, self.heights[len(pegto.disks)]):
raise ValueError('failed to solve hanoi')
# add the disk onto the correct peg list
pegto.disks.append(disk)
pegfrom.disks.pop()
else:
self.hanoisolve(n-1, pegfrom, pegby, pegto)
self.hanoisolve(1, pegfrom, pegto, pegby)
self.hanoisolve(n-1, pegby, pegto, pegfrom)
def main(env,options):
"Main example code."
while True:
env.Reset()
env.Load(options.scene)
hanoi = HanoiPuzzle(env,env.GetRobots()[0],plannername=options.planner)
hanoi.hanoisolve(3,hanoi.srcpeg,hanoi.destpeg,hanoi.peg)
if options.testmode:
break
from optparse import OptionParser
from openravepy.misc import OpenRAVEGlobalArguments
@openravepy.with_destroy
def run(args=None):
"""Command-line execution of the example.
:param args: arguments for script to parse, if not specified will use sys.argv
"""
parser = OptionParser(description='Manipulation planning example solving the hanoi problem.', usage='openrave.py --example hanoi [options]')
OpenRAVEGlobalArguments.addOptions(parser)
parser.add_option('--scene',action="store",type='string',dest='scene',default='data/hanoi_complex2.env.xml',
help='Scene file to load (default=%default)')
parser.add_option('--planner',action="store",type='string',dest='planner',default=None,
help='the planner to use')
(options, leftargs) = parser.parse_args(args=args)
OpenRAVEGlobalArguments.parseAndCreateThreadedUser(options,main,defaultviewer=True)
if __name__ == "__main__":
run()
```
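A comparable sketch for the Hanoi example, again bypassing the option parser; the `openravepy.examples.hanoi` import path and the scene file are assumptions based on how the other examples are packaged.

```python
# Hedged usage sketch for HanoiPuzzle; assumes openravepy and the bundled
# data/hanoi_complex2.env.xml scene are available.
from openravepy import Environment, RaveDestroy
from openravepy.examples.hanoi import HanoiPuzzle

env = Environment()
try:
    env.Load('data/hanoi_complex2.env.xml')
    hanoi = HanoiPuzzle(env, env.GetRobots()[0])
    # solve the 3-disk puzzle: move everything from srcpeg to destpeg, using peg as scratch
    hanoi.hanoisolve(3, hanoi.srcpeg, hanoi.destpeg, hanoi.peg)
finally:
    RaveDestroy()
```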
#### File: python/examples/simplenavigation.py
```python
from __future__ import with_statement # for python 2.5
__author__ = '<NAME>'
import time,numpy
import openravepy
if not __openravepy_build_doc__:
from openravepy import *
from numpy import *
class SimpleNavigationPlanning:
def __init__(self,robot,randomize=False,dests=None,switchpatterns=None):
self.env = robot.GetEnv()
self.robot = robot
self.cdmodel = databases.convexdecomposition.ConvexDecompositionModel(self.robot)
if not self.cdmodel.load():
self.cdmodel.autogenerate()
self.basemanip = interfaces.BaseManipulation(self.robot)
def performNavigationPlanning(self):
# find the boundaries of the environment
with self.env:
envmin = []
envmax = []
for b in self.env.GetBodies():
ab = b.ComputeAABB()
envmin.append(ab.pos()-ab.extents())
envmax.append(ab.pos()+ab.extents())
abrobot = self.robot.ComputeAABB()
envmin = numpy.min(array(envmin),0)+abrobot.extents()
envmax = numpy.max(array(envmax),0)-abrobot.extents()
bounds = array(((envmin[0],envmin[1],-pi),(envmax[0],envmax[1],pi)))
while True:
with self.env:
self.robot.SetAffineTranslationLimits(envmin,envmax)
self.robot.SetAffineTranslationMaxVels([0.5,0.5,0.5])
self.robot.SetAffineRotationAxisMaxVels(ones(4))
self.robot.SetActiveDOFs([],DOFAffine.X|DOFAffine.Y|DOFAffine.RotationAxis,[0,0,1])
# pick a random position
with self.robot:
while True:
goal = bounds[0,:]+random.rand(3)*(bounds[1,:]-bounds[0,:])
self.robot.SetActiveDOFValues(goal)
if not self.env.CheckCollision(self.robot):
break
print 'planning to: ',goal
# draw the marker
center = r_[goal[0:2],0.2]
xaxis = 0.5*array((cos(goal[2]),sin(goal[2]),0))
yaxis = 0.25*array((-sin(goal[2]),cos(goal[2]),0))
h = self.env.drawlinelist(transpose(c_[center-xaxis,center+xaxis,center-yaxis,center+yaxis,center+xaxis,center+0.5*xaxis+0.5*yaxis,center+xaxis,center+0.5*xaxis-0.5*yaxis]),linewidth=5.0,colors=array((0,1,0)))
if self.basemanip.MoveActiveJoints(goal=goal,maxiter=3000,steplength=0.1) is None:
print 'retrying...'
continue
print 'waiting for controller'
self.robot.WaitForController(0)
def main(env,options):
"Main example code."
env.Load(options.scene)
robot = env.GetRobots()[0]
env.UpdatePublishedBodies()
time.sleep(0.1) # give time for environment to update
self = SimpleNavigationPlanning(robot)
self.performNavigationPlanning()
from optparse import OptionParser
from openravepy.misc import OpenRAVEGlobalArguments
@openravepy.with_destroy
def run(args=None):
"""Command-line execution of the example.
:param args: arguments for script to parse, if not specified will use sys.argv
"""
parser = OptionParser(description='Simple navigation planning using RRTs.')
OpenRAVEGlobalArguments.addOptions(parser)
parser.add_option('--scene',
action="store",type='string',dest='scene',default='data/lab1.env.xml',
help='Scene file to load (default=%default)')
(options, leftargs) = parser.parse_args(args=args)
OpenRAVEGlobalArguments.parseAndCreateThreadedUser(options,main,defaultviewer=True)
if __name__ == "__main__":
run()
```
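The core of `performNavigationPlanning` above is rejection sampling of a collision-free `(x, y, yaw)` goal inside the environment bounds. The standalone sketch below isolates that idea; the function name `sample_free_base_pose` is illustrative and not part of OpenRAVE.

```python
# Minimal sketch of the goal-sampling step: draw uniform (x, y, yaw) samples inside
# precomputed bounds until one is collision-free, restoring the robot pose each try.
# Assumes robot.SetActiveDOFs([], DOFAffine.X|DOFAffine.Y|DOFAffine.RotationAxis, [0,0,1])
# was called beforehand, as in the example above, and bounds is a 2x3 [min; max] array.
import numpy

def sample_free_base_pose(env, robot, bounds, maxtries=1000):
    for _ in range(maxtries):
        goal = bounds[0, :] + numpy.random.rand(3) * (bounds[1, :] - bounds[0, :])
        with robot:                      # state saver: the robot pose is restored on exit
            robot.SetActiveDOFValues(goal)
            if not env.CheckCollision(robot):
                return goal
    return None                          # no collision-free sample found
```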
#### File: python/examples/tutorial_iklookat.py
```python
from __future__ import with_statement # for python 2.5
__author__ = '<NAME>'
import time
import openravepy
if not __openravepy_build_doc__:
from openravepy import *
from numpy import *
def main(env,options):
"Main example code."
env.Load(options.scene)
robot = env.GetRobots()[0]
robot.SetActiveManipulator(options.manipname)
# generate the ik solver
ikmodel = databases.inversekinematics.InverseKinematicsModel(robot, iktype=IkParameterization.Type.Lookat3D)
if not ikmodel.load():
ikmodel.autogenerate()
while True:
with env:
# move the robot in a random collision-free position and call the IK
while True:
target=ikmodel.manip.GetTransform()[0:3,3]+(random.rand(3)-0.5)
solutions = ikmodel.manip.FindIKSolutions(IkParameterization(target,IkParameterization.Type.Lookat3D),IkFilterOptions.CheckEnvCollisions)
if len(solutions) > 0:
break
h=env.plot3(array([target]),20.0)
for i in random.permutation(len(solutions))[0:min(100,len(solutions))]:
with env:
robot.SetDOFValues(solutions[i],ikmodel.manip.GetArmIndices())
T = ikmodel.manip.GetTransform()
globaldir = numpy.dot(T[0:3,0:3],ikmodel.manip.GetDirection())
dist = linalg.norm(T[0:3,3]-target)+0.4
hray = env.drawlinelist(array([T[0:3,3], T[0:3,3]+dist*globaldir]),5,colors=[0.1,0.1,1])
env.UpdatePublishedBodies()
time.sleep(0.1)
from optparse import OptionParser
from openravepy.misc import OpenRAVEGlobalArguments
@openravepy.with_destroy
def run(args=None):
"""Command-line execution of the example.
:param args: arguments for script to parse, if not specified will use sys.argv
"""
parser = OptionParser(description='Shows how to use different IK solutions for arms with few joints.')
OpenRAVEGlobalArguments.addOptions(parser)
parser.add_option('--scene',action="store",type='string',dest='scene',default='data/pr2test1.env.xml',
help='Scene file to load (default=%default)')
parser.add_option('--manipname',action="store",type='string',dest='manipname',default='head_torso',
help='name of manipulator to use (default=%default)')
(options, leftargs) = parser.parse_args(args=args)
OpenRAVEGlobalArguments.parseAndCreateThreadedUser(options,main,defaultviewer=True)
if __name__ == "__main__":
run()
```
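A single Lookat3D IK query, extracted from the loop above into a self-contained sketch; the scene, manipulator name, and target point mirror the example defaults but are assumptions here.

```python
# Hedged sketch of one Lookat3D IK query outside the example's loop.
from numpy import array
from openravepy import Environment, databases, IkParameterization, IkFilterOptions

env = Environment()
env.Load('data/pr2test1.env.xml')         # same scene the example defaults to
robot = env.GetRobots()[0]
robot.SetActiveManipulator('head_torso')  # same default manipulator as above
ikmodel = databases.inversekinematics.InverseKinematicsModel(robot, iktype=IkParameterization.Type.Lookat3D)
if not ikmodel.load():
    ikmodel.autogenerate()
target = array([0.5, 0.0, 1.0])           # hypothetical world-frame point to look at
solutions = ikmodel.manip.FindIKSolutions(IkParameterization(target, IkParameterization.Type.Lookat3D),
                                          IkFilterOptions.CheckEnvCollisions)
print('found %d look-at solutions' % len(solutions))
```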
#### File: openrave/python/ikfast_sympy0_6.py
```python
from __future__ import with_statement # for python 2.5
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2009-2012 <NAME> <<EMAIL>>'
__license__ = 'Lesser GPL, Version 3'
__version__ = '56'
from sympy import __version__ as sympy_version
if sympy_version >= '0.7.0':
raise ImportError('ikfast needs sympy 0.6.x')
import sys, copy, time, math, datetime
import __builtin__
from optparse import OptionParser
try:
from openravepy.metaclass import AutoReloader
from openravepy import axisAngleFromRotationMatrix
except:
axisAngleFromRotationMatrix = None
class AutoReloader:
pass
import numpy # required for fast eigenvalue computation
from sympy import *
try:
import mpmath # on some distributions, sympy does not have mpmath in its scope
except ImportError:
pass
try:
import re # for latex cleanup
except ImportError:
pass
try:
from math import isinf, isnan
except ImportError:
# python 2.5
from numpy import isinf as _isinf
from numpy import isnan as _isnan
def isinf(x): return _isinf(float(x))
def isnan(x): return _isnan(float(x))
from itertools import izip
try:
from itertools import combinations, permutations
except ImportError:
def combinations(items,n):
        if n == 0: yield []
else:
_internal_items=list(items)
for i in xrange(len(_internal_items)):
for cc in combinations(_internal_items[i+1:],n-1):
yield [_internal_items[i]]+cc
def permutations(iterable, r=None):
# permutations('ABCD', 2) --> AB AC AD BA BC BD CA CB CD DA DB DC
# permutations(range(3)) --> 012 021 102 120 201 210
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
if r > n:
return
indices = range(n)
cycles = range(n, n-r, -1)
yield tuple(pool[i] for i in indices[:r])
while n:
for i in reversed(range(r)):
cycles[i] -= 1
if cycles[i] == 0:
indices[i:] = indices[i+1:] + indices[i:i+1]
cycles[i] = n - i
else:
j = cycles[i]
indices[i], indices[-j] = indices[-j], indices[i]
yield tuple(pool[i] for i in indices[:r])
break
else:
return
import logging
log = logging.getLogger('openravepy.ikfast')
log.warn('using sympy v%s, which is too old. please upgrade sympy'%sympy_version)
CodeGenerators = {}
# try:
# import ikfast_generator_vb
# CodeGenerators['vb'] = ikfast_generator_vb.CodeGenerator
# CodeGenerators['vb6'] = ikfast_generator_vb.CodeGeneratorVB6
# CodeGenerators['vb6special'] = ikfast_generator_vb.CodeGeneratorVB6Special
# except ImportError:
# pass
try:
import ikfast_generator_cpp_sympy0_6
CodeGenerators['cpp'] = ikfast_generator_cpp_sympy0_6.CodeGenerator
IkType = ikfast_generator_cpp_sympy0_6.IkType
except ImportError:
pass
# changes to sympy:
# core/power.py Pow
def Pow_eval_subs(self, old, new):
if self == old:
return new
if old.func is self.func and self.base == old.base:
coeff1, terms1 = self.exp.as_coeff_terms()
coeff2, terms2 = old.exp.as_coeff_terms()
if terms1==terms2:
# only divide if coeff2 is a divisor of coeff1
if coeff1.is_integer and coeff2.is_integer and (coeff1/coeff2).is_integer:
return new ** (coeff1/coeff2) # (x**(2*y)).subs(x**(3*y),z) -> z**(2/3*y)
if old.func is C.exp:
coeff1,terms1 = old.args[0].as_coeff_terms()
coeff2,terms2 = (self.exp * C.log(self.base)).as_coeff_terms()
if terms1==terms2:
# only divide if coeff2 is a divisor of coeff1
if coeff1.is_integer and coeff2.is_integer and (coeff1/coeff2).is_integer:
return new ** (coeff1/coeff2) # (x**(2*y)).subs(exp(3*y*log(x)),z) -> z**(2/3*y)
return self.base._eval_subs(old, new) ** self.exp._eval_subs(old, new)
power.Pow._eval_subs = Pow_eval_subs
# simplify/simplify.py
# def custom_trigsimp_nonrecursive(expr, deep=False):
# """
# A nonrecursive trig simplifier, used from trigsimp.
#
# == Usage ==
# trigsimp_nonrecursive(expr) -> reduces expression by using known trig
# identities
#
# == Notes ==
#
# deep ........ apply trigsimp inside functions
#
# == Examples ==
# >>> from sympy import cos, sin, log
# >>> from sympy.simplify.simplify import trigsimp, trigsimp_nonrecursive
# >>> from sympy.abc import x, y
# >>> e = 2*sin(x)**2 + 2*cos(x)**2
# >>> trigsimp(e)
# 2
# >>> trigsimp_nonrecursive(log(e))
# log(2*cos(x)**2 + 2*sin(x)**2)
# >>> trigsimp_nonrecursive(log(e), deep=True)
# log(2)
#
# """
# from sympy.core.basic import S
# sin, cos, tan, cot = C.sin, C.cos, C.tan, C.cot
#
# if expr.is_Function:
# if deep:
# return expr.func(trigsimp_nonrecursive(expr.args[0], deep))
# elif expr.is_Mul:
# ret = S.One
# for x in expr.args:
# ret *= trigsimp_nonrecursive(x, deep)
#
# return ret
# elif expr.is_Pow:
# return Pow(trigsimp_nonrecursive(expr.base, deep),
# trigsimp_nonrecursive(expr.exp, deep))
# elif expr.is_Add:
# # TODO this needs to be faster
#
# # The types of trig functions we are looking for
# a,b,c = map(Wild, 'abc')
# matchers = (
# (a*sin(b)**2, a - a*cos(b)**2),
# (a*tan(b)**2, a*(1/cos(b))**2 - a),
# (a*cot(b)**2, a*(1/sin(b))**2 - a)
# )
#
# # Scan for the terms we need
# ret = S.Zero
# for term in expr.args:
# term = trigsimp_nonrecursive(term, deep)
# res = None
# for pattern, result in matchers:
# res = term.match(pattern)
# if res is not None:
# ret += result.subs(res)
# break
# if res is None:
# ret += term
#
# # Reduce any lingering artifacts, such as sin(x)**2 changing
# # to 1-cos(x)**2 when sin(x)**2 was "simpler"
# artifacts = (
# (a - a*cos(b)**2 + c, a*sin(b)**2 + c, cos),
# (a - a*(1/cos(b))**2 + c, -a*tan(b)**2 + c, cos),
# (a - a*(1/sin(b))**2 + c, -a*cot(b)**2 + c, sin)
# )
#
# expr = ret
# for pattern, result, ex in artifacts:
# # Substitute a new wild that excludes some function(s)
# # to help influence a better match. This is because
# # sometimes, for example, 'a' would match sec(x)**2
# a_t = Wild('a', exclude=[ex])
# pattern = pattern.subs(a, a_t)
# result = result.subs(a, a_t)
# if expr.is_number:
# continue
# try:
# m = expr.match(pattern)
# except (TypeError):
# break
#
# while m is not None:
# if m[a_t] == 0 or -m[a_t] in m[c].args or m[a_t] + m[c] == 0:
# break
# expr = result.subs(m)
# if expr.is_number:
# continue
# try:
# m = expr.match(pattern)
# except (TypeError):
# break
#
#
# return expr
# return expr
#
# simplify.simplify.trigsimp_nonrecursive = custom_trigsimp_nonrecursive
class AST:
"""Abstarct Syntax Tree class definitions specific for evaluating complex math equations."""
class SolverSolution:
"""Contains equations for evaluating one unknown variable. The variable can have multiple solutions, and the solution is only valid if every equation in checkforzeros is non-zero
"""
jointname = None
jointeval = None
jointevalcos = None
jointevalsin = None
AddPiIfNegativeEq = None
isHinge = True
checkforzeros = None
thresh = None
AddHalfTanValue = False
dictequations = None
presetcheckforzeros = None
equationsused = None
"""Meaning of FeasibleIsZeros:
        If set to false, then solution is feasible only if all of these equations evaluate to non-zero.
If set to true, solution is feasible only if all these equations evaluate to zero.
"""
FeasibleIsZeros = False
score = None
def __init__(self, jointname, jointeval=None,jointevalcos=None,jointevalsin=None,AddPiIfNegativeEq=None,isHinge=True,thresh=0.000001):
self.jointname = jointname
self.jointeval = jointeval
self.jointevalcos = jointevalcos
self.jointevalsin = jointevalsin
self.AddPiIfNegativeEq = AddPiIfNegativeEq
self.isHinge=isHinge
self.thresh = thresh
self.presetcheckforzeros = []
self.dictequations = []
self.equationsused = []
assert self.checkValidSolution()
def subs(self,solsubs):
if self.jointeval is not None:
self.jointeval = [e.subs(solsubs) for e in self.jointeval]
if self.jointevalcos is not None:
self.jointevalcos = [e.subs(solsubs) for e in self.jointevalcos]
if self.jointevalsin is not None:
self.jointevalsin = [e.subs(solsubs) for e in self.jointevalsin]
if self.checkforzeros is not None:
self.checkforzeros = [e.subs(solsubs) for e in self.checkforzeros]
self.dictequations = [(s,v.subs(solsubs)) for s,v in self.dictequations]
self.presetcheckforzeros = [e.subs(solsubs) for e in self.presetcheckforzeros]
self.equationsused = [e.subs(solsubs) for e in self.equationsused]
if not self.checkValidSolution():
raise IKFastSolver.CannotSolveError('substitution produced invalid results')
return self
def generate(self, generator):
assert self.checkValidSolution()
return generator.generateSolution(self)
def end(self, generator):
return generator.endSolution(self)
def numsolutions(self):
n=0
if self.jointeval is not None:
n += len(self.jointeval)
if self.jointevalcos is not None:
n += 2*len(self.jointevalcos)
if self.jointevalsin is not None:
n += 2*len(self.jointevalsin)
return n
def checkValidSolution(self):
valid=True
if self.jointeval is not None:
valid &= all([IKFastSolver.isValidSolution(e) for e in self.jointeval])
if self.jointevalsin is not None:
valid &= all([IKFastSolver.isValidSolution(e) for e in self.jointevalsin])
if self.jointevalcos is not None:
valid &= all([IKFastSolver.isValidSolution(e) for e in self.jointevalcos])
return valid
def getPresetCheckForZeros(self):
return self.presetcheckforzeros
def getEquationsUsed(self):
return self.equationsused
class SolverPolynomialRoots:
"""find all roots of the polynomial and plug it into jointeval. poly should be polys.polynomial.Poly
"""
jointname = None
poly = None
jointeval = None
jointevalcos = None # not used
jointevalsin = None # not used
checkforzeros = None
postcheckforzeros = None # fail if zero
postcheckfornonzeros = None # fail if nonzero
postcheckforrange = None # checks that value is within [-1,1]
dictequations = None
thresh = 1e-7
isHinge = True
FeasibleIsZeros = False
AddHalfTanValue = False
score = None
equationsused = None
def __init__(self, jointname, poly=None, jointeval=None,isHinge=True):
self.poly = poly
self.jointname=jointname
self.jointeval = jointeval
self.isHinge = isHinge
self.dictequations = []
self.equationsused = []
def numsolutions(self):
return self.poly.degree
def subs(self,solsubs):
if self.jointeval is not None:
self.jointeval = [e.subs(solsubs) for e in self.jointeval]
if self.checkforzeros is not None:
self.checkforzeros = [e.subs(solsubs) for e in self.checkforzeros]
if self.postcheckforzeros is not None:
self.postcheckforzeros = [e.subs(solsubs) for e in self.postcheckforzeros]
if self.postcheckfornonzeros is not None:
self.postcheckfornonzeros = [e.subs(solsubs) for e in self.postcheckfornonzeros]
if self.postcheckforrange is not None:
self.postcheckforrange = [e.subs(solsubs) for e in self.postcheckforrange]
self.dictequations = [(s,v.subs(solsubs)) for s,v in self.dictequations]
self.equationsused = [e.subs(solsubs) for e in self.equationsused]
if self.poly is not None:
self.poly = self.poly.subs(solsubs)
assert self.checkValidSolution()
return self
def generate(self, generator):
return generator.generatePolynomialRoots(self)
def end(self, generator):
return generator.endPolynomialRoots(self)
        def checkValidSolution(self):
            valid = True # ensure 'valid' is bound even when self.poly is None
            if self.poly is not None:
                valid = IKFastSolver.isValidSolution(self.poly.as_basic())
            if self.jointeval is not None:
                valid &= all([IKFastSolver.isValidSolution(e) for e in self.jointeval])
            return valid
def getPresetCheckForZeros(self):
return [self.poly.coeffs[0]] # make sure highest coefficient is not 0!
def getEquationsUsed(self):
return self.equationsused
class SolverCoeffFunction:
"""Evaluate a set of coefficients and pass them to a custom function which will then return all possible values of the specified variables in jointnames.
"""
jointnames = None
jointeval = None
isHinges = True
exportvar = None
exportcoeffeqs = None
rootmaxdim = None
exportfnname = None
jointevalcos = None # used for half angles
jointevalsin = None # used for half angles
checkforzeros = None
FeasibleIsZeros = False
score = None
presetcheckforzeros = None
dictequations = None
equationsused = None
def __init__(self, jointnames, jointeval=None, exportvar=None, exportcoeffeqs=None,exportfnname=None,isHinges=None,rootmaxdim=16,jointevalcos=None,jointevalsin=None):
self.jointnames=jointnames
self.jointeval = jointeval
self.isHinges = isHinges
self.exportvar=exportvar
self.exportcoeffeqs=exportcoeffeqs
self.exportfnname=exportfnname
self.rootmaxdim=rootmaxdim
self.jointevalsin=jointevalsin
self.jointevalcos=jointevalcos
self.presetcheckforzeros = []
self.dictequations = []
self.equationsused = []
def numsolutions(self):
return self.rootmaxdim
def subs(self,solsubs):
if self.jointeval is not None:
self.jointeval = [e.subs(solsubs) for e in self.jointeval]
if self.jointevalcos is not None:
self.jointevalcos = [e.subs(solsubs) for e in self.jointevalcos]
if self.jointevalsin is not None:
self.jointevalsin = [e.subs(solsubs) for e in self.jointevalsin]
if self.checkforzeros is not None:
self.checkforzeros = [e.subs(solsubs) for e in self.checkforzeros]
self.dictequations = [(s,v.subs(solsubs)) for s,v in self.dictequations]
self.presetcheckforzeros = [e.subs(solsubs) for e in self.presetcheckforzeros]
self.equationsused = [e.subs(solsubs) for e in self.equationsused]
#if self.poly is not None:
# self.poly = self.poly.subs(solsubs)
assert self.checkValidSolution()
return self
def generate(self, generator):
return generator.generateCoeffFunction(self)
def end(self, generator):
return generator.endCoeffFunction(self)
        def checkValidSolution(self):
            valid = True # 'valid' was never initialized; without this the '&=' checks below raise UnboundLocalError
            #if self.poly is not None:
            #    valid = IKFastSolver.isValidSolution(self.poly.as_basic())
            if self.jointeval is not None:
valid &= all([IKFastSolver.isValidSolution(e) for e in self.jointeval])
if self.jointevalcos is not None:
valid &= all([IKFastSolver.isValidSolution(e) for e in self.jointevalcos])
if self.jointevalsin is not None:
valid &= all([IKFastSolver.isValidSolution(e) for e in self.jointevalsin])
return valid
def getPresetCheckForZeros(self):
return self.presetcheckforzeros
def getEquationsUsed(self):
return self.equationsused
class SolverMatrixInverse:
"""Take the inverse of a large matirx and set the coefficients of the inverse to the symbols in Asymbols.
"""
A = None
Asymbols = None # has to be same size as B
checkforzeros = None
def __init__(self, A, Asymbols):
self.A = A
self.Asymbols = Asymbols
def subs(self,solsubs):
return self
def generate(self, generator):
return generator.generateMatrixInverse(self)
def end(self, generator):
return generator.endMatrixInverse(self)
def checkValidSolution(self):
return True
def getsubs(self,psubs):
Anew = self.A.subs(psubs).inv()
subs = []
for i in range(self.A.shape[0]):
for j in range(self.A.shape[1]):
if self.Asymbols[i][j] is not None:
subs.append((self.Asymbols[i][j],Anew[i,j]))
return subs
class SolverConditionedSolution:
dictequations = None
solversolutions = None
thresh=0.000001
def __init__(self, solversolutions):
self.solversolutions = solversolutions
self.dictequations = []
def subs(self,solsubs):
for s in self.solversolutions:
s.subs(solsubs)
return self
def generate(self, generator):
return generator.generateConditionedSolution(self)
def end(self, generator):
return generator.endConditionedSolution(self)
class SolverBranchConds:
jointbranches = None
thresh = 0.000001
def __init__(self, jointbranches):
self.jointbranches = jointbranches
def generate(self, generator):
return generator.generateBranchConds(self)
def end(self, generator):
return generator.endBranchConds(self)
class SolverCheckZeros:
jointname = None
jointcheckeqs = None # only used for evaluation
zerobranch = None
nonzerobranch = None
anycondition=None
dictequations=None
thresh=None # a threshold of 1e-6 breaks hiro ik
equationsused = None
def __init__(self, jointname, jointcheckeqs, zerobranch, nonzerobranch,thresh=0.000001,anycondition=True):
self.jointname = jointname
self.jointcheckeqs = jointcheckeqs
self.zerobranch = zerobranch
self.nonzerobranch = nonzerobranch
self.thresh = thresh
self.anycondition = anycondition
self.dictequations = []
def generate(self, generator):
return generator.generateCheckZeros(self)
def end(self, generator):
return generator.endCheckZeros(self)
def getPresetCheckForZeros(self):
return []
def checkValidSolution(self):
for branch in self.nonzerobranch:
if not branch.checkValidSolution():
return False
for branch in self.zerobranch:
if not branch.checkValidSolution():
return False
return True
def numsolutions(self):
return 1
def subs(self,solsubs):
for branch in self.nonzerobranch:
if hasattr(branch,'subs'):
branch.subs(solsubs)
for branch in self.zerobranch:
if hasattr(branch,'subs'):
branch.subs(solsubs)
return self
def getEquationsUsed(self):
return self.equationsused
class SolverFreeParameter:
jointname = None
jointtree = None
def __init__(self, jointname, jointtree):
self.jointname = jointname
self.jointtree = jointtree
def generate(self, generator):
return generator.generateFreeParameter(self)
def end(self, generator):
return generator.endFreeParameter(self)
class SolverRotation:
T = None
jointtree = None
functionid=0
def __init__(self, T, jointtree):
self.T = T
self.jointtree = jointtree
self.dictequations = []
def generate(self, generator):
return generator.generateRotation(self)
def end(self, generator):
return generator.endRotation(self)
class SolverStoreSolution:
"""Called when all the unknowns have been solved to add a solution.
"""
alljointvars = None
checkgreaterzero = None # used for final sanity checks to ensure IK solution is consistent
thresh = 0
offsetvalues = None
isHinge = None
def __init__(self, alljointvars,checkgreaterzero=None,isHinge=None):
self.alljointvars = alljointvars
self.checkgreaterzero = checkgreaterzero
self.isHinge=isHinge
if isHinge is None:
log.warn('SolverStoreSolution.isHinge is not initialized')
self.isHinge = [True]*len(self.alljointvars)
def generate(self, generator):
return generator.generateStoreSolution(self)
def end(self, generator):
return generator.endStoreSolution(self)
class SolverSequence:
jointtrees = None
def __init__(self, jointtrees):
self.jointtrees = jointtrees
def generate(self, generator):
return generator.generateSequence(self)
def end(self, generator):
return generator.endSequence(self)
class SolverBreak:
"""Terminates this scope"""
def generate(self,generator):
return generator.generateBreak(self)
def end(self,generator):
return generator.endBreak(self)
def checkValidSolution(self):
return True
class SolverIKChainTransform6D:
solvejointvars = None
freejointvars = None
jointtree = None
Tfk = None
Tee = None
dictequations = None
def __init__(self, solvejointvars, freejointvars, Tee, jointtree,Tfk=None):
self.solvejointvars = solvejointvars
self.freejointvars = freejointvars
self.Tee = Tee
self.jointtree = jointtree
self.Tfk = Tfk
self.dictequations = []
def generate(self, generator):
return generator.generateChain(self)
def end(self, generator):
return generator.endChain(self)
def leftmultiply(self,Tleft,Tleftinv):
self.Tfk = Tleft*self.Tfk
self.Tee = Tleftinv*self.Tee
class SolverIKChainRotation3D:
solvejointvars = None
freejointvars = None
Rfk = None
Ree = None
jointtree = None
dictequations = None
def __init__(self, solvejointvars, freejointvars, Ree, jointtree,Rfk=None):
self.solvejointvars = solvejointvars
self.freejointvars = freejointvars
self.Ree = Ree
self.Rfk=Rfk
self.jointtree = jointtree
self.dictequations = []
def generate(self, generator):
return generator.generateIKChainRotation3D(self)
def end(self, generator):
return generator.endIKChainRotation3D(self)
def leftmultiply(self,Tleft,Tleftinv):
self.Rfk = Tleft[0:3,0:3]*self.Rfk
self.Ree = Tleftinv[0:3,0:3]*self.Ree
class SolverIKChainTranslation3D:
solvejointvars = None
freejointvars = None
jointtree = None
Pfk = None
Pee = None
dictequations = None
uselocaltrans = False
def __init__(self, solvejointvars, freejointvars, Pee, jointtree,Pfk=None):
self.solvejointvars = solvejointvars
self.freejointvars = freejointvars
self.Pee = Pee
self.jointtree = jointtree
self.Pfk=Pfk
self.dictequations = []
def generate(self, generator):
return generator.generateIKChainTranslation3D(self)
def end(self, generator):
return generator.endIKChainTranslation3D(self)
def leftmultiply(self,Tleft,Tleftinv):
self.Pfk = Tleft[0:3,0:3]*self.Pfk+Tleft[0:3,3]
self.Pee = Tleftinv[0:3,0:3]*self.Pee+Tleftinv[0:3,3]
class SolverIKChainTranslationXY2D:
solvejointvars = None
freejointvars = None
jointtree = None
Pfk = None
Pee = None
dictequations = None
def __init__(self, solvejointvars, freejointvars, Pee, jointtree,Pfk=None):
self.solvejointvars = solvejointvars
self.freejointvars = freejointvars
self.Pee = Pee
self.jointtree = jointtree
self.Pfk=Pfk
self.dictequations = []
def generate(self, generator):
return generator.generateIKChainTranslationXY2D(self)
def end(self, generator):
return generator.endIKChainTranslationXY2D(self)
def leftmultiply(self,Tleft,Tleftinv):
self.Pfk = Tleft[0:2,0:2]*self.Pfk+Tleft[0:2,3]
self.Pee = Tleftinv[0:2,0:2]*self.Pee+Tleftinv[0:2,3]
class SolverIKChainDirection3D:
solvejointvars = None
freejointvars = None
jointtree = None
Dfk = None
Dee = None
dictequations = None
def __init__(self, solvejointvars, freejointvars, Dee, jointtree,Dfk=None):
self.solvejointvars = solvejointvars
self.freejointvars = freejointvars
self.Dee = Dee
self.jointtree = jointtree
self.Dfk=Dfk
self.dictequations = []
def generate(self, generator):
return generator.generateIKChainDirection3D(self)
def end(self, generator):
return generator.endIKChainDirection3D(self)
def leftmultiply(self,Tleft,Tleftinv):
self.Dfk = Tleft[0:3,0:3]*self.Dfk
self.Dee = Tleftinv[0:3,0:3]*self.Dee
class SolverIKChainRay:
solvejointvars = None
freejointvars = None
jointtree = None
Pfk = None
Dfk = None
Pee = None
Dee = None
dictequations = None
is5dray = False # if True, then full 3D position becomes important and things shouldn't be normalized
def __init__(self, solvejointvars, freejointvars, Pee, Dee, jointtree,Pfk=None,Dfk=None,is5dray=False):
self.solvejointvars = solvejointvars
self.freejointvars = freejointvars
self.Pee = Pee
self.Dee = Dee
self.jointtree = jointtree
self.Pfk = Pfk
self.Dfk = Dfk
self.dictequations = []
self.is5dray=is5dray
def generate(self, generator):
return generator.generateIKChainRay(self)
def end(self, generator):
return generator.endIKChainRay(self)
def leftmultiply(self,Tleft,Tleftinv):
self.Pfk = Tleft[0:3,0:3]*self.Pfk+Tleft[0:3,3]
self.Dfk = Tleft[0:3,0:3]*self.Dfk
self.Pee = Tleftinv[0:3,0:3]*self.Pee+Tleftinv[0:3,3]
self.Dee = Tleftinv[0:3,0:3]*self.Dee
class SolverIKChainLookat3D:
solvejointvars = None
freejointvars = None
jointtree = None
Pfk = None
Dfk = None
Pee = None
dictequations = None
def __init__(self, solvejointvars, freejointvars, Pee, jointtree,Pfk=None,Dfk=None):
self.solvejointvars = solvejointvars
self.freejointvars = freejointvars
self.Pee = Pee
self.jointtree = jointtree
self.Pfk=Pfk
self.Dfk=Dfk
self.dictequations = []
def generate(self, generator):
return generator.generateIKChainLookat3D(self)
def end(self, generator):
return generator.endIKChainLookat3D(self)
def leftmultiply(self,Tleft,Tleftinv):
self.Pfk = Tleft[0:3,0:3]*self.Pfk+Tleft[0:3,3]
self.Dfk = Tleft[0:3,0:3]*self.Dfk
self.Pee = Tleftinv[0:3,0:3]*self.Pee+Tleftinv[0:3,3]
class SolverIKChainAxisAngle:
solvejointvars = None
freejointvars = None
jointtree = None
Pfk = None
Pee = None
dictequations = None
angleee=None
anglefk=None
iktype=None
def __init__(self, solvejointvars, freejointvars, Pee, angleee,jointtree,Pfk=None,anglefk=None,iktype=None):
self.solvejointvars = solvejointvars
self.freejointvars = freejointvars
self.Pee = Pee
self.anglefk=anglefk
self.jointtree = jointtree
self.Pfk=Pfk
self.angleee=angleee
self.dictequations = []
self.iktype=iktype
def generate(self, generator):
return generator.generateSolverIKChainAxisAngle(self)
def end(self, generator):
return generator.endSolverIKChainAxisAngle(self)
def leftmultiply(self,Tleft,Tleftinv):
self.Pfk = Tleft[0:2,0:2]*self.Pfk+Tleft[0:2,3]
self.Pee = Tleftinv[0:2,0:2]*self.Pee+Tleftinv[0:2,3]
assert(0) # need to change angle
from sympy.core import function # for sympy 0.7.1+
class fmod(function.Function):
"""defines floating-point mod"""
nargs = 2
is_real = True
is_Function = True
class atan2check(atan2):
"""defines floating-point mod"""
nargs = 2
is_real = True
is_Function = True
class IKFastSolver(AutoReloader):
"""Solves the analytical inverse kinematics equations. The symbol naming conventions are as follows:
cjX - cos joint angle
constX - temporary constant used to simplify computations
dummyX - dummy intermediate variables to solve for
gconstX - global constant that is also used during ik generation phase
htjX - half tan of joint angle
jX - joint angle
pX - end effector position information
rX - end effector rotation information
sjX - sin joint angle
tconstX - second-level temporary constant
tjX - tan of joint angle
"""
class CannotSolveError(Exception):
"""thrown when ikfast fails to solve a particular set of equations with the given knowns and unknowns
"""
def __init__(self,value):
self.value=value
def __str__(self):
return repr(self.value)
class IKFeasibilityError(Exception):
"""thrown when it is not possible to solve the IK due to robot not having enough degrees of freedom. For example, a robot with 5 joints does not have 6D IK
"""
def __init__(self,equations,checkvars):
self.equations=equations
self.checkvars=checkvars
def __str__(self):
s = "Not enough equations to solve variables %s!\nThis means one of several things: not enough constraints to solve all variables, or the manipulator does not span the target IK space. This is not an ikfast failure, it just means the robot kinematics are invalid for this type of IK. Equations that are not uniquely solvable are:\n"%str(self.checkvars)
for eq in self.equations:
s += str(eq) + '\n'
return s
class JointAxis:
__slots__ = ['joint','iaxis']
class Variable:
__slots__ = ['var','svar','cvar','tvar','htvar']
def __init__(self, var):
self.name = var.name
self.var = var
self.svar = Symbol("s%s"%var.name)
self.cvar = Symbol("c%s"%var.name)
self.tvar = Symbol("t%s"%var.name)
self.htvar = Symbol("ht%s"%var.name)
self.vars = [self.var,self.svar,self.cvar,self.tvar,self.htvar]
self.subs = [(cos(self.var),self.cvar),(sin(self.var),self.svar),(tan(self.var),self.tvar),(tan(self.var/2),self.htvar)]
            self.subsinv = [(self.cvar,cos(self.var)),(self.svar, sin(self.var)),(self.tvar,tan(self.var))]
def getsubs(self,value):
return [(self.var,value)]+[(s,v.subs(self.var,value).evalf()) for v,s in self.subs]
class DegenerateCases:
def __init__(self):
self.handleddegeneratecases = []
def clone(self):
clone=IKFastSolver.DegenerateCases()
clone.handleddegeneratecases = self.handleddegeneratecases[:]
return clone
def addcasesconds(self,newconds,currentcases):
for case in newconds:
newcases = set(currentcases)
newcases.add(case)
assert not self.hascases(newcases)
self.handleddegeneratecases.append(newcases)
def addcases(self,currentcases):
assert not self.hascases(currentcases)
self.handleddegeneratecases.append(currentcases)
def gethandledconds(self,currentcases):
handledconds = []
for handledcases in self.handleddegeneratecases:
if len(currentcases)+1==len(handledcases) and currentcases < handledcases:
handledconds.append((handledcases - currentcases).pop())
return handledconds
def hascases(self,currentcases):
for handledcases in self.handleddegeneratecases:
if handledcases == currentcases:
return True
return False
def __init__(self, kinbody=None,kinematicshash='',precision=None):
self.usinglapack = False
self.useleftmultiply = True
self.freevarsubs = []
self.degeneratecases = None
self.kinematicshash = kinematicshash
self.testconsistentvalues = None
if precision is None:
self.precision=8
else:
self.precision=precision
self.kinbody = kinbody
self.axismap = {}
self.axismapinv = {}
with self.kinbody:
for idof in range(self.kinbody.GetDOF()):
axis = IKFastSolver.JointAxis()
axis.joint = self.kinbody.GetJointFromDOFIndex(idof)
axis.iaxis = idof-axis.joint.GetDOFIndex()
name = str('j%d')%idof
self.axismap[name] = axis
self.axismapinv[idof] = name
def convertRealToRational(self, x,precision=None):
if precision is None:
precision=self.precision
if abs(x) < 10**-precision:
return S.Zero
r0 = Rational(str(round(Real(float(x),30),precision)))
if x == 0:
return r0
r1 = 1/Rational(str(round(Real(1/float(x),30),precision)))
return r0 if len(str(r0)) < len(str(r1)) else r1
def normalizeRotation(self,M):
"""error from openrave can be on the order of 1e-6 (especially if they are defined diagonal to some axis)
"""
right = Matrix(3,1,[self.convertRealToRational(x,self.precision-3) for x in M[0,0:3]])
right = right/right.norm()
up = Matrix(3,1,[self.convertRealToRational(x,self.precision-3) for x in M[1,0:3]])
up = up - right*right.dot(up)
up = up/up.norm()
d = right.cross(up)
for i in range(3):
# don't round the rotational part anymore since it could lead to unnormalized rotations!
M[0,i] = right[i]
M[1,i] = up[i]
M[2,i] = d[i]
M[i,3] = self.convertRealToRational(M[i,3])
M[3,i] = S.Zero
M[3,3] = S.One
return M
def numpyMatrixToSympy(self,T):
if axisAngleFromRotationMatrix is not None:
axisangle = axisAngleFromRotationMatrix(T)
angle = sqrt(axisangle[0]**2+axisangle[1]**2+axisangle[2]**2)
axisangle /= angle
log.debug('rotation angle: %f, axis=[%f,%f,%f]', (angle*180/pi).evalf(),axisangle[0],axisangle[1],axisangle[2])
return self.normalizeRotation(Matrix(4,4,[x for x in T.flat]))
def numpyVectorToSympy(self,v,precision=None):
return Matrix(len(v),1,[self.convertRealToRational(x,precision) for x in v])
@staticmethod
def rodrigues(axis, angle):
return IKFastSolver.rodrigues2(axis,cos(angle),sin(angle))
@staticmethod
def matrixFromQuat(quat):
M = eye(3)
qq1 = 2*quat[1]*quat[1]
qq2 = 2*quat[2]*quat[2]
qq3 = 2*quat[3]*quat[3]
M[0,0] = 1 - qq2 - qq3
M[0,1] = 2*(quat[1]*quat[2] - quat[0]*quat[3])
M[0,2] = 2*(quat[1]*quat[3] + quat[0]*quat[2])
M[1,0] = 2*(quat[1]*quat[2] + quat[0]*quat[3])
M[1,1]= 1 - qq1 - qq3
M[1,2]= 2*(quat[2]*quat[3] - quat[0]*quat[1])
M[2,0] = 2*(quat[1]*quat[3] - quat[0]*quat[2])
M[2,1] = 2*(quat[2]*quat[3] + quat[0]*quat[1])
M[2,2] = 1 - qq1 - qq2
return M
@staticmethod
def rodrigues2(axis, cosangle, sinangle):
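        # Rodrigues rotation formula: R = I + sin(t)*K + (1-cos(t))*K^2, where K is the skew-symmetric matrix of axis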
skewsymmetric = Matrix(3, 3, [S.Zero,-axis[2],axis[1],axis[2],S.Zero,-axis[0],-axis[1],axis[0],S.Zero])
return eye(3) + sinangle * skewsymmetric + (S.One-cosangle)*skewsymmetric*skewsymmetric
@staticmethod
def affineInverse(affinematrix):
T = eye(4)
T[0:3,0:3] = affinematrix[0:3,0:3].transpose()
T[0:3,3] = -affinematrix[0:3,0:3].transpose() * affinematrix[0:3,3]
return T
@staticmethod
def affineSimplify(T):
return Matrix(T.shape[0],T.shape[1],[trigsimp(x.expand()) for x in T])
@staticmethod
def multiplyMatrix(Ts):
Tfinal = eye(4)
for T in Ts:
Tfinal = Tfinal*T
return Tfinal
@staticmethod
def equal(eq0,eq1):
return (eq0-eq1).expand() == S.Zero
def chop(self,expr,precision=None):
return expr
def isHinge(self,axisname):
if axisname[0]!='j' or not axisname in self.axismap:
log.info('isHinge returning false for variable %s'%axisname)
return False # dummy joint most likely for angles
return self.axismap[axisname].joint.IsRevolute(self.axismap[axisname].iaxis)
def forwardKinematicsChain(self, chainlinks, chainjoints):
"""The first and last matrices returned are always non-symbolic
"""
with self.kinbody:
assert(len(chainjoints)+1==len(chainlinks))
Links = []
Tright = eye(4)
jointvars = []
jointinds = []
for i,joint in enumerate(chainjoints):
if len(joint.GetName()) == 0:
raise self.CannotSolveError('chain %s:%s contains a joint with no name!'%(chainlinks[0].GetName(),chainlinks[-1].GetName()))
if chainjoints[i].GetHierarchyParentLink() == chainlinks[i]:
TLeftjoint = self.numpyMatrixToSympy(joint.GetInternalHierarchyLeftTransform())
TRightjoint = self.numpyMatrixToSympy(joint.GetInternalHierarchyRightTransform())
axissign = S.One
else:
TLeftjoint = self.affineInverse(self.numpyMatrixToSympy(joint.GetInternalHierarchyRightTransform()))
TRightjoint = self.affineInverse(self.numpyMatrixToSympy(joint.GetInternalHierarchyLeftTransform()))
axissign = -S.One
#print i,TLeftjoint,TRightjoint
if joint.IsStatic():
Tright = self.affineSimplify(Tright * TLeftjoint * TRightjoint)
else:
Tjoints = []
for iaxis in range(joint.GetDOF()):
if joint.GetDOFIndex() >= 0:
var = Symbol(self.axismapinv[joint.GetDOFIndex()])
cosvar = cos(var)
sinvar = sin(var)
jointvars.append(var)
elif joint.IsMimic(iaxis):
# get the mimic equation
var = joint.GetMimicEquation(iaxis)
# this needs to be reduced!
cosvar = cos(var)
sinvar = sin(var)
else:
raise ValueError('cannot solve for mechanism when a non-mimic passive joint %s is in chain'%str(joint))
Tj = eye(4)
jaxis = axissign*self.numpyVectorToSympy(joint.GetInternalHierarchyAxis(iaxis))
if joint.IsRevolute(iaxis):
Tj[0:3,0:3] = self.rodrigues2(jaxis,cosvar,sinvar)
elif joint.IsPrismatic(iaxis):
Tj[0:3,3] = jaxis*(var)
else:
raise ValueError('failed to process joint %s'%joint.GetName())
Tjoints.append(Tj)
if axisAngleFromRotationMatrix is not None:
axisangle = axisAngleFromRotationMatrix(numpy.array(numpy.array(Tright * TLeftjoint),numpy.float64))
angle = sqrt(axisangle[0]**2+axisangle[1]**2+axisangle[2]**2)
axisangle /= angle
log.debug('rotation angle of Links[%d]: %f, axis=[%f,%f,%f]', len(Links), (angle*180/pi).evalf(),axisangle[0],axisangle[1],axisangle[2])
Links.append(Tright * TLeftjoint)
for Tj in Tjoints:
jointinds.append(len(Links))
Links.append(Tj)
Tright = TRightjoint
Links.append(Tright)
# before returning the final links, try to push as much translation components
# outwards to both ends. Sometimes these components can get in the way of detecting
# intersecting axes
if len(jointinds) > 0:
iright = jointinds[-1]
Ttrans = eye(4)
Ttrans[0:3,3] = Links[iright-1][0:3,0:3].transpose() * Links[iright-1][0:3,3]
Trot_with_trans = Ttrans * Links[iright]
separated_trans = Trot_with_trans[0:3,0:3].transpose() * Trot_with_trans[0:3,3]
for j in range(0,3):
if separated_trans[j].has_any_symbols(*jointvars):
Ttrans[j,3] = Rational(0)
else:
Ttrans[j,3] = separated_trans[j]
Links[iright+1] = Ttrans * Links[iright+1]
Links[iright-1] = Links[iright-1] * self.affineInverse(Ttrans)
log.info("moved translation %s to right end",Ttrans[0:3,3].transpose())
if len(jointinds) > 1:
ileft = jointinds[0]
separated_trans = Links[ileft][0:3,0:3] * Links[ileft+1][0:3,3]
Ttrans = eye(4)
for j in range(0,3):
if not separated_trans[j].has_any_symbols(*jointvars):
Ttrans[j,3] = separated_trans[j]
Links[ileft-1] = Links[ileft-1] * Ttrans
Links[ileft+1] = self.affineInverse(Ttrans) * Links[ileft+1]
log.info("moved translation %s to left end",Ttrans[0:3,3].transpose())
if len(jointinds) > 3: # last 3 axes always have to be intersecting, move the translation of the first axis to the left
ileft = jointinds[-3]
separated_trans = Links[ileft][0:3,0:3] * Links[ileft+1][0:3,3]
Ttrans = eye(4)
for j in range(0,3):
if not separated_trans[j].has_any_symbols(*jointvars):
Ttrans[j,3] = separated_trans[j]
Links[ileft-1] = Links[ileft-1] * Ttrans
Links[ileft+1] = self.affineInverse(Ttrans) * Links[ileft+1]
log.info("moved translation on intersecting axis %s to left",Ttrans[0:3,3].transpose())
return Links, jointvars
def countVariables(self,expr,var):
"""Counts number of terms variable appears in"""
if not expr.is_Add:
if expr.has_any_symbols(var):
return 1
return 0
num = 0
for term in expr.args:
if term.has_any_symbols(var):
num += 1
return num
@staticmethod
def isValidPowers(expr):
if expr.is_Pow:
if not expr.exp.is_number or expr.exp < 0:
return False
return IKFastSolver.isValidPowers(expr.base)
elif expr.is_Add or expr.is_Mul or expr.is_Function:
return all([IKFastSolver.isValidPowers(arg) for arg in expr.args])
else:
return True
@staticmethod
def rotateDirection(sourcedir,targetdir):
sourcedir /= sqrt(sourcedir.dot(sourcedir))
targetdir /= sqrt(targetdir.dot(targetdir))
rottodirection = sourcedir.cross(targetdir)
fsin = sqrt(rottodirection.dot(rottodirection))
fcos = sourcedir.dot(targetdir)
M = eye(4)
if fsin > 1e-6:
M[0:3,0:3] = IKFastSolver.rodrigues(rottodirection*(1/fsin),atan2(fsin,fcos))
elif fcos < 0: # hand is flipped 180, rotate around x axis
rottodirection = Matrix(3,1,[S.One,S.Zero,S.Zero])
rottodirection -= sourcedir * sourcedir.dot(rottodirection)
M[0:3,0:3] = IKFastSolver.rodrigues(rottodirection.normalized(), atan2(fsin, fcos))
return M
@staticmethod
def has_any_symbols(eqs,*sym):
return any([eq.has_any_symbols(*sym) for eq in eqs]) if len(sym) > 0 else False
def trigsimp(self, eq,trigvars):
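        # repeatedly substitute sin(v)**2 -> 1 - cos(v)**2 for hinge joints until the
        # expression's operation count stops decreasing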
trigsubs = [(sin(v)**2,1-cos(v)**2) for v in trigvars if self.isHinge(v.name)]
eq=eq.expand()
curcount = eq.count_ops()
while True:
eq=eq.subs(trigsubs).expand()
newcount = eq.count_ops()
if IKFastSolver.equal(curcount,newcount):
break
curcount=newcount
return eq
def codeComplexity(self,expr):
complexity = 1
if expr.is_Add:
for term in expr.args:
complexity += self.codeComplexity(term)
elif expr.is_Mul:
for term in expr.args:
complexity += self.codeComplexity(term)
elif expr.is_Pow:
complexity += self.codeComplexity(expr.base)+self.codeComplexity(expr.exp)
elif expr.is_Function:
complexity += 1
for term in expr.args:
complexity += self.codeComplexity(term)
return complexity
def sortComplexity(self,exprs):
exprs.sort(lambda x, y: self.codeComplexity(x)-self.codeComplexity(y))
def checkForDivideByZero(self,eq):
"""returns the equations to check for zero
"""
checkforzeros = []
try:
if eq.is_Function:
for arg in eq.args:
checkforzeros += self.checkForDivideByZero(arg)
elif eq.is_Add:
for arg in eq.args:
checkforzeros += self.checkForDivideByZero(arg)
elif eq.is_Mul:
for arg in eq.args:
checkforzeros += self.checkForDivideByZero(arg)
elif eq.is_Pow:
for arg in eq.args:
checkforzeros += self.checkForDivideByZero(arg)
if eq.exp.is_number and eq.exp < 0:
checkforzeros.append(eq.base)
except AssertionError,e:
log.warn('%s',e)
if len(checkforzeros) > 0:
newcheckforzeros = []
for eqtemp in checkforzeros:
# check for abs(x**y), in that case choose x
if eqtemp.is_Function and eqtemp.func == abs:
eqtemp = eqtemp.args[0]
while eqtemp.is_Pow:
eqtemp = eqtemp.base
checkeq = self.removecommonexprs(eqtemp,onlygcd=False,onlynumbers=True)
if self.isExpressionUnique(newcheckforzeros,checkeq) and self.isExpressionUnique(newcheckforzeros,-checkeq):
newcheckforzeros.append(checkeq)
return newcheckforzeros
return checkforzeros
def solutionComplexity(self,sol,solvedvars,unsolvedvars):
# for all solutions, check if there is a divide by zero
sol.checkforzeros = sol.getPresetCheckForZeros()
sol.score = 20000*sol.numsolutions()
try:
            # weight the score by the number of solutions in order to prioritize equations with fewer solutions
if hasattr(sol,'jointeval') and sol.jointeval is not None:
for s in sol.jointeval:
sol.score += self.codeComplexity(s)
sol.checkforzeros += self.checkForDivideByZero(s)
subexprs = sol.jointeval
elif hasattr(sol,'jointevalsin') and sol.jointevalsin is not None:
for s in sol.jointevalsin:
sol.score += self.codeComplexity(s)
sol.checkforzeros += self.checkForDivideByZero(s)
subexprs = sol.jointevalsin
elif hasattr(sol,'jointevalcos') and sol.jointevalcos is not None:
for s in sol.jointevalcos:
sol.score += self.codeComplexity(s)
sol.checkforzeros += self.checkForDivideByZero(s)
subexprs = sol.jointevalcos
else:
return sol.score
# have to also check solution dictionary
for s,v in sol.dictequations:
sol.score += self.codeComplexity(v)
sol.checkforzeros += self.checkForDivideByZero(v)
def checkpow(expr,sexprs):
score = 0
if expr.is_Pow:
sexprs.append(expr.base)
if expr.base.is_finite is not None and not expr.base.is_finite:
return oo # infinity
if expr.exp.is_number and expr.exp < 0:
# check if exprbase contains any variables that have already been solved
containsjointvar = expr.base.has_any_symbols(*solvedvars)
cancheckexpr = not expr.base.has_any_symbols(*unsolvedvars)
score += 10000
if not cancheckexpr:
score += 100000
elif not self.isValidSolution(expr):
return oo # infinity
return score
sexprs = subexprs[:]
while len(sexprs) > 0:
sexpr = sexprs.pop(0)
if sexpr.is_Add:
for arg in sexpr.args:
if arg.is_Mul:
for arg2 in arg.args:
sol.score += checkpow(arg2,sexprs)
else:
sol.score += checkpow(arg,sexprs)
elif sexpr.is_Mul:
for arg in sexpr.args:
sol.score += checkpow(arg,sexprs)
elif sexpr.is_Function:
sexprs += sexpr.args
elif not self.isValidSolution(sexpr):
log.warn('not valid: %s',sexpr)
sol.score = oo # infinity
else:
sol.score += checkpow(sexpr,sexprs)
except AssertionError, e:
log.warn('%s',e)
sol.score=1e10
newcheckforzeros = []
for eqtemp in sol.checkforzeros:
checkeq = self.removecommonexprs(eqtemp,onlygcd=False,onlynumbers=True)
if self.isExpressionUnique(newcheckforzeros,checkeq) and self.isExpressionUnique(newcheckforzeros,-checkeq):
newcheckforzeros.append(checkeq)
sol.checkforzeros = newcheckforzeros
return sol.score
def checkSolvability(self,AllEquations,checkvars,othervars):
pass
def checkSolvabilityReal(self,AllEquations,checkvars,othervars):
"""returns true if there are enough equations to solve for checkvars
"""
subs = []
checksymbols = []
allsymbols = []
for var in checkvars:
subs += self.Variable(var).subs
checksymbols += self.Variable(var).vars
allsymbols = checksymbols[:]
for var in othervars:
subs += self.Variable(var).subs
allsymbols += self.Variable(var).vars
found = False
for testconsistentvalue in self.testconsistentvalues:
psubvalues = [(s,v) for s,v in testconsistentvalue if not s.has_any_symbols(*checksymbols)]
eqs = [eq.subs(self.globalsymbols).subs(subs).subs(psubvalues) for eq in AllEquations]
usedsymbols = [s for s in checksymbols if self.has_any_symbols(eqs,s)]
eqs = [Poly(eq,*usedsymbols) for eq in eqs if eq != S.Zero]
            # check if any equations have monomials of degree more than 1; if so, quit with success since the sympy 0.6.7 solver will freeze
numhigherpowers = 0
for eq in eqs:
for monom in eq.monoms:
if any([m > 1 for m in monom]):
numhigherpowers += 1
if numhigherpowers > 0:
log.info('checkSolvability has %d higher powers, returning solvable if > 6'%numhigherpowers)
if numhigherpowers > 6:
found = True
break
for var in checkvars:
varsym = self.Variable(var)
if self.isHinge(var.name):
if varsym.cvar in usedsymbols and varsym.svar in usedsymbols:
eqs.append(Poly(varsym.cvar**2+varsym.svar**2-1,*usedsymbols))
# have to make sure there are representative symbols of all the checkvars, otherwise degenerate solution
setusedsymbols = set(usedsymbols)
if any([len(setusedsymbols.intersection(self.Variable(var).vars)) == 0 for var in checkvars]):
continue
try:
sol=solve_poly_system(eqs)
if sol is not None and len(sol) > 0 and len(sol[0]) == len(usedsymbols):
found = True
break
except:
pass
if not found:
raise self.IKFeasibilityError(AllEquations,checkvars)
def writeIkSolver(self,chaintree,lang=None):
"""write the ast into a specific langauge, prioritize c++
"""
if lang is None:
if CodeGenerators.has_key('cpp'):
lang = 'cpp'
else:
lang = CodeGenerators.keys()[0]
log.info('generating %s code...'%lang)
return CodeGenerators[lang](kinematicshash=self.kinematicshash,version=__version__).generate(chaintree)
def generateIkSolver(self, baselink, eelink, freeindices=None,solvefn=None):
if solvefn is None:
solvefn = IKFastSolver.solveFullIK_6D
chainlinks = self.kinbody.GetChain(baselink,eelink,returnjoints=False)
chainjoints = self.kinbody.GetChain(baselink,eelink,returnjoints=True)
LinksRaw, jointvars = self.forwardKinematicsChain(chainlinks,chainjoints)
for T in LinksRaw:
log.info('[' + ','.join(['[%s, %s, %s, %s]'%(T[i,0],T[i,1],T[i,2],T[i,3]) for i in range(3)]) + ']')
self.degeneratecases = None
if freeindices is None:
# need to iterate through all combinations of free joints
assert(0)
isolvejointvars = []
solvejointvars = []
self.ifreejointvars = []
self.freevarsubs = []
self.freevarsubsinv = []
self.freevars = []
self.freejointvars = []
self.invsubs = []
for i,v in enumerate(jointvars):
var = self.Variable(v)
axis = self.axismap[v.name]
dofindex = axis.joint.GetDOFIndex()+axis.iaxis
if dofindex in freeindices:
# convert all free variables to constants
self.ifreejointvars.append(i)
self.freevarsubs += [(cos(var.var), var.cvar), (sin(var.var), var.svar)]
self.freevarsubsinv += [(var.cvar,cos(var.var)), (var.svar,sin(var.var))]
self.freevars += [var.cvar,var.svar]
self.freejointvars.append(var.var)
else:
solvejointvars.append(v)
isolvejointvars.append(i)
self.invsubs += [(var.cvar,cos(v)),(var.svar,sin(v))]
# set up the destination symbols
self.Tee = eye(4)
for i in range(0,3):
for j in range(0,3):
self.Tee[i,j] = Symbol("r%d%d"%(i,j))
self.Tee[0,3] = Symbol("px")
self.Tee[1,3] = Symbol("py")
self.Tee[2,3] = Symbol("pz")
r00,r01,r02,px,r10,r11,r12,py,r20,r21,r22,pz = self.Tee[0:12]
self.pp = Symbol('pp')
self.ppsubs = [(self.pp,px**2+py**2+pz**2)]
self.npxyz = [Symbol('npx'),Symbol('npy'),Symbol('npz')]
self.npxyzsubs = [(self.npxyz[i],px*self.Tee[0,i]+py*self.Tee[1,i]+pz*self.Tee[2,i]) for i in range(3)]
# cross products between columns of self.Tee
self.rxp = []
self.rxpsubs = []
for i in range(3):
self.rxp.append([Symbol('rxp%d_%d'%(i,j)) for j in range(3)])
c = self.Tee[0:3,i].cross(self.Tee[0:3,3])
self.rxpsubs += [(self.rxp[-1][j],c[j]) for j in range(3)]
self.pvars = self.Tee[0:12]+self.npxyz+[self.pp]+self.rxp[0]+self.rxp[1]+self.rxp[2]
self.Teeinv = self.affineInverse(self.Tee)
LinksLeft = []
if self.useleftmultiply:
while not self.has_any_symbols(LinksRaw[0],*solvejointvars):
LinksLeft.append(LinksRaw.pop(0))
LinksLeftInv = [self.affineInverse(T) for T in LinksLeft]
self.testconsistentvalues = None
# before passing to the solver, replace big numbers with constants; this greatly reduces computation time
self.gsymbolgen = cse_main.numbered_symbols('gconst')
self.globalsymbols = []
# for T in LinksRaw:
# for i in range(12):
# if T[i].is_number and len(str(T[i])) > 30:
# sym = None
# for c,v in self.globalsymbols:
# if self.equal(v,T[i]):
# sym = c
# break
# if sym is None:
# sym = self.gsymbolgen.next()
# log.info('adding global symbol %s=%s'%(sym,T[i]))
# self.globalsymbols.append((sym,T[i]))
# T[i] = sym
chaintree = solvefn(self, LinksRaw, jointvars, isolvejointvars)
if self.useleftmultiply:
chaintree.leftmultiply(Tleft=self.multiplyMatrix(LinksLeft), Tleftinv=self.multiplyMatrix(LinksLeftInv[::-1]))
chaintree.dictequations += self.globalsymbols
return chaintree
def computeConsistentValues(self,jointvars,T,numsolutions=1,subs=None):
possibleangles = [S.Zero, pi.evalf()/2, asin(3.0/5).evalf(), asin(4.0/5).evalf(), asin(5.0/13).evalf(), asin(12.0/13).evalf()]
possibleanglescos = [S.One, S.Zero, Rational(4,5), Rational(3,5), Rational(12,13), Rational(5,13)]
possibleanglessin = [S.Zero, S.One, Rational(3,5), Rational(4,5), Rational(5,13), Rational(12,13)]
testconsistentvalues = []
varsubs = []
for jointvar in jointvars:
varsubs += self.Variable(jointvar).subs
for isol in range(numsolutions):
inds = [0]*len(jointvars)
if isol < numsolutions-1:
for j in range(len(jointvars)):
inds[j] = (isol+j)%len(possibleangles)
valsubs = []
for i,ind in enumerate(inds):
v,s,c = possibleangles[ind],possibleanglessin[ind],possibleanglescos[ind]
var = self.Variable(jointvars[i])
valsubs += [(var.var,v),(var.cvar,c),(var.svar,s),(var.tvar,s/c),(var.htvar,s/(1+c))]
psubs = []
for i in range(12):
psubs.append((self.pvars[i],T[i].subs(varsubs).subs(self.globalsymbols+valsubs)))
for s,v in self.ppsubs+self.npxyzsubs+self.rxpsubs:
psubs.append((s,v.subs(psubs)))
allsubs = valsubs+psubs
if subs is not None:
allsubs += [(dvar,var.subs(varsubs).subs(valsubs)) for dvar,var in subs]
testconsistentvalues.append(allsubs)
return testconsistentvalues
def solveFullIK_Direction3D(self,LinksRaw, jointvars, isolvejointvars, rawbasedir=Matrix(3,1,[S.Zero,S.Zero,S.One])):
"""basedir needs to be filled with a 3elemtn vector of the initial direction to control"""
basedir = Matrix(3,1,[Real(x,30) for x in rawbasedir])
basedir /= sqrt(basedir[0]*basedir[0]+basedir[1]*basedir[1]+basedir[2]*basedir[2])
for i in range(3):
basedir[i] = self.convertRealToRational(basedir[i])
Links = LinksRaw[:]
LinksInv = [self.affineInverse(link) for link in Links]
T = self.multiplyMatrix(Links)
Tfinal = zeros((4,4))
Tfinal[0,0:3] = (T[0:3,0:3]*basedir).transpose()
self.testconsistentvalues = self.computeConsistentValues(jointvars,Tfinal,numsolutions=4)
endbranchtree = [AST.SolverStoreSolution (jointvars,isHinge=[self.isHinge(var.name) for var in jointvars])]
solvejointvars = [jointvars[i] for i in isolvejointvars]
if len(solvejointvars) != 2:
raise self.CannotSolveError('need 2 joints')
log.info('ikfast direction3d: %s',solvejointvars)
Daccum = self.Tee[0,0:3].transpose()
numvarsdone = 2
Ds = []
Dsee = []
for i in range(len(Links)-1):
T = self.multiplyMatrix(Links[i:])
D = T[0:3,0:3]*basedir
hasvars = [self.has_any_symbols(D,v) for v in solvejointvars]
if __builtin__.sum(hasvars) == numvarsdone:
Ds.append(D)
Dsee.append(Daccum)
numvarsdone -= 1
Tinv = self.affineInverse(Links[i])
Daccum = Tinv[0:3,0:3]*Daccum
AllEquations = self.buildEquationsFromTwoSides(Ds,Dsee,jointvars,uselength=False)
self.checkSolvability(AllEquations,solvejointvars,self.freejointvars)
tree = self.solveAllEquations(AllEquations,curvars=solvejointvars,othersolvedvars = self.freejointvars[:],solsubs = self.freevarsubs[:],endbranchtree=endbranchtree)
tree = self.verifyAllEquations(AllEquations,solvejointvars,self.freevarsubs,tree)
return AST.SolverIKChainDirection3D([(jointvars[ijoint],ijoint) for ijoint in isolvejointvars], [(v,i) for v,i in izip(self.freejointvars,self.ifreejointvars)], Dee=self.Tee[0,0:3].transpose().subs(self.freevarsubs), jointtree=tree,Dfk=Tfinal[0,0:3].transpose())
def solveFullIK_Lookat3D(self,LinksRaw, jointvars, isolvejointvars,rawbasedir=Matrix(3,1,[S.Zero,S.Zero,S.One]),rawbasepos=Matrix(3,1,[S.Zero,S.Zero,S.Zero])):
"""basedir,basepos needs to be filled with a direction and position of the ray to control the lookat
"""
basedir = Matrix(3,1,[Real(x,30) for x in rawbasedir])
basepos = Matrix(3,1,[self.convertRealToRational(x) for x in rawbasepos])
basedir /= sqrt(basedir[0]*basedir[0]+basedir[1]*basedir[1]+basedir[2]*basedir[2])
for i in range(3):
basedir[i] = self.convertRealToRational(basedir[i])
basepos = basepos-basedir*basedir.dot(basepos)
Links = LinksRaw[:]
LinksInv = [self.affineInverse(link) for link in Links]
T = self.multiplyMatrix(Links)
Tfinal = zeros((4,4))
Tfinal[0,0:3] = (T[0:3,0:3]*basedir).transpose()
Tfinal[0:3,3] = T[0:3,0:3]*basepos+T[0:3,3]
self.testconsistentvalues = self.computeConsistentValues(jointvars,Tfinal,numsolutions=4)
solvejointvars = [jointvars[i] for i in isolvejointvars]
if len(solvejointvars) != 2:
raise self.CannotSolveError('need 2 joints')
log.info('ikfast lookat3d: %s',solvejointvars)
Paccum = self.Tee[0:3,3]
numvarsdone = 2
Positions = []
Positionsee = []
for i in range(len(Links)-1):
T = self.multiplyMatrix(Links[i:])
P = T[0:3,0:3]*basepos+T[0:3,3]
D = T[0:3,0:3]*basedir
hasvars = [self.has_any_symbols(P,v) or self.has_any_symbols(D,v) for v in solvejointvars]
if __builtin__.sum(hasvars) == numvarsdone:
Positions.append(P.cross(D))
Positionsee.append(Paccum.cross(D))
numvarsdone -= 1
Tinv = self.affineInverse(Links[i])
Paccum = Tinv[0:3,0:3]*Paccum+Tinv[0:3,3]
frontcond = (Links[-1][0:3,0:3]*basedir).dot(Paccum-(Links[-1][0:3,0:3]*basepos+Links[-1][0:3,3]))
for v in jointvars:
frontcond = frontcond.subs(self.Variable(v).subs)
endbranchtree = [AST.SolverStoreSolution (jointvars,checkgreaterzero=[frontcond],isHinge=[self.isHinge(var.name) for var in jointvars])]
AllEquations = self.buildEquationsFromTwoSides(Positions,Positionsee,jointvars,uselength=True)
self.checkSolvability(AllEquations,solvejointvars,self.freejointvars)
tree = self.solveAllEquations(AllEquations,curvars=solvejointvars,othersolvedvars = self.freejointvars[:],solsubs = self.freevarsubs[:],endbranchtree=endbranchtree)
tree = self.verifyAllEquations(AllEquations,solvejointvars,self.freevarsubs,tree)
chaintree = AST.SolverIKChainLookat3D([(jointvars[ijoint],ijoint) for ijoint in isolvejointvars], [(v,i) for v,i in izip(self.freejointvars,self.ifreejointvars)], Pee=self.Tee[0:3,3].subs(self.freevarsubs), jointtree=tree,Dfk=Tfinal[0,0:3].transpose(),Pfk=Tfinal[0:3,3])
chaintree.dictequations += self.ppsubs
return chaintree
def solveFullIK_Rotation3D(self,LinksRaw, jointvars, isolvejointvars, Rbaseraw=eye(3)):
Rbase = eye(4)
for i in range(3):
for j in range(3):
Rbase[i,j] = self.convertRealToRational(Rbaseraw[i,j])
Tfirstright = LinksRaw[-1]*Rbase
Links = LinksRaw[:-1]
LinksInv = [self.affineInverse(link) for link in Links]
Tfinal = self.multiplyMatrix(Links)
self.testconsistentvalues = self.computeConsistentValues(jointvars,Tfinal,numsolutions=4)
endbranchtree = [AST.SolverStoreSolution (jointvars,isHinge=[self.isHinge(var.name) for var in jointvars])]
solvejointvars = [jointvars[i] for i in isolvejointvars]
if len(solvejointvars) != 3:
raise self.CannotSolveError('need 3 joints')
log.info('ikfast rotation3d: %s',solvejointvars)
AllEquations = self.buildEquationsFromRotation(Links,self.Tee[0:3,0:3],solvejointvars,self.freejointvars)
self.checkSolvability(AllEquations,solvejointvars,self.freejointvars)
tree = self.solveAllEquations(AllEquations,curvars=solvejointvars[:],othersolvedvars=self.freejointvars,solsubs = self.freevarsubs[:],endbranchtree=endbranchtree)
tree = self.verifyAllEquations(AllEquations,solvejointvars,self.freevarsubs,tree)
return AST.SolverIKChainRotation3D([(jointvars[ijoint],ijoint) for ijoint in isolvejointvars], [(v,i) for v,i in izip(self.freejointvars,self.ifreejointvars)], (self.Tee[0:3,0:3] * self.affineInverse(Tfirstright)[0:3,0:3]).subs(self.freevarsubs), tree, Rfk = Tfinal[0:3,0:3] * Tfirstright[0:3,0:3])
def solveFullIK_TranslationLocalGlobal6D(self,LinksRaw, jointvars, isolvejointvars, Tgripperraw=eye(4)):
Tgripper = eye(4)
for i in range(4):
for j in range(4):
Tgripper[i,j] = self.convertRealToRational(Tgripperraw[i,j])
localpos = Matrix(3,1,[self.Tee[0,0],self.Tee[1,1],self.Tee[2,2]])
chain = self._solveFullIK_Translation3D(LinksRaw,jointvars,isolvejointvars,Tgripper[0:3,3]+Tgripper[0:3,0:3]*localpos,False)
chain.uselocaltrans = True
return chain
def solveFullIK_Translation3D(self,LinksRaw, jointvars, isolvejointvars, rawbasepos=Matrix(3,1,[S.Zero,S.Zero,S.Zero])):
basepos = Matrix(3,1,[self.convertRealToRational(x) for x in rawbasepos])
return self._solveFullIK_Translation3D(LinksRaw,jointvars,isolvejointvars,basepos)
def _solveFullIK_Translation3D(self,LinksRaw, jointvars, isolvejointvars, basepos,check=True):
Links = LinksRaw[:]
LinksInv = [self.affineInverse(link) for link in Links]
Tfinal = self.multiplyMatrix(Links)
Tfinal[0:3,3] = Tfinal[0:3,0:3]*basepos+Tfinal[0:3,3]
self.testconsistentvalues = self.computeConsistentValues(jointvars,Tfinal,numsolutions=4)
endbranchtree = [AST.SolverStoreSolution (jointvars,isHinge=[self.isHinge(var.name) for var in jointvars])]
solvejointvars = [jointvars[i] for i in isolvejointvars]
if len(solvejointvars) != 3:
raise self.CannotSolveError('need 3 joints')
log.info('ikfast translation3d: %s',solvejointvars)
Tbaseposinv = eye(4)
Tbaseposinv[0:3,3] = -basepos
T1links = [Tbaseposinv]+LinksInv[::-1]+[self.Tee]
T1linksinv = [self.affineInverse(Tbaseposinv)]+Links[::-1]+[self.Teeinv]
AllEquations = self.buildEquationsFromPositions(T1links,T1linksinv,solvejointvars,self.freejointvars,uselength=True)
if check:
self.checkSolvability(AllEquations,solvejointvars,self.freejointvars)
transtree = self.solveAllEquations(AllEquations,curvars=solvejointvars[:],othersolvedvars=self.freejointvars,solsubs = self.freevarsubs[:],endbranchtree=endbranchtree)
transtree = self.verifyAllEquations(AllEquations,solvejointvars,self.freevarsubs,transtree)
chaintree = AST.SolverIKChainTranslation3D([(jointvars[ijoint],ijoint) for ijoint in isolvejointvars], [(v,i) for v,i in izip(self.freejointvars,self.ifreejointvars)], Pee=self.Tee[0:3,3], jointtree=transtree, Pfk = Tfinal[0:3,3])
chaintree.dictequations += self.ppsubs
return chaintree
def solveFullIK_TranslationXY2D(self,LinksRaw, jointvars, isolvejointvars, rawbasepos=Matrix(2,1,[S.Zero,S.Zero])):
self.ppsubs = [] # disable since pz is not valid
self.pp = None
basepos = Matrix(2,1,[self.convertRealToRational(x) for x in rawbasepos])
Links = LinksRaw[:]
LinksInv = [self.affineInverse(link) for link in Links]
Tfinal = self.multiplyMatrix(Links)
Tfinal[0:2,3] = Tfinal[0:2,0:2]*basepos+Tfinal[0:2,3]
self.testconsistentvalues = self.computeConsistentValues(jointvars,Tfinal,numsolutions=4)
endbranchtree = [AST.SolverStoreSolution (jointvars,isHinge=[self.isHinge(var.name) for var in jointvars])]
solvejointvars = [jointvars[i] for i in isolvejointvars]
if len(solvejointvars) != 2:
raise self.CannotSolveError('need 2 joints')
log.info('ikfast translationxy2d: %s',solvejointvars)
Tbaseposinv = eye(4)
Tbaseposinv[2,2] = S.Zero
Tbaseposinv[0:2,3] = -basepos
Tbasepos = eye(4)
Tbasepos[2,2] = S.Zero
Tbasepos[0:2,3] = basepos
T1links = [Tbaseposinv]+LinksInv[::-1]+[self.Tee]
T1linksinv = [Tbasepos]+Links[::-1]+[self.Teeinv]
Taccum = eye(4)
numvarsdone = 1
Positions = []
Positionsee = []
for i in range(len(T1links)-1):
Taccum = T1linksinv[i]*Taccum
hasvars = [self.has_any_symbols(Taccum,v) for v in solvejointvars]
if __builtin__.sum(hasvars) == numvarsdone:
Positions.append(Taccum[0:2,3])
Positionsee.append(self.multiplyMatrix(T1links[(i+1):])[0:2,3])
numvarsdone += 1
if numvarsdone > 2:
# more than 2 variables is almost always useless
break
if len(Positions) == 0:
Positions.append(zeros((2,1)))
Positionsee.append(self.multiplyMatrix(T1links)[0:2,3])
AllEquations = self.buildEquationsFromTwoSides(Positions,Positionsee,solvejointvars+self.freejointvars,uselength=True)
self.checkSolvability(AllEquations,solvejointvars,self.freejointvars)
transtree = self.solveAllEquations(AllEquations,curvars=solvejointvars[:],othersolvedvars=self.freejointvars,solsubs = self.freevarsubs[:],endbranchtree=endbranchtree)
transtree = self.verifyAllEquations(AllEquations,solvejointvars,self.freevarsubs,transtree)
chaintree = AST.SolverIKChainTranslationXY2D([(jointvars[ijoint],ijoint) for ijoint in isolvejointvars], [(v,i) for v,i in izip(self.freejointvars,self.ifreejointvars)], Pee=self.Tee[0:2,3], jointtree=transtree, Pfk = Tfinal[0:2,3])
chaintree.dictequations += self.ppsubs
return chaintree
def solveFullIK_TranslationXYOrientation3D(self,LinksRaw, jointvars, isolvejointvars, rawbasepos=Matrix(2,1,[S.Zero,S.Zero]), rawangle=S.Zero):
raise self.CannotSolveError('TranslationXYOrientation3D not implemented yet')
def solveFullIK_Ray4D(self,LinksRaw, jointvars, isolvejointvars, rawbasedir=Matrix(3,1,[S.Zero,S.Zero,S.One]),rawbasepos=Matrix(3,1,[S.Zero,S.Zero,S.Zero])):
"""basedir,basepos needs to be filled with a direction and position of the ray to control"""
basedir = Matrix(3,1,[Real(x,30) for x in rawbasedir])
basepos = Matrix(3,1,[self.convertRealToRational(x) for x in rawbasepos])
basedir /= sqrt(basedir[0]*basedir[0]+basedir[1]*basedir[1]+basedir[2]*basedir[2])
for i in range(3):
basedir[i] = self.convertRealToRational(basedir[i])
basepos = basepos-basedir*basedir.dot(basepos)
Links = LinksRaw[:]
LinksInv = [self.affineInverse(link) for link in Links]
T = self.multiplyMatrix(Links)
Tfinal = zeros((4,4))
Tfinal[0,0:3] = (T[0:3,0:3]*basedir).transpose()
Tfinal[0:3,3] = T[0:3,0:3]*basepos+T[0:3,3]
self.testconsistentvalues = self.computeConsistentValues(jointvars,Tfinal,numsolutions=4)
endbranchtree = [AST.SolverStoreSolution (jointvars,isHinge=[self.isHinge(var.name) for var in jointvars])]
solvejointvars = [jointvars[i] for i in isolvejointvars]
if len(solvejointvars) != 4:
raise self.CannotSolveError('need 4 joints')
log.info('ikfast ray4d: %s',solvejointvars)
Pee = self.Tee[0:3,3]
Dee = self.Tee[0,0:3].transpose()
numvarsdone = 2
Positions = []
Positionsee = []
for i in range(len(Links)-1):
T = self.multiplyMatrix(Links[i:])
P = T[0:3,0:3]*basepos+T[0:3,3]
D = T[0:3,0:3]*basedir
hasvars = [self.has_any_symbols(P,v) or self.has_any_symbols(D,v) for v in solvejointvars]
if __builtin__.sum(hasvars) == numvarsdone:
Positions.append(P.cross(D))
Positionsee.append(Pee.cross(Dee))
Positions.append(D)
Positionsee.append(Dee)
break
Tinv = self.affineInverse(Links[i])
Pee = Tinv[0:3,0:3]*Pee+Tinv[0:3,3]
Dee = Tinv[0:3,0:3]*Dee
AllEquations = self.buildEquationsFromTwoSides(Positions,Positionsee,jointvars,uselength=True)
self.checkSolvability(AllEquations,solvejointvars,self.freejointvars)
#try:
tree = self.solveAllEquations(AllEquations,curvars=solvejointvars[:],othersolvedvars = self.freejointvars[:],solsubs = self.freevarsubs[:],endbranchtree=endbranchtree)
#except self.CannotSolveError:
# build the raghavan/roth equations and solve with higher power methods
# pass
tree = self.verifyAllEquations(AllEquations,solvejointvars,self.freevarsubs,tree)
chaintree = AST.SolverIKChainRay([(jointvars[ijoint],ijoint) for ijoint in isolvejointvars], [(v,i) for v,i in izip(self.freejointvars,self.ifreejointvars)], Pee=self.Tee[0:3,3].subs(self.freevarsubs), Dee=self.Tee[0,0:3].transpose().subs(self.freevarsubs),jointtree=tree,Dfk=Tfinal[0,0:3].transpose(),Pfk=Tfinal[0:3,3])
chaintree.dictequations += self.ppsubs
return chaintree
def solveFullIK_TranslationDirection5D(self, LinksRaw, jointvars, isolvejointvars, rawbasedir=Matrix(3,1,[S.Zero,S.Zero,S.One]),rawbasepos=Matrix(3,1,[S.Zero,S.Zero,S.Zero])):
"""Solves 3D translation + 3D direction
"""
basepos = Matrix(3,1,[self.convertRealToRational(x) for x in rawbasepos])
basedir = Matrix(3,1,[Real(x,30) for x in rawbasedir])
basedir /= sqrt(basedir[0]*basedir[0]+basedir[1]*basedir[1]+basedir[2]*basedir[2])
for i in range(3):
basedir[i] = self.convertRealToRational(basedir[i],5)
basedir /= sqrt(basedir[0]*basedir[0]+basedir[1]*basedir[1]+basedir[2]*basedir[2]) # unfortunately have to do it again...
offsetdist = basedir.dot(basepos)
basepos = basepos-basedir*offsetdist
Links = LinksRaw[:]
endbranchtree = [AST.SolverStoreSolution (jointvars,isHinge=[self.isHinge(var.name) for var in jointvars])]
numzeros = int(basedir[0]==S.Zero) + int(basedir[1]==S.Zero) + int(basedir[2]==S.Zero)
# if numzeros < 2:
# try:
# log.info('try to rotate the last joint so that numzeros increases')
# assert(not self.has_any_symbols(Links[-1],*solvejointvars))
# localdir = Links[-1][0:3,0:3]*basedir
# localpos = Links[-1][0:3,0:3]*basepos+Links[-1][0:3,3]
# AllEquations = Links[-2][0:3,0:3]*localdir
# tree=self.solveAllEquations(AllEquations,curvars=solvejointvars[-1:],othersolvedvars = [],solsubs = [],endbranchtree=[])
# offset = tree[0].jointeval[0]
# endbranchtree[0].offsetvalues = [S.Zero]*len(solvejointvars)
# endbranchtree[0].offsetvalues[-1] = offset
# Toffset = Links[-2].subs(solvejointvars[-1],offset).evalf()
# localdir2 = Toffset[0:3,0:3]*localdir
# localpos2 = Toffset[0:3,0:3]*localpos+Toffset[0:3,3]
# Links[-1]=eye(4)
# for i in range(3):
# basedir[i] = self.convertRealToRational(localdir2[i])
# basedir /= sqrt(basedir[0]*basedir[0]+basedir[1]*basedir[1]+basedir[2]*basedir[2]) # unfortunately have to do it again...
# basepos = Matrix(3,1,[self.convertRealToRational(x) for x in localpos2])
# except Exception, e:
# print 'failed to rotate joint correctly',e
LinksInv = [self.affineInverse(link) for link in Links]
T = self.multiplyMatrix(Links)
Tfinal = zeros((4,4))
Tfinal[0,0:3] = (T[0:3,0:3]*basedir).transpose()
Tfinal[0:3,3] = T[0:3,0:3]*basepos+T[0:3,3]
self.testconsistentvalues = self.computeConsistentValues(jointvars,Tfinal,numsolutions=4)
solvejointvars = [jointvars[i] for i in isolvejointvars]
if len(solvejointvars) != 5:
raise self.CannotSolveError('need 5 joints')
log.info('ikfast translation direction 5d: %s',solvejointvars)
# if last two axes are intersecting, can divide computing position and direction
ilinks = [i for i,Tlink in enumerate(Links) if self.has_any_symbols(Tlink,*solvejointvars)]
T = self.multiplyMatrix(Links[ilinks[-2]:])
P = T[0:3,0:3]*basepos+T[0:3,3]
D = T[0:3,0:3]*basedir
tree = None
if not self.has_any_symbols(P,*solvejointvars):
Tposinv = eye(4)
Tposinv[0:3,3] = -P
T0links=[Tposinv]+Links[:ilinks[-2]]
try:
log.info('last 2 axes are intersecting')
tree = self.solve5DIntersectingAxes(T0links,basepos,D,solvejointvars,endbranchtree)
except self.CannotSolveError, e:
log.warn('%s', e)
if tree is None:
rawpolyeqs2 = [None]*len(solvejointvars)
coupledsolutions = None
endbranchtree2 = []
for solvemethod in [self.solveLiWoernleHiller, self.solveKohliOsvatic, self.solveManochaCanny]:
if coupledsolutions is not None:
break
for index in [2,3]:
T0links=LinksInv[:ilinks[index]][::-1]
T0 = self.multiplyMatrix(T0links)
T1links=Links[ilinks[index]:]
T1 = self.multiplyMatrix(T1links)
p0 = T0[0:3,0:3]*self.Tee[0:3,3]+T0[0:3,3]
p1 = T1[0:3,0:3]*basepos+T1[0:3,3]
l0 = T0[0:3,0:3]*self.Tee[0,0:3].transpose()
l1 = T1[0:3,0:3]*basedir
if rawpolyeqs2[index] is None:
rawpolyeqs2[index] = self.buildRaghavanRothEquations(p0,p1,l0,l1,solvejointvars)
try:
coupledsolutions,usedvars = solvemethod(rawpolyeqs2[index],solvejointvars,endbranchtree=[AST.SolverSequence([endbranchtree2])])
break
except self.CannotSolveError, e:
log.warn('%s', e)
continue
if coupledsolutions is None:
raise self.CannotSolveError('raghavan roth equations too complex')
log.info('solved coupled variables: %s',usedvars)
if len(usedvars) < len(solvejointvars):
AllEquations = []
for i in range(3):
AllEquations.append(self.simplifyTransform(p0[i]-p1[i]).expand())
AllEquations.append(self.simplifyTransform(l0[i]-l1[i]).expand())
self.sortComplexity(AllEquations)
curvars=solvejointvars[:]
solsubs = self.freevarsubs[:]
for var in usedvars:
curvars.remove(var)
solsubs += self.Variable(var).subs
self.checkSolvability(AllEquations,curvars,self.freejointvars+usedvars)
endbranchtree2 += self.solveAllEquations(AllEquations,curvars=curvars,othersolvedvars = self.freejointvars+usedvars,solsubs = solsubs,endbranchtree=endbranchtree)
tree = self.verifyAllEquations(AllEquations,curvars,solsubs,coupledsolutions)
else:
endbranchtree2 += endbranchtree
tree = coupledsolutions
chaintree = AST.SolverIKChainRay([(jointvars[ijoint],ijoint) for ijoint in isolvejointvars], [(v,i) for v,i in izip(self.freejointvars,self.ifreejointvars)], Pee=(self.Tee[0:3,3]-self.Tee[0,0:3].transpose()*offsetdist).subs(self.freevarsubs), Dee=self.Tee[0,0:3].transpose().subs(self.freevarsubs),jointtree=tree,Dfk=Tfinal[0,0:3].transpose(),Pfk=Tfinal[0:3,3],is5dray=True)
chaintree.dictequations += self.ppsubs
return chaintree
def solve5DIntersectingAxes(self, T0links, basepos, D, solvejointvars, endbranchtree):
LinksInv = [self.affineInverse(T) for T in T0links]
T0 = self.multiplyMatrix(T0links)
Tbaseposinv = eye(4)
Tbaseposinv[0:3,3] = -basepos
T1links = [Tbaseposinv]+LinksInv[::-1]+[self.Tee]
T1linksinv = [self.affineInverse(Tbaseposinv)]+T0links[::-1]+[self.Teeinv]
AllEquations = self.buildEquationsFromPositions(T1links,T1linksinv,solvejointvars,self.freejointvars,uselength=True)
transvars = [v for v in solvejointvars if self.has_any_symbols(T0,v)]
self.checkSolvability(AllEquations,transvars,self.freejointvars)
dirtree = []
newendbranchtree = [AST.SolverSequence([dirtree])]
transtree = self.solveAllEquations(AllEquations,curvars=transvars[:],othersolvedvars=self.freejointvars,solsubs = self.freevarsubs[:],endbranchtree=newendbranchtree)
transtree = self.verifyAllEquations(AllEquations,solvejointvars,self.freevarsubs,transtree)
rotvars = [v for v in solvejointvars if self.has_any_symbols(D,v)]
solsubs = self.freevarsubs[:]
for v in transvars:
solsubs += self.Variable(v).subs
AllEquations = self.buildEquationsFromTwoSides([D],[T0[0:3,0:3]*self.Tee[0,0:3].transpose()],solvejointvars,uselength=False)
self.checkSolvability(AllEquations,rotvars,self.freejointvars+transvars)
localdirtree = self.solveAllEquations(AllEquations,curvars=rotvars[:],othersolvedvars = self.freejointvars+transvars,solsubs=solsubs,endbranchtree=endbranchtree)
dirtree += self.verifyAllEquations(AllEquations,rotvars,solsubs,localdirtree)
return transtree
def solveFullIK_6D(self, LinksRaw, jointvars, isolvejointvars,Tgripperraw=eye(4)):
"""Solves the full 6D translatio + rotation IK
"""
Tgripper = eye(4)
for i in range(4):
for j in range(4):
Tgripper[i,j] = self.convertRealToRational(Tgripperraw[i,j])
Tfirstright = LinksRaw[-1]*Tgripper
Links = LinksRaw[:-1]
LinksInv = [self.affineInverse(link) for link in Links]
Tfinal = self.multiplyMatrix(Links)
self.testconsistentvalues = self.computeConsistentValues(jointvars,Tfinal,numsolutions=4)
endbranchtree = [AST.SolverStoreSolution (jointvars,isHinge=[self.isHinge(var.name) for var in jointvars])]
solvejointvars = [jointvars[i] for i in isolvejointvars]
if len(solvejointvars) != 6:
raise self.CannotSolveError('need 6 joints')
log.info('ikfast 6d: %s',solvejointvars)
tree = None
for T0links,T1links,transvars,rotvars,solveRotationFirst in self.iterateThreeIntersectingAxes(solvejointvars,Links, LinksInv):
try:
tree = self.solve6DIntersectingAxes(T0links,T1links,transvars,rotvars,solveRotationFirst=solveRotationFirst, endbranchtree=endbranchtree)
break
except (self.CannotSolveError,self.IKFeasibilityError), e:
log.warn('%s',e)
if tree is None:
for T0links, T1links in self.iterateThreeNonIntersectingAxes(solvejointvars,Links, LinksInv):
try:
tree = self.solveFullIK_6DGeneral(T0links, T1links, solvejointvars, endbranchtree)
break
except (self.CannotSolveError,self.IKFeasibilityError), e:
log.warn('%s',e)
if tree is None:
raise self.CannotSolveError('cannot solve 6D mechanism!')
chaintree = AST.SolverIKChainTransform6D([(jointvars[ijoint],ijoint) for ijoint in isolvejointvars], [(v,i) for v,i in izip(self.freejointvars,self.ifreejointvars)], (self.Tee * self.affineInverse(Tfirstright)).subs(self.freevarsubs), tree,Tfk=Tfinal*Tfirstright)
chaintree.dictequations += self.ppsubs+self.npxyzsubs+self.rxpsubs
return chaintree
def iterateThreeIntersectingAxes(self, solvejointvars, Links, LinksInv):
"""Search for 3 consectuive intersecting axes. If a robot has this condition, it makes a lot of IK computations simpler.
"""
ilinks = [i for i,Tlink in enumerate(Links) if self.has_any_symbols(Tlink,*solvejointvars)]
for i in range(len(ilinks)-2):
startindex = ilinks[i]
endindex = ilinks[i+2]+1
T0links = Links[startindex:endindex]
T0 = self.multiplyMatrix(T0links)
solveRotationFirst = None
if not self.has_any_symbols(T0[:3,3],*solvejointvars):
T1links = LinksInv[:startindex][::-1]
T1links.append(self.Tee)
T1links += LinksInv[endindex:][::-1]
solveRotationFirst = False
else:
T0links = LinksInv[startindex:endindex][::-1]
T0 = self.multiplyMatrix(T0links)
if not self.has_any_symbols(T0[:3,3],*solvejointvars):
T1links = Links[endindex:]
T1links.append(self.Teeinv)
T1links += Links[:startindex]
solveRotationFirst = False
if solveRotationFirst is not None:
rotvars = []
transvars = []
for svar in solvejointvars:
if self.has_any_symbols(T0,svar):
rotvars.append(svar)
else:
transvars.append(svar)
if len(rotvars) == 3 and len(transvars) == 3:
log.info('found 3 consecutive intersecting axes links[%d:%d], rotvars=%s, translationvars=%s',startindex, endindex, rotvars,transvars)
yield T0links,T1links,transvars,rotvars,solveRotationFirst
def iterateThreeNonIntersectingAxes(self, solvejointvars, Links, LinksInv):
"""check for three consecutive non-intersecting axes.
if several points exist, so have to choose one that is least complex?
"""
ilinks = [i for i,Tlink in enumerate(Links) if self.has_any_symbols(Tlink,*solvejointvars)]
usedindices = []
for imode in range(2):
for i in range(len(ilinks)-2):
if i in usedindices:
continue
startindex = ilinks[i]
endindex = ilinks[i+2]+1
p0 = self.multiplyMatrix(Links[ilinks[i]:ilinks[i+1]])[0:3,3]
p1 = self.multiplyMatrix(Links[ilinks[i+1]:ilinks[i+2]])[0:3,3]
has0 = self.has_any_symbols(p0,*solvejointvars)
has1 = self.has_any_symbols(p1,*solvejointvars)
if (imode == 0 and has0 and has1) or (imode == 1 and (has0 or has1)):
T0links = Links[startindex:endindex]
T1links = LinksInv[:startindex][::-1]
T1links.append(self.Tee)
T1links += LinksInv[endindex:][::-1]
usedindices.append(i)
usedvars = [var for var in solvejointvars if any([self.has_any_symbols(T0,var) for T0 in T0links])]
log.info('found 3 consecutive non-intersecting axes links[%d:%d], vars=%s',startindex,endindex,str(usedvars))
yield T0links, T1links
def solve6DIntersectingAxes(self, T0links, T1links, transvars,rotvars,solveRotationFirst,endbranchtree):
"""Solve 6D equations using fact that 3 axes are intersecting. The 3 intersecting axes are all part of T0links and will be used to compute the rotation of the robot. The other 3 axes are part of T1links and will be used to first compute the position.
"""
assert(len(transvars)==3 and len(rotvars) == 3)
T0 = self.multiplyMatrix(T0links)
T0posoffset = eye(4)
T0posoffset[0:3,3] = -T0[0:3,3]
T0links = [T0posoffset] + T0links
T1links = [T0posoffset] + T1links
T1 = self.multiplyMatrix(T1links)
othersolvedvars = rotvars+self.freejointvars if solveRotationFirst else self.freejointvars[:]
T1linksinv = [self.affineInverse(T) for T in T1links]
AllEquations = self.buildEquationsFromPositions(T1links,T1linksinv,transvars,othersolvedvars,uselength=True)
self.checkSolvability(AllEquations,transvars,self.freejointvars)
rottree = []
if solveRotationFirst:
newendbranchtree = endbranchtree
else:
newendbranchtree = [AST.SolverSequence([rottree])]
curvars = transvars[:]
solsubs=self.freevarsubs[:]
transtree = self.solveAllEquations(AllEquations,curvars=curvars,othersolvedvars=othersolvedvars[:],solsubs=solsubs,endbranchtree=newendbranchtree)
transtree = self.verifyAllEquations(AllEquations,rotvars if solveRotationFirst else transvars+rotvars,self.freevarsubs[:],transtree)
solvertree= []
solvedvarsubs = self.freevarsubs[:]
if solveRotationFirst:
storesolutiontree = transtree
else:
solvertree += transtree
storesolutiontree = endbranchtree
for tvar in transvars:
solvedvarsubs += self.Variable(tvar).subs
oldglobalsymbols = self.globalsymbols[:]
try:
T1sub = T1.subs(solvedvarsubs)
Ree = zeros((3,3))
for i in range(3):
for j in range(3):
Ree[i,j] = Symbol('new_r%d%d'%(i,j))
self.globalsymbols.append((Ree[i,j],T1sub[i,j]))
othersolvedvars = self.freejointvars if solveRotationFirst else transvars+self.freejointvars
AllEquations = self.buildEquationsFromRotation(T0links,Ree,rotvars,othersolvedvars)
self.checkSolvability(AllEquations,rotvars,othersolvedvars)
currotvars = rotvars[:]
rottree += self.solveAllEquations(AllEquations,curvars=currotvars,othersolvedvars=othersolvedvars,solsubs=self.freevarsubs[:],endbranchtree=storesolutiontree)
if len(rottree) == 0:
raise self.CannotSolveError('could not solve for all rotation variables: %s'%str(rotvars))
if solveRotationFirst:
solvertree.append(AST.SolverRotation(T1sub, rottree))
else:
rottree[:] = [AST.SolverRotation(T1sub, rottree[:])]
return solvertree
finally:
self.globalsymbols = oldglobalsymbols
def solveFullIK_6DGeneral(self, T0links, T1links, solvejointvars, endbranchtree):
"""Solve 6D equations of a general kinematics structure.
This method only works if there exist 3 consecutive joints that do not always intersect!
"""
rawpolyeqs2 = [None,None]
coupledsolutions = None
leftovervarstree = []
for solvemethod in [self.solveLiWoernleHiller, self.solveKohliOsvatic, self.solveManochaCanny]:
if coupledsolutions is not None:
break
for j in range(2):
if rawpolyeqs2[j] is None:
if j == 0:
# invert, this seems to always give simpler solutions, so prioritize it
T0 = self.affineSimplify(self.multiplyMatrix([self.affineInverse(T) for T in T0links][::-1]))
T1 = self.affineSimplify(self.multiplyMatrix([self.affineInverse(T) for T in T1links][::-1]))
else:
T0 = self.affineSimplify(self.multiplyMatrix(T0links))
T1 = self.affineSimplify(self.multiplyMatrix(T1links))
rawpolyeqs,numminvars = self.buildRaghavanRothEquationsFromMatrix(T0,T1,solvejointvars)
if numminvars <= 5 or len(rawpolyeqs[0][1].symbols) <= 6:
rawpolyeqs2[j] = rawpolyeqs
try:
if rawpolyeqs2[j] is not None:
coupledsolutions,usedvars = solvemethod(rawpolyeqs2[j],solvejointvars,endbranchtree=[AST.SolverSequence([leftovervarstree])])
break
except self.CannotSolveError, e:
log.warn('%s',e)
continue
if coupledsolutions is None:
raise self.CannotSolveError('6D general method failed, raghavan roth equations might be too complex')
log.info('solved coupled variables: %s',usedvars)
AllEquations = []
for i in range(3):
for j in range(4):
AllEquations.append(self.simplifyTransform(T0[i,j]-T1[i,j]))
self.sortComplexity(AllEquations)
curvars=solvejointvars[:]
solsubs = self.freevarsubs[:]
for var in usedvars:
curvars.remove(var)
solsubs += self.Variable(var).subs
self.checkSolvability(AllEquations,curvars,self.freejointvars+usedvars)
leftovervarstree += self.solveAllEquations(AllEquations,curvars=curvars,othersolvedvars = self.freejointvars+usedvars,solsubs = solsubs,endbranchtree=endbranchtree)
return coupledsolutions
def solveFullIK_TranslationAxisAngle4D(self, LinksRaw, jointvars, isolvejointvars, rawbasedir=Matrix(3,1,[S.One,S.Zero,S.Zero]),rawbasepos=Matrix(3,1,[S.Zero,S.Zero,S.Zero]),rawglobaldir=Matrix(3,1,[S.Zero,S.Zero,S.One]), rawnormaldir=None):
"""Solves 3D translation + Angle with respect to X-axis
"""
globaldir = Matrix(3,1,[Real(x,30) for x in rawglobaldir])
globaldir /= sqrt(globaldir[0]*globaldir[0]+globaldir[1]*globaldir[1]+globaldir[2]*globaldir[2])
for i in range(3):
globaldir[i] = self.convertRealToRational(globaldir[i],5)
iktype = None
if rawnormaldir is not None:
normaldir = Matrix(3,1,[Real(x,30) for x in rawnormaldir])
binormaldir = normaldir.cross(globaldir).transpose()
if globaldir[0] == S.One and normaldir[2] == S.One:
iktype = IkType.TranslationXAxisAngleZNorm4D
elif globaldir[1] == S.One and normaldir[0] == S.One:
iktype = IkType.TranslationYAxisAngleXNorm4D
elif globaldir[2] == S.One and normaldir[1] == S.One:
iktype = IkType.TranslationZAxisAngleYNorm4D
else:
normaldir = None
if globaldir[0] == S.One:
iktype = IkType.TranslationXAxisAngle4D
elif globaldir[1] == S.One:
iktype = IkType.TranslationYAxisAngle4D
elif globaldir[2] == S.One:
iktype = IkType.TranslationZAxisAngle4D
if iktype is None:
raise ValueError('currently globaldir can only be one of the x,y,z axes')
basepos = Matrix(3,1,[self.convertRealToRational(x) for x in rawbasepos])
basedir = Matrix(3,1,[Real(x,30) for x in rawbasedir])
L = sqrt(basedir[0]*basedir[0]+basedir[1]*basedir[1]+basedir[2]*basedir[2])
basedir /= L
for i in range(3):
basedir[i] = self.convertRealToRational(basedir[i],5)
basedir /= sqrt(basedir[0]*basedir[0]+basedir[1]*basedir[1]+basedir[2]*basedir[2]) # unfortunately have to do it again...
Links = LinksRaw[:]
endbranchtree = [AST.SolverStoreSolution (jointvars,isHinge=[self.isHinge(var.name) for var in jointvars])]
LinksInv = [self.affineInverse(link) for link in Links]
Tallmult = self.multiplyMatrix(Links)
Tfinal = zeros((4,4))
if normaldir is None:
Tfinal[0,0] = acos(globaldir.dot(Tallmult[0:3,0:3]*basedir))
else:
Tfinal[0,0] = atan2(binormaldir.dot(Tallmult[0:3,0:3]*basedir), globaldir.dot(Tallmult[0:3,0:3]*basedir))
Tfinal[0:3,3] = Tallmult[0:3,0:3]*basepos+Tallmult[0:3,3]
self.testconsistentvalues = self.computeConsistentValues(jointvars,Tfinal,numsolutions=4)
solvejointvars = [jointvars[i] for i in isolvejointvars]
if len(solvejointvars) != 4:
raise self.CannotSolveError('need 4 joints')
log.info('ikfast translation axis 4d, globaldir=%s, basedir=%s: %s',globaldir, basedir, solvejointvars)
# if last two axes are intersecting, can divide computing position and direction
ilinks = [i for i,Tlink in enumerate(Links) if self.has_any_symbols(Tlink,*solvejointvars)]
Tbaseposinv = eye(4)
Tbaseposinv[0:3,3] = -basepos
T1links = [Tbaseposinv]+LinksInv[::-1]+[self.Tee]
T1linksinv = [self.affineInverse(Tbaseposinv)]+Links[::-1]+[self.Teeinv]
AllEquations = self.buildEquationsFromPositions(T1links,T1linksinv,solvejointvars,self.freejointvars,uselength=True)
for index in range(len(ilinks)):
T0links=LinksInv[:ilinks[index]][::-1]
T0 = self.multiplyMatrix(T0links)
T1links=Links[ilinks[index]:]
T1 = self.multiplyMatrix(T1links)
globaldir2 = T0[0:3,0:3]*globaldir
basedir2 = T1[0:3,0:3]*basedir
eq = self.simplifyTransform(self.trigsimp(globaldir2.dot(basedir2),solvejointvars))-cos(self.Tee[0])
if self.isExpressionUnique(AllEquations,eq) and self.isExpressionUnique(AllEquations,-eq):
AllEquations.append(eq)
if normaldir is not None:
binormaldir2 = T0[0:3,0:3]*binormaldir
eq = self.simplifyTransform(self.trigsimp(binormaldir2.dot(basedir2),solvejointvars))-sin(self.Tee[0])
if self.isExpressionUnique(AllEquations,eq) and self.isExpressionUnique(AllEquations,-eq):
AllEquations.append(eq)
# check if planar with respect to normaldir
extravar = None
if normaldir is not None:
if Tallmult[0:3,0:3]*normaldir == normaldir:
Tnormaltest = self.rodrigues(normaldir,pi/2)
# planar, so we know that the sum of all hinge joint angles equals the final angle
# and can use this fact to substitute one angle in terms of the other values
angles = []
for solvejoint in solvejointvars:
if self.isHinge(solvejoint.name):
Tall0 = Tallmult[0:3,0:3].subs(solvejoint,S.Zero)
Tall1 = Tallmult[0:3,0:3].subs(solvejoint,pi/2)
if Tall0*Tnormaltest-Tall1:
angles.append(solvejoint)
else:
angles.append(-solvejoint)
Tzero = Tallmult.subs([(a,S.Zero) for a in angles])
zeroangle = atan2(binormaldir.dot(Tzero[0:3,0:3]*basedir), globaldir.dot(Tzero[0:3,0:3]*basedir))
eqangles = self.Tee[0]-zeroangle
for a in angles[:-1]:
eqangles -= a
extravar = (angles[-1],eqangles)
coseq = cos(eqangles).expand(trig=True)
sineq = sin(eqangles).expand(trig=True)
AllEquationsOld = AllEquations
AllEquations = [self.trigsimp(eq.subs([(cos(angles[-1]),coseq),(sin(angles[-1]),sineq)]).expand(),solvejointvars) for eq in AllEquationsOld]
solvejointvars.remove(angles[-1])
self.sortComplexity(AllEquations)
endbranchtree = [AST.SolverStoreSolution (jointvars,isHinge=[self.isHinge(var.name) for var in jointvars])]
if extravar is not None:
solution=AST.SolverSolution(extravar[0].name, jointeval=[extravar[1]],isHinge=self.isHinge(extravar[0].name))
endbranchtree.insert(0,solution)
try:
tree = self.solveAllEquations(AllEquations,curvars=solvejointvars[:],othersolvedvars=self.freejointvars,solsubs=self.freevarsubs[:],endbranchtree=endbranchtree)
tree = self.verifyAllEquations(AllEquations,solvejointvars,self.freevarsubs,tree)
except self.CannotSolveError:
othersolvedvars = self.freejointvars[:]
solsubs = self.freevarsubs[:]
freevarinvsubs = [(f[1],f[0]) for f in self.freevarsubs]
solinvsubs = [(f[1],f[0]) for f in solsubs]
# single variable solutions
solutions = []
for curvar in solvejointvars:
othervars = [var for var in solvejointvars if var != curvar]
curvarsym = self.Variable(curvar)
raweqns = []
for e in AllEquations:
if (len(othervars) == 0 or not e.has_any_symbols(*othervars)) and e.has_any_symbols(curvar,curvarsym.htvar,curvarsym.cvar,curvarsym.svar):
eq = e.subs(self.freevarsubs+solsubs)
if self.isExpressionUnique(raweqns,eq) and self.isExpressionUnique(raweqns,-eq):
raweqns.append(eq)
if len(raweqns) > 0:
try:
rawsolutions=self.solveSingleVariable(raweqns,curvar,othersolvedvars,unknownvars=solvejointvars)
for solution in rawsolutions:
self.solutionComplexity(solution,othersolvedvars,solvejointvars)
solutions.append((solution,curvar))
except self.CannotSolveError:
pass
firstsolution, firstvar = solutions[0]
othersolvedvars.append(firstvar)
solsubs += self.Variable(firstvar).subs
curvars=solvejointvars[:]
curvars.remove(firstvar)
trigsubs = []
polysubs = []
polyvars = []
for v in curvars:
if self.isHinge(v.name):
var = self.Variable(v)
polysubs += [(cos(v),var.cvar),(sin(v),var.svar)]
polyvars += [var.cvar,var.svar]
trigsubs.append((var.svar**2,1-var.cvar**2))
else:
polyvars.append(v)
polysubsinv = [(b,a) for a,b in polysubs]
rawpolyeqs = [Poly(eq.subs(polysubs),*polyvars).subs(trigsubs) for eq in AllEquations if eq.has_any_symbols(*curvars)]
dummys = []
dummysubs = []
dummysubs2 = []
dummyvars = []
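# Half-angle (Weierstrass) substitution: with t = tan(q/2),
#   cos(q) = (1-t**2)/(1+t**2),  sin(q) = 2*t/(1+t**2),
# so each cos/sin pair collapses into a single rational function of t. The
# resulting polynomial system in the t variables is solved dialytically below.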
for i in range(0,len(polyvars),2):
dummy = Symbol('ht%s'%polyvars[i].name[1:])
# [0] - cos, [1] - sin
dummys.append(dummy)
dummysubs += [(polyvars[i],(1-dummy**2)/(1+dummy**2)),(polyvars[i+1],2*dummy/(1+dummy**2))]
var = polyvars[i].subs(self.invsubs).args[0]
dummysubs2.append((var,2*atan(dummy)))
dummyvars.append((dummy,tan(0.5*var)))
newreducedeqs = []
for peq in rawpolyeqs:
maxdenom = [0]*(len(polyvars)/2)
for monoms in peq.iter_monoms():
for i in range(len(maxdenom)):
maxdenom[i] = max(maxdenom[i],monoms[2*i]+monoms[2*i+1])
eqnew = S.Zero
for c,monoms in peq.iter_terms():
term = c
for i in range(len(polyvars)):
num,denom = fraction(dummysubs[i][1])
term *= num**monoms[i]
# the denoms for 0,1 and 2,3 are the same
for i in range(len(maxdenom)):
denom = fraction(dummysubs[2*i][1])[1]
term *= denom**(maxdenom[i]-monoms[2*i]-monoms[2*i+1])
eqnew += term
newreducedeqs.append(Poly(eqnew,*dummys))
newreducedeqs.sort(cmp=lambda x,y: len(x.monoms) - len(y.monoms))
ileftvar = 0
leftvar = dummys[ileftvar]
coupledvars = dummys[:]
coupledvars.pop(ileftvar)
exportcoeffeqs=None
for ioffset in range(len(newreducedeqs)):
try:
exportcoeffeqs,exportmonoms = self.solveDialytically(newreducedeqs[ioffset:],ileftvar)
log.info('ioffset %d'%ioffset)
break
except self.CannotSolveError, e:
log.debug('solveDialytically errors: %s',e)
if exportcoeffeqs is None:
raise self.CannotSolveError('failed to solveDialytically')
coupledsolution = AST.SolverCoeffFunction(jointnames=[v.name for v in curvars],jointeval=[v[1] for v in dummysubs2],jointevalcos=[dummysubs[2*i][1] for i in range(len(curvars))],jointevalsin=[dummysubs[2*i+1][1] for i in range(len(curvars))],isHinges=[self.isHinge(v.name) for v in curvars],exportvar=[v.name for v in dummys],exportcoeffeqs=exportcoeffeqs,exportfnname='solvedialyticpoly12qep',rootmaxdim=16)
self.usinglapack = True
tree = [firstsolution,coupledsolution]+ endbranchtree
# package final solution
chaintree = AST.SolverIKChainAxisAngle([(jointvars[ijoint],ijoint) for ijoint in isolvejointvars], [(v,i) for v,i in izip(self.freejointvars,self.ifreejointvars)], Pee=self.Tee[0:3,3].subs(self.freevarsubs), angleee=self.Tee[0,0].subs(self.freevarsubs),jointtree=tree,Pfk=Tfinal[0:3,3],anglefk=Tfinal[0,0],iktype=iktype)
chaintree.dictequations += self.ppsubs
return chaintree
def buildEquationsFromTwoSides(self,leftside, rightside, usedvars, uselength=True):
# try to shift all the constants of each Position expression to one side
for i in range(len(leftside)):
for j in range(leftside[i].shape[0]):
p = leftside[i][j]
pee = rightside[i][j]
pconstterm = None
peeconstterm = None
if p.is_Add:
pconstterm = [term for term in p.args if term.is_number]
elif p.is_number:
pconstterm = [p]
else:
continue
if pee.is_Add:
peeconstterm = [term for term in pee.args if term.is_number]
elif pee.is_number:
peeconstterm = [pee]
else:
continue
if len(pconstterm) > 0 and len(peeconstterm) > 0:
# shift it to the one that has the least terms
for term in peeconstterm if len(p.args) < len(pee.args) else pconstterm:
leftside[i][j] -= term
rightside[i][j] -= term
AllEquations = []
for i in range(len(leftside)):
for j in range(leftside[i].shape[0]):
e = self.trigsimp(leftside[i][j] - rightside[i][j],usedvars)
if self.codeComplexity(e) < 1500:
e = self.simplifyTransform(e)
if self.isExpressionUnique(AllEquations,e) and self.isExpressionUnique(AllEquations,-e):
AllEquations.append(e)
if uselength:
p2 = S.Zero
pe2 = S.Zero
for j in range(leftside[i].shape[0]):
p2 += leftside[i][j]**2
pe2 += rightside[i][j]**2
if self.codeComplexity(p2) < 1200 and self.codeComplexity(pe2) < 1200:
# sympy's trigsimp/customtrigsimp give up too easily
e = self.simplifyTransform(self.trigsimp(p2,usedvars)-self.trigsimp(pe2,usedvars))
if self.isExpressionUnique(AllEquations,e) and self.isExpressionUnique(AllEquations,-e):
AllEquations.append(e.expand())
else:
log.info('length equations too big, skipping %d,%d',self.codeComplexity(p2),self.codeComplexity(pe2))
self.sortComplexity(AllEquations)
return AllEquations
def buildEquationsFromPositions(self,T1links,T1linksinv,transvars,othersolvedvars,uselength=True):
Taccum = eye(4)
numvarsdone = 1
Positions = []
Positionsee = []
for i in range(len(T1links)-1):
Taccum = T1linksinv[i]*Taccum
hasvars = [self.has_any_symbols(Taccum,v) for v in transvars]
if __builtin__.sum(hasvars) == numvarsdone:
Positions.append(Taccum[0:3,3])
Positionsee.append(self.multiplyMatrix(T1links[(i+1):])[0:3,3])
numvarsdone += 1
if numvarsdone > 2:
# more than 2 variables is almost always useless
break
if len(Positions) == 0:
Positions.append(zeros((3,1)))
Positionsee.append(self.multiplyMatrix(T1links)[0:3,3])
return self.buildEquationsFromTwoSides(Positions,Positionsee,transvars+othersolvedvars,uselength=uselength)
def buildEquationsFromRotation(self,T0links,Ree,rotvars,othersolvedvars):
"""Ree is a 3x3 matrix
"""
Raccum = Ree
numvarsdone = 1
AllEquations = []
for i in range(len(T0links)-1):
Raccum = T0links[i][0:3,0:3].transpose()*Raccum # transpose is the inverse
hasvars = [self.has_any_symbols(Raccum,v) for v in rotvars]
if len(AllEquations) > 0 and __builtin__.sum(hasvars) >= len(rotvars):
break
if __builtin__.sum(hasvars) == numvarsdone:
R = self.multiplyMatrix(T0links[(i+1):])
for i in range(3):
for j in range(3):
AllEquations.append(self.trigsimp(Raccum[i,j]-R[i,j],othersolvedvars+rotvars))
numvarsdone += 1
self.sortComplexity(AllEquations)
return AllEquations
def buildRaghavanRothEquationsFromMatrix(self,T0,T1,solvejointvars):
"""Builds the 14 equations using only 5 unknowns. Method explained in [Raghavan1993]_. Basically take the position and one column/row so that the least number of variables are used.
.. [Raghavan1993] <NAME> and <NAME>, "Inverse Kinematics of the General 6R Manipulator and related Linkages", Journal of Mechanical Design, Volume 115, Issue 3, 1993.
"""
p0 = T0[0:3,3]
p1 = T1[0:3,3]
p=p0-p1
T = T0-T1
numminvars = 100000
for irow in range(3):
hasvar = [self.has_any_symbols(T[0:3,irow],var) or self.has_any_symbols(p,var) for var in solvejointvars]
numcurvars = __builtin__.sum(hasvar)
if numminvars > numcurvars and numcurvars > 0:
numminvars = numcurvars
l0 = T0[0:3,irow]
l1 = T1[0:3,irow]
hasvar = [self.has_any_symbols(T[irow,0:3],var) or self.has_any_symbols(p,var) for var in solvejointvars]
numcurvars = __builtin__.sum(hasvar)
if numminvars > numcurvars and numcurvars > 0:
numminvars = numcurvars
l0 = T0[irow,0:3].transpose()
l1 = T1[irow,0:3].transpose()
return self.buildRaghavanRothEquations(p0,p1,l0,l1,solvejointvars),numminvars
def buildRaghavanRothEquations(self,p0,p1,l0,l1,solvejointvars):
eqs = []
for i in range(3):
eqs.append([l0[i],l1[i]])
for i in range(3):
eqs.append([p0[i],p1[i]])
l0xp0 = l0.cross(p0)
l1xp1 = l1.cross(p1)
for i in range(3):
eqs.append([l0xp0[i],l1xp1[i]])
ppl0 = p0.dot(p0)*l0 - 2*l0.dot(p0)*p0
ppl1 = p1.dot(p1)*l1 - 2*l1.dot(p1)*p1
for i in range(3):
eqs.append([ppl0[i],ppl1[i]])
eqs.append([p0.dot(p0),p1.dot(p1)])
eqs.append([l0.dot(p0),l1.dot(p1)])
trigsubs = []
polysubs = []
polyvars = []
for v in solvejointvars:
polyvars.append(v)
if self.isHinge(v.name):
var = self.Variable(v)
polysubs += [(cos(v),var.cvar),(sin(v),var.svar)]
polyvars += [var.cvar,var.svar]
trigsubs.append((var.svar**2,1-var.cvar**2))
for v in self.freejointvars:
if self.isHinge(v.name):
trigsubs.append((sin(v)**2,1-cos(v)**2))
polysubsinv = [(b,a) for a,b in polysubs]
usedvars = []
for j in range(2):
usedvars.append([var for var in polyvars if any([eq[j].subs(polysubs).has_any_symbols(var) for eq in eqs])])
polyeqs = []
for i in range(len(eqs)):
polyeqs.append([None,None])
for j in range(2):
for i in range(len(eqs)):
poly0 = Poly(eqs[i][j].subs(polysubs),*usedvars[j]).subs(trigsubs)
poly1 = Poly(poly0.as_basic().expand().subs(trigsubs),*usedvars[j])
poly2 = Poly(S.Zero,*poly1.symbols)
for c,m in poly1.iter_terms():
cnew = self.simplifyTransform(c)
if cnew != S.Zero:
poly2 = poly2.add_term(cnew,m)
polyeqs[i][j] = poly2
# remove all fractions? having big integers could blow things up...
return polyeqs
def reduceBothSides(self,polyeqs):
"""Reduces a set of equations in 5 unknowns to a set of equations with 3 unknowns by solving for one side with respect to another.
The input is usually the output of buildRaghavanRothEquations.
"""
usedvars = [polyeqs[0][0].symbols, polyeqs[0][1].symbols]
reducedelayed = []
for j in range(2):
if len(usedvars[j]) <= 4:
leftsideeqs = [polyeq[j] for polyeq in polyeqs if polyeq[j].degree > 0]
rightsideeqs = [polyeq[1-j] for polyeq in polyeqs if polyeq[j].degree > 0]
if all([eq.degree <= 2 for eq in leftsideeqs]):
try:
numsymbolcoeffs, _computereducedequations = self.reduceBothSidesSymbolicallyDelayed(leftsideeqs,rightsideeqs)
reducedelayed.append([j,leftsideeqs,rightsideeqs,__builtin__.sum(numsymbolcoeffs), _computereducedequations])
except self.CannotSolveError:
continue
# sort with respect to least number of symbols
reducedelayed.sort(lambda x,y: x[3]-y[3])
reducedeqs = []
tree = []
for j,leftsideeqs,rightsideeqs,numsymbolcoeffs, _computereducedequations in reducedelayed:
try:
reducedeqs2 = _computereducedequations()
if len(reducedeqs2) == 0:
log.info('forcing matrix inverse (might take some time)')
reducedeqs2,tree = self.reduceBothSidesInverseMatrix(leftsideeqs,rightsideeqs)
if len(reducedeqs2) > 0:
# success, add all the reduced equations
reducedeqs += [[Poly(eq[0],*usedvars[j]),Poly(eq[1],*usedvars[1-j])] for eq in reducedeqs2] + [[Poly(S.Zero,*polyeq[j].symbols),polyeq[1-j]-polyeq[j].as_basic()] for polyeq in polyeqs if polyeq[j].degree == 0]
if len(reducedeqs) > 0:
break
except self.CannotSolveError,e:
print e
continue
if len(reducedeqs) > 0:
# check if any substitutions are needed
# for eq in reducedeqs:
# for j in range(2):
# eq[j] = Poly(eq[j].subs(trigsubs).as_basic().expand(),*eq[j].symbols)
polyeqs = reducedeqs
return [eq for eq in polyeqs if eq[0] != S.Zero or eq[1] != S.Zero],tree
def reduceBothSidesInverseMatrix(self,leftsideeqs,rightsideeqs):
"""solve a linear system inside the program since the matrix cannot be reduced so easily
"""
allmonomsleft = set()
for peq in leftsideeqs:
allmonomsleft = allmonomsleft.union(set(peq.monoms))
allmonomsleft = list(allmonomsleft)
allmonomsleft.sort()
if __builtin__.sum(allmonomsleft[0]) == 0:
allmonomsleft.pop(0)
if len(leftsideeqs) < len(allmonomsleft):
raise self.CannotSolveError('left side has too few equations for the number of variables %d<%d'%(len(leftsideeqs),len(allmonomsleft)))
systemcoeffs = []
for ileft,left in enumerate(leftsideeqs):
coeffs = [S.Zero]*len(allmonomsleft)
rank = 0
for c,m in left.iter_terms():
if __builtin__.sum(m) > 0:
if c != S.Zero:
rank += 1
coeffs[allmonomsleft.index(m)] = c
systemcoeffs.append((rank,ileft,coeffs))
# ideally we want to try all combinations of simple equations first until we arrive at linearly independent ones.
# However, in practice most of the first equations are linearly dependent and it takes a lot of time to prune all of them,
# so start at the most complex
systemcoeffs.sort(lambda x,y: -x[0]+y[0])
# sort left and right in the same way
leftsideeqs = [leftsideeqs[ileft] for rank,ileft,coeffs in systemcoeffs]
rightsideeqs = [rightsideeqs[ileft] for rank,ileft,coeffs in systemcoeffs]
A = zeros((len(allmonomsleft),len(allmonomsleft)))
Asymbols = []
for i in range(A.shape[0]):
Asymbols.append([Symbol('gconst%d_%d'%(i,j)) for j in range(A.shape[1])])
solution = None
for eqindices in combinations(range(len(leftsideeqs)),len(allmonomsleft)):
for i,index in enumerate(eqindices):
for k in range(len(allmonomsleft)):
A[i,k] = systemcoeffs[index][2][k]
det = self.det_bareis(A,*self.pvars)
if det == S.Zero:
continue
solution = AST.SolverMatrixInverse(A=A,Asymbols=Asymbols)
self.usinglapack = True
solution.checkforzeros = [self.removecommonexprs(det,onlygcd=False,onlynumbers=True)]
Aadj=A.adjugate() # too big to be useful for now, but can be used to see if any symbols are always 0
break
if solution is None:
raise self.CannotSolveError('failed to find %d linearly independent equations'%len(allmonomsleft))
reducedeqs = []
for i in range(len(allmonomsleft)):
var=S.One
for k,kpower in enumerate(allmonomsleft[i]):
if kpower != 0:
var *= leftsideeqs[0].symbols[k]**kpower
pright = S.Zero
for k in range(len(allmonomsleft)):
if Aadj[i,k] != S.Zero:
pright += Asymbols[i][k] * (rightsideeqs[eqindices[k]].as_basic()-leftsideeqs[eqindices[k]].coeff())
reducedeqs.append([var,pright.expand()])
othereqindices = set(range(len(leftsideeqs))).difference(set(eqindices))
for i in othereqindices:
# have to multiply just the constant by the determinant
neweq = rightsideeqs[i].as_basic()
for c,m in leftsideeqs[i].iter_terms():
if __builtin__.sum(m) > 0:
neweq -= c*reducedeqs[allmonomsleft.index(m)][1]
else:
neweq -= c
reducedeqs.append([S.Zero,neweq])
return reducedeqs, [solution]
# Adj=M[:,:-1].adjugate()
# #D=M[:,:-1].det()
# D=M[:,:-1].det()
# sols=-Adj*M[:,-1]
# solsubs = []
# for i,v in enumerate(newunknowns):
# newsol=sols[i].subs(localsymbols)
# solsubs.append((v,newsol))
# reducedeqs.append([v.subs(localsymbols)*D,newsol])
# othereqindices = set(range(len(newleftsideeqs))).difference(set(eqindices))
# for i in othereqindices:
# # have to multiply just the constant by the determinant
# newpoly = S.Zero
# for c,m in newleftsideeqs[i].iter_terms():
# monomindices = [index for index in range(len(newunknowns)) if m[index]>0]
# if len(monomindices) == 0:
# newpoly += c.subs(localsymbols)*D
# else:
# assert(len(monomindices)==1)
# newpoly += c.subs(localsymbols)*solsubs[monomindices[0]][1]
# reducedeqs.append([S.Zero,newpoly])
# break
# # there are too many symbols, so have to resolve to a little more involved method
# P,L,DD,U= M[:,:-1].LUdecompositionFF(*self.pvars)
# finalnums = S.One
# finaldenoms = S.One
# for i in range(len(newunknowns)):
# print i
# n,d = self.recursiveFraction(L[i,i]*U[i,i]/DD[i,i])
# finalnums *= n
# finaldenoms *= d
# n,d = self.recursiveFraction(DD[i,i])
# q,r = div(n,d,*pvars)
# DD[i,i] = q
# assert(r==S.Zero)
# det,r = div(finalnums,finaldenoms,*pvars)
# assert(r==S.Zero)
# b = -P*M[:,-1]
# y = [[b[0],L[0,0]]]
# for i in range(1,L.shape[0]):
# commondenom=y[0][1]
# for j in range(1,i):
# commondenom=lcm(commondenom,y[j][1],*pvars)
# accum = S.Zero
# for j in range(i):
# accum += L[i,j]*y[j][0]*(commondenom/y[j][1])
# res = (commondenom*b[i]-accum)/(commondenom*L[i,i])
# y.append(self.recursiveFraction(res))
#
# ynew = []
# for i in range(L.shape[0]):
# print i
# q,r=div(y[i][0]*DD[i,i],y[i][1],*pvars)
# print 'remainder: ',r
# ynew.append(q)
#
# x = [[ynew[-1],U[-1,-1]]]
# for i in range(U.shape[0]-2,-1,-1):
# commondenom=x[0][1]
# for j in range(i+1,U.shape[0]):
# commondenom=lcm(commondenom,x[j][1],*pvars)
# accum = S.Zero
# for j in range(i+1,U.shape[0]):
# accum += U[i,j]*x[j][0]*(commondenom/x[j][1])
# res = (commondenom*b[i]-accum)/(commondenom*U[i,i])
# x.append(self.recursiveFraction(res))
#
# print 'ignoring num symbols: ',numsymbols
# continue
def reduceBothSidesSymbolically(self,*args,**kwargs):
numsymbolcoeffs, _computereducedequations = self.reduceBothSidesSymbolicallyDelayed(*args,**kwargs)
return _computereducedequations()
def reduceBothSidesSymbolicallyDelayed(self,leftsideeqs,rightsideeqs,maxsymbols=10,usesymbols=True):
"""the left and right side of the equations need to have different variables
"""
assert(len(leftsideeqs)==len(rightsideeqs))
# first count the number of different monomials, then try to solve for each of them
symbolgen = cse_main.numbered_symbols('const')
vargen = cse_main.numbered_symbols('tempvar')
rightsidedummy = []
localsymbols = []
dividesymbols = []
allmonoms = dict()
for left,right in izip(leftsideeqs,rightsideeqs):
if right != S.Zero:
rightsidedummy.append(symbolgen.next())
localsymbols.append((rightsidedummy[-1],right.as_basic().expand()))
else:
rightsidedummy.append(S.Zero)
for m in left.iter_monoms():
if __builtin__.sum(m) > 0 and not m in allmonoms:
newvar = vargen.next()
localsymbols.append((newvar,Poly(S.Zero,*left.symbols).add_term(S.One,m).as_basic()))
allmonoms[m] = newvar
if len(leftsideeqs) < len(allmonoms):
raise self.CannotSolveError('left side has too few equations for the number of variables %d<%d'%(len(leftsideeqs),len(allmonoms)))
unknownvars = leftsideeqs[0].symbols
newleftsideeqs = []
numsymbolcoeffs = []
for left,right in izip(leftsideeqs,rightsidedummy):
left = left - right
newleft = Poly(S.Zero,*allmonoms.values())
leftcoeffs = [c for c,m in left.iter_terms() if __builtin__.sum(m) > 0]
allnumbers = all([c.is_number for c in leftcoeffs])
if usesymbols and not allnumbers:
# check if all the equations are within a constant from each other
# This is necessary since the current linear system solver cannot handle too many symbols.
reducedeq0,common0 = self.removecommonexprs(leftcoeffs[0],returncommon=True)
commonmults = [S.One]
for c in leftcoeffs[1:]:
reducedeq1,common1 = self.removecommonexprs(c,returncommon=True)
if self.equal(reducedeq1,reducedeq0):
commonmults.append(common1/common0)
elif self.equal(reducedeq1,-reducedeq0):
commonmults.append(-common1/common0)
else:
break
if len(commonmults) == len(leftcoeffs):
# divide everything by reducedeq0
index = 0
for c,m in left.iter_terms():
if __builtin__.sum(m) > 0:
newleft = newleft + commonmults[index]*allmonoms.get(m)
index += 1
else:
# look in the dividesymbols for something similar
gmult = None
for gsym,geq in dividesymbols:
greducedeq,gcommon = self.removecommonexprs(S.One/geq,returncommon=True)
if self.equal(greducedeq,reducedeq0):
gmult = gsym*(gcommon/common0)
break
elif self.equal(greducedeq,-reducedeq0):
gmult = gsym*(-gcommon/common0)
break
if gmult is None:
gmult = symbolgen.next()
dividesymbols.append((gmult,S.One/leftcoeffs[0]))
newc = (c*gmult).subs(localsymbols).expand()
sym = symbolgen.next()
localsymbols.append((sym,newc))
newleft = newleft + sym
numsymbolcoeffs.append(0)
newleftsideeqs.append(newleft)
continue
numsymbols = 0
for c,m in left.iter_terms():
polyvar = S.One
if __builtin__.sum(m) > 0:
polyvar = allmonoms.get(m)
if not c.is_number:
numsymbols += 1
newleft = newleft + c*polyvar
numsymbolcoeffs.append(numsymbols)
newleftsideeqs.append(newleft)
def _computereducedequations():
reducedeqs = []
# order the equations based on the number of terms
newleftsideeqs.sort(lambda x,y: len(x.monoms) - len(y.monoms))
newunknowns = newleftsideeqs[0].symbols
log.info('solving for all pairwise variables in %s, number of symbol coeffs are %s',unknownvars,__builtin__.sum(numsymbolcoeffs))
systemcoeffs = []
for eq in newleftsideeqs:
coeffs = []
for i,var in enumerate(newunknowns):
monom = [0]*len(newunknowns)
monom[i] = 1
coeffs.append(eq.coeff(*monom))
monom = [0]*len(newunknowns)
coeffs.append(-eq.coeff(*monom))
systemcoeffs.append(coeffs)
detvars = [s for s,v in localsymbols] + self.pvars
for eqindices in combinations(range(len(newleftsideeqs)),len(newunknowns)):
# very quick rejection
numsymbols = __builtin__.sum([numsymbolcoeffs[i] for i in eqindices])
if numsymbols > maxsymbols:
continue
M = Matrix([systemcoeffs[i] for i in eqindices])
det = self.det_bareis(M[:,:-1], *detvars)
if det == S.Zero:
continue
try:
eqused = [newleftsideeqs[i] for i in eqindices]
solution=solve(eqused,newunknowns)
except IndexError:
# not enough equations?
continue
if solution is not None and all([self.isValidSolution(value.subs(localsymbols)) for key,value in solution.iteritems()]):
# substitute
solsubs = []
allvalid = True
for key,value in solution.iteritems():
valuesub = value.subs(localsymbols)
solsubs.append((key,valuesub))
reducedeqs.append([key.subs(localsymbols),valuesub])
othereqindices = set(range(len(newleftsideeqs))).difference(set(eqindices))
for i in othereqindices:
reducedeqs.append([S.Zero,(newleftsideeqs[i].subs(solsubs).subs(localsymbols)).as_basic().expand()])
break
# remove the dividesymbols from reducedeqs
for sym,ivalue in dividesymbols:
value=1/ivalue
for i in range(len(reducedeqs)):
eq = reducedeqs[i][1]
if eq.has_any_symbols(sym):
neweq = S.Zero
peq = Poly(eq,sym)
for c,m in peq.iter_terms():
neweq += c*value**(peq.degree - m[0])
reducedeqs[i][1] = neweq.expand()
reducedeqs[i][0] = (reducedeqs[i][0]*value**peq.degree).expand()
if len(reducedeqs) > 0:
log.info('finished with %d equations',len(reducedeqs))
return reducedeqs
return numsymbolcoeffs, _computereducedequations
def solveManochaCanny(self,rawpolyeqs,solvejointvars,endbranchtree):
"""Solves the IK equations using eigenvalues/eigenvectors of a 12x12 quadratic eigenvalue problem. Method explained in
<NAME> and <NAME>. "Efficient inverse kinematics for general 6R manipulators", IEEE Transactions on Robotics and Automation, Volume 10, Issue 5, Oct 1994.
"""
log.info('attempting manocha/canny general ik method')
PolyEquations, raghavansolutiontree = self.reduceBothSides(rawpolyeqs)
# find all equations with zeros on the left side
RightEquations = []
for ipeq,peq in enumerate(PolyEquations):
if peq[0] == S.Zero:
if len(raghavansolutiontree) > 0:
# give up on optimization
RightEquations.append(peq[1])
else:
RightEquations.append(Poly(self.simplifyTransform(peq[1]),*peq[1].symbols))
if len(RightEquations) < 6:
raise self.CannotSolveError('number of equations %d less than 6'%(len(RightEquations)))
# sort with respect to the number of monomials
RightEquations.sort(lambda x, y: len(x.monoms)-len(y.monoms))
# substitute with dummy=tan(half angle)
symbols = RightEquations[0].symbols
symbolsubs = [(symbols[i].subs(self.invsubs),symbols[i]) for i in range(len(symbols))]
log.info('solving simultaneously for symbols: %s',symbols)
dummys = []
dummysubs = []
dummysubs2 = []
dummyvars = []
usedvars = []
singlevariables = []
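# The loop below applies the tangent half-angle (Weierstrass) substitution per joint: with
# ht = tan(theta/2), cos(theta) = (1-ht**2)/(1+ht**2) and sin(theta) = 2*ht/(1+ht**2), so each
# cos/sin pair collapses into a single rational function of one new variable.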
i = 0
while i < len(symbols):
dummy = Symbol('ht%s'%symbols[i].name[1:])
var = symbols[i].subs(self.invsubs)
if not isinstance(var,Symbol):
# [0] - cos, [1] - sin
var = var.args[0]
dummys.append(dummy)
dummysubs += [(symbols[i],(1-dummy**2)/(1+dummy**2)),(symbols[i+1],2*dummy/(1+dummy**2))]
dummysubs2.append((var,2*atan(dummy)))
dummyvars.append((dummy,tan(0.5*var)))
if not var in usedvars:
usedvars.append(var)
i += 2
else:
singlevariables.append(var)
# most likely a single variable
dummys.append(var)
dummysubs += [(var,var)]
dummysubs2.append((var,var))
if not var in usedvars:
usedvars.append(var)
i += 1
newreducedeqs = []
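# Convert each trigonometric polynomial into a genuine polynomial in the half-angle variables by
# multiplying through by the (1+ht**2)**maxdenom denominators of every joint; plain 'j' variables
# are left untouched.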
for peq in RightEquations:
maxdenom = dict()
for monoms in peq.iter_monoms():
i = 0
while i < len(monoms):
if peq.symbols[i].name[0] == 'j':
# single variable
maxdenom[peq.symbols[i]] = max(maxdenom.get(peq.symbols[i],0),monoms[i])
i += 1
else:
maxdenom[peq.symbols[i]] = max(maxdenom.get(peq.symbols[i],0),monoms[i]+monoms[i+1])
i += 2
eqnew = S.Zero
for c,monoms in peq.iter_terms():
term = c
for i in range(len(dummysubs)):
num,denom = fraction(dummysubs[i][1])
term *= num**monoms[i]
# the denoms for 0,1 and 2,3 are the same
i = 0
while i < len(monoms):
if peq.symbols[i].name[0] == 'j':
denom = fraction(dummysubs[i][1])[1]
term *= denom**(maxdenom[peq.symbols[i]]-monoms[i])
i += 1
else:
denom = fraction(dummysubs[i][1])[1]
term *= denom**(maxdenom[peq.symbols[i]]-monoms[i]-monoms[i+1])
i += 2
eqnew += term
newreducedeqs.append(Poly(eqnew,*dummys))
# from IPython.Shell import IPShellEmbed
# ipshell = IPShellEmbed(argv='',banner = 'OpenRAVE Dropping into IPython, variables: env, robot',exit_msg = 'Leaving Interpreter and closing program.')
# ipshell(local_ns=locals())
# check for equations with a single variable
if len(singlevariables) > 0:
try:
AllEquations = [eq.subs(self.invsubs).as_basic() for eq in newreducedeqs]
tree = self.solveAllEquations(AllEquations,curvars=dummys,othersolvedvars=[],solsubs=self.freevarsubs,endbranchtree=endbranchtree)
return raghavansolutiontree+tree,usedvars
except self.CannotSolveError:
pass
if 0:
# try solving for the single variable and substituting for the rest of the equations in order to get a set of equations without the single variable
var = singlevariables[0]
monomindex = symbols.index(var)
singledegreeeqs = []
AllEquations = []
for peq in newreducedeqs:
if all([m[monomindex] <= 1 for m in peq.monoms]):
newpeq = Poly(peq,var)
if newpeq.degree > 0:
singledegreeeqs.append(newpeq)
else:
AllEquations.append(peq.subs(self.invsubs).as_basic())
for peq0, peq1 in combinations(singledegreeeqs,2):
AllEquations.append(simplify((peq0.coeff(0)*peq1.coeff(1) - peq0.coeff(1)*peq1.coeff(0)).subs(self.invsubs)))
print AllEquations
#sol=self.solvePairVariablesHalfAngle(AllEquations,usedvars[1],usedvars[2],[])
# choose which leftvar can determine the singularity of the following equations!
ileftvar = 0
leftvar = dummys[ileftvar]
coupledvars = dummys[:]
coupledvars.pop(ileftvar)
getsubs = raghavansolutiontree[0].getsubs if len(raghavansolutiontree) > 0 else None
exportcoeffeqs,exportmonoms = self.solveDialytically(newreducedeqs,ileftvar,getsubs=getsubs)
jointevalcos=[d[1] for d in dummysubs if d[0].name[0] == 'c']
jointevalsin=[d[1] for d in dummysubs if d[0].name[0] == 's']
#jointeval=[d[1] for d in dummysubs if d[0].name[0] == 'j']
coupledsolution = AST.SolverCoeffFunction(jointnames=[v.name for v in usedvars],jointeval=[v[1] for v in dummysubs2],jointevalcos=jointevalcos, jointevalsin=jointevalsin, isHinges=[self.isHinge(v.name) for v in usedvars],exportvar=[v.name for v in dummys],exportcoeffeqs=exportcoeffeqs,exportfnname='solvedialyticpoly12qep',rootmaxdim=16)
self.usinglapack = True
return raghavansolutiontree+[coupledsolution]+endbranchtree,usedvars
def solveLiWoernleHiller(self,rawpolyeqs,solvejointvars,endbranchtree):
"""Li-Woernle-Hiller procedure covered in
<NAME>, "Fundamentals of Robotics Mechanical Systems", Springer, 2007.
"""
log.info('attempting li/woernle/hiller general ik method')
if len(rawpolyeqs[0][0].symbols) < len(rawpolyeqs[0][1].symbols):
for peq in rawpolyeqs:
peq[0],peq[1] = peq[1],peq[0]
symbols = list(rawpolyeqs[0][0].symbols)
othersymbols = list(rawpolyeqs[0][1].symbols)
symbolsubs = [(symbols[i].subs(self.invsubs),symbols[i]) for i in range(len(symbols))]
numsymbols = 0
for solvejointvar in solvejointvars:
for var in self.Variable(solvejointvar).vars:
if var in symbols:
numsymbols += 1
break
if numsymbols != 3:
raise self.CannotSolveError('Kohli/Osvatic method requires 3 unknown variables, has %d'%numsymbols)
# choose which leftvar can determine the singularity of the following equations!
allowedindices = []
for i in range(len(symbols)):
# if first symbol is cjX, then next should be sjX
if symbols[i].name[0] == 'c':
assert( symbols[i+1].name == 's'+symbols[i].name[1:])
if 8 == __builtin__.sum([int(peq[0].has_any_symbols(symbols[i],symbols[i+1])) for peq in rawpolyeqs]):
allowedindices.append(i)
if len(allowedindices) == 0:
raise self.CannotSolveError('need exactly 8 equations of one variable')
solutiontree = []
checkforzeros = []
getsubs = None
cvar = symbols[allowedindices[0]]
svar = symbols[allowedindices[0]+1]
varname = cvar.name[1:]
tvar = Symbol('ht'+varname)
symbols.remove(cvar)
symbols.remove(svar)
symbols.append(tvar)
othersymbols.append(tvar)
polyeqs = [[peq[0].as_basic(),peq[1]] for peq in rawpolyeqs if peq[0].has_any_symbols(cvar,svar)]
neweqs=[]
for i in range(0,len(polyeqs),2):
p0 = Poly(polyeqs[i][0],cvar,svar)
p1 = Poly(polyeqs[i+1][0],cvar,svar)
r0 = polyeqs[i][1].as_basic()
r1 = polyeqs[i+1][1].as_basic()
if self.equal(p0.coeff(1,0),-p1.coeff(0,1)) and self.equal(p0.coeff(0,1),p1.coeff(1,0)):
p0,p1 = p1,p0
r0,r1 = r1,r0
if self.equal(p0.coeff(1,0),p1.coeff(0,1)) and self.equal(p0.coeff(0,1),-p1.coeff(1,0)):
# p0+tvar*p1, p1-tvar*p0
# subs: tvar*svar + cvar = 1, svar-tvar*cvar=tvar
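# (these follow from the half-angle identities: with tvar = tan(theta/2),
# tvar*sin(theta) + cos(theta) = 1 and sin(theta) - tvar*cos(theta) = tvar)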
neweqs.append([Poly(p0.coeff(1,0) + p0.coeff(0,1)*tvar + p0.coeff(0,0) + tvar*p1.coeff(0,0),*symbols), Poly(r0+tvar*r1,*othersymbols)])
neweqs.append([Poly(p0.coeff(1,0)*tvar - p0.coeff(0,1) - p0.coeff(0,0)*tvar + p1.coeff(0,0),*symbols), Poly(r1-tvar*r0,*othersymbols)])
if len(neweqs) != 8:
raise self.CannotSolveError('coefficients of equations need to match! only got %d reduced equations'%len(neweqs))
for peq in rawpolyeqs:
if not peq[0].has_any_symbols(cvar,svar):
neweqs.append([Poly(peq[0],*symbols),Poly(peq[1],*othersymbols)])
neweqs.append([Poly(peq[0].as_basic()*tvar,*symbols),Poly(peq[1].as_basic()*tvar,*othersymbols)])
# one side should have only numbers, this makes the following inverse operations trivial
neweqs_full = []
reducedeqs = []
for peq in neweqs:
peq[1] = peq[1] - tvar*peq[0].coeff(0,0,0,0,1)-peq[0].coeff()
peq[0] = peq[0] - tvar*peq[0].coeff(0,0,0,0,1)-peq[0].coeff()
if peq[0] != S.Zero:
neweqs_full.append(peq)
else:
reducedeqs.append(peq[1].as_basic().subs(self.freevarsubs))
haszeroequations = len(reducedeqs)>0
allmonoms = set()
for peq in neweqs_full:
allmonoms = allmonoms.union(set(peq[0].monoms))
allmonoms = list(allmonoms)
allmonoms.sort()
if len(allmonoms) > len(neweqs_full):
raise self.CannotSolveError('more monomials (%d) than equations (%d)'%(len(allmonoms),len(neweqs_full)))
if len(allmonoms) < len(neweqs_full):
# order with respect to complexity of [0], this is to make the inverse of A faster
complexity = [(self.codeComplexity(peq[0].as_basic()),peq) for peq in neweqs_full]
complexity.sort()
neweqs_full = [peq for c,peq in complexity]
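# Stack the reduced equations into a linear system A * monomvector = B over the unknown monomials;
# AU/BU below hold a square, invertible sub-block and AL/BL the remaining rows, so those leftover
# rows can be turned into equations that no longer contain the unknown monomials at all.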
A = zeros((len(neweqs_full),len(allmonoms)))
B = zeros((len(neweqs_full),1))
for ipeq,peq in enumerate(neweqs_full):
for c,m in peq[0].iter_terms():
A[ipeq,allmonoms.index(m)] = c.subs(self.freevarsubs)
B[ipeq] = peq[1].as_basic().subs(self.freevarsubs)
AU = zeros((len(allmonoms),len(allmonoms)))
AL = zeros((A.shape[0]-len(allmonoms),len(allmonoms)))
BU = zeros((len(allmonoms),1))
BL = zeros((A.shape[0]-len(allmonoms),1))
AUadjugate = None
AUinv = None
AU = A[:A.shape[1],:]
nummatrixsymbols = __builtin__.sum([1 for a in AU if not a.is_number])
if nummatrixsymbols > 80:
raise self.CannotSolveError('matrix has too many symbols (%d), giving up since most likely will freeze'%nummatrixsymbols)
AUdet = AU.det()
if AUdet != S.Zero:
rows = range(A.shape[1])
else:
# prune the dependent vectors
AU = A[0:1,:]
rows = [0]
for i in range(1,A.shape[0]):
AU2 = AU.col_join(A[i:(i+1),:])
if AU2.shape[0] == AU2.shape[1]:
AUdet = AU2.det()
else:
AUdet = (AU2*AU2.transpose()).det()
if AUdet == S.Zero:
continue
AU = AU2
rows.append(i)
if AU.shape[0] == AU.shape[1]:
break
if AU.shape[0] != AU.shape[1]:
raise self.CannotSolveError('could not find non-singular matrix')
AUinv = AU.inv()
if self.has_any_symbols(A,*self.freevars):
log.info('AU has symbols, so working with inverse might take some time')
AUdet = self.trigsimp(AUdet.subs(self.freevarsubsinv),self.freejointvars).subs(self.freevarsubs)
# find the adjugate by simplifying from the inverse
AUadjugate = zeros(AUinv.shape)
sinsubs = []
for freevar in self.freejointvars:
var=self.Variable(freevar)
sinsubs.append((var.cvar**2,1-var.svar**2))
for i in range(AUinv.shape[0]):
for j in range(AUinv.shape[1]):
numerator,denominator = self.recursiveFraction(AUinv[i,j])
numerator = self.trigsimp(numerator.subs(self.freevarsubsinv),self.freejointvars).subs(self.freevarsubs)
numerator, common = self.removecommonexprs(numerator,onlygcd=True,returncommon=True)
denominator = self.trigsimp((denominator/common).subs(self.freevarsubsinv),self.freejointvars).subs(self.freevarsubs)
q,r=div(numerator*AUdet,denominator,self.freevars)
if r != S.Zero:
# sines and cosines can mix things up a lot, try converting to all sines
numerator2 = (numerator*AUdet).subs(sinsubs)
denominator2 = denominator.subs(sinsubs)
q,r=div(numerator2,denominator2,self.freevars)
if r != S.Zero:
log.warn('cannot get rid of denominator in (%s/%s)',numerator2,denominator2)
q = (numerator/denominator)*AUdet
AUadjugate[i,j] = self.trigsimp(q.subs(self.freevarsubsinv),self.freejointvars).subs(self.freevarsubs)
checkforzeros.append(self.removecommonexprs(AUdet,onlygcd=False,onlynumbers=True))
log.info('found non-singular AU matrix')
otherrows = range(A.shape[0])
for i,row in enumerate(rows):
BU[i] = B[row]
otherrows.remove(row)
for i,row in enumerate(otherrows):
BL[i] = B[row]
AL[i,:] = A[row,:]
if AUadjugate is not None:
# reason we're multiplying by adjugate instead of inverse is to get rid of the potential divides by (free) parameters
C = AL*(AUadjugate*BU)-BL*AUdet
else:
C = AL*(AUinv*BU)-BL
for c in C:
reducedeqs.append(c)
# C is now a (len(neweqs)-len(allmonoms))x1 matrix, usually 4x1
htvars = []
htvarsubs = []
htvarsubs2 = []
usedvars = []
for nextindex in [0,2]:
name = othersymbols[nextindex].name[1:]
htvar = Symbol('ht%s'%name)
htvarsubs += [(othersymbols[nextindex],(1-htvar**2)/(1+htvar**2)),(othersymbols[nextindex+1],2*htvar/(1+htvar**2))]
htvars.append(htvar)
htvarsubs2.append((Symbol(name),2*atan(htvar)))
usedvars.append(Symbol(name))
htvarsubs += [(cvar,(1-tvar**2)/(1+tvar**2)),(svar,2*tvar/(1+tvar**2))]
htvars.append(tvar)
htvarsubs2.append((Symbol(varname),2*atan(tvar)))
usedvars.append(Symbol(varname))
if haszeroequations:
log.info('special structure in equations detected, try to solve through elimination')
AllEquations = [eq.subs(self.invsubs) for eq in reducedeqs]
for curvar in usedvars[:-1]:
try:
unknownvars = usedvars[:]
unknownvars.remove(curvar)
jointtrees2=[]
curvarsubs=self.Variable(curvar).subs
treefirst = self.solveAllEquations(AllEquations,curvars=[curvar],othersolvedvars=self.freejointvars,solsubs=self.freevarsubs[:],endbranchtree=[AST.SolverSequence([jointtrees2])],unknownvars=unknownvars+[tvar])
# solvable, which means we now have len(AllEquations)-1 equations in two variables; solve with half angles
halfanglesolution=self.solvePairVariablesHalfAngle(raweqns=[eq.subs(curvarsubs) for eq in AllEquations],var0=unknownvars[0],var1=unknownvars[1],othersolvedvars=self.freejointvars+[curvar])[0]
# sometimes halfanglesolution can evaluate to all zeros (katana arm), need to catch this and go to a different branch
halfanglesolution.AddHalfTanValue = True
jointtrees2.append(halfanglesolution)
halfanglevar = unknownvars[0] if halfanglesolution.jointname==unknownvars[0].name else unknownvars[1]
unknownvars.remove(halfanglevar)
try:
# given that two variables are solved, can most likely solve the rest. Solving with the original
# equations yields simpler solutions since reducedeqs hold half-tangents
curvars = solvejointvars[:]
curvars.remove(curvar)
curvars.remove(halfanglevar)
subsinv = []
for v in solvejointvars:
subsinv += self.Variable(v).subsinv
AllEquationsOrig = [(peq[0].as_basic()-peq[1].as_basic()).subs(subsinv) for peq in rawpolyeqs]
self.sortComplexity(AllEquationsOrig)
jointtrees2 += self.solveAllEquations(AllEquationsOrig,curvars=curvars,othersolvedvars=self.freejointvars+[curvar,halfanglevar],solsubs=self.freevarsubs+curvarsubs+self.Variable(halfanglevar).subs,endbranchtree=endbranchtree)
return solutiontree+treefirst,solvejointvars
except self.CannotSolveError,e:
# try another strategy
log.debug(e)
# solve all the unknowns now
jointtrees3=[]
treesecond = self.solveAllEquations(AllEquations,curvars=unknownvars,othersolvedvars=self.freejointvars+[curvar,halfanglevar],solsubs=self.freevarsubs+curvarsubs+self.Variable(halfanglevar).subs,endbranchtree=[AST.SolverSequence([jointtrees3])])
for t in treesecond:
# most likely t is a solution...
t.AddHalfTanValue = True
if isinstance(t,AST.SolverCheckZeros):
for t2 in t.zerobranch:
t2.AddHalfTanValue = True
for t2 in t.nonzerobranch:
t2.AddHalfTanValue = True
if len(t.zerobranch) == 0 or isinstance(t.zerobranch[0],AST.SolverBreak):
log.info('detected zerobranch with SolverBreak, trying to fix')
jointtrees2 += treesecond
# using these solutions, can evaluate all monoms and check for consistency, this step is crucial since
# AllEquations might not constrain all degrees of freedom (check out katana)
indices = []
for i in range(4):
monom = [0]*len(symbols)
monom[i] = 1
indices.append(allmonoms.index(tuple(monom)))
X = AUinv*BU
for i in [0,2]:
jointname=symbols[i].name[1:]
try:
# atan2(0,0) produces an invalid solution
jointtrees3.append(AST.SolverSolution(jointname,jointeval=[atan2(X[indices[i+1]],X[indices[i]])],isHinge=self.isHinge(jointname)))
usedvars.append(Symbol(jointname))
except Exception, e:
log.warn(e)
jointcheckeqs = []
for i,monom in enumerate(allmonoms):
if not i in indices:
eq = S.One
for isymbol,ipower in enumerate(monom):
eq *= symbols[isymbol]**ipower
jointcheckeqs.append(eq-X[i])
# threshold can be a little looser since this is just a sanity check
jointtrees3.append(AST.SolverCheckZeros('sanitycheck',jointcheckeqs,zerobranch=endbranchtree,nonzerobranch=[AST.SolverBreak()],anycondition=False,thresh=0.001))
return solutiontree+treefirst,usedvars
except self.CannotSolveError,e:
log.info(e)
try:
log.info('try to solve first two variables pairwise')
#solution = self.solvePairVariables(AllEquations,usedvars[0],usedvars[1],self.freejointvars,maxcomplexity=50)
jointtrees=[]
raweqns=[eq for eq in AllEquations if not eq.has_any_symbols(tvar)]
if len(raweqns) > 0:
halfanglesolution = self.solvePairVariablesHalfAngle(raweqns=raweqns,var0=usedvars[0],var1=usedvars[1],othersolvedvars=self.freejointvars)[0]
halfanglevar = usedvars[0] if halfanglesolution.jointname==usedvars[0].name else usedvars[1]
unknownvar = usedvars[1] if halfanglesolution.jointname==usedvars[0].name else usedvars[0]
nexttree = self.solveAllEquations(raweqns,curvars=[unknownvar],othersolvedvars=self.freejointvars+[halfanglevar],solsubs=self.freevarsubs+self.Variable(halfanglevar).subs,endbranchtree=[AST.SolverSequence([jointtrees])])
#finalsolution = self.solveSingleVariable(AllEquations,usedvars[2],othersolvedvars=self.freejointvars+usedvars[0:2],maxsolutions=4,maxdegree=4)
try:
finaltree = self.solveAllEquations(AllEquations,curvars=usedvars[2:],othersolvedvars=self.freejointvars+usedvars[0:2],solsubs=self.freevarsubs+self.Variable(usedvars[0]).subs+self.Variable(usedvars[1]).subs,endbranchtree=endbranchtree)
jointtrees += finaltree
return [halfanglesolution]+nexttree,usedvars
except self.CannotSolveError,e:
log.debug('failed to solve for final variable %s, so returning just two: %s'%(usedvars[2],str(usedvars[0:2])))
jointtrees += endbranchtree
# sometimes the last variable cannot be solved, so return the already solved variables and let the higher-level function take care of it
return [halfanglesolution]+nexttree,usedvars[0:2]
except self.CannotSolveError,e:
log.debug(e)
newreducedeqs = []
hassinglevariable = False
for eq in reducedeqs:
peq = Poly(eq,*othersymbols)
maxdenom = [0,0]
for monoms in peq.iter_monoms():
for i in range(len(maxdenom)):
maxdenom[i] = max(maxdenom[i],monoms[2*i]+monoms[2*i+1])
eqnew = S.Zero
for c,monoms in peq.iter_terms():
term = c
for i in range(4):
num,denom = fraction(htvarsubs[i][1])
term *= num**monoms[i]
term *= tvar**monoms[4]
# the denoms for 0,1 and 2,3 are the same
for i in range(len(maxdenom)):
denom = fraction(htvarsubs[2*i][1])[1]
term *= denom**(maxdenom[i]-monoms[2*i]-monoms[2*i+1])
eqnew += term
newpeq = Poly(eqnew,htvars[0],htvars[1],tvar)
newreducedeqs.append(newpeq)
hassinglevariable |= any([all([__builtin__.sum(monom)==monom[i] for monom in newpeq.monoms]) for i in range(3)])
if hassinglevariable:
log.info('hassinglevariable, trying with raw equations')
AllEquations = []
for eq in reducedeqs:
peq = Poly(eq,tvar)
if peq.degree == 0:
AllEquations.append(peq.coeff().subs(self.invsubs).expand())
elif peq.degree == 1 and peq.coeff() == S.Zero:
AllEquations.append(peq.coeff(1).subs(self.invsubs).expand())
else:
# two substitutions: sin/(1+cos), (1-cos)/sin
neweq0 = S.Zero
neweq1 = S.Zero
for c,monoms in peq.iter_terms():
neweq0 += c*(svar**monoms[0])*((1+cvar)**(peq.degree-monoms[0]))
neweq1 += c*((1-cvar)**monoms[0])*(svar**(peq.degree-monoms[0]))
AllEquations.append(neweq0.subs(self.invsubs).expand())
AllEquations.append(neweq1.subs(self.invsubs).expand())
self.sortComplexity(AllEquations)
for i in range(3):
try:
unknownvars = usedvars[:]
unknownvars.pop(i)
endbranchtree2 = []
solutiontree = self.solveAllEquations(AllEquations,curvars=[usedvars[i]],othersolvedvars=self.freejointvars[:],solsubs=self.freevarsubs[:],endbranchtree=[AST.SolverSequence([endbranchtree2])],unknownvars=unknownvars)
endbranchtree2 += self.solveAllEquations(AllEquations,curvars=unknownvars[0:2],othersolvedvars=self.freejointvars[:]+[usedvars[i]],solsubs=self.freevarsubs[:]+self.Variable(usedvars[i]).subs,endbranchtree=endbranchtree)
return solutiontree, usedvars
except self.CannotSolveError:
pass
# try:
# testvars = [Symbol(othersymbols[0].name[1:]),Symbol(othersymbols[2].name[1:]),Symbol(varname)]
# AllEquations = [(peq[0].as_basic()-peq[1].as_basic()).expand() for peq in polyeqs if not peq[0].has_any_symbols(*symbols)]
# coupledsolutions = self.solveAllEquations(AllEquations,curvars=testvars,othersolvedvars=self.freejointvars[:],solsubs=self.freevarsubs[:],endbranchtree=endbranchtree)
# return coupledsolutions,testvars
# except self.CannotSolveError:
# pass
#
exportcoeffeqs,exportmonoms = self.solveDialytically(newreducedeqs,0,getsubs=getsubs)
coupledsolution = AST.SolverCoeffFunction(jointnames=[v.name for v in usedvars],jointeval=[v[1] for v in htvarsubs2],jointevalcos=[htvarsubs[2*i][1] for i in range(len(htvars))],jointevalsin=[htvarsubs[2*i+1][1] for i in range(len(htvars))],isHinges=[self.isHinge(v.name) for v in usedvars],exportvar=[v.name for v in htvars],exportcoeffeqs=exportcoeffeqs,exportfnname='solvedialyticpoly8qep',rootmaxdim=16)
coupledsolution.presetcheckforzeros = checkforzeros
solutiontree.append(coupledsolution)
self.usinglapack = True
return solutiontree+endbranchtree,usedvars
def solveKohliOsvatic(self,rawpolyeqs,solvejointvars,endbranchtree):
"""Find a 16x16 matrix where the entries are linear with respect to the tan half-angle of one of the variables [Kohli1993]_. Takes in the 14 raghavan/roth equations.
.. [Kohli1993] <NAME> and <NAME>, "Inverse Kinematics of General 6R and 5R,P Serial Manipulators", Journal of Mechanical Design, Volume 115, Issue 4, Dec 1993.
"""
log.info('attempting kohli/osvatic general ik method')
if len(rawpolyeqs[0][0].symbols) < len(rawpolyeqs[0][1].symbols):
for peq in rawpolyeqs:
peq[0],peq[1] = peq[1],peq[0]
symbols = list(rawpolyeqs[0][0].symbols)
othersymbols = list(rawpolyeqs[0][1].symbols)
symbolsubs = [(symbols[i].subs(self.invsubs),symbols[i]) for i in range(len(symbols))]
if len(symbols) != 6:
raise self.CannotSolveError('Kohli/Osvatic method requires 3 unknown variables')
# choose which leftvar can determine the singularity of the following equations!
for i in range(0,6,2):
eqs = [peq for peq in rawpolyeqs if peq[0].has_any_symbols(symbols[i],symbols[i+1])]
if len(eqs) <= 8:
break
if len(eqs) > 8:
raise self.CannotSolveError('need 8 or less equations of one variable')
cvar = symbols[i]
svar = symbols[i+1]
tvar = Symbol('t'+cvar.name[1:])
symbols.remove(cvar)
symbols.remove(svar)
othereqs = [peq for peq in rawpolyeqs if not peq[0].has_any_symbols(cvar,svar)]
polyeqs = [[eq[0].as_basic(),eq[1]] for eq in eqs]
if len(polyeqs) < 8:
raise self.CannotSolveError('solveKohliOsvatic: need 8 or more polyeqs')
neweqs=[]
for i in range(0,8,2):
p0 = Poly(polyeqs[i][0],cvar,svar)
p1 = Poly(polyeqs[i+1][0],cvar,svar)
r0 = polyeqs[i][1].as_basic()
r1 = polyeqs[i+1][1].as_basic()
if self.equal(p0.coeff(1,0),-p1.coeff(0,1)) and self.equal(p0.coeff(0,1),p1.coeff(1,0)):
p0,p1 = p1,p0
r0,r1 = r1,r0
if self.equal(p0.coeff(1,0),p1.coeff(0,1)) and self.equal(p0.coeff(0,1),-p1.coeff(1,0)):
# p0+tvar*p1, p1-tvar*p0
# subs: tvar*svar + cvar = 1, svar-tvar*cvar=tvar
neweqs.append([Poly(p0.coeff(1,0) + p0.coeff(0,1)*tvar + p0.coeff(0,0) + tvar*p1.coeff(0,0),*symbols), Poly(r0+tvar*r1,*othersymbols)])
neweqs.append([Poly(p0.coeff(1,0)*tvar - p0.coeff(0,1) - p0.coeff(0,0)*tvar + p1.coeff(0,0),*symbols), Poly(r1-tvar*r0,*othersymbols)])
if len(neweqs) != 8:
raise self.CannotSolveError('coefficients of equations need to match! only got %d reduced equations'%len(neweqs))
# solve the othereqs for symbols without the standalone symbols[2] and symbols[3]
for jother in range(len(othersymbols)/2):
if jother == 0:
cosmonom = (1,0,0,0)
sinmonom = (0,1,0,0)
else:
cosmonom = (0,0,1,0)
sinmonom = (0,0,0,1)
leftsideeqs = []
rightsideeqs = []
for eq0,eq1 in othereqs:
leftsideeq = Poly(eq1,*othersymbols)
rightsideeq = Poly(eq0,*(symbols+othersymbols[2*jother:(2*jother+2)]))
coscoeff = leftsideeq.coeff(*cosmonom)
if coscoeff != S.Zero:
rightsideeq = rightsideeq.sub_term(coscoeff,(0,0,0,0,1,0))
leftsideeq = leftsideeq.sub_term(coscoeff,cosmonom)
sincoeff = leftsideeq.coeff(*sinmonom)
if sincoeff != S.Zero:
rightsideeq = rightsideeq.sub_term(sincoeff,(0,0,0,0,0,1))
leftsideeq = leftsideeq.sub_term(sincoeff,sinmonom)
const = leftsideeq.coeff(0,0,0,0)
if const != S.Zero:
rightsideeq = rightsideeq.sub_term(const,(0,0,0,0,0,0))
leftsideeq = leftsideeq.sub_term(const,(0,0,0,0))
rightsideeqs.append(rightsideeq)
leftsideeqs.append(leftsideeq)
# number of symbols for hiro robot is 16
if len(othersymbols) > 2:
reducedeqs = self.reduceBothSidesSymbolically(leftsideeqs,rightsideeqs,usesymbols=False,maxsymbols=18)
for peq in reducedeqs:
peq[0] = Poly(peq[0],*othersymbols)
else:
reducedeqs = [[left,right] for left,right in izip(leftsideeqs,rightsideeqs)]
if len(reducedeqs) > 0:
break
if len(reducedeqs) == 0:
raise self.CannotSolveError('KohliOsvatic method: could not reduce the equations')
finaleqs = []
finaleqsymbols = symbols+othersymbols[2*jother:(2+2*jother)]
log.info('build final equations for symbols: %s',finaleqsymbols)
for eq0,eq1 in neweqs:
commondenom = Poly(S.One,*self.pvars)
for c,m in eq1.iter_terms():
foundreq = [req[1] for req in reducedeqs if req[0].monoms[0] == m]
if len(foundreq) > 0:
n,d = fraction(foundreq[0])
commondenom = Poly(lcm(commondenom,d),*self.pvars)
commondenom = self.removecommonexprs(commondenom.as_basic(),onlygcd=True,onlynumbers=True)
finaleq = eq0.as_basic()*commondenom
for c,m in eq1.iter_terms():
foundreq = [req[1] for req in reducedeqs if req[0].monoms[0] == m]
if len(foundreq) > 0:
finaleq = finaleq - c*simplify(foundreq[0]*commondenom)
else:
finaleq = finaleq - Poly(S.Zero,*eq1.symbols).add_term(c*commondenom,m).as_basic()
finaleqs.append(Poly(finaleq.expand(),*finaleqsymbols))
# finally do the half angle substitution with symbols
# set:
# j=othersymbols[2]*(1+dummys[0]**2)*(1+dummys[1]**2)
# k=othersymbols[3]*(1+dummys[0]**2)*(1+dummys[1]**2)
dummys = []
dummysubs = []
dummysubs2 = []
dummyvars = []
usedvars = []
dummys.append(tvar)
dummyvars.append((tvar,tan(0.5*Symbol(tvar.name[1:]))))
usedvars.append(Symbol(cvar.name[1:]))
dummysubs2.append((usedvars[-1],2*atan(tvar)))
dummysubs += [(cvar,(1-tvar**2)/(1+tvar**2)),(svar,2*tvar/(1+tvar**2))]
for i in range(0,len(symbols),2):
dummy = Symbol('ht%s'%symbols[i].name[1:])
# [0] - cos, [1] - sin
dummys.append(dummy)
dummysubs += [(symbols[i],(1-dummy**2)/(1+dummy**2)),(symbols[i+1],2*dummy/(1+dummy**2))]
var = symbols[i].subs(self.invsubs).args[0]
dummyvars.append((dummy,tan(0.5*var)))
dummysubs2.append((var,2*atan(dummy)))
if not var in usedvars:
usedvars.append(var)
commonmult = (1+dummys[1]**2)*(1+dummys[2]**2)
usedvars.append(Symbol(othersymbols[2*jother].name[1:]))
dummyj = Symbol('dummyj')
dummyk = Symbol('dummyk')
dummyjk = Symbol('dummyjk')
dummys.append(dummyj)
dummyvars.append((dummyj,othersymbols[2*jother]*(1+dummyvars[1][1]**2)*(1+dummyvars[2][1]**2)))
dummysubs.append((othersymbols[2*jother],cos(dummyjk)))
dummys.append(dummyk)
dummyvars.append((dummyk,othersymbols[1+2*jother]*(1+dummyvars[1][1]**2)*(1+dummyvars[2][1]**2)))
dummysubs.append((othersymbols[1+2*jother],sin(dummyjk)))
dummysubs2.append((usedvars[-1],dummyjk))
ileftvar=0
newreducedeqs = []
for peq in finaleqs:
eqnew = S.Zero
for c,monoms in peq.iter_terms():
term = S.One
for i in range(4):
term *= dummysubs[i+2][1]**monoms[i]
if monoms[4] == 1:
eqnew += c * dummyj
elif monoms[5] == 1:
eqnew += c * dummyk
else:
eqnew += c*simplify(term*commonmult)
newreducedeqs.append(Poly(eqnew,*dummys))
exportcoeffeqs,exportmonoms = self.solveDialytically(newreducedeqs,ileftvar,getsubs=None)
coupledsolution = AST.SolverCoeffFunction(jointnames=[v.name for v in usedvars],jointeval=[v[1] for v in dummysubs2],jointevalcos=[dummysubs[2*i][1] for i in range(len(usedvars))],jointevalsin=[dummysubs[2*i+1][1] for i in range(len(usedvars))],isHinges=[self.isHinge(v.name) for v in usedvars],exportvar=dummys[0:3]+[dummyjk],exportcoeffeqs=exportcoeffeqs,exportfnname='solvedialyticpoly16lep',rootmaxdim=16)
self.usinglapack = True
return [coupledsolution]+endbranchtree,usedvars
def solveDialytically(self,newreducedeqs,ileftvar,returnmatrix=False,getsubs=None):
""" Return the coefficients to solve equations dialytically (Salmon 1885) leaving out variable index ileftvar.
Extract the coefficients of 1, leftvar**1, leftvar**2, ... of every equation
every len(newreducedeqs)*len(monoms) coefficients specify one degree of all the equations (order of monoms is specified in exportmonomorder
there should be len(newreducedeqs)*len(monoms)*maxdegree coefficients
Method also checks if the equations are linearly dependent
"""
allmonoms = set()
origmonoms = set()
maxdegree = 0
for peq in newreducedeqs:
if peq.degree == 0:
log.warn('solveDialytically: polynomial %s degree is 0',peq)
continue
for m in peq.iter_monoms():
mlist = list(m)
maxdegree=max(maxdegree,mlist.pop(ileftvar))
allmonoms.add(tuple(mlist))
origmonoms.add(tuple(mlist))
mlist[0] += 1
allmonoms.add(tuple(mlist))
allmonoms = list(allmonoms)
allmonoms.sort()
origmonoms = list(origmonoms)
origmonoms.sort()
if len(allmonoms)<2*len(newreducedeqs):
log.warn('solveDialytically equations %d > %d, should be equal...', 2*len(newreducedeqs),len(allmonoms))
newreducedeqs = newreducedeqs[0:(len(allmonoms)/2)]
if len(allmonoms)>2*len(newreducedeqs):
raise self.CannotSolveError('solveDialytically: more unknowns than equations %d>%d'%(len(allmonoms), 2*len(newreducedeqs)))
Mall = [zeros((2*len(newreducedeqs),len(allmonoms))) for i in range(maxdegree+1)]
exportcoeffeqs = [S.Zero]*(len(newreducedeqs)*len(origmonoms)*(maxdegree+1))
for ipeq,peq in enumerate(newreducedeqs):
for c,m in peq.iter_terms():
mlist = list(m)
degree=mlist.pop(ileftvar)
exportindex = degree*len(origmonoms)*len(newreducedeqs) + len(origmonoms)*ipeq+origmonoms.index(tuple(mlist))
exportcoeffeqs[exportindex] = c
Mall[degree][len(newreducedeqs)+ipeq,allmonoms.index(tuple(mlist))] = c
mlist[0] += 1
Mall[degree][ipeq,allmonoms.index(tuple(mlist))] = c
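# Note: every coefficient is entered twice, once for the original monomial and once with the first
# remaining variable's exponent bumped by one (mlist[0] += 1). This effectively multiplies each
# equation by that variable, the classic dialytic trick that doubles the equation count so the
# coefficient matrices can be made square.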
# have to check that the determinant is not zero for several values of ileftvar! It is very common that
# some equations are linearly dependent and not solvable through this method.
if self.testconsistentvalues is not None:
linearlyindependent = False
for itest,subs in enumerate(self.testconsistentvalues):
if getsubs is not None:
# have to explicitly evaluate since testsubs can be very complex
subsvals = [(s,v.evalf()) for s,v in subs]
subs = subsvals+getsubs(subsvals)
A = Mall[maxdegree].subs(subs).evalf()
eps = 10**-(self.precision-3)
eigenvals = numpy.linalg.eigvals(numpy.array(numpy.array(A),numpy.float64))
if all([abs(f) > eps for f in eigenvals]):
Ainv = A.inv(method='LU')
B = Ainv*Mall[1].subs(subs).evalf()
C = Ainv*Mall[0].subs(subs).evalf()
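# The next few lines build a companion-style linearization A2 = [[0, I], [-C, -B]] of the
# quadratic pencil x**2*I + x*B + C (with B = Ainv*Mall[1], C = Ainv*Mall[0]; this assumes the
# relevant maxdegree is 2). Its eigenvalues correspond to candidate roots of the left-out variable
# at the test values and appear to serve here as a numerical sanity check before declaring the
# equations linearly independent.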
A2 = zeros((B.shape[0],B.shape[0]*2))
for i in range(B.shape[0]):
A2[i,B.shape[0]+i] = S.One
A2=A2.col_join((-C).row_join(-B))
eigenvals2 = numpy.linalg.eigvals(numpy.array(numpy.array(A2),numpy.float64))
linearlyindependent = True
break
if not linearlyindependent:
raise self.CannotSolveError('equations are not linearly independent')
if returnmatrix:
return Mall,allmonoms
return exportcoeffeqs,origmonoms
def simplifyTransform(self,eq,othervars=None):
"""Attemps to simplify an equation given that variables from a rotation matrix have been used. There are 12 constraints that are tested:
- lengths of rows and colums are 1
- dot products of combinations of rows/columns are 0
- cross products of combinations of rows/columns yield the left over row/column
"""
if othervars is not None:
peq = Poly(eq,*othervars)
peqnew = Poly(S.Zero,*othervars)
for c,m in peq.iter_terms():
cnew = self.simplifyTransform(c)
if cnew:
peqnew = peqnew.add_term(cnew,m)
return peqnew.as_basic()
# first simplify just rotations (since they don't add any new variables)
allsymbols = list(self.Tee[0:3,0:3])
# check normals
normgroups = []
for i in range(3):
normgroups.append([self.Tee[i,0],self.Tee[i,1],self.Tee[i,2],S.One])
normgroups.append([self.Tee[0,i],self.Tee[1,i],self.Tee[2,i],S.One])
def _simplifynorm(eq):
neweq = None
for group in normgroups:
p = Poly(eq,group[0],group[1],group[2])
for m0,m1 in combinations(p.monoms,2):
if self.equal(p.coeff(*m0),p.coeff(*m1)):
for i,j,k in [(0,1,2),(0,2,1),(1,2,0)]:
if ((m0[i] == 2 and m1[j] == 2) or (m0[j]==2 and m1[i]==2)) and m0[k]==m1[k]:
# there is a bug in sympy polynomial adding here! (0.6.7)
p = p + p.coeff(*m0)*(group[3]-p.symbols[0]**2-p.symbols[1]**2-p.symbols[2]**2)*p.symbols[k]**(m0[k])
neweq = p.as_basic()
eq = neweq
break
return neweq
# check for dot products between rows and columns
dotgroups = []
for i,j in combinations(range(3),2):
# dot product of rotations is always 0
dotgroups.append([[i,j],[i+3,j+3],[i+6,j+6],S.Zero])
dotgroups.append([[3*i,3*j],[3*i+1,3*j+1],[3*i+2,3*j+2],S.Zero])
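# (indices refer to the row-major flattening of Tee[0:3,0:3] stored in allsymbols: entry (r,c) sits
# at index 3*r+c, so [i,j],[i+3,j+3],[i+6,j+6] pairs up columns i and j, while [3*i,3*j],... pairs up rows)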
def _simplifydot(eq):
p = Poly(eq,*allsymbols)
changed = False
for dg in dotgroups:
for i,j,k in [(0,1,2),(0,2,1),(1,2,0)]:
for comb in combinations(p.iter_terms(),2):
if self.equal(comb[0][0],comb[1][0]):
for (c0,m0),(c1,m1) in [comb,comb[::-1]]:
if m0[dg[i][0]] == 1 and m0[dg[i][1]] == 1 and m1[dg[j][0]] == 1 and m1[dg[j][1]] == 1:
# make sure the left over terms are also the same
m0l = list(m0); m0l[dg[i][0]] = 0; m0l[dg[i][1]] = 0
m1l = list(m1); m1l[dg[j][0]] = 0; m1l[dg[j][1]] = 0
if tuple(m0l) == tuple(m1l):
m2 = list(m0l); m2[dg[k][0]] += 1; m2[dg[k][1]] += 1
# there is a bug in sympy polynomial adding here! (0.6.7)
p = p.sub_term(c0,m0).sub_term(c1,m1).sub_term(c0,tuple(m2))
if dg[3] != S.Zero:
p = p.add_term(c0*dg[3],tuple(m0l))
changed = True
break
if changed:
break
return p.as_basic() if changed else None
# add cross products
crossgroups = []
for i,j,k in [(0,1,2),(0,2,1),(1,2,0)]:
# column
crossgroups.append([[i+3,j+6],[i+6,j+3],k])
crossgroups.append([[i+6,j],[i,j+6],k+3])
crossgroups.append([[i,j+3],[i+3,j],k+6])
# row
crossgroups.append([[3*i+1,3*j+2],[3*i+2,3*j+1],3*k])
crossgroups.append([[3*i+2,3*j],[3*i,3*j+2],3*k+1])
crossgroups.append([[3*i,3*j+1],[3*i+1,3*j],3*k+2])
# swap if sign is negative: if j!=1+i
if j!=1+i:
for crossgroup in crossgroups[-6:]:
crossgroup[0],crossgroup[1] = crossgroup[1],crossgroup[0]
def _simplifycross(eq):
# check cross products
changed = False
p = Poly(eq,*allsymbols)
pzero = Poly(S.Zero,*allsymbols)
for cg in crossgroups:
for comb in combinations(p.iter_terms(),2):
if self.equal(comb[0][0],-comb[1][0]):
for (c0,m0),(c1,m1) in [comb,comb[::-1]]:
if m0[cg[0][0]] == 1 and m0[cg[0][1]] == 1 and m1[cg[1][0]] == 1 and m1[cg[1][1]] == 1:
# make sure the left over terms are also the same
m0l = list(m0); m0l[cg[0][0]] = 0; m0l[cg[0][1]] = 0
m1l = list(m1); m1l[cg[1][0]] = 0; m1l[cg[1][1]] = 0
if tuple(m0l) == tuple(m1l):
m2 = m0l; m2[cg[2]] += 1
# there is a bug in sympy polynomial caching here! (0.6.7)
#p = p.sub_term(c0,m0).sub_term(c1,m1).add_term(c0,tuple(m2))
p = Poly(p.as_basic() - pzero.add_term(c0,m0).as_basic() - pzero.add_term(c1,m1).as_basic() + pzero.add_term(c0,tuple(m2)).as_basic(),*allsymbols)
changed = True
break
if changed:
break
return p.as_basic() if changed else None
fns = [_simplifynorm,_simplifydot,_simplifycross]
changed = True
while changed and eq.has_any_symbols(*allsymbols):
changed = False
for fn in fns:
neweq = fn(eq)
if neweq is not None:
eq = neweq
changed = True
# check if full 3D position is available
if self.pp is not None:
# add positions
ip = 9
inp = 12
ipp = 15
irxp = 16
allsymbols += list(self.Tee[0:3,3])+self.npxyz+[self.pp]+self.rxp[0]+self.rxp[1]+self.rxp[2]
normgroups.append([self.Tee[0,3],self.Tee[1,3],self.Tee[2,3],self.pp])
for i in range(3):
dotgroups.append([[i,ip],[i+3,ip+1],[i+6,ip+2],self.npxyz[i]])
dotgroups.append([[3*i+0,inp],[3*i+1,inp+1],[3*i+2,inp+2],self.Tee[i,3]])
# column i cross position
crossgroups.append([[i+3,ip+2],[i+6,ip+1],irxp+3*i+0])
crossgroups.append([[i+6,ip+0],[i,ip+2],irxp+3*i+1])
crossgroups.append([[i,ip+1],[i+3,ip+0],irxp+3*i+2])
changed = True
while changed and eq.has_any_symbols(*allsymbols):
changed = False
for fn in fns:
neweq = fn(eq)
if neweq is not None:
eq = neweq
changed = True
return eq
def isExpressionUnique(self, exprs, expr):
for exprtest in exprs:
if self.equal(expr,exprtest):
return False
return True
def getCommonExpression(self, exprs, expr):
for i,exprtest in enumerate(exprs):
if self.equal(expr,exprtest):
return i
return None
def verifyAllEquations(self,AllEquations,unsolvedvars, solsubs, tree=None):
extrazerochecks=[]
for i in range(len(AllEquations)):
expr = AllEquations[i]
if not self.isValidSolution(expr):
raise self.CannotSolveError('verifyAllEquations: equation is not valid: %s'%(str(expr)))
if not expr.has_any_symbols(*unsolvedvars) and (self.isExpressionUnique(extrazerochecks,expr) or self.isExpressionUnique(extrazerochecks,-expr)):
extrazerochecks.append(self.removecommonexprs(expr.subs(solsubs).evalf(),onlygcd=False,onlynumbers=True))
if len(extrazerochecks) > 0:
return [AST.SolverCheckZeros(None,extrazerochecks,tree,[AST.SolverBreak()],anycondition=False)]
return tree
def solveAllEquations(self,AllEquations,curvars,othersolvedvars,solsubs,endbranchtree,currentcases=None,unknownvars=None):
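# Strategy (as implemented below): first look for equations that involve only one remaining unknown
# and solve those; if none determines its variable uniquely, fall back to pairwise solutions, and
# finally to higher-degree half-angle equations. Whatever is found is handed to addSolution, which
# recurses on the remaining variables.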
if len(curvars) == 0:
return endbranchtree
if unknownvars is None:
unknownvars = []
log.info('%s %s',othersolvedvars,curvars)
solsubs = solsubs[:]
freevarinvsubs = [(f[1],f[0]) for f in self.freevarsubs]
solinvsubs = [(f[1],f[0]) for f in solsubs]
# single variable solutions
solutions = []
for curvar in curvars:
othervars = unknownvars+[var for var in curvars if var != curvar]
curvarsym = self.Variable(curvar)
raweqns = []
for e in AllEquations:
if (len(othervars) == 0 or not e.has_any_symbols(*othervars)) and e.has_any_symbols(curvar,curvarsym.htvar,curvarsym.cvar,curvarsym.svar):
eq = e.subs(self.freevarsubs+solsubs)
if self.isExpressionUnique(raweqns,eq) and self.isExpressionUnique(raweqns,-eq):
raweqns.append(eq)
if len(raweqns) > 0:
try:
rawsolutions=self.solveSingleVariable(raweqns,curvar,othersolvedvars, unknownvars=curvars+unknownvars)
for solution in rawsolutions:
self.solutionComplexity(solution,othersolvedvars,curvars)
solutions.append((solution,curvar))
except self.CannotSolveError:
pass
# only return here if a solution was found that perfectly determines the unknown
# otherwise, the pairwise solver could come up with something..
# There is still a problem with this: (bertold robot)
# Sometimes an equation like atan2(y,x) evaluates to atan2(0,0) during runtime.
# This cannot be known at compile time, so the equation is selected and any other possibilities are rejected.
# In the bertold robot case, the next possibility is a pair-wise solution involving two variables
if any([s[0].numsolutions()==1 for s in solutions]):
return self.addSolution(solutions,AllEquations,curvars,othersolvedvars,solsubs,endbranchtree,currentcases=currentcases)
curvarsubssol = []
for var0,var1 in combinations(curvars,2):
othervars = unknownvars+[var for var in curvars if var != var0 and var != var1]
raweqns = []
complexity = 0
for e in AllEquations:
if (len(othervars) == 0 or not e.has_any_symbols(*othervars)) and e.has_any_symbols(var0,var1):
eq = e.subs(self.freevarsubs+solsubs)
if self.isExpressionUnique(raweqns,eq) and self.isExpressionUnique(raweqns,-eq):
raweqns.append(eq)
complexity += self.codeComplexity(eq)
if len(raweqns) > 1:
curvarsubssol.append((var0,var1,raweqns,complexity))
curvarsubssol.sort(lambda x, y: x[3]-y[3])
for var0,var1,raweqns,complexity in curvarsubssol:
try:
rawsolutions=self.solvePairVariables(raweqns,var0,var1,othersolvedvars,unknownvars=curvars+unknownvars)
for solution in rawsolutions:
#solution.subs(freevarinvsubs)
self.solutionComplexity(solution,othersolvedvars,curvars)
solutions.append((solution,Symbol(solution.jointname)))
if len(rawsolutions) > 0: # solving a pair is rare, so any solution will do
break
except self.CannotSolveError:
pass
# take the least complex solution and go on
if len(solutions) > 0:
return self.addSolution(solutions,AllEquations,curvars,othersolvedvars,solsubs,endbranchtree,currentcases=currentcases)
# test with higher degrees, necessary?
for curvar in curvars:
othervars = unknownvars+[var for var in curvars if var != curvar]
raweqns = []
for e in AllEquations:
if (len(othervars) == 0 or not e.has_any_symbols(*othervars)) and e.has_any_symbols(curvar):
eq = e.subs(self.freevarsubs+solsubs)
if self.isExpressionUnique(raweqns,eq) and self.isExpressionUnique(raweqns,-eq):
raweqns.append(eq)
for raweqn in raweqns:
try:
log.info('testing with higher degrees')
solution=self.solveHighDegreeEquationsHalfAngle([raweqn],self.Variable(curvar))
self.solutionComplexity(solution,othersolvedvars,curvars)
solutions.append((solution,curvar))
except self.CannotSolveError:
pass
if len(solutions) > 0:
return self.addSolution(solutions,AllEquations,curvars,othersolvedvars,solsubs,endbranchtree,currentcases=currentcases)
# solve with all 3 variables together
# have got this far, so perhaps two axes are aligned?
raise self.CannotSolveError('failed to find a variable to solve')
def addSolution(self,solutions,AllEquations,curvars,othersolvedvars,solsubs,endbranchtree,currentcases=None):
"""Take the least complex solution of a set of solutions and resume solving
"""
solutions = [s for s in solutions if s[0].score < oo and s[0].checkValidSolution()] # remove infinite scores
if len(solutions) == 0:
raise self.CannotSolveError('no valid solutions')
solutions.sort(lambda x, y: x[0].score-y[0].score)
hasonesolution = False
for solution in solutions:
checkforzeros = solution[0].checkforzeros
hasonesolution |= solution[0].numsolutions() == 1
if len(checkforzeros) == 0 and solution[0].numsolutions() == 1:
# did find a good solution, so take it. Make sure to check any zero branches
var = solution[1]
newvars=curvars[:]
newvars.remove(var)
return [solution[0].subs(solsubs)]+self.solveAllEquations(AllEquations,curvars=newvars,othersolvedvars=othersolvedvars+[var],solsubs=solsubs+self.Variable(var).subs,endbranchtree=endbranchtree,currentcases=currentcases)
if not hasonesolution:
# check again except without the number of solutions requirement
for solution in solutions:
checkforzeros = solution[0].checkforzeros
if len(checkforzeros) == 0:
# did find a good solution, so take it. Make sure to check any zero branches
var = solution[1]
newvars=curvars[:]
newvars.remove(var)
return [solution[0].subs(solsubs)]+self.solveAllEquations(AllEquations,curvars=newvars,othersolvedvars=othersolvedvars+[var],solsubs=solsubs+self.Variable(var).subs,endbranchtree=endbranchtree,currentcases=currentcases)
# all solutions have check for zero equations
# choose the variable with the shortest solution and compute (this is a conservative approach)
usedsolutions = []
# remove any solutions with similar checkforzero constraints (because they are essentially the same)
for solution,var in solutions:
solution.subs(solsubs)
if len(usedsolutions) == 0:
usedsolutions.append((solution,var))
else:
match = False
for usedsolution,usedvar in usedsolutions:
if len(solution.checkforzeros) == len(usedsolution.checkforzeros):
if not any([self.isExpressionUnique(usedsolution.checkforzeros,-eq) or self.isExpressionUnique(usedsolution.checkforzeros,eq) for eq in solution.checkforzeros]):
match = True
break
if not match:
usedsolutions.append((solution,var))
if len(usedsolutions) >= 3:
# don't need more than three alternatives (used to be two, but then lookat barrettwam4 proved that wrong)
break
nextsolutions = dict()
allvars = curvars+[Symbol('s%s'%v.name) for v in curvars]+[Symbol('c%s'%v.name) for v in curvars]
lastbranch = []
prevbranch=lastbranch
if currentcases is None:
currentcases = set()
if self.degeneratecases is None:
self.degeneratecases = self.DegenerateCases()
handledconds = self.degeneratecases.gethandledconds(currentcases)
eqs = []
hascheckzeros = False
# iterate in reverse order and put the most recently processed solution at the front.
# There is a problem with this algorithm transferring the degenerate cases correctly.
# Although the zeros of the first equation are checked, they are not added as conditions
# to the later equations, so that the later equations will also use variables as unknowns (even though they are determined to be specific constants). This is most apparent in rotations.
for solution,var in usedsolutions[::-1]:
# there are divide by zeros, so check if they can be explicitly solved for joint variables
checkforzeros = []
for checkzero in solution.checkforzeros:
if checkzero.has_any_symbols(*allvars):
log.info('ignoring special check for zero since it has symbols %s: %s',str(allvars),str(checkzero))
continue
# don't bother trying to extract something if too complex (takes a lot of computation time to check and most likely nothing will be extracted); the complexity threshold is arbitrary
if self.codeComplexity(checkzero) > 120:
log.warn('checkforzero too big (%d): %s',self.codeComplexity(checkzero),checkzero)
checkforzeros.append(checkzero)#self.removecommonexprs(checkzero.evalf(),onlygcd=False,onlynumbers=True))
else:
# fractions could get big, so evaluate directly
checkforzeros.append(checkzero.evalf())#self.removecommonexprs(checkzero.evalf(),onlygcd=False,onlynumbers=True))
for othervar in othersolvedvars:
sothervar = self.Variable(othervar).svar
cothervar = self.Variable(othervar).cvar
if checkzero.has_any_symbols(othervar,sothervar,cothervar):
# the easiest thing to check first is if the equation evaluates to zero on boundaries 0,pi/2,pi,-pi/2
s = AST.SolverSolution(othervar.name,jointeval=[],isHinge=self.isHinge(othervar.name))
for value in [S.Zero,pi/2,pi,-pi/2]:
try:
checkzerosub=checkzero.subs([(othervar,value),(sothervar,sin(value).evalf()),(cothervar,cos(value).evalf())])
if self.isValidSolution(checkzerosub) and checkzerosub.evalf() == S.Zero:
if s.jointeval is None:
s.jointeval = []
s.jointeval.append(S.One*value)
except AssertionError,e:
log.warn('othervar %s=%f: %s',str(othervar),value,e)
if s.jointeval is not None and len(s.jointeval) > 0:
ss = [s]
else:
ss = []
try:
ss += self.solveSingleVariable([checkzero.subs([(sothervar,sin(othervar)),(cothervar,cos(othervar))])],othervar,othersolvedvars)
except polys.polynomial.PolynomialError:
# checkzero was too complex
pass
except self.CannotSolveError,e:
# this is actually a little tricky, sometimes really good solutions can have a divide that looks like:
# ((0.405 + 0.331*cj2)**2 + 0.109561*sj2**2) (manusarm_left)
# This will never be 0, but the expression cannot be solved for the joint variable. Instead of rejecting, add a condition to check if checkzero itself is 0 or not
pass
for s in ss:
# can actually simplify Positions and possibly get a new solution!
if s.jointeval is not None:
for eq in s.jointeval:
if eq.is_number:
cond=othervar-eq.evalf()
if self.isExpressionUnique(handledconds+[tempeq[0] for tempeq in eqs],-cond) and self.isExpressionUnique(handledconds+[tempeq[0] for tempeq in eqs],cond):
if self.isHinge(othervar.name):
evalcond=fmod(cond+pi,2*pi)-pi
else:
evalcond=cond
eqs.append([cond,evalcond,[(sothervar,sin(eq).evalf()),(sin(othervar),sin(eq).evalf()),(cothervar,cos(eq).evalf()),(cos(othervar),cos(eq).evalf()),(othervar,eq)]])
elif s.jointevalsin is not None:
for eq in s.jointevalsin:
if eq.is_number:
cond=othervar-asin(eq).evalf()
if self.isExpressionUnique(handledconds+[tempeq[0] for tempeq in eqs],-cond) and self.isExpressionUnique(handledconds+[tempeq[0] for tempeq in eqs],cond):
if self.isHinge(othervar.name):
evalcond=fmod(cond+pi,2*pi)-pi
else:
evalcond=cond
eqs.append([cond,evalcond,[(sothervar,eq),(sin(othervar),eq),(cothervar,sqrt(1-eq*eq).evalf()),(cos(othervar),sqrt(1-eq*eq).evalf()),(othervar,asin(eq).evalf())]])
cond=othervar-(pi-asin(eq).evalf())
if self.isExpressionUnique(handledconds+[tempeq[0] for tempeq in eqs],-cond) and self.isExpressionUnique(handledconds+[tempeq[0] for tempeq in eqs],cond):
if self.isHinge(othervar.name):
evalcond=fmod(cond+pi,2*pi)-pi
else:
evalcond=cond
eqs.append([cond,evalcond,[(sothervar,eq),(sin(othervar),eq),(cothervar,-sqrt(1-eq*eq).evalf()),(cos(othervar),-sqrt(1-eq*eq).evalf()),(othervar,(pi-asin(eq)).evalf())]])
elif s.jointevalcos is not None:
for eq in s.jointevalcos:
if eq.is_number:
cond=othervar-acos(eq).evalf()
if self.isExpressionUnique(handledconds+[tempeq[0] for tempeq in eqs],-cond) and self.isExpressionUnique(handledconds+[tempeq[0] for tempeq in eqs],cond):
if self.isHinge(othervar.name):
evalcond=fmod(cond+pi,2*pi)-pi
else:
evalcond=cond
eqs.append([cond,evalcond,[(sothervar,sqrt(1-eq*eq).evalf()),(sin(othervar),sqrt(1-eq*eq).evalf()),(cothervar,eq),(cos(othervar),eq),(othervar,acos(eq).evalf())]])
cond=othervar+acos(eq).evalf()
if self.isExpressionUnique(handledconds+[tempeq[0] for tempeq in eqs],-cond) and self.isExpressionUnique(handledconds+[tempeq[0] for tempeq in eqs],cond):
if self.isHinge(othervar.name):
evalcond=fmod(cond+pi,2*pi)-pi
else:
evalcond=cond
eqs.append([cond,evalcond,[(sothervar,-sqrt(1-eq*eq).evalf()),(sin(othervar),-sqrt(1-eq*eq).evalf()),(cothervar,eq),(cos(othervar),eq),(othervar,-acos(eq).evalf())]])
if not var in nextsolutions:
newvars=curvars[:]
newvars.remove(var)
olddegeneratecases = self.degeneratecases
self.degeneratecases = olddegeneratecases.clone()
nextsolutions[var] = self.solveAllEquations(AllEquations,curvars=newvars,othersolvedvars=othersolvedvars+[var],solsubs=solsubs+self.Variable(var).subs,endbranchtree=endbranchtree,currentcases=currentcases)
self.degeneratecases = olddegeneratecases
if len(checkforzeros) > 0:
hascheckzeros = True
solvercheckzeros = AST.SolverCheckZeros(jointname=var.name,jointcheckeqs=checkforzeros,nonzerobranch=[solution]+nextsolutions[var],zerobranch=prevbranch,anycondition=True,thresh=solution.thresh)
# have to transfer the dictionary!
solvercheckzeros.dictequations = solution.dictequations
solution.dictequations = []
prevbranch=[solvercheckzeros]
else:
prevbranch = [solution]+nextsolutions[var]
if len(prevbranch) == 0:
raise self.CannotSolveError('failed to add solution!')
if len(currentcases) >= 4:
log.warn('4 levels deep in checking degenerate cases, skipping...')
lastbranch.append(AST.SolverBreak())
return prevbranch
# fill the last branch with all the zero conditions
if hascheckzeros and len(eqs) == 0:
# if no equations were found, try setting two variables at once
# also try setting px, py, or pz to 0 (barrettwam4 lookat)
for checkzero in checkforzeros:
for checkzero in solution.checkforzeros:
if checkzero.has_any_symbols(*allvars):
log.info('ignoring special check for zero 2 since it has symbols %s: %s',str(allvars), str(checkzero))
continue
# don't bother trying to extract something if too complex (takes a lot of computation time to check and most likely nothing will be extracted); the complexity threshold is arbitrary
if self.codeComplexity(checkzero) > 120:
continue
for preal in [Symbol('px'),Symbol('py'),Symbol('pz')]:
if checkzero.has_any_symbols(preal):
# first check if the position alone can yield a zero
eq = checkzero.subs([(preal,S.Zero)]).evalf()
if eq == S.Zero:
cond = abs(preal)
evalcond = abs(preal)
if self.isExpressionUnique(handledconds+[tempeq[0] for tempeq in eqs],-cond) and self.isExpressionUnique(handledconds+[tempeq[0] for tempeq in eqs],cond):
eqs.append([cond,evalcond,[(preal,S.Zero)]])
log.info('%s=0 in %s',preal,checkzero)
continue
for othervar in othersolvedvars:
if not self.isHinge(othervar.name):
continue
sothervar = Symbol('s%s'%othervar.name)
cothervar = Symbol('c%s'%othervar.name)
if checkzero.has_any_symbols(othervar,sothervar,cothervar):
for value in [S.Zero,pi/2,pi,-pi/2]:
eq = checkzero.subs([(othervar,value),(sothervar,sin(value).evalf()),(cothervar,cos(value).evalf()),(preal,S.Zero)]).evalf()
if eq == S.Zero:
cond = abs(othervar-value)+abs(preal)
evalcond = abs(fmod(othervar-value+pi,2*pi)-pi)+abs(preal)
if self.isExpressionUnique(handledconds+[tempeq[0] for tempeq in eqs],-cond) and self.isExpressionUnique(handledconds+[tempeq[0] for tempeq in eqs],cond):
eqs.append([cond,evalcond,[(sothervar,sin(value).evalf()),(sin(othervar),sin(value).evalf()),(cothervar,cos(value).evalf()),(cos(othervar),cos(value).evalf()),(preal,S.Zero),(othervar,value)]])
log.info('%s=%s,%s=0 in %s', othervar,value,preal,checkzero)
# test the solutions
zerobranches = []
accumequations = []
for cond,evalcond,othervarsubs in eqs:
# have to convert to fractions before substituting!
if not all([self.isValidSolution(v) for s,v in othervarsubs]):
continue
othervarsubs = [(s,self.convertRealToRational(v)) for s,v in othervarsubs]
NewEquations = [eq.subs(othervarsubs) for eq in AllEquations]
try:
# forcing a value, so have to check if all equations in NewEquations that do not contain
# unknown variables are really 0
extrazerochecks=[]
for i in range(len(NewEquations)):
expr = NewEquations[i]
if not self.isValidSolution(expr):
log.warn('not valid: %s',expr)
extrazerochecks=None
break
if not expr.has_any_symbols(*allvars) and (self.isExpressionUnique(extrazerochecks,expr) or self.isExpressionUnique(extrazerochecks,-expr)):
extrazerochecks.append(expr.subs(solsubs).evalf())
if extrazerochecks is not None:
newcases = set(currentcases)
newcases.add(cond)
newtree = self.solveAllEquations(NewEquations,curvars,othersolvedvars,solsubs,endbranchtree,currentcases=newcases)
accumequations.append(NewEquations) # store the equations for debugging purposes
zerobranches.append(([evalcond]+extrazerochecks,newtree))
self.degeneratecases.addcases(newcases)
except self.CannotSolveError:
continue
if len(zerobranches) > 0:
branchconds = AST.SolverBranchConds(zerobranches+[(None,[AST.SolverBreak()])])
branchconds.accumequations = accumequations
lastbranch.append(branchconds)
else:
lastbranch.append(AST.SolverBreak())
return prevbranch
def solvePairVariablesHalfAngle(self,raweqns,var0,var1,othersolvedvars,subs=None):
"""solves equations of two variables in sin and cos
"""
varsym0 = self.Variable(var0)
varsym1 = self.Variable(var1)
varsyms = [varsym0,varsym1]
unknownvars=[varsym0.cvar,varsym0.svar,varsym1.cvar,varsym1.svar]
varsubs=varsym0.subs+varsym1.subs
varsubsinv = varsym0.subsinv+varsym1.subsinv
halftansubs = []
for varsym in varsyms:
halftansubs += [(varsym.cvar,(1-varsym.htvar**2)/(1+varsym.htvar**2)),(varsym.svar,2*varsym.htvar/(1+varsym.htvar**2))]
dummyvars = []
for othervar in othersolvedvars:
v = self.Variable(othervar)
dummyvars += [v.cvar,v.svar,v.var,v.htvar]
polyeqs = []
for eq in raweqns:
peq = Poly(eq.subs(varsubs).subs(varsym0.svar**2,1-varsym0.cvar**2).expand().subs(varsym1.svar**2,1-varsym1.cvar**2),*unknownvars)
if peq.has_any_symbols(varsym0.var) or peq.has_any_symbols(varsym1.var):
raise self.CannotSolveError('expecting only sin and cos! %s'%peq)
maxmonoms = [0,0,0,0]
maxdenom = [0,0]
for monoms in peq.iter_monoms():
for i in range(4):
maxmonoms[i] = max(maxmonoms[i],monoms[i])
maxdenom[0] = max(maxdenom[0],monoms[0]+monoms[1])
maxdenom[1] = max(maxdenom[1],monoms[2]+monoms[3])
eqnew = S.Zero
for c,monoms in peq.iter_terms():
term = c
for i in range(4):
num,denom = fraction(halftansubs[i][1])
term *= num**monoms[i]
# the denoms for 0,1 and 2,3 are the same
for i in [0,2]:
denom = fraction(halftansubs[i][1])[1]
term *= denom**(maxdenom[i/2]-monoms[i]-monoms[i+1])
eqnew += simplify(term)
polyeq = Poly(eqnew,varsym0.htvar,varsym1.htvar)
if polyeq.coeff() == S.Zero:
# might be able to divide out variables?
minmonoms = None
for monom in polyeq.monoms:
if minmonoms is None:
minmonoms = list(monom)
else:
for i in range(len(minmonoms)):
minmonoms[i] = min(minmonoms[i],monom[i])
newpolyeq = Poly(S.Zero,*polyeq.symbols)
for c,m in polyeq.iter_terms():
newm = list(m)
for i in range(len(minmonoms)):
newm[i] -= minmonoms[i]
newpolyeq = newpolyeq.add_term(c,tuple(newm))
log.warn('converting polyeq "%s" to "%s"'%(polyeq,newpolyeq))
# check if any equations are only in one variable
polyeq = newpolyeq
polyeqs.append(polyeq)
try:
return self.solveSingleVariable([e.as_basic() for e in polyeqs if not e.has_any_symbols(varsym1.htvar)],varsym0.var,othersolvedvars,unknownvars=[])
except self.CannotSolveError:
pass
try:
return self.solveSingleVariable([e.as_basic() for e in polyeqs if not e.has_any_symbols(varsym0.htvar)],varsym1.var,othersolvedvars,unknownvars=[])
except self.CannotSolveError:
pass
complexity = [(self.codeComplexity(peq.as_basic()),peq) for peq in polyeqs]
complexity.sort()
polyeqs = [peq[1] for peq in complexity]
solutions = [None,None]
linearsolution = None
for ileftvar in range(2):
if linearsolution is not None:
break
leftvar = varsyms[ileftvar].htvar
newpolyeqs = [Poly(eq,varsyms[1-ileftvar].htvar) for eq in polyeqs]
mindegree = __builtin__.min([peq.degree for peq in newpolyeqs])
maxdegree = __builtin__.max([peq.degree for peq in newpolyeqs])
for peq in newpolyeqs:
if len(peq.monoms) == 1:
possiblefinaleq = self.checkFinalEquation(Poly(peq.coeffs[0],leftvar),subs)
if possiblefinaleq is not None:
solutions[ileftvar] = [possiblefinaleq]
break
for degree in range(mindegree,maxdegree+1):
if solutions[ileftvar] is not None or linearsolution is not None:
break
newpolyeqs2 = [peq for peq in newpolyeqs if peq.degree <= degree]
if degree+1 <= len(newpolyeqs2):
# in order to avoid wrong solutions, have to get resultants for all equations
possibilities = []
unusedindices = range(len(newpolyeqs2))
for eqsindices in combinations(range(len(newpolyeqs2)),degree+1):
Mall = zeros((degree+1,degree+1))
for i,eqindex in enumerate(eqsindices):
eq = newpolyeqs2[eqindex]
for j in range(degree+1):
Mall[i,j] = eq.coeff(j)
# det_bareis freezes when there are huge fractions
#det=self.det_bareis(Mall,*(self.pvars+dummyvars+[leftvar]))
possiblefinaleq = self.checkFinalEquation(Poly(Mall.berkowitz_det(),leftvar),subs)
if possiblefinaleq is not None:
# sometimes +- I are solutions, so remove them
q,r = div(possiblefinaleq,leftvar+I)
if r == S.Zero:
possiblefinaleq = Poly(q,leftvar)
q,r = div(possiblefinaleq,leftvar-I)
if r == S.Zero:
possiblefinaleq = Poly(q,leftvar)
possibilities.append(possiblefinaleq)
for eqindex in eqsindices:
if eqindex in unusedindices:
unusedindices.remove(eqindex)
if len(unusedindices) == 0:
break
if len(possibilities) > 0:
if len(possibilities) > 1:
try:
linearsolutions = self.solveVariablesLinearly(possibilities,othersolvedvars)
# if can solve for a unique solution linearly, then prioritize this over anything
prevsolution = AST.SolverBreak()
for divisor,linearsolution in linearsolutions:
assert(len(linearsolution)==1)
divisorsymbol = self.gsymbolgen.next()
solversolution = AST.SolverSolution(varsyms[ileftvar].name,jointeval=[2*atan(linearsolution[0]/divisorsymbol)],isHinge=self.isHinge(varsyms[ileftvar].name))
prevsolution = AST.SolverCheckZeros(varsyms[ileftvar].name,[divisorsymbol],zerobranch=[prevsolution],nonzerobranch=[solversolution],thresh=1e-6)
prevsolution.dictequations = [(divisorsymbol,divisor)]
linearsolution = prevsolution
break
except self.CannotSolveError:
pass
# sort with respect to degree
equationdegrees = [(peq.degree*100000+self.codeComplexity(peq.as_basic()),peq) for peq in possibilities]
equationdegrees.sort()
solutions[ileftvar] = [peq[1] for peq in equationdegrees]
break
if linearsolution is not None:
return [linearsolution]
# take the solution with the smallest degree
pfinals = None
ileftvar = None
if solutions[0] is not None:
if solutions[1] is not None:
if solutions[1][0].degree < solutions[0][0].degree:
pfinals = solutions[1]
ileftvar = 1
elif solutions[1][0].degree == solutions[0][0].degree and self.codeComplexity(solutions[1][0].as_basic()) < self.codeComplexity(solutions[0][0].as_basic()):
pfinals = solutions[1]
ileftvar = 1
else:
pfinals = solutions[0]
ileftvar = 0
else:
pfinals = solutions[0]
ileftvar = 0
elif solutions[1] is not None:
pfinals = solutions[1]
ileftvar = 1
dictequations = []
if pfinals is None:
#simplifyfn = self._createSimplifyFn(self.freejointvars,self.freevarsubs,self.freevarsubsinv)
for newreducedeqs in combinations(polyeqs,2):
try:
Mall = None
for ileftvar in range(2):
# TODO, sometimes this works and sometimes this doesn't
try:
Mall, allmonoms = self.solveDialytically(newreducedeqs,ileftvar,returnmatrix=True)
if Mall is not None:
leftvar=polyeqs[0].symbols[ileftvar]
break
except self.CannotSolveError, e:
log.debug(e)
if Mall is None:
continue
shape=Mall[0].shape
Malltemp = [None]*len(Mall)
M = zeros(shape)
for idegree in range(len(Mall)):
Malltemp[idegree] = zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
if Mall[idegree][i,j] != S.Zero:
sym = self.gsymbolgen.next()
Malltemp[idegree][i,j] = sym
dictequations.append((sym,Mall[idegree][i,j]))
M += Malltemp[idegree]*leftvar**idegree
tempsymbols = [self.gsymbolgen.next() for i in range(16)]
tempsubs = []
for i in range(16):
if M[i] != S.Zero:
tempsubs.append((tempsymbols[i],Poly(M[i],leftvar)))
else:
tempsymbols[i] = S.Zero
Mtemp = Matrix(4,4,tempsymbols)
dettemp=Mtemp.det()
log.info('multiplying all determinant coefficients for solving %s',leftvar)
eqadds = []
for arg in dettemp.args:
eqmuls = [Poly(arg2.subs(tempsubs),leftvar) for arg2 in arg.args]
if eqmuls[0].degree == 0:
eq = eqmuls.pop(0)
eqmuls[0] = eqmuls[0]*eq
while len(eqmuls) > 1:
ioffset = 0
eqmuls2 = []
while ioffset < len(eqmuls)-1:
eqmuls2.append(eqmuls[ioffset]*eqmuls[ioffset+1])
ioffset += 2
eqmuls = eqmuls2
eqadds.append(eqmuls[0])
det = Poly(S.Zero,leftvar)
for eq in eqadds:
det += eq
# if len(Mall) == 2:
# log.info('attempting to simplify determinant...')
# newdet = Poly(S.Zero,leftvar)
# for c,m in det.iter_terms():
# newc = self.trigsimp(c.subs(dictequations),othersolvedvars)
# newdet += newc*leftvar**m[0]
# dictequations = []
# det = newdet
pfinals = [det]
break
except self.CannotSolveError,e:
log.debug(e)
if pfinals is None:
raise self.CannotSolveError('solvePairVariablesHalfAngle: solve dialytically with %d equations'%(len(polyeqs)))
jointsol = 2*atan(varsyms[ileftvar].htvar)
solution = AST.SolverPolynomialRoots(jointname=varsyms[ileftvar].name,poly=pfinals[0],jointeval=[jointsol],isHinge=self.isHinge(varsyms[ileftvar].name))
solution.checkforzeros = []
solution.postcheckforzeros = []
solution.postcheckfornonzeros = [peq.as_basic() for peq in pfinals[1:]]
solution.postcheckforrange = []
solution.dictequations = dictequations
solution.AddHalfTanValue = True
return [solution]
def _createSimplifyFn(self,vars,varsubs,varsubsinv):
return lambda eq: self.trigsimp(eq.subs(varsubsinv),vars).subs(varsubs)
def solveVariablesLinearly(self,polyeqs,othersolvedvars,maxsolvabledegree=4):
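# Rough idea of what follows: the distinct monomials of the input polynomials
# are treated as independent linear unknowns. Square subsystems are assembled
# from the coefficient rows, and whenever a subsystem has a non-zero
# determinant the unknowns are recovered through the adjugate matrix
# (Cramer's rule). The result is a list of [determinant, solution] pairs so
# the caller can guard against the determinant vanishing at runtime.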
log.debug('solveVariablesLinearly for %s: othersolvedvars=%s',polyeqs[0].symbols,othersolvedvars)
nummonoms = [len(peq.monoms)-int(peq.coeff()!=S.Zero) for peq in polyeqs]
mindegree = __builtin__.min(nummonoms)
maxdegree = min(__builtin__.max(nummonoms),len(polyeqs))
complexity = [(self.codeComplexity(peq.as_basic()),peq) for peq in polyeqs]
complexity.sort()
polyeqs = [peq[1] for peq in complexity]
trigsubs = []
trigsubsinv = []
for othervar in othersolvedvars:
v = self.Variable(othervar)
trigsubs += v.subs
trigsubsinv += v.subsinv
symbolscheck = []
for i,solvevar in enumerate(polyeqs[0].symbols):
monom = [0]*len(polyeqs[0].symbols)
monom[i] = 1
symbolscheck.append(tuple(monom))
solutions = []
for degree in range(mindegree,maxdegree+1):
allindices = [i for i,n in enumerate(nummonoms) if n <= degree]
if len(allindices) >= degree:
allmonoms = set()
for index in allindices:
allmonoms = allmonoms.union(set(polyeqs[index].monoms))
allmonoms = list(allmonoms)
allmonoms.sort()
if __builtin__.sum(allmonoms[0]) == 0:
allmonoms.pop(0)
# allmonoms has to have symbols as a single variable
if not all([check in allmonoms for check in symbolscheck]):
continue
if len(allmonoms) == degree:
if degree > maxsolvabledegree:
log.warn('cannot handle linear solving with more than %d unknown monomials',maxsolvabledegree)
continue
systemequations = []
consts = []
for index in allindices:
systemequations.append([polyeqs[index].coeff(*monom) for monom in allmonoms])
consts.append(-polyeqs[index].coeff())
# generate at least two solutions in case first's determinant is 0
solutions = []
for startrow in range(len(systemequations)):
rows = [startrow]
M = Matrix(1,len(allmonoms),systemequations[rows[0]])
for i in range(startrow+1,len(systemequations)):
numequationsneeded = M.shape[1] - M.shape[0]
if i+numequationsneeded > len(systemequations):
# cannot do anything
break
mergedsystemequations = list(systemequations[i])
for j in range(1,numequationsneeded):
mergedsystemequations += systemequations[i+j]
M2 = M.col_join(Matrix(numequationsneeded,len(allmonoms),mergedsystemequations))
Mdet = M2.det()
if Mdet != S.Zero:
M = M2
for j in range(numequationsneeded):
rows.append(i+j)
break
if M.shape[0] == M.shape[1]:
Mdet = self.trigsimp(Mdet.subs(trigsubsinv),othersolvedvars).subs(trigsubs)
#Minv = M.inv()
B = Matrix(M.shape[0],1,[consts[i] for i in rows])
Madjugate = M.adjugate()
solution = []
for check in symbolscheck:
value = Madjugate[allmonoms.index(check),:]*B
solution.append(self.trigsimp(value[0].subs(trigsubsinv),othersolvedvars).subs(trigsubs))
solutions.append([Mdet,solution])
if len(solutions) >= 2:
break
if len(solutions) > 0:
break
if len(solutions) == 0:
raise self.CannotSolveError('solveVariablesLinearly failed')
return solutions
def solveSingleVariableLinearly(self,raweqns,solvevar,othervars,maxnumeqs=2,douniquecheck=True):
"""tries to linearly solve for one variable treating everything else as constant.
need at least 3 equations
"""
cvar = Symbol('c%s'%solvevar.name)
svar = Symbol('s%s'%solvevar.name)
varsubs = [(cos(solvevar),cvar),(sin(solvevar),svar)]
othervarsubs = [(sin(v)**2,1-cos(v)**2) for v in othervars]
eqpolys = [Poly(eq.subs(varsubs),cvar,svar) for eq in raweqns]
eqpolys = [eq for eq in eqpolys if eq.degree == 1 and not eq.coeff(0,0).has_any_symbols(solvevar)]
#eqpolys.sort(lambda x,y: iksolver.codeComplexity(x) - iksolver.codeComplexity(y))
partialsolutions = []
neweqs = []
for p0,p1 in combinations(eqpolys,2):
M = Matrix(2,3,[p0.coeff(1,0),p0.coeff(0,1),p0.coeff(0,0),p1.coeff(1,0),p1.coeff(0,1),p1.coeff(0,0)])
M = M.subs(othervarsubs).expand()
partialsolution = [-M[1,1]*M[0,2]+M[0,1]*M[1,2],M[1,0]*M[0,2]-M[0,0]*M[1,2],M[0,0]*M[1,1]-M[0,1]*M[1,0]]
partialsolution = [eq.expand().subs(othervarsubs).expand() for eq in partialsolution]
rank = [self.codeComplexity(eq) for eq in partialsolution]
partialsolutions.append([rank,partialsolution])
# cos(A)**2 + sin(A)**2 - 1 = 0, useful equation but the squares introduce wrong solutions
#neweqs.append(partialsolution[0]**2+partialsolution[1]**2-partialsolution[2]**2)
# try to cross
partialsolutions.sort(lambda x, y: int(min(x[0])-min(y[0])))
for (rank0,ps0),(rank1,ps1) in combinations(partialsolutions,2):
if self.equal(ps0[0]*ps1[2]-ps1[0]*ps0[2],S.Zero):
continue
neweqs.append(ps0[0]*ps1[2]-ps1[0]*ps0[2])
neweqs.append(ps0[1]*ps1[2]-ps1[1]*ps0[2])
# probably a linear combination of the first two
#neweqs.append(ps0[0]*ps1[1]-ps1[0]*ps0[1])
# too long
#neweqs.append(ps0[0]*ps1[0]+ps0[1]*ps1[1]-ps0[2]*ps1[2])
if len(neweqs) >= maxnumeqs:
break
neweqs2 = [eq.expand().subs(othervarsubs).expand() for eq in neweqs]
if douniquecheck:
reducedeqs = []
i = 0
while i < len(neweqs2):
reducedeq = self.removecommonexprs(neweqs2[i])
if neweqs2[i] != S.Zero and self.isExpressionUnique(reducedeqs,reducedeq) and self.isExpressionUnique(reducedeqs,-reducedeq):
reducedeqs.append(reducedeq)
i += 1
else:
eq=neweqs2.pop(i)
return neweqs2
def solveHighDegreeEquationsHalfAngle(self,lineareqs,varsym,subs=None):
"""solve a set of equations in one variable with half-angle substitution
"""
dummysubs = [(varsym.cvar,(1-varsym.htvar**2)/(1+varsym.htvar**2)),(varsym.svar,2*varsym.htvar/(1+varsym.htvar**2))]
polyeqs = []
for eq in lineareqs:
peq = Poly(eq.subs(varsym.subs).subs(varsym.svar**2,1-varsym.cvar**2),varsym.cvar,varsym.svar)
if peq.has_any_symbols(varsym.var):
raise self.CannotSolveError('expecting only sin and cos! %s'%peq)
if peq.degree == 0:
continue
# check if all terms are multiples of cos/sin
maxmonoms = [0,0]
maxdenom = 0
for monoms in peq.iter_monoms():
for i in range(2):
maxmonoms[i] = max(maxmonoms[i],monoms[i])
maxdenom = max(maxdenom,monoms[0]+monoms[1])
eqnew = S.Zero
for c,monoms in peq.iter_terms():
if c.evalf() != S.Zero: # big fractions might make this difficult to reduce to 0
term = c
for i in range(2):
num,denom = fraction(dummysubs[i][1])
term *= num**monoms[i]
# the denominators of the cos and sin substitutions are the same
denom = fraction(dummysubs[0][1])[1]
term *= denom**(maxdenom-monoms[0]-monoms[1])
eqnew += simplify(term)
polyeqs.append(Poly(eqnew,varsym.htvar))
for peq in polyeqs:
# do some type of resultants, for now just choose first polynomial
finaleq = simplify(peq.as_basic()).expand()
pfinal = Poly(self.removecommonexprs(finaleq,onlygcd=False,onlynumbers=True),varsym.htvar)
pfinal = self.checkFinalEquation(pfinal,subs)
if pfinal is not None:
jointsol = 2*atan(varsym.htvar)
solution = AST.SolverPolynomialRoots(jointname=varsym.name,poly=pfinal,jointeval=[jointsol],isHinge=self.isHinge(varsym.name))
solution.AddHalfTanValue = True
solution.checkforzeros = []
solution.postcheckforzeros = []
solution.postcheckfornonzeros = []
solution.postcheckforrange = []
return solution
raise self.CannotSolveError('half-angle substitution for joint %s failed, %d equations examined'%(varsym.var,len(polyeqs)))
def checkFinalEquation(self,pfinal,subs=None):
"""check an equation in one variable for validity
"""
assert(len(pfinal.symbols)==1)
if subs is None:
subs = []
htvar = pfinal.symbols[0]
# remove all trivial 0s
while pfinal.degree > 0 and pfinal.coeff(0) == S.Zero:
pfinalnew = Poly(S.Zero,htvar)
for c,m in pfinal.iter_terms():
if m[0] > 0:
pfinalnew += c*htvar**(m[0]-1)
pfinal = pfinalnew
# check to see that LC is non-zero for at least one solution
if pfinal.LC.evalf() == S.Zero or all([pfinal.LC.subs(subs).subs(testconsistentvalue).evalf()==S.Zero for testconsistentvalue in self.testconsistentvalues]):
return None
# sanity check that polynomial can produce a solution and is not actually very small values
found = False
LCnormalized, common = self.removecommonexprs(pfinal.LC,returncommon=True,onlygcd=False,onlynumbers=True)
for testconsistentvalue in self.testconsistentvalues:
coeffs = []
globalsymbols = [(s,v.subs(testconsistentvalue).evalf()) for s,v in self.globalsymbols]
for degree in range(pfinal.degree,-1,-1):
coeffs.append(pfinal.coeff(degree).subs(subs).subs(globalsymbols+testconsistentvalue).evalf()/common.evalf())
# since coeffs[0] is normalized with the LC constant, can compare for precision
if len(coeffs) == 1 and abs(coeffs[0]) < 2*(10.0**-self.precision):
coeffs = None
break
if coeffs is None:
continue
if not all([c.is_number for c in coeffs]):
# cannot evaluate the coefficients numerically, so give the equation the benefit of the doubt
log.warn('cannot evaluate: %s',coeffs)
found = True
break
realsolution = pfinal.symbols[0].subs(subs).subs(self.globalsymbols).subs(testconsistentvalue).evalf()
roots = mpmath.polyroots(coeffs)
for root in roots:
if abs(float(root.imag)) < 10.0**-self.precision and abs(float(root.real)-realsolution) < 10.0**-(self.precision-2):
found = True
break
if found:
break
return pfinal if found else None
def solveSingleVariable(self,raweqns,var,othersolvedvars,maxsolutions=4,maxdegree=2,subs=None, unknownvars=None):
varsym = self.Variable(var)
vars = [varsym.cvar,varsym.svar,varsym.htvar,var]
othersubs = []
for othersolvedvar in othersolvedvars:
othersubs += self.Variable(othersolvedvar).subs
# eqns = []
# for eq in raweqns:
# if eq.has_any_symbols(*vars):
# # for equations that are very complex, make sure at least one set of values yields a non zero equation
# testeq = eq.subs(varsym.subs+othersubs)
# if any([testeq.subs(testconsistentvalue).evalf()!=S.Zero for testconsistentvalue in self.testconsistentvalues]):
# eqns.append(eq)
eqns = [eq.expand() for eq in raweqns if eq.has_any_symbols(*vars)]
if len(eqns) == 0:
raise self.CannotSolveError('not enough equations')
# prioritize finding a solution when var is alone
for eq in eqns:
symbolgen = cse_main.numbered_symbols('const')
eqnew, symbols = self.groupTerms(eq.subs(varsym.subs), vars, symbolgen)
try:
ps = Poly(eqnew,varsym.svar)
pc = Poly(eqnew,varsym.cvar)
if ps.degree > 0 or pc.degree > 0 or ps.coeff(0) == S.Zero or pc.coeff(0) == S.Zero:
continue
except polys.polynomial.PolynomialError:
continue
numvar = self.countVariables(eqnew,var)
if numvar >= 1 and numvar <= 2:
tempsolutions = solve(eqnew,var)
jointsolutions = [self.trigsimp(s.subs(symbols),othersolvedvars) for s in tempsolutions]
if all([self.isValidSolution(s) and s != S.Zero for s in jointsolutions]) and len(jointsolutions)>0:
return [AST.SolverSolution(var.name,jointeval=jointsolutions,isHinge=self.isHinge(var.name))]
numvar = self.countVariables(eqnew,varsym.htvar)
if Poly(eqnew,varsym.htvar).coeff() != S.Zero and numvar >= 1 and numvar <= 2:
tempsolutions = solve(eqnew,varsym.htvar)
jointsolutions = [2*atan(self.trigsimp(s.subs(symbols),othersolvedvars)) for s in tempsolutions]
if all([self.isValidSolution(s) and s != S.Zero for s in jointsolutions]) and len(jointsolutions)>0:
return [AST.SolverSolution(var.name,jointeval=jointsolutions,isHinge=self.isHinge(var.name))]
solutions = []
if len(eqns) > 1:
neweqns = []
listsymbols = []
symbolgen = cse_main.numbered_symbols('const')
for e in eqns:
enew, symbols = self.groupTerms(e.subs(varsym.subs),[varsym.cvar,varsym.svar,var], symbolgen)
# remove coupled equations
if any([(m[0]>0)+(m[1]>0)+(m[2]>0)>1 for m in Poly(enew,varsym.cvar,varsym.svar,var).monoms]):
continue
# ignore any equations with degree 3 or more
if Poly(enew,varsym.svar).degree > maxdegree or Poly(enew,varsym.cvar).degree > maxdegree:
log.debug('ignoring equation: %s',enew)
continue
if Poly(enew,varsym.svar).coeff() == S.Zero or Poly(enew,varsym.cvar).coeff() == S.Zero or Poly(enew,varsym.var).coeff() == S.Zero:
log.debug('equation %s is allowing trivial solution for variable %s, ignoring ',e,varsym.name)
continue
rank = self.codeComplexity(enew)
for s in symbols:
rank += self.codeComplexity(s[1])
neweqns.append((rank,enew))
listsymbols += symbols
# since we are solving for two variables, only pairs of equations are used;
# try pairs from the least complicated to the most complicated until a solution is found
eqcombinations = []
for eqs in combinations(neweqns,2):
eqcombinations.append((eqs[0][0]+eqs[1][0],[Eq(e[1],0) for e in eqs]))
eqcombinations.sort(lambda x, y: x[0]-y[0])
hasgoodsolution = False
for icomb,comb in enumerate(eqcombinations):
# skip if too complex
if len(solutions) > 0 and comb[0] > 200:
break
# try to solve for both sin and cos terms
if not self.has_any_symbols(comb[1],varsym.svar) or not self.has_any_symbols(comb[1], varsym.cvar):
continue
try:
s = solve(comb[1],[varsym.svar,varsym.cvar])
except PolynomialError, e:
log.debug('solveSingleVariable: failed: %s',e)
continue
if s is not None:
sollist = None
if hasattr(s,'has_key'):
if s.has_key(varsym.svar) and s.has_key(varsym.cvar):
sollist = [(s[varsym.svar],s[varsym.cvar])]
else:
sollist = []
else:
sollist = s
solversolution = AST.SolverSolution(var.name,jointeval=[],isHinge=self.isHinge(var.name))
goodsolution = 0
for svarsol,cvarsol in sollist:
# solutions cannot be trivial
if (svarsol-cvarsol).subs(listsymbols).expand() == S.Zero:
break
if svarsol.subs(listsymbols).expand() == S.Zero and abs(cvarsol.subs(listsymbols).expand()) - S.One != S.Zero:
break
if cvarsol.subs(listsymbols).expand() == S.Zero and abs(svarsol.subs(listsymbols).expand()) - S.One != S.Zero:
break
# check the numerator and denominator if solutions are the same or for possible divide by zeros
svarfrac=fraction(svarsol)
svarfrac = [svarfrac[0].subs(listsymbols), svarfrac[1].subs(listsymbols)]
cvarfrac=fraction(cvarsol)
cvarfrac = [cvarfrac[0].subs(listsymbols), cvarfrac[1].subs(listsymbols)]
if self.equal(svarfrac[0],cvarfrac[0]) and self.equal(svarfrac[1],cvarfrac[1]):
break
if not self.isValidSolution(svarfrac[0]) or not self.isValidSolution(svarfrac[1]) or not self.isValidSolution(cvarfrac[0]) or not self.isValidSolution(cvarfrac[1]):
continue
# check if there exists at least one test solution with non-zero denominators
if subs is None:
testeqs = [svarfrac[1].subs(othersubs),cvarfrac[1].subs(othersubs)]
else:
testeqs = [svarfrac[1].subs(subs).subs(othersubs),cvarfrac[1].subs(subs).subs(othersubs)]
testsuccess = False
for testconsistentvalue in self.testconsistentvalues:
if all([testeq.subs(testconsistentvalue).evalf()!=S.Zero for testeq in testeqs]):
testsuccess = True
break
if not testsuccess:
continue
scomplexity = self.codeComplexity(svarfrac[0])+self.codeComplexity(svarfrac[1])
ccomplexity = self.codeComplexity(cvarfrac[0])+self.codeComplexity(cvarfrac[1])
if scomplexity > 1200 or ccomplexity > 1200:
log.debug('equation too complex for single variable solution (%d,%d).... (probably wrong?)',scomplexity,ccomplexity)
break
if scomplexity < 500:
svarfrac[1] = simplify(svarfrac[1])
if self.chop(svarfrac[1])== 0:
break
if ccomplexity < 500:
cvarfrac[1] = simplify(cvarfrac[1])
if self.chop(cvarfrac[1])== 0:
break
# sometimes the returned simplest solution makes really gross approximations
svarfracsimp_denom = self.trigsimp(svarfrac[1],othersolvedvars)
cvarfracsimp_denom = self.trigsimp(cvarfrac[1],othersolvedvars)
# self.simplifyTransform could help in reducing denoms further...
denomsequal = False
if self.equal(svarfracsimp_denom,cvarfracsimp_denom):
denomsequal = True
elif self.equal(svarfracsimp_denom,-cvarfracsimp_denom):
cvarfrac[0] = -cvarfrac[0]
cvarfracsimp_denom = -cvarfracsimp_denom
if self.equal(svarfracsimp_denom,cvarfracsimp_denom) and not svarfracsimp_denom.is_number:
log.debug('%s solution: denominator is equal %s, doing a global substitution',var.name,svarfracsimp_denom)
denom = self.gsymbolgen.next()
solversolution.dictequations.append((denom,sign(svarfracsimp_denom)))
svarsolsimp = self.trigsimp(svarfrac[0],othersolvedvars)*denom
cvarsolsimp = self.trigsimp(cvarfrac[0],othersolvedvars)*denom
solversolution.FeasibleIsZeros = False
solversolution.presetcheckforzeros.append(svarfracsimp_denom)
expandedsol = atan2(svarsolsimp,cvarsolsimp)
else:
svarfracsimp_num = self.trigsimp(svarfrac[0],othersolvedvars)
cvarfracsimp_num = self.trigsimp(cvarfrac[0],othersolvedvars)
svarsolsimp = svarfracsimp_num/svarfracsimp_denom
cvarsolsimp = cvarfracsimp_num/cvarfracsimp_denom
if svarsolsimp.is_number and cvarsolsimp.is_number:
if abs(svarsolsimp**2+cvarsolsimp**2-S.One).evalf() > 1e-10:
log.debug('%s solution: atan2(%s,%s), sin/cos not on circle so ignoring',var.name,svarsolsimp,cvarsolsimp)
continue
expandedsol = atan2check(svarsolsimp,cvarsolsimp)
solversolution.FeasibleIsZeros = False
log.debug('%s solution: atan2 check for joint',var.name)
solversolution.jointeval.append(expandedsol)
if unknownvars is not None:
unsolvedsymbols = []
for unknownvar in unknownvars:
if unknownvar != var:
unsolvedsymbols += self.Variable(unknownvar).vars
if len(unsolvedsymbols) > 0:
solversolution.equationsused = [eq for eq in eqns if not eq.has_any_symbols(*unsolvedsymbols)]
else:
solversolution.equationsused = eqns
if len(solversolution.equationsused) > 0:
log.info('%s solution: equations used for atan2: %s',var.name, str(solversolution.equationsused))
if len(self.checkForDivideByZero(expandedsol)) == 0:
goodsolution += 1
if len(solversolution.jointeval) == len(sollist) and len(sollist) > 0:
solutions.append(solversolution)
if goodsolution > 0:
hasgoodsolution = True
if len(sollist) == goodsolution and goodsolution == 1:
break
if len(solutions) >= maxsolutions:
# probably more than enough already?
break
if len(solutions) > 0 or hasgoodsolution: # found a solution without any divides, necessary for pr2 head_torso lookat3d ik
return solutions
# solve one equation
for ieq,eq in enumerate(eqns):
symbolgen = cse_main.numbered_symbols('const')
eqnew, symbols = self.groupTerms(eq.subs(varsym.subs), [varsym.cvar,varsym.svar,varsym.var], symbolgen)
try:
# ignore any equations with degree 3 or more
ps = Poly(eqnew,varsym.svar)
pc = Poly(eqnew,varsym.cvar)
if ps.degree > maxdegree or pc.degree > maxdegree:
log.debug('cannot solve equation with high degree: %s',str(eqnew))
continue
if ps.coeff(0) == S.Zero and len(ps.monoms) > 0:
log.debug('equation %s has trivial solution, ignoring...', ps)
continue
if pc.coeff(0) == S.Zero and len(pc.monoms) > 0:
log.debug('equation %s has trivial solution, ignoring...', pc)
continue
except polys.polynomial.PolynomialError:
# might not be a polynomial, so ignore
continue
equationsused = None
if unknownvars is not None:
unsolvedsymbols = []
for unknownvar in unknownvars:
if unknownvar != var:
unsolvedsymbols += self.Variable(unknownvar).vars
if len(unsolvedsymbols) > 0:
equationsused = [eq2 for ieq2,eq2 in enumerate(eqns) if ieq2!=ieq and not eq2.has_any_symbols(*unsolvedsymbols)]
else:
equationsused = eqns[:]
equationsused.pop(ieq)
numcvar = self.countVariables(eqnew,varsym.cvar)
numsvar = self.countVariables(eqnew,varsym.svar)
if numcvar == 1 and numsvar == 1:
a = Wild('a',exclude=[varsym.svar,varsym.cvar])
b = Wild('b',exclude=[varsym.svar,varsym.cvar])
c = Wild('c',exclude=[varsym.svar,varsym.cvar])
m = eqnew.match(a*varsym.cvar+b*varsym.svar+c)
if m is not None:
symbols += [(varsym.svar,sin(var)),(varsym.cvar,cos(var))]
asinsol = trigsimp(asin(-m[c]/abs(sqrt(m[a]*m[a]+m[b]*m[b]))).subs(symbols),deep=True)
constsol = -atan2(m[a],m[b]).subs(symbols).evalf()
jointsolutions = [constsol+asinsol,constsol+pi.evalf()-asinsol]
if all([self.isValidSolution(s) and self.isValidSolution(s) for s in jointsolutions]):
solutions.append(AST.SolverSolution(var.name,jointeval=jointsolutions,isHinge=self.isHinge(var.name)))
solutions[-1].equationsused = equationsused
continue
if numcvar > 0:
try:
# substitute cos
if self.countVariables(eqnew,varsym.svar) <= 1 or (self.countVariables(eqnew,varsym.cvar) <= 2 and self.countVariables(eqnew,varsym.svar) == 0): # anything more than 1 implies quartic equation
tempsolutions = solve(eqnew.subs(varsym.svar,sqrt(1-varsym.cvar**2)),varsym.cvar)
jointsolutions = [self.trigsimp(s.subs(symbols+varsym.subsinv),othersolvedvars) for s in tempsolutions]
if all([self.isValidSolution(s) and self.isValidSolution(s) for s in jointsolutions]):
solutions.append(AST.SolverSolution(var.name,jointevalcos=jointsolutions,isHinge=self.isHinge(var.name)))
solutions[-1].equationsused = equationsused
continue
except self.CannotSolveError,e:
log.debug(e)
except NotImplementedError:
pass
if numsvar > 0:
# substitute sin
try:
if self.countVariables(eqnew,varsym.svar) <= 1 or (self.countVariables(eqnew,varsym.svar) <= 2 and self.countVariables(eqnew,varsym.cvar) == 0): # anything more than 1 implies quartic equation
tempsolutions = solve(eqnew.subs(varsym.cvar,sqrt(1-varsym.svar**2)),varsym.svar)
jointsolutions = [self.trigsimp(s.subs(symbols+varsym.subsinv),othersolvedvars) for s in tempsolutions]
if all([self.isValidSolution(s) and self.isValidSolution(s) for s in jointsolutions]):
solutions.append(AST.SolverSolution(var.name,jointevalsin=jointsolutions,isHinge=self.isHinge(var.name)))
solutions[-1].equationsused = equationsused
continue
except self.CannotSolveError,e:
log.debug(e)
except NotImplementedError:
pass
if numcvar == 0 and numsvar == 0:
tempsolutions = solve(eqnew,var)
jointsolutions = [self.trigsimp(s.subs(symbols),othersolvedvars) for s in tempsolutions]
if all([self.isValidSolution(s) and s != S.Zero for s in jointsolutions]) and len(jointsolutions) > 0:
solutions.append(AST.SolverSolution(var.name,jointeval=jointsolutions,isHinge=self.isHinge(var.name)))
solutions[-1].equationsused = equationsused
continue
try:
solution = self.solveHighDegreeEquationsHalfAngle([eqnew],varsym,symbols)
solutions.append(solution.subs(symbols))
solutions[-1].equationsused = equationsused
except self.CannotSolveError,e:
log.debug(e)
if len(solutions) > 0:
return solutions
return [self.solveHighDegreeEquationsHalfAngle(eqns,varsym)]
def solvePairVariables(self,raweqns,var0,var1,othersolvedvars,maxcomplexity=50,unknownvars=None):
# make sure both variables are hinges
if not self.isHinge(var0.name) or not self.isHinge(var1.name):
raise self.CannotSolveError('pairwise variables only supports hinge joints')
varsym0 = self.Variable(var0)
varsym1 = self.Variable(var1)
cvar0,svar0 = varsym0.cvar, varsym0.svar
cvar1,svar1 = varsym1.cvar, varsym1.svar
varsubs=varsym0.subs+varsym1.subs
varsubsinv = varsym0.subsinv+varsym1.subsinv
unknownvars=[cvar0,svar0,cvar1,svar1]
reducesubs = [(svar0**2,1-cvar0**2),(svar1**2,1-cvar1**2)]
eqns = [eq.subs(varsubs).subs(reducesubs).expand() for eq in raweqns if eq.has_any_symbols(var0,var1)]
if len(eqns) <= 1:
raise self.CannotSolveError('not enough equations')
# group equations with single variables
symbolgen = cse_main.numbered_symbols('const')
orgeqns = []
allsymbols = []
for eq in eqns:
eqnew, symbols = self.groupTerms(eq, unknownvars, symbolgen)
allsymbols += symbols
orgeqns.append([self.codeComplexity(eq),Poly(eqnew,*unknownvars)])
orgeqns.sort(lambda x, y: x[0]-y[0])
neweqns = orgeqns[:]
pairwisesubs = [(svar0*cvar1,Symbol('s0c1')),(svar0*svar1,Symbol('s0s1')),(cvar0*cvar1,Symbol('c0c1')),(cvar0*svar1,Symbol('c0s1')),(cvar0*svar0,Symbol('s0c0')),(cvar1*svar1,Symbol('c1s1'))]
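# The bilinear products of the two joints' sines and cosines are renamed to
# fresh symbols here (s0c1, s0s1, c0c1, c0s1, s0c0, c1s1) so that, in those
# symbols, the equations become linear. Further below a 4x4 subsystem over
# the first four products is solved with a determinant/adjugate step to
# express each product in terms of the remaining constants.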
pairwiseinvsubs = [(f[1],f[0]) for f in pairwisesubs]
pairwisevars = [f[1] for f in pairwisesubs]
reduceeqns = [Poly(eq.as_basic().subs(pairwisesubs),*pairwisevars) for rank,eq in orgeqns if rank < 4*maxcomplexity]
for i,eq in enumerate(reduceeqns):
if eq.TC != S.Zero and not eq.TC.is_Symbol:
n=symbolgen.next()
allsymbols.append((n,eq.TC.subs(allsymbols)))
reduceeqns[i] += n-eq.TC
# try to at least subtract as much paired variables out
eqcombs = [c for c in combinations(reduceeqns,2)]
while len(eqcombs) > 0 and len(neweqns) < 20:
eq0,eq1 = eqcombs.pop()
for i in range(6):
monom = [0,0,0,0,0,0]
monom[i] = 1
if eq0.coeff(*monom) != 0 and eq1.coeff(*monom) != 0:
tempeq = (eq0.as_basic()*eq1.coeff(*monom)-eq0.coeff(*monom)*eq1.as_basic()).subs(allsymbols+pairwiseinvsubs).expand()
if self.codeComplexity(tempeq) > 200:
continue
eq = simplify(tempeq)
if eq == S.Zero:
continue
peq = Poly(eq,*pairwisevars)
if peq.degree > 0 and self.codeComplexity(eq) > maxcomplexity:
# don't need such complex equations
continue
if not self.isExpressionUnique(eqns,eq) or not self.isExpressionUnique(eqns,-eq):
continue
if eq.has_any_symbols(*unknownvars): # be a little strict about new candidates
eqns.append(eq)
eqnew, symbols = self.groupTerms(eq, unknownvars, symbolgen)
allsymbols += symbols
neweqns.append([self.codeComplexity(eq),Poly(eqnew,*unknownvars)])
orgeqns = neweqns[:]
# try to solve for all pairwise variables
systemofequations = []
for i in range(len(reduceeqns)):
if reduceeqns[i].has_any_symbols(pairwisevars[4],pairwisevars[5]):
continue
if not all([__builtin__.sum(m) <= 1 for m in reduceeqns[i].iter_monoms()]):
continue
arr = [S.Zero]*5
for c,m in reduceeqns[i].iter_terms():
if __builtin__.sum(m) == 1:
arr[list(m).index(1)] = c
else:
arr[4] = c
systemofequations.append(arr)
singleeqs = None
for eqs in combinations(systemofequations,4):
M = zeros((4,4))
B = zeros((4,1))
for i,arr in enumerate(eqs):
for j in range(4):
M[i,j] = arr[j]
B[i] = -arr[4]
det = self.det_bareis(M,*(self.pvars+unknownvars)).subs(allsymbols)
if det.evalf() != S.Zero:
X = M.adjugate()*B
singleeqs = []
for i in range(4):
eq = (pairwisesubs[i][0]*det - X[i]).subs(allsymbols)
eqnew, symbols = self.groupTerms(eq, unknownvars, symbolgen)
allsymbols += symbols
singleeqs.append([self.codeComplexity(eq),Poly(eqnew,*unknownvars)])
break
if singleeqs is not None:
neweqns += singleeqs
neweqns.sort(lambda x, y: x[0]-y[0])
# check if any equations are at least degree 1 (if not, try to compute some)
for ivar in range(2):
polyunknown = []
for rank,eq in orgeqns:
p = Poly(eq,unknownvars[2*ivar],unknownvars[2*ivar+1])
if p.degree == 1 and __builtin__.sum(p.lead_monom) == 1:
polyunknown.append((rank,p))
if len(polyunknown) > 0:
break
if len(polyunknown) == 0:
addedeqs = eqns[:]
polyeqs = []
for ivar in range(2):
polyunknown = []
for rank,eq in orgeqns:
p = Poly(eq,unknownvars[2*ivar],unknownvars[2*ivar+1])
polyunknown.append(p.subs(unknownvars[2*ivar+1]**2,1-unknownvars[2*ivar]**2))
if len(polyunknown) >= 2:
monomtoremove = [[polyunknown,(2,0)],[polyunknown,(1,1)]]
for curiter in range(2):
# remove the square
polyunknown,monom = monomtoremove[curiter]
pbase = [p for p in polyunknown if p.coeff(*monom) != S.Zero]
if len(pbase) == 0:
continue
pbase = pbase[0]
for i in range(len(polyunknown)):
eq = (polyunknown[i]*pbase.coeff(*monom)-pbase*polyunknown[i].coeff(*monom)).as_basic().subs(allsymbols).expand()
if eq != S.Zero and self.isExpressionUnique(addedeqs,eq) and self.isExpressionUnique(addedeqs,-eq):
eqnew, symbols = self.groupTerms(eq, unknownvars, symbolgen)
allsymbols += symbols
p = Poly(eqnew,*pbase.symbols)
if p.coeff(1,1) != S.Zero and curiter == 0:
monomtoremove[1][0].insert(0,p)
polyeqs.append([self.codeComplexity(eqnew),Poly(eqnew,*unknownvars)])
addedeqs.append(eq)
neweqns += polyeqs
neweqns.sort(lambda x,y: x[0]-y[0])
rawsolutions = []
# try single variable solution, only return if a single solution has been found
# returning multiple solutions when only one exists can lead to wrong results.
try:
rawsolutions += self.solveSingleVariable([e.as_basic().subs(varsubsinv).expand() for score,e in neweqns if not e.has_any_symbols(cvar1,svar1,var1)],var0,othersolvedvars,subs=allsymbols,unknownvars=unknownvars)
except self.CannotSolveError:
pass
try:
rawsolutions += self.solveSingleVariable([e.as_basic().subs(varsubsinv).expand() for score,e in neweqns if not e.has_any_symbols(cvar0,svar0,var0)],var1,othersolvedvars,subs=allsymbols,unknownvars=unknownvars)
except self.CannotSolveError:
pass
if len(rawsolutions) > 0:
solutions = []
for s in rawsolutions:
try:
solutions.append(s.subs(allsymbols))
except self.CannotSolveError:
pass
if len(solutions) > 0:
return solutions
groups=[]
for i,unknownvar in enumerate(unknownvars):
listeqs = []
listeqscmp = []
for rank,eq in neweqns:
# if variable ever appears, it should be alone
if all([m[i] == 0 or (__builtin__.sum(m) == m[i] and m[i]>0) for m in eq.iter_monoms()]) and any([m[i] > 0 for m in eq.iter_monoms()]):
# make sure there's only one monom that includes other variables
othervars = [__builtin__.sum(m) - m[i] > 0 for m in eq.iter_monoms()]
if __builtin__.sum(othervars) <= 1:
eqcmp = self.removecommonexprs(eq.subs(allsymbols).as_basic(),onlynumbers=False,onlygcd=True)
if self.isExpressionUnique(listeqscmp,eqcmp) and self.isExpressionUnique(listeqscmp,-eqcmp):
listeqs.append(eq)
listeqscmp.append(eqcmp)
groups.append(listeqs)
# find a group that has two or more equations:
useconic=False
goodgroup = [(i,g) for i,g in enumerate(groups) if len(g) >= 2]
if len(goodgroup) == 0:
# might have a set of equations that can be solved with conics
# look for equations where the variable and its complement are alone
groups=[]
for i in [0,2]:
unknownvar = unknownvars[i]
complementvar = unknownvars[i+1]
listeqs = []
listeqscmp = []
for rank,eq in neweqns:
# if variable ever appears, it should be alone
addeq = False
if all([__builtin__.sum(m) == m[i]+m[i+1] for m in eq.iter_monoms()]):
addeq = True
else:
# make sure there's only one monom that includes other variables
othervars = 0
for m in eq.iter_monoms():
if __builtin__.sum(m) > m[i]+m[i+1]:
if m[i] == 0 and m[i+1]==0:
othervars += 1
else:
othervars = 10000
if othervars <= 1:
addeq = True
if addeq:
eqcmp = self.removecommonexprs(eq.subs(allsymbols).as_basic(),onlynumbers=False,onlygcd=True)
if self.isExpressionUnique(listeqscmp,eqcmp) and self.isExpressionUnique(listeqscmp,-eqcmp):
listeqs.append(eq)
listeqscmp.append(eqcmp)
groups.append(listeqs)
groups.append([]) # necessary to get indices correct
goodgroup = [(i,g) for i,g in enumerate(groups) if len(g) >= 2]
useconic=True
if len(goodgroup) == 0:
try:
return self.solvePairVariablesHalfAngle(raweqns,var0,var1,othersolvedvars)
except self.CannotSolveError,e:
log.warn('%s',e)
# try a separate approach where the two variables are divided on both sides
neweqs = []
for rank,eq in neweqns:
p = Poly(eq,unknownvars[0],unknownvars[1])
iscoupled = False
for m in p.iter_monoms():
if __builtin__.sum(m) > 0:
if p.coeff(*m).has_any_symbols(unknownvars[2],unknownvars[3]):
iscoupled = True
break
if not iscoupled:
neweqs.append([p-p.coeff(0,0),Poly(-p.coeff(0,0),unknownvars[2],unknownvars[3])])
if len(neweqs) > 0:
for ivar in range(2):
lineareqs = [eq for eq in neweqs if __builtin__.sum(eq[ivar].lead_monom)==1]
for paireq0,paireq1 in combinations(lineareqs,2):
log.info('solving separated equations with linear terms')
eq0 = paireq0[ivar]
eq1 = paireq1[ivar]
disc = (eq0.coeff(1,0)*eq1.coeff(0,1) - eq0.coeff(0,1)*eq1.coeff(1,0)).subs(allsymbols).expand()
if disc == S.Zero:
continue
othereq0 = paireq0[1-ivar].as_basic() - eq0.coeff(0,0)
othereq1 = paireq1[1-ivar].as_basic() - eq1.coeff(0,0)
csol = - eq1.coeff(0,1) * othereq0 + eq0.coeff(0,1) * othereq1
ssol = eq1.coeff(1,0) * othereq0 - eq0.coeff(1,0) * othereq1
polysymbols = paireq0[1-ivar].symbols
totaleq = (csol**2+ssol**2-disc**2).subs(allsymbols).expand()
if self.codeComplexity(totaleq) < 4000:
log.info('simplifying final equation to %d',self.codeComplexity(totaleq))
totaleq = simplify(totaleq)
ptotal_cos = Poly(totaleq,*polysymbols).subs(polysymbols[0]**2,1-polysymbols[1]**2).subs(polysymbols[1]**2,1-polysymbols[0]**2)
ptotal_sin = Poly(S.Zero,*polysymbols)
for c,m in ptotal_cos.iter_terms():
if m[1] > 0:
assert m[1] == 1
ptotal_sin = ptotal_sin.sub_term(c,(m[0],0))
ptotal_cos = ptotal_cos.sub_term(c,m)
finaleq = (ptotal_cos.as_basic()**2 - (1-polysymbols[0]**2)*ptotal_sin.as_basic()**2).expand()
# sometimes denominators can accumulate
pfinal = Poly(self.removecommonexprs(finaleq,onlygcd=False,onlynumbers=True),polysymbols[0])
pfinal = self.checkFinalEquation(pfinal)
if pfinal is not None:
jointsol = atan2(ptotal_cos.as_basic()/ptotal_sin.as_basic(), polysymbols[0])
var = var1 if ivar == 0 else var0
solution = AST.SolverPolynomialRoots(jointname=var.name,poly=pfinal,jointeval=[jointsol],isHinge=self.isHinge(var.name))
solution.postcheckforzeros = [ptotal_sin.as_basic()]
solution.postcheckfornonzeros = []
solution.postcheckforrange = []
return [solution]
# if maxnumeqs is any less, it will miss linearly independent equations
lineareqs = self.solveSingleVariableLinearly(raweqns,var0,[var1],maxnumeqs=len(raweqns))
if len(lineareqs) > 0:
try:
return [self.solveHighDegreeEquationsHalfAngle(lineareqs,varsym1)]
except self.CannotSolveError,e:
log.warn('%s',e)
raise self.CannotSolveError('cannot cleanly separate pair equations')
varindex=goodgroup[0][0]
var = var0 if varindex < 2 else var1
varsym = varsym0 if varindex < 2 else varsym1
unknownvar=unknownvars[goodgroup[0][0]]
eqs = goodgroup[0][1][0:2]
simpleterms = []
complexterms = []
domagicsquare = False
for i in range(2):
if useconic:
terms=[(c,m) for c,m in eqs[i].iter_terms() if __builtin__.sum(m) - m[varindex] - m[varindex+1] > 0]
else:
terms=[(c,m) for c,m in eqs[i].iter_terms() if __builtin__.sum(m) - m[varindex] > 0]
if len(terms) > 0:
simpleterms.append(eqs[i].sub_term(*terms[0]).as_basic()/terms[0][0]) # divide by the coeff
complexterms.append(Poly(0,*unknownvars).add_term(S.One,terms[0][1]).as_basic())
domagicsquare = True
else:
simpleterms.append(eqs[i].as_basic())
complexterms.append(S.Zero)
finaleq = None
checkforzeros = []
if domagicsquare:
# here is the magic transformation:
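# Each selected equation has the form coeff*(single coupled monomial) + rest = 0,
# so on a solution the coupled monomial equals -simpleterms[i]. Squaring both
# sides and summing the two equations lets sin**2 + cos**2 = 1 cancel the
# coupled terms, which is why the combination
# complexterms[0]**2 + complexterms[1]**2 - simpleterms[0]**2 - simpleterms[1]**2
# can be simplified (via trigsimp) into an equation free of the coupled monomials.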
finaleq = self.trigsimp(expand(((complexterms[0]**2+complexterms[1]**2) - simpleterms[0]**2 - simpleterms[1]**2).subs(varsubsinv)),othersolvedvars+[var0,var1]).subs(varsubs)
denoms = [fraction(simpleterms[0])[1], fraction(simpleterms[1])[1], fraction(complexterms[0])[1], fraction(complexterms[1])[1]]
lcmvars = self.pvars+unknownvars
for othersolvedvar in othersolvedvars:
lcmvars += self.Variable(othersolvedvar).vars
denomlcm = Poly(S.One,*lcmvars)
for denom in denoms:
if denom != S.One:
checkforzeros.append(self.removecommonexprs(denom,onlygcd=False,onlynumbers=True))
denomlcm = Poly(lcm(denomlcm,denom),*lcmvars)
finaleq = simplify(finaleq*denomlcm.as_basic()**2)
complementvarindex = varindex-(varindex%2)+((varindex+1)%2)
complementvar = unknownvars[complementvarindex]
finaleq = simplify(finaleq.subs(complementvar**2,1-unknownvar**2)).subs(allsymbols).expand()
else:
# try to reduce finaleq
p0 = Poly(simpleterms[0],unknownvars[varindex],unknownvars[varindex+1])
p1 = Poly(simpleterms[1],unknownvars[varindex],unknownvars[varindex+1])
if p0.degree > 1 and p1.degree > 1 and p0.degree == p1.degree and p0.lead_term[1] == p1.lead_term[1]:
finaleq = (p0*p1.lead_term[0]-p1*p0.lead_term[0]).as_basic()
finaleq = expand(simplify(finaleq.subs(allsymbols)))
if finaleq == S.Zero:
finaleq = expand(p0.as_basic().subs(allsymbols))
if finaleq is None:
log.warn('solvePairVariables: did not compute a final variable. This is a weird condition...')
return self.solvePairVariablesHalfAngle(raweqns,var0,var1,othersolvedvars)
if not self.isValidSolution(finaleq):
log.warn('failed to solve pairwise equation: %s'%str(finaleq))
return self.solvePairVariablesHalfAngle(raweqns,var0,var1,othersolvedvars)
newunknownvars = unknownvars[:]
newunknownvars.remove(unknownvar)
if finaleq.has_any_symbols(*newunknownvars):
log.warn('equation relies on unsolved variables(%s): %s',newunknownvars,finaleq)
return self.solvePairVariablesHalfAngle(raweqns,var0,var1,othersolvedvars)
if not finaleq.has_any_symbols(unknownvar):
# somehow removed all variables, so try the general method
return self.solvePairVariablesHalfAngle(raweqns,var0,var1,othersolvedvars)
try:
if self.codeComplexity(finaleq) > 100000:
return self.solvePairVariablesHalfAngle(raweqns,var0,var1,othersolvedvars)
except self.CannotSolveError:
pass
if useconic:
# conic roots solver not as robust as half-angle transform!
#return [SolverConicRoots(var.name,[finaleq],isHinge=self.isHinge(var.name))]
solution = self.solveHighDegreeEquationsHalfAngle([finaleq],varsym)
solution.checkforzeros += checkforzeros
return [solution]
# now that everything is with respect to one variable, simplify and solve the equation
eqnew, symbols = self.groupTerms(finaleq, unknownvars, symbolgen)
allsymbols += symbols
solutions=solve(eqnew,unknownvar)
log.info('pair solution: %s, %s', eqnew,solutions)
if solutions:
solversolution=AST.SolverSolution(var.name, isHinge=self.isHinge(var.name))
if (varindex%2)==0:
solversolution.jointevalcos=[self.trigsimp(s.subs(allsymbols+varsubsinv),othersolvedvars).subs(varsubs) for s in solutions]
else:
solversolution.jointevalsin=[self.trigsimp(s.subs(allsymbols+varsubsinv),othersolvedvars).subs(varsubs) for s in solutions]
return [solversolution]
return self.solvePairVariablesHalfAngle(raweqns,var0,var1,othersolvedvars)
#raise self.CannotSolveError('cannot solve pair equation')
## SymPy helper routines
@staticmethod
def isValidSolution(expr):
"""return true if solution does not contain any nan or inf terms"""
if expr.is_number:
e=expr.evalf()
if e.has(I) or isinf(e) or isnan(e):
return False
return True
if expr.is_Mul:
# first multiply all numbers
number = S.One
for arg in expr.args:
if arg.is_number:
number *= arg
elif not IKFastSolver.isValidSolution(arg):
return False
# finally evaluate the multiplied form
return IKFastSolver.isValidSolution(number.evalf())
for arg in expr.args:
if not IKFastSolver.isValidSolution(arg):
return False
return True
@staticmethod
def recursiveFraction(expr):
if expr.is_Add:
allpoly = []
finaldenom = S.One
for arg in expr.args:
n,d = IKFastSolver.recursiveFraction(arg)
finaldenom = finaldenom*d
allpoly.append([n,d])
finalnum = S.Zero
for n,d in allpoly:
finalnum += n*(finaldenom/d)
return finalnum,finaldenom
elif expr.is_Mul:
finalnum = S.One
finaldenom = S.One
for arg in expr.args:
n,d = IKFastSolver.recursiveFraction(arg)
finalnum = finalnum * n
finaldenom = finaldenom * d
return finalnum,finaldenom
elif expr.is_Pow and expr.exp.is_number:
n,d=IKFastSolver.recursiveFraction(expr.base)
if expr.exp < 0:
exponent = -expr.exp
n,d = d,n
else:
exponent = expr.exp
return n**exponent,d**exponent
else:
return fraction(expr)
@staticmethod
def groupTerms(expr,vars,symbolgen = None):
"""Separates all terms that do have var in them"""
if symbolgen is None:
symbolgen = cse_main.numbered_symbols('const')
symbols = []
p = Poly(expr,*vars)
newexpr = S.Zero
for c,m in p.iter_terms():
# make huge numbers into constants too
if (c.is_number and len(str(c)) > 40) or (not c.is_number and not c.is_Symbol):
# if it is a product of a symbol and a number, then ignore
if not c.is_Mul or not all([e.is_number or e.is_Symbol for e in c.args]):
sym = symbolgen.next()
symbols.append((sym,c))
c = sym
if __builtin__.sum(m) == 0:
newexpr += c
else:
for i,degree in enumerate(m):
c = c*vars[i]**degree
newexpr += c
return newexpr,symbols
@staticmethod
def replaceNumbers(expr,symbolgen = None):
"""Replaces all numbers with symbols, this is to make gcd faster when fractions get too big"""
if symbolgen is None:
symbolgen = cse_main.numbered_symbols('const')
symbols = []
if expr.is_number:
result = symbolgen.next()
symbols.append((result,expr))
elif expr.is_Mul:
result = S.One
for arg in expr.args:
newresult, newsymbols = IKFastSolver.replaceNumbers(arg,symbolgen)
result *= newresult
symbols += newsymbols
elif expr.is_Add:
result = S.Zero
for arg in expr.args:
newresult, newsymbols = IKFastSolver.replaceNumbers(arg,symbolgen)
result += newresult
symbols += newsymbols
elif expr.is_Pow:
# don't replace the exponent
newresult, newsymbols = IKFastSolver.replaceNumbers(expr.base,symbolgen)
symbols += newsymbols
result = newresult**expr.exp
else:
result = expr
return result,symbols
@staticmethod
def frontnumbers(eq):
if eq.is_Number:
return [eq]
if eq.is_Mul:
n = []
for arg in eq.args:
n += IKFastSolver.frontnumbers(arg)
return n
return []
@staticmethod
def removecommonexprs(eq,returncommon=False,onlygcd=False,onlynumbers=True):
"""removes common expressions from a sum. Assumes all the coefficients are rationals. For example:
a*c_0 + a*c_1 + a*c_2 = 0
will result in
c_0 + c_1 + c_2 = 0
"""
eq = eq.expand() # doesn't work otherwise
if eq.is_Add:
exprs = eq.args
totaldenom = S.One
common = S.One
if onlynumbers:
for i in range(len(exprs)):
denom = S.One
for d in IKFastSolver.frontnumbers(fraction(exprs[i])[1]):
denom *= d
if denom != S.One:
exprs = [expr*denom for expr in exprs]
totaldenom *= denom
if onlygcd:
common = None
for i in range(len(exprs)):
coeff = S.One
for n in IKFastSolver.frontnumbers(exprs[i]):
coeff *= n
if common == None:
common = coeff
else:
common = igcd(common,coeff)
if common == S.One:
break
else:
for i in range(len(exprs)):
denom = fraction(exprs[i])[1]
if denom != S.One:
exprs = [expr*denom for expr in exprs]
totaldenom *= denom
# there are no fractions, so can start simplifying
common = exprs[0]/fraction(cancel(exprs[0]/exprs[1]))[0]
for i in range(2,len(exprs)):
common = common/fraction(cancel(common/exprs[i]))[0]
if common.is_number:
common=S.One
# find the smallest number and divide by it
if not onlygcd:
smallestnumber = None
for expr in exprs:
if expr.is_number:
if smallestnumber is None or smallestnumber > abs(expr):
smallestnumber = abs(expr)
elif expr.is_Mul:
n = S.One
for arg in expr.args:
if arg.is_number:
n *= arg
if smallestnumber is None or smallestnumber > abs(n):
smallestnumber = abs(n)
if smallestnumber is not None:
common = common*smallestnumber
eq = S.Zero
for expr in exprs:
eq += expr/common
if returncommon:
return eq,common/totaldenom
elif eq.is_Mul:
coeff = S.One
for d in IKFastSolver.frontnumbers(eq):
coeff *= d
if returncommon:
return eq/coeff,coeff
return eq/coeff
if returncommon:
return eq,S.One
return eq
@staticmethod
def det_bareis(M,*vars,**kwargs):
"""Function from sympy with a couple of improvements.
Compute matrix determinant using Bareis' fraction-free
algorithm which is an extension of the well known Gaussian
elimination method. This approach is best suited for dense
symbolic matrices and will result in a determinant with
minimal number of fractions. It means that less term
rewriting is needed on resulting formulae.
TODO: Implement algorithm for sparse matrices (SFF).
Function from sympy/matrices/matrices.py
"""
if not M.is_square:
raise NonSquareMatrixException()
n = M.rows
M = M[:,:] # make a copy
if n == 1:
det = M[0, 0]
elif n == 2:
det = M[0, 0]*M[1, 1] - M[0, 1]*M[1, 0]
else:
sign = 1 # track current sign in case of column swap
for k in range(n-1):
# look for a pivot in the current column
# and assume det == 0 if none is found
if M[k, k] == 0:
for i in range(k+1, n):
if M[i, k] != 0:
M.row_swap(i, k)
sign *= -1
break
else:
return S.Zero
# proceed with Bareis' fraction-free (FF)
# form of Gaussian elimination algorithm
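# Bareiss step (for reference): each update below computes
#   M[i,j] <- (M[k,k]*M[i,j] - M[i,k]*M[k,j]) / M[k-1,k-1]
# where the division by the previous pivot is exact over integers and
# polynomials, which keeps intermediate entries from growing into nested
# fractions.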
for i in range(k+1, n):
for j in range(k+1, n):
D = M[k, k]*M[i, j] - M[i, k]*M[k, j]
if k > 0:
if len(vars) > 0:
#print i,j,M[k-1, k-1]
D,r = div(Poly(D,*vars),M[k-1, k-1])
else:
D /= M[k-1, k-1]
if D.is_Atom:
M[i, j] = D
else:
if len(vars) > 0:
M[i, j] = D
else:
M[i, j] = Poly.cancel(D)
det = sign * M[n-1, n-1]
return det.expand()
@staticmethod
def tolatex(e):
s = printing.latex(e)
s1 = re.sub('\\\\operatorname\{(sin|cos)\}\\\\left\(j_\{(\d)\}\\\\right\)','\g<1>_\g<2>',s)
s2 = re.sub('1\.(0*)([^0-9])','1\g<2>',s1)
s3 = re.sub('1 \\\\(sin|cos)','\g<1>',s2)
s4 = re.sub('(\d*)\.([0-9]*[1-9])(0*)([^0-9])','\g<1>.\g<2>\g<4>',s3)
s5 = re.sub('sj_','s_',s4)
s5 = re.sub('cj_','c_',s5)
s5 = re.sub('sin','s',s5)
s5 = re.sub('cos','c',s5)
replacements = [('px','p_x'),('py','p_y'),('pz','p_z'),('r00','r_{00}'),('r01','r_{01}'),('r02','r_{02}'),('r10','r_{10}'),('r11','r_{11}'),('r12','r_{12}'),('r20','r_{20}'),('r21','r_{21}'),('r22','r_{22}')]
for old,new in replacements:
s5 = re.sub(old,new,s5)
return s5
@staticmethod
def GetSolvers():
"""Returns a dictionary of all the supported solvers and their official identifier names"""
return {'transform6d':IKFastSolver.solveFullIK_6D,
'rotation3d':IKFastSolver.solveFullIK_Rotation3D,
'translation3d':IKFastSolver.solveFullIK_Translation3D,
'direction3d':IKFastSolver.solveFullIK_Direction3D,
'ray4d':IKFastSolver.solveFullIK_Ray4D,
'lookat3d':IKFastSolver.solveFullIK_Lookat3D,
'translationdirection5d':IKFastSolver.solveFullIK_TranslationDirection5D,
'translationxy2d':IKFastSolver.solveFullIK_TranslationXY2D,
'translationxyorientation3d':IKFastSolver.solveFullIK_TranslationXYOrientation3D,
'translationxaxisangle4d':IKFastSolver.solveFullIK_TranslationAxisAngle4D,
'translationyaxisangle4d':IKFastSolver.solveFullIK_TranslationAxisAngle4D,
'translationzaxisangle4d':IKFastSolver.solveFullIK_TranslationAxisAngle4D,
'translationxaxisangleznorm4d':IKFastSolver.solveFullIK_TranslationAxisAngle4D,
'translationyaxisanglexnorm4d':IKFastSolver.solveFullIK_TranslationAxisAngle4D,
'translationzaxisangleynorm4d':IKFastSolver.solveFullIK_TranslationAxisAngle4D
}
if __name__ == '__main__':
import openravepy
parser = OptionParser(description="""IKFast: The Robot Kinematics Compiler
Software License Agreement (Lesser GPL v3).
Copyright (C) 2009-2011 <NAME>.
IKFast is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
IKFast is part of OpenRAVE. This program can be used with robots or kinbodies defined and is independent of the OpenRAVE databases.
Example usage for 7 DOF Barrett WAM where the 3rd joint is a free parameter:
python ikfast.py --robot=robots/barrettwam.robot.xml --baselink=0 --eelink=7 --savefile=ik.cpp --freeindex=2
""",version=__version__)
parser.add_option('--robot', action='store', type='string', dest='robot',default=None,
help='robot file (COLLADA or OpenRAVE XML)')
parser.add_option('--savefile', action='store', type='string', dest='savefile',default='ik.cpp',
help='filename where to store the generated c++ code')
parser.add_option('--baselink', action='store', type='int', dest='baselink',
help='base link index to start extraction of ik chain')
parser.add_option('--eelink', action='store', type='int', dest='eelink',
help='end effector link index to end extraction of ik chain')
parser.add_option('--freeindex', action='append', type='int', dest='freeindices',default=[],
help='Optional joint index specifying a free parameter of the manipulator. If not specified, assumes all joints not solving for are free parameters. Can be specified multiple times for multiple free parameters.')
parser.add_option('--iktype', action='store', dest='iktype',default='transform6d',
help='The iktype to generate the ik for. Possible values are: %s'%(', '.join(name for name,fn in IKFastSolver.GetSolvers().iteritems())))
parser.add_option('--lang', action='store',type='string',dest='lang',default='cpp',
help='The language to generate the code in (default=%default), available=('+','.join(name for name,value in CodeGenerators.iteritems())+')')
parser.add_option('--debug','-d', action='store', type='int',dest='debug',default=logging.INFO,
help='Debug level for python nose (smaller values allow more text).')
(options, args) = parser.parse_args()
if options.robot is None or options.baselink is None or options.eelink is None:
print('Error: Not all arguments specified')
sys.exit(1)
format = logging.Formatter('%(levelname)s: %(message)s')
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(format)
log.addHandler(handler)
log.setLevel(options.debug)
solvefn=IKFastSolver.GetSolvers()[options.iktype]
if options.robot is not None:
try:
env=openravepy.Environment()
kinbody=env.ReadRobotXMLFile(options.robot)
env.Add(kinbody)
solver = IKFastSolver(kinbody,kinbody)
chaintree = solver.generateIkSolver(options.baselink,options.eelink,options.freeindices,solvefn=solvefn)
code=solver.writeIkSolver(chaintree,lang=options.lang)
finally:
openravepy.RaveDestroy()
if len(code) > 0:
open(options.savefile,'w').write(code)
```
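The `__main__` block above is the usual entry point, but the solver can also be driven programmatically. The following is a minimal sketch mirroring the command-line example for the Barrett WAM; it assumes openravepy is installed, that this module is importable as `ikfast`, and that the sample robot file ships with the OpenRAVE data.
```python
import openravepy
import ikfast  # assumption: this module is on the python path

env = openravepy.Environment()
try:
    kinbody = env.ReadRobotXMLFile('robots/barrettwam.robot.xml')
    env.Add(kinbody)
    solver = ikfast.IKFastSolver(kinbody, kinbody)
    # same request as the CLI example: links 0..7 with joint 2 left as a free parameter
    chaintree = solver.generateIkSolver(0, 7, [2],
                                        solvefn=ikfast.IKFastSolver.GetSolvers()['transform6d'])
    code = solver.writeIkSolver(chaintree, lang='cpp')
    open('ik.cpp', 'w').write(code)
finally:
    openravepy.RaveDestroy()
```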
#### File: python/interfaces/BaseManipulation.py
```python
from __future__ import with_statement # for python 2.5
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2009-2011 <NAME> <<EMAIL>>'
__license__ = 'Apache License, Version 2.0'
# python 2.5 raises 'import *' not allowed with 'from .'
from ..openravepy_int import RaveCreateModule, RaveCreateTrajectory, matrixSerialization, IkParameterization
from ..openravepy_ext import planning_error
import numpy
from copy import copy as shallowcopy
import logging
log = logging.getLogger('openravepy.interfaces.BaseManipulation')
class BaseManipulation:
"""Interface wrapper for :ref:`module-basemanipulation`
"""
def __init__(self,robot,plannername=None,maxvelmult=None):
env = robot.GetEnv()
self.prob = RaveCreateModule(env,'BaseManipulation')
self.robot = robot
self.args = self.robot.GetName()
if plannername is not None:
self.args += u' planner ' + plannername
if maxvelmult is not None:
self.args += u' maxvelmult %.15e '%maxvelmult
env.Add(self.prob,True,self.args)
def __del__(self):
# need to lock the environment since Remove locks it
env = self.prob.GetEnv()
if env.Lock(1.0):
try:
env.Remove(self.prob)
finally:
env.Unlock()
else:
log.warn('failed to lock environment for BaseManipulation.__del__!')
def clone(self,envother):
return self.Clone(envother)
def Clone(self,envother):
"""Clones the interface into another environment
"""
clone = shallowcopy(self)
clone.prob = RaveCreateModule(envother,'BaseManipulation')
clone.robot = envother.GetRobot(self.robot.GetName())
envother.Add(clone.prob,True,clone.args)
return clone
def SetRobot(self,robot):
"""See :ref:`module-basemanipulation-setrobot`
"""
success = self.prob.SendCommand(u'setrobot '+robot.GetName())
if success is not None:
self.robot = robot
return True
return False
def TrajFromData(self,data,resettrans=False,resettiming=False):
"""See :ref:`module-basemanipulation-traj`
"""
return self.prob.SendCommand('traj stream ' + data + ' %d %d '%(resettrans,resettiming))
def VerifyTrajectory(self,data,resettrans=False,resettiming=False,samplingstep=None):
"""See :ref:`module-basemanipulation-verifytrajectory`
"""
cmd = 'VerifyTrajectory stream ' + data + ' resettrans %d resettiming %d '%(resettrans,resettiming)
if samplingstep is not None:
cmd += 'samplingstep %.15e '%samplingstep
log.debug(cmd)
return self.prob.SendCommand(cmd)
def MoveHandStraight(self,direction,minsteps=None,maxsteps=None,stepsize=None,ignorefirstcollision=None,starteematrix=None,greedysearch=None,execute=None,outputtraj=None,maxdeviationangle=None,steplength=None,planner=None,outputtrajobj=None):
"""See :ref:`module-basemanipulation-movehandstraight`
"""
cmd = 'MoveHandStraight direction %.15e %.15e %.15e '%(direction[0],direction[1],direction[2])
if minsteps is not None:
cmd += 'minsteps %d '%minsteps
if maxsteps is not None:
cmd += 'maxsteps %d '%maxsteps
if stepsize is not None:
cmd += 'steplength %.15e '%stepsize
if steplength is not None:
cmd += 'steplength %.15e '%steplength
if planner is not None:
cmd += 'planner %s '%planner
if execute is not None:
cmd += 'execute %d '%execute
if starteematrix is not None:
cmd += 'starteematrix ' + matrixSerialization(starteematrix) + ' '
if greedysearch is not None:
cmd += 'greedysearch %d '%greedysearch
if (outputtraj is not None and outputtraj) or (outputtrajobj is not None and outputtrajobj):
cmd += 'outputtraj '
if ignorefirstcollision is not None:
cmd += 'ignorefirstcollision %.15e '%ignorefirstcollision
if maxdeviationangle is not None:
cmd += 'maxdeviationangle %.15e '%maxdeviationangle
res = self.prob.SendCommand(cmd)
if res is None:
raise planning_error('MoveHandStraight')
if outputtrajobj is not None and outputtrajobj:
return RaveCreateTrajectory(self.prob.GetEnv(),'').deserialize(res)
return res
def MoveManipulator(self,goal=None,maxiter=None,execute=None,outputtraj=None,maxtries=None,goals=None,steplength=None,outputtrajobj=None,jitter=None,releasegil=False):
"""See :ref:`module-basemanipulation-movemanipulator`
"""
if goal is not None:
assert(len(goal) == len(self.robot.GetActiveManipulator().GetArmIndices()))
return self._MoveJoints('MoveManipulator',goal=goal,steplength=steplength,maxiter=maxiter,maxtries=maxtries,execute=execute,outputtraj=outputtraj,goals=goals,outputtrajobj=outputtrajobj,jitter=jitter,releasegil=releasegil)
def MoveActiveJoints(self,goal=None,steplength=None,maxiter=None,maxtries=None,execute=None,outputtraj=None,goals=None,outputtrajobj=None,jitter=None,releasegil=False,postprocessingplanner=None,postprocessingparameters=None,usedynamicsconstraints=None,initialconfigs=None):
"""See :ref:`module-basemanipulation-moveactivejoints`
"""
if goal is not None:
assert(len(goal) == self.robot.GetActiveDOF() and len(goal) > 0)
return self._MoveJoints('MoveActiveJoints',goal=goal,steplength=steplength,maxiter=maxiter,maxtries=maxtries,execute=execute,outputtraj=outputtraj,goals=goals,outputtrajobj=outputtrajobj,jitter=jitter,releasegil=releasegil,postprocessingplanner=postprocessingplanner,postprocessingparameters=postprocessingparameters,usedynamicsconstraints=usedynamicsconstraints,initialconfigs=initialconfigs)
def _MoveJoints(self,cmd,goal=None,steplength=None,maxiter=None,maxtries=None,execute=None,outputtraj=None,goals=None,outputtrajobj=None,jitter=None,releasegil=False,postprocessingplanner=None,postprocessingparameters=None,usedynamicsconstraints=None,initialconfigs=None):
"""See :ref:`module-basemanipulation-moveactivejoints`
"""
cmd += ' '
if goal is not None:
cmd += 'goal ' + ' '.join('%.15e'%f for f in goal) + ' '
if goals is not None:
cmd += 'goals %d '%len(goals)
for g in goals:
for f in g:
cmd += '%.15e '%f
if initialconfigs is not None:
cmd += 'initialconfigs %d '%len(initialconfigs)
for g in initialconfigs:
for f in g:
cmd += '%.15e '%f
if steplength is not None:
cmd += 'steplength %.15e '%steplength
if execute is not None:
cmd += 'execute %d '%execute
if (outputtraj is not None and outputtraj) or (outputtrajobj is not None and outputtrajobj):
cmd += 'outputtraj '
if maxiter is not None:
cmd += 'maxiter %d '%maxiter
if maxtries is not None:
cmd += 'maxtries %d '%maxtries
if jitter is not None:
cmd += 'jitter %f '%jitter
if usedynamicsconstraints is not None:
cmd += 'usedynamicsconstraints %d '%usedynamicsconstraints
if postprocessingplanner is not None:
cmd += 'postprocessingplanner %s\n'%postprocessingplanner
if postprocessingparameters is not None:
cmd += 'postprocessingparameters %s\n'%postprocessingparameters
res = self.prob.SendCommand(cmd,releasegil=releasegil)
if res is None:
raise planning_error('MoveActiveJoints')
if outputtrajobj is not None and outputtrajobj:
return RaveCreateTrajectory(self.prob.GetEnv(),'').deserialize(res)
return res
def MoveToHandPosition(self,matrices=None,affinedofs=None,maxiter=None,maxtries=None,translation=None,rotation=None,seedik=None,constraintfreedoms=None,constraintmatrix=None,constrainterrorthresh=None,execute=None,outputtraj=None,steplength=None,goalsamples=None,ikparam=None,ikparams=None,jitter=None,minimumgoalpaths=None,outputtrajobj=None,postprocessing=None,jittergoal=None, constrainttaskmatrix=None, constrainttaskpose=None,goalsampleprob=None,goalmaxsamples=None,goalmaxtries=None,releasegil=False,initialconfigs=None):
"""See :ref:`module-basemanipulation-movetohandposition`
postprocessing is a two-element tuple: (plannername, parameters)
"""
cmd = 'MoveToHandPosition '
if matrices is not None:
cmd += 'matrices %d '%len(matrices)
for m in matrices:
cmd += matrixSerialization(m) + ' '
if initialconfigs is not None:
cmd += 'initialconfigs %d '%len(initialconfigs)
for g in initialconfigs:
for f in g:
cmd += '%.15e '%f
if maxiter is not None:
cmd += 'maxiter %d '%maxiter
if maxtries is not None:
cmd += 'maxtries %d '%maxtries
if translation is not None:
cmd += 'translation %.15e %.15e %.15e '%(translation[0],translation[1],translation[2])
if rotation is not None:
cmd += 'rotation %.15e %.15e %.15e %.15e '%(rotation[0],rotation[1],rotation[2],rotation[3])
if seedik is not None:
cmd += 'seedik %d '%seedik
if goalsamples is not None:
cmd += 'goalsamples %d '%goalsamples
if postprocessing is not None:
cmd += 'postprocessingplanner %s\n postprocessingparameters %s\n '%(postprocessing[0],postprocessing[1])
if constraintfreedoms is not None:
cmd += 'constraintfreedoms %s '%(' '.join(str(constraintfreedoms[i]) for i in range(6)))
if constraintmatrix is not None:
cmd += 'constraintmatrix %s '%matrixSerialization(constraintmatrix)
if constrainttaskmatrix is not None:
cmd += 'constrainttaskmatrix %s '%matrixSerialization(constrainttaskmatrix)
if constrainterrorthresh is not None:
cmd += 'constrainterrorthresh %s '%constrainterrorthresh
if jitter is not None:
cmd += 'jitter %.15e '%jitter
if steplength is not None:
cmd += 'steplength %.15e '%steplength
if jittergoal is not None:
cmd += 'jittergoal %.15e '%jittergoal
if ikparam is not None:
cmd += 'ikparam ' + str(ikparam) + ' '
if ikparams is not None:
cmd += 'ikparams %d '%len(ikparams)
for ikp in ikparams:
cmd += str(ikp) + ' '
if execute is not None:
cmd += 'execute %d '%execute
if (outputtraj is not None and outputtraj) or (outputtrajobj is not None and outputtrajobj):
cmd += 'outputtraj '
if minimumgoalpaths is not None:
cmd += 'minimumgoalpaths %d '%minimumgoalpaths
if goalsampleprob is not None:
cmd += 'goalsampleprob %.15e '%goalsampleprob
if goalmaxtries is not None:
cmd += 'goalmaxtries %d '%goalmaxtries
res = self.prob.SendCommand(cmd, releasegil=releasegil)
if res is None:
raise planning_error('MoveToHandPosition')
if outputtrajobj is not None and outputtrajobj:
return RaveCreateTrajectory(self.prob.GetEnv(),'').deserialize(res)
return res
def MoveUnsyncJoints(self,jointvalues,jointinds,maxtries=None,planner=None,maxdivision=None,execute=None,outputtraj=None,outputtrajobj=None):
"""See :ref:`module-basemanipulation-moveunsyncjoints`
"""
assert(len(jointinds)==len(jointvalues) and len(jointinds)>0)
cmd = 'MoveUnsyncJoints handjoints %d %s %s '%(len(jointinds),' '.join('%.15e'%f for f in jointvalues), ' '.join(str(f) for f in jointinds))
if planner is not None:
cmd += 'planner %s '%planner
if execute is not None:
cmd += 'execute %d '%execute
if (outputtraj is not None and outputtraj) or (outputtrajobj is not None and outputtrajobj):
cmd += 'outputtraj '
if maxtries is not None:
cmd += 'maxtries %d '%maxtries
if maxdivision is not None:
cmd += 'maxdivision %d '%maxdivision
res = self.prob.SendCommand(cmd)
if res is None:
raise planning_error('MoveUnsyncJoints')
if outputtrajobj is not None and outputtrajobj:
return RaveCreateTrajectory(self.prob.GetEnv(),'').deserialize(res)
return res
def JitterActive(self,maxiter=None,jitter=None,execute=None,outputtraj=None,outputfinal=None,outputtrajobj=None):
"""See :ref:`module-basemanipulation-jitteractive`
"""
cmd = 'JitterActive '
if maxiter is not None:
cmd += 'maxiter %d '%maxiter
if jitter is not None:
cmd += 'jitter %.15e '%jitter
if execute is not None:
cmd += 'execute %d '%execute
if (outputtraj is not None and outputtraj) or (outputtrajobj is not None and outputtrajobj):
cmd += 'outputtraj '
if outputfinal:
cmd += 'outputfinal'
res = self.prob.SendCommand(cmd)
if res is None:
raise planning_error('JitterActive')
resvalues = res.split()
if outputfinal:
final = numpy.array([numpy.float64(resvalues[i]) for i in range(self.robot.GetActiveDOF())])
resvalues=resvalues[len(final):]
else:
final=None
if (outputtraj is not None and outputtraj) or (outputtrajobj is not None and outputtrajobj):
traj = ' '.join(resvalues)
else:
traj = None
if traj is not None and outputtrajobj is not None and outputtrajobj:
traj = RaveCreateTrajectory(self.prob.GetEnv(),'').deserialize(traj)
return final,traj
def FindIKWithFilters(self,ikparam,cone=None,solveall=None,filteroptions=None):
"""See :ref:`module-basemanipulation-findikwithfilters`
"""
cmd = 'FindIKWithFilters ikparam %s '%str(ikparam)
if cone is not None:
cmd += 'cone %s '%(' '.join('%.15e'%f for f in cone))
if solveall is not None and solveall:
cmd += 'solveall '
if filteroptions is not None:
cmd += 'filteroptions %d '%filteroptions
res = self.prob.SendCommand(cmd)
if res is None:
raise planning_error('FindIKWithFilters')
resvalues = res.split()
num = int(resvalues[0])
dim = (len(resvalues)-1)/num
solutions = numpy.reshape([numpy.float64(s) for s in resvalues[1:]],(num,dim))
return solutions
```
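A minimal usage sketch of the wrapper above, assuming openravepy is installed (import paths follow the file layout in this document) and the standard Barrett WAM robot file is available; the joint-space goal here is only illustrative.
```python
from openravepy import Environment, RaveDestroy
from openravepy.interfaces import BaseManipulation

env = Environment()
try:
    env.Load('robots/barrettwam.robot.xml')
    robot = env.GetRobots()[0]
    basemanip = BaseManipulation(robot)
    # plan (without executing) a move of the active manipulator to an all-zero joint goal
    ndof = len(robot.GetActiveManipulator().GetArmIndices())
    traj = basemanip.MoveManipulator(goal=[0.0] * ndof, execute=False, outputtrajobj=True)
finally:
    RaveDestroy()
```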
#### File: openrave/python/misc.py
```python
from __future__ import with_statement # for python 2.5
import openravepy_int
import openravepy_ext
import os.path
from sys import platform as sysplatformname
from sys import stdout
import numpy
try:
from itertools import izip
except ImportError:
pass
try:
from threading import Thread
except ImportError:
pass
import logging
log = logging.getLogger('openravepy.'+__name__.split('.',2)[-1])
def mkdir_recursive(newdir):
log.warn('openravepy.misc.mkdir_recursive is deprecated, please use os.makedirs')
from os import makedirs
try:
makedirs(newdir)
except OSError:
pass
try:
from os.path import relpath
except ImportError:
# relpath is not present in python 2.5 and below, so hold an implementation of it.
from posixpath import curdir, sep, pardir, join, abspath, commonprefix
def relpath(path, start=curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
# Work out how much of the filepath is shared by start and path.
i = len(commonprefix([start_list, path_list]))
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
return curdir if not rel_list else join(*rel_list)
def LoadTrajectoryFromFile(env,trajfile,trajtype=''):
return openravepy_int.RaveCreateTrajectory(env,trajtype).deserialize(open(trajfile,'r').read())
def InitOpenRAVELogging(stream=stdout):
"""Sets the python logging **openravepy** scope to the same debug level as OpenRAVE and initializes handles if they are not present
"""
levelmap = {openravepy_int.DebugLevel.Verbose:logging.DEBUG, openravepy_int.DebugLevel.Debug:logging.DEBUG, openravepy_int.DebugLevel.Info:logging.INFO, openravepy_int.DebugLevel.Warn:logging.WARN, openravepy_int.DebugLevel.Error:logging.ERROR, openravepy_int.DebugLevel.Fatal:logging.FATAL }
log=logging.getLogger('openravepy')
log.setLevel(levelmap[openravepy_int.RaveGetDebugLevel()&0xffff])
if len(log.handlers) == 0:
try:
import codecs
colorize=__import__('logutils.colorize',fromlist=['colorize'])
handler = colorize.ColorizingStreamHandler(codecs.getwriter('utf-8')(stream))
handler.level_map[logging.DEBUG] =(None, 'green', False)
handler.level_map[logging.INFO] = (None, None, False)
handler.level_map[logging.WARNING] = (None, 'yellow', False)
handler.level_map[logging.ERROR] = (None, 'red', False)
handler.level_map[logging.CRITICAL] = ('white', 'magenta', True)
except ImportError:
handler = logging.StreamHandler(stream)
openravepy_int.raveLogVerbose('python logutils not present so cannot colorize python output.')
handler.setFormatter(logging.Formatter('%(name)s: %(funcName)s, %(message)s'))
log.addHandler(handler)
def SetViewerUserThread(env,viewername,userfn):
"""Adds a viewer to the environment if one doesn't exist yet and starts it on this thread. Then creates a new thread to call the user-defined function to continue computation.
This function will return when the viewer and uesrfn exits. If userfn exits first, then will quit the viewer
"""
if env.GetViewer() is not None or viewername is None:
return userfn()
viewer = None
if sysplatformname.startswith('darwin'):
viewer = openravepy_int.RaveCreateViewer(env,viewername)
else:
# create in a separate thread for windows and linux since the signals do not get messed up
env.SetViewer(viewername)
if viewer is None:
return userfn()
# add the viewer before starting the user function
env.Add(viewer)
threading = __import__('threading')
Thread = threading.Thread
def localuserfn(userfn,viewer):
try:
userfn()
finally:
# user function quit, so have to destroy the viewer
viewer.quitmainloop()
userthread = Thread(target=localuserfn,args=(userfn,viewer))
userthread.start()
sig_thread_id = 0
for tid, tobj in threading._active.items():
if tobj is userthread:
sig_thread_id = tid
break
try:
viewer.main(True,sig_thread_id)
finally:
userthread.join()
class OpenRAVEGlobalArguments:
"""manages a global set of command-line options applicable to all openrave environments"""
@staticmethod
def addOptions(parser,testmode=True):
from optparse import OptionGroup
ogroup = OptionGroup(parser,"OpenRAVE Environment Options")
ogroup.add_option('--loadplugin', action="append",type='string',dest='_loadplugins',default=[],
help='OpenRAVE plugin to load; can be specified multiple times.')
ogroup.add_option('--collision', action="store",type='string',dest='_collision',default=None,
help='Default collision checker to use')
ogroup.add_option('--physics', action="store",type='string',dest='_physics',default=None,
help='physics engine to use (default=%default)')
ogroup.add_option('--viewer', action="store",type='string',dest='_viewer',default=None,
help='viewer to use (default=qtcoin)' )
ogroup.add_option('--server', action="store",type='string',dest='_server',default=None,
help='server to use (default=None).')
ogroup.add_option('--serverport', action="store",type='int',dest='_serverport',default=4765,
help='port to load server on (default=%default).')
ogroup.add_option('--module', action="append",type='string',dest='_modules',default=[],nargs=2,
help='module to load, can specify multiple modules. Two arguments are required: "name" "args".')
ogroup.add_option('--level','-l','--log_level', action="store",type='string',dest='_level',default=None,
help='Debug level, one of (%s)'%(','.join(str(debugname).lower() for debuglevel,debugname in openravepy_int.DebugLevel.values.iteritems())))
if testmode:
ogroup.add_option('--testmode', action="store_true",dest='testmode',default=False,
help='if set, will run the program in a finite amount of time and spend computation time validating results. Used for testing')
parser.add_option_group(ogroup)
@staticmethod
def parseGlobal(options,**kwargs):
"""Parses all global options independent of the environment"""
if options._level is not None:
for debuglevel,debugname in openravepy_int.DebugLevel.values.iteritems():
if (not options._level.isdigit() and options._level.lower() == debugname.name.lower()) or (options._level.isdigit() and int(options._level) == int(debuglevel)):
openravepy_int.RaveSetDebugLevel(debugname)
break
InitOpenRAVELogging()
@staticmethod
def parseEnvironment(options,env,defaultviewer=False,returnviewer=False,**kwargs):
"""Parses all options that affect the environment. If returnviewer is set, will return the viewer to set instead of setting it"""
try:
if options._collision:
cc = openravepy_int.RaveCreateCollisionChecker(env,options._collision)
if cc is not None:
env.SetCollisionChecker(cc)
except openravepy_ext.openrave_exception, e:
log.warn(e)
try:
if options._physics:
ph = openravepy_int.RaveCreatePhysicsEngine(env,options._physics)
if ph is not None:
env.SetPhysicsEngine(ph)
except openravepy_ext.openrave_exception, e:
log.warn(e)
try:
if options._server:
sr = openravepy_int.RaveCreateModule(env,options._server)
if sr is not None:
env.Add(sr,True,'%d'%options._serverport)
except openravepy_ext.openrave_exception, e:
log.warn(e)
for name,args in options._modules:
try:
module = openravepy_int.RaveCreateModule(env,name)
if module is not None:
env.Add(module,True,args)
except openravepy_ext.openrave_exception, e:
log.warn(e)
try:
viewername=None
if options._viewer is not None:
if len(options._viewer) > 0:
viewername=options._viewer
elif defaultviewer:
viewername='qtcoin'
if returnviewer:
return viewername
elif viewername is not None:
env.SetViewer(viewername)
except openravepy_ext.openrave_exception, e:
log.warn(e)
@staticmethod
def parseAndCreate(options,createenv=openravepy_int.Environment,returnviewer=False,**kwargs):
"""Parse all options and create the global Environment. The left over arguments are passed to the parse functions.
If returnviewer is False, the viewer is created in a separate thread, so this method will not work for MacOSX if this is the main executing thread.
"""
openravepy_int.RaveInitialize(True)
for plugin in options._loadplugins:
openravepy_int.RaveLoadPlugin(plugin)
OpenRAVEGlobalArguments.parseGlobal(options,**kwargs)
if createenv is None:
return None
env = createenv()
viewername = OpenRAVEGlobalArguments.parseEnvironment(options,env,returnviewer=returnviewer,**kwargs)
if returnviewer:
return env,viewername
else:
return env
@staticmethod
def parseAndCreateThreadedUser(options,userfn,createenv=openravepy_int.Environment,returnviewer=True,**kwargs):
"""Parse all options and create the global Environment. The left over arguments are passed to the parse functions.
If a viewer is requested, it is created in this thread, and another thread is executed with the user function. This is required for OSes that require viewer thread to be in main thread (Mac OSX)
:param userfn: Call with userfn(env,options)
:return: nothing
"""
openravepy_int.RaveInitialize(True)
for plugin in options._loadplugins:
openravepy_int.RaveLoadPlugin(plugin)
OpenRAVEGlobalArguments.parseGlobal(options,**kwargs)
if createenv is None:
raise openravepy_ext.openrave_exception('failed to create environment')
env = createenv()
viewername = OpenRAVEGlobalArguments.parseEnvironment(options,env,returnviewer=True,**kwargs)
SetViewerUserThread(env,viewername,lambda: userfn(env,options))
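# Example (a sketch, not part of the original module) of wiring OpenRAVEGlobalArguments
# into an optparse-based script:
#   from optparse import OptionParser
#   parser = OptionParser(description='my script')          # description text is hypothetical
#   OpenRAVEGlobalArguments.addOptions(parser)
#   (options, args) = parser.parse_args()
#   env = OpenRAVEGlobalArguments.parseAndCreate(options, defaultviewer=True)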
def ComputeGeodesicSphereMesh(radius=1.0,level=2):
"""Computes a geodesic sphere to a specified level. Returns the vertices and triangle indices"""
GTS_M_ICOSAHEDRON_X = numpy.sqrt(numpy.sqrt(5)+1)/numpy.sqrt(2*numpy.sqrt(5))
GTS_M_ICOSAHEDRON_Y = numpy.sqrt(2)/numpy.sqrt(5+numpy.sqrt(5))
GTS_M_ICOSAHEDRON_Z = 0.0
vertices = [numpy.array((+GTS_M_ICOSAHEDRON_Z, +GTS_M_ICOSAHEDRON_X, -GTS_M_ICOSAHEDRON_Y)),
numpy.array((+GTS_M_ICOSAHEDRON_X, +GTS_M_ICOSAHEDRON_Y, +GTS_M_ICOSAHEDRON_Z)),
numpy.array((+GTS_M_ICOSAHEDRON_Y, +GTS_M_ICOSAHEDRON_Z, -GTS_M_ICOSAHEDRON_X)),
numpy.array((+GTS_M_ICOSAHEDRON_Y, +GTS_M_ICOSAHEDRON_Z, +GTS_M_ICOSAHEDRON_X)),
numpy.array((+GTS_M_ICOSAHEDRON_X, -GTS_M_ICOSAHEDRON_Y, +GTS_M_ICOSAHEDRON_Z)),
numpy.array((+GTS_M_ICOSAHEDRON_Z, +GTS_M_ICOSAHEDRON_X, +GTS_M_ICOSAHEDRON_Y)),
numpy.array((-GTS_M_ICOSAHEDRON_Y, +GTS_M_ICOSAHEDRON_Z, +GTS_M_ICOSAHEDRON_X)),
numpy.array((+GTS_M_ICOSAHEDRON_Z, -GTS_M_ICOSAHEDRON_X, -GTS_M_ICOSAHEDRON_Y)),
numpy.array((-GTS_M_ICOSAHEDRON_X, +GTS_M_ICOSAHEDRON_Y, +GTS_M_ICOSAHEDRON_Z)),
numpy.array((-GTS_M_ICOSAHEDRON_Y, +GTS_M_ICOSAHEDRON_Z, -GTS_M_ICOSAHEDRON_X)),
numpy.array((-GTS_M_ICOSAHEDRON_X, -GTS_M_ICOSAHEDRON_Y, +GTS_M_ICOSAHEDRON_Z)),
numpy.array((+GTS_M_ICOSAHEDRON_Z, -GTS_M_ICOSAHEDRON_X, +GTS_M_ICOSAHEDRON_Y))]
triindices = [[0, 1, 2],[1, 3, 4],[3, 5, 6],[2, 4, 7],[6, 5, 8],[2, 7, 9],[5, 0, 8],[9, 7, 10],[1, 0, 5],[10, 7, 11],[3, 1, 5],[6, 10, 11],[3, 6, 11],[9, 10, 8],[4, 3, 11],[6, 8, 10],[7, 4, 11],[2, 1, 4],[8, 0, 9],[0, 2, 9]]
while level > 0:
level -= 1
newindices = []
mapnewinds = dict()
for tri in triindices:
# for ever tri, create 3 new vertices and 4 new triangles.
v = [vertices[i] for i in tri]
inds = []
for j in range(3):
key = (tri[j],tri[numpy.mod(j+1,3)])
if key in mapnewinds:
inds.append(mapnewinds[key])
else:
mapnewinds[key] = mapnewinds[key[::-1]] = len(vertices)
inds.append(len(vertices))
vnew = v[j]+v[numpy.mod(j+1,3)]
vertices.append(vnew/numpy.sqrt(sum(vnew**2)))
newindices += [[tri[0],inds[0],inds[2]],[inds[0],tri[1],inds[1]],[inds[2],inds[0],inds[1]],[inds[2],inds[1],tri[2]]]
triindices = newindices
return radius*numpy.array(vertices),triindices
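# Example (sketch): a sphere of radius 0.1 refined twice; the returned vertices and
# triangle indices can be fed to any trimesh-based drawing or geometry routine.
#   vertices, indices = ComputeGeodesicSphereMesh(radius=0.1, level=2)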
def DrawAxes(env,target,dist=1.0,linewidth=1,coloradd=None):
"""draws xyz coordinate system around target.
:param env: Environment
:param target: can be a 7 element pose, 4x4 matrix, or the name of a kinbody in the environment
:param dist: how far the lines extend from the origin
:param linewidth: how thick the line is rendered in pixels
:param coloradd: optional 3-element color offset added to the default axis colors (the result is clamped to [0,1])
"""
if isinstance(target,basestring):
T = env.GetKinBody(target).GetTransform()
elif len(target) == 7:
T = openravepy_int.matrixFromPose(target)
else:
T = numpy.array(target)
colors=numpy.array([[1,0,0],[1,0,0],[0,1,0],[0,1,0],[0,0,1],[0,0,1]])
if coloradd is not None:
colors = numpy.minimum(1.0, numpy.maximum(0.0, colors + numpy.tile(coloradd,(len(colors),1))))
return env.drawlinelist(numpy.array([T[0:3,3],T[0:3,3]+T[0:3,0]*dist,T[0:3,3],T[0:3,3]+T[0:3,1]*dist,T[0:3,3],T[0:3,3]+T[0:3,2]*dist]),linewidth,colors=colors)
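# Example (sketch): draw axes at a robot's base transform; keep the returned handle
# alive for as long as the axes should stay visible.
#   handle = DrawAxes(env, robot.GetTransform(), dist=0.2, linewidth=2)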
def DrawIkparam(env,ikparam,dist=1.0,linewidth=1,coloradd=None):
"""draws an IkParameterization
"""
if ikparam.GetType() == openravepy_int.IkParameterizationType.Transform6D:
return DrawAxes(env,ikparam.GetTransform6DPose(),dist,linewidth,coloradd)
elif ikparam.GetType() == openravepy_int.IkParameterizationType.TranslationDirection5D:
ray = ikparam.GetTranslationDirection5D()
colors=numpy.array([[0,0,0],[1,0,0]])
if coloradd is not None:
colors = numpy.minimum(1.0, numpy.maximum(0.0, colors + numpy.tile(coloradd,(len(colors),1))))
return env.drawlinelist(numpy.array([ray.pos(),ray.pos()+ray.dir()*dist]),linewidth,colors=colors)
elif ikparam.GetType() == openravepy_int.IkParameterizationType.Translation3D:
if coloradd is not None:
colors = numpy.array([coloradd])
else:
colors=numpy.array([[0,0,0]])
return env.plot3(ikparam.GetTranslation3D(),linewidth,colors=colors)
elif ikparam.GetType() == openravepy_int.IkParameterizationType.TranslationXAxisAngleZNorm4D:
pos,angle = ikparam.GetTranslationXAxisAngleZNorm4D()
T = openravepy_int.matrixFromAxisAngle([0,0,angle])
T[0:3,3] = pos
return DrawAxes(env,T,dist,linewidth,coloradd)
else:
raise NotImplementedError('iktype %s'%str(ikparam.GetType()))
def DrawCircle(env, center, normal, radius, linewidth=1, colors=None):
angles = numpy.arange(0, 2*numpy.pi+0.1, 0.1)
R = openravepy_int.matrixFromQuat(openravepy_int.quatRotateDirection([0,0,1],normal))
right = R[0:3,0]*radius
up = R[0:3,1]*radius
return env.drawlinestrip(numpy.c_[numpy.dot(numpy.transpose([numpy.cos(angles)]), [right]) + numpy.dot(numpy.transpose([numpy.sin(angles)]), [up]) + numpy.tile(center, (len(angles),1))], linewidth, colors=colors)
def ComputeBoxMesh(extents):
"""Computes a box mesh"""
indices = numpy.reshape([0, 1, 2, 1, 2, 3, 4, 5, 6, 5, 6, 7, 0, 1, 4, 1, 4, 5, 2, 3, 6, 3, 6, 7, 0, 2, 4, 2, 4, 6, 1, 3, 5,3, 5, 7],(12,3))
vertices = numpy.array(((extents[0],extents[1],extents[2]),
(extents[0],extents[1],-extents[2]),
(extents[0],-extents[1],extents[2]),
(extents[0],-extents[1],-extents[2]),
(-extents[0],extents[1],extents[2]),
(-extents[0],extents[1],-extents[2]),
(-extents[0],-extents[1],extents[2]),
(-extents[0],-extents[1],-extents[2])))
return vertices,indices
def ComputeCylinderYMesh(radius,height,angledelta=0.1):
"""Computes a mesh of a cylinder oriented towards y-axis"""
angles = numpy.arange(0,2*numpy.pi,angledelta)
cangles = numpy.cos(angles)
sangles = numpy.sin(angles)
N = len(angles)
vertices = numpy.c_[radius*numpy.tile(cangles,2),numpy.r_[numpy.tile(height*0.5,N),numpy.tile(-height*0.5,N)], radius*numpy.tile(sangles,2)]
indices = []
iprev = N-1
for i in range(N):
indices.append((iprev,i,iprev+N))
indices.append((i,i+N,iprev+N))
iprev = i
return vertices,numpy.array(indices)
def TSP(solutions,distfn):
"""solution to travelling salesman problem. orders the set of solutions such that visiting them one after another is fast.
"""
newsolutions = numpy.array(solutions)
for i in range(newsolutions.shape[0]-2):
n = newsolutions.shape[0]-i-1
dists = [distfn(newsolutions[i,:],newsolutions[j,:]) for j in range(i+1,newsolutions.shape[0])]
minind = numpy.argmin(dists)+i+1
sol = numpy.array(newsolutions[i+1,:])
newsolutions[i+1,:] = newsolutions[minind,:]
newsolutions[minind,:] = sol
return newsolutions
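# Example (sketch): greedily order a set of IK solutions by L1 joint-space distance.
#   ordered = TSP(solutions, distfn=lambda x, y: numpy.sum(numpy.abs(x - y)))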
def sequence_cross_product(*sequences):
"""iterates through the cross product of all items in the sequences"""
# visualize an odometer, with "wheels" displaying "digits"...:
wheels = map(iter, sequences)
digits = [it.next( ) for it in wheels]
while True:
yield tuple(digits)
for i in range(len(digits)-1, -1, -1):
try:
digits[i] = wheels[i].next( )
break
except StopIteration:
wheels[i] = iter(sequences[i])
digits[i] = wheels[i].next( )
else:
break
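# Example: list(sequence_cross_product([0, 1], ['a', 'b'])) yields
#   (0, 'a'), (0, 'b'), (1, 'a'), (1, 'b')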
class MultiManipIKSolver:
"""Finds the simultaneous IK solutions of all disjoint manipulators (no manipulators share a joint).
The class is extremely useful for dual-arm manipulation IK. It also handles grabbed bodies correctly.
"""
def __init__(self,manips):
self.robot = manips[0].GetRobot()
self.manips = manips
indeplinksets=[set([l for l in manip.GetIndependentLinks()]) for manip in self.manips]
indeplinknames=indeplinksets[0].intersection(*indeplinksets[1:])
alllinknames = set([l for l in self.robot.GetLinks()])
self.enablelinknames = [alllinknames.difference(indeplinksets[i]).union(indeplinknames) for i in range(len(self.manips))]
def findMultiIKSolution(self,Tgrasps,filteroptions=openravepy_int.IkFilterOptions.CheckEnvCollisions,dooptimize=False):
"""Return one set collision-free ik solutions for all manipulators.
Method always checks self-collisions.
:param Tgrasps: a list of the end effector transforms of each of the manipulators
:param filteroptions: a bitmask of :class:`IkFilterOptions`
"""
assert(len(Tgrasps)==len(self.manips))
with self.robot:
alljointvalues = []
grabbed = self.robot.GetGrabbed()
statesavers = [openravepy_int.KinBody.KinBodyStateSaver(body) for body in grabbed]
try:
with openravepy_ext.RobotStateSaver(self.robot): # for storing enabled state
for i,manip in enumerate(self.manips):
# invalidate all links that are controlled by the other manipulators
for link in self.robot.GetLinks():
link.Enable(link in self.enablelinknames[i])
# enable only the grabbed bodies of this manipulator
for body in grabbed:
body.Enable(manip.IsGrabbing(body))
values=manip.FindIKSolutions(Tgrasps[i],filteroptions)
if values is not None and len(values) > 0:
alljointvalues.append(values)
else:
return None
finally:
for saver in statesavers:
saver.Restore()
if dooptimize:
curvalues = [self.robot.GetDOFValues(manip.GetArmIndices()) for manip in self.manips]
distancesolutions = []
for sols in sequence_cross_product(*alljointvalues):
dist = numpy.sum([numpy.sum(numpy.abs(sol0-sol1)) for sol0,sol1 in izip(sols,curvalues)])
distancesolutions.append([dist, sols])
distancesolutions.sort(lambda x,y: int(x[0]-y[0]))
for dist,sols in distancesolutions:
for sol,manip in izip(sols,self.manips):
self.robot.SetDOFValues(sol,manip.GetArmIndices())
if not self.robot.CheckSelfCollision():
if not (filteroptions&openravepy_int.IkFilterOptions.CheckEnvCollisions) or not self.robot.GetEnv().CheckCollision(self.robot):
return sols
else:
for sols in sequence_cross_product(*alljointvalues):
for sol,manip in izip(sols,self.manips):
self.robot.SetDOFValues(sol,manip.GetArmIndices())
if not self.robot.CheckSelfCollision():
if not (filteroptions&openravepy_int.IkFilterOptions.CheckEnvCollisions) or not self.robot.GetEnv().CheckCollision(self.robot):
return sols
return None
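# Example (sketch; the manipulator names are hypothetical): solve both arms of a
# dual-arm robot simultaneously for the grasp transforms Tleft and Tright.
#   solver = MultiManipIKSolver([robot.GetManipulator('leftarm'), robot.GetManipulator('rightarm')])
#   sols = solver.findMultiIKSolution([Tleft, Tright],
#                                     filteroptions=openravepy_int.IkFilterOptions.CheckEnvCollisions)
#   # sols is one joint-value array per manipulator, or None if no collision-free combination exists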
class SpaceSamplerExtra:
def __init__(self):
self.faceindices = self.facenumr = self.facenump = None
@staticmethod
def computeSepration(qarray):
"""used to test separation of a set of quaternions"""
qdists = numpy.zeros((qarray.shape[0],qarray.shape[0]))
for i,q in enumerate(qarray):
dists = numpy.abs(numpy.dot(qarray[(i+1):],q))
qdists[i,(i+1):] = qdists[(i+1):,i] = dists
qmaxdists = numpy.max(qdists,axis=0)
return numpy.arccos(numpy.minimum(1.0,numpy.max(qmaxdists))), numpy.arccos(numpy.minimum(1.0,numpy.min(qmaxdists)))
def computeFaceIndices(self,N):
if self.faceindices is None or len(self.faceindices[0]) < N:
indices = numpy.arange(N**2)
# separate the odd and even bits into odd,even
maxiter = int(numpy.log2(len(indices)))
oddbits = numpy.zeros(N**2,int)
evenbits = numpy.zeros(N**2,int)
mult = 1
for i in range(maxiter):
oddbits += (indices&1)*mult
evenbits += mult*((indices&2)/2)
indices >>= 2
mult *= 2
self.faceindices = [oddbits+evenbits,oddbits-evenbits]
if self.facenumr is None or len(self.facenumr) != N*12:
self.facenumr = numpy.reshape(numpy.transpose(numpy.tile([2,2,2,2,3,3,3,3,4,4,4,4],(N,1))),N*12)
self.facenump = numpy.reshape(numpy.transpose(numpy.tile([1,3,5,7,0,2,4,6,1,3,5,7],(N,1))),N*12)
def sampleS2(self,level=0,angledelta=None):
"""uses healpix algorithm with ordering from Yershova et. al. 2009 journal paper"""
if angledelta is not None:
# select the best sphere level matching angledelta;
# level,delta:
# [0, 1.0156751592381095]
# [1, 0.5198842203445676]
# [2, 0.25874144949351713]
# [3, 0.13104214473149575]
# [4, 0.085649339187184162]
level=max(0,int(0.5-numpy.log2(angledelta)))
Nside = 2**level
Nside2 = Nside**2
N = 12*Nside**2
self.computeFaceIndices(Nside**2)
# compute sphere z coordinate
jr = self.facenumr*Nside-numpy.tile(self.faceindices[0][0:Nside2],12)-1
nr = numpy.tile(Nside,N)
z = 2*(2*Nside-jr)/(3.0*Nside)
kshift = numpy.mod(jr-Nside,2)
# north pole test
northpoleinds = numpy.flatnonzero(jr<Nside)
nr[northpoleinds] = jr[northpoleinds]
z[northpoleinds] = 1.0 - nr[northpoleinds]**2*(1.0/(3.0*Nside2))
kshift[northpoleinds] = 0
# south pole test
southpoleinds = numpy.flatnonzero(jr>3*Nside)
nr[southpoleinds] = 4*Nside - jr[southpoleinds]
z[southpoleinds] = -1.0 + nr[southpoleinds]**2*(1.0/(3.0*Nside2))
kshift[southpoleinds] = 0
# compute pfi
facenump = numpy.reshape(numpy.transpose(numpy.tile([1,3,5,7,0,2,4,6,1,3,5,7],(Nside2,1))),N)
jp = (self.facenump*nr+numpy.tile(self.faceindices[1][0:Nside2],12)+1+kshift)/2
jp[jp>4*Nside] -= 4*Nside
jp[jp<1] += 4*Nside
return numpy.arccos(z),(jp-(kshift+1)*0.5)*((0.5*numpy.pi)/nr)
@staticmethod
def hopf2quat(hopfarray):
"""convert hopf rotation coordinates to quaternion"""
half0 = hopfarray[:,0]*0.5
half2 = hopfarray[:,2]*0.5
c0 = numpy.cos(half0)
c2 = numpy.cos(half2)
s0 = numpy.sin(half0)
s2 = numpy.sin(half2)
return numpy.c_[c0*c2,c0*s2,s0*numpy.cos(hopfarray[:,1]+half2),s0*numpy.sin(hopfarray[:,1]+half2)]
def sampleSO3(self,level=0,quatdelta=None):
"""Uniformly Sample 3D Rotations.
If quatdelta is specified, will compute the best level aiming for that average quaternion distance.
Algorithm From
<NAME>, <NAME>, <NAME>, <NAME> "Generating Uniform Incremental Grids on SO(3) Using the Hopf Fibration", International Journal of Robotics Research, Nov 13, 2009.
"""
if quatdelta is not None:
# level=0, quatdist = 0.5160220
# level=1: quatdist = 0.2523583
# level=2: quatdist = 0.120735
level=max(0,int(-0.5-numpy.log2(quatdelta)))
s1samples,step = numpy.linspace(0.0,2*numpy.pi,6*(2**level),endpoint=False,retstep=True)
s1samples += step*0.5
theta,pfi = self.sampleS2(level)
band = numpy.zeros((len(s1samples),3))
band[:,2] = s1samples
qarray = numpy.zeros((0,4))
for i in range(len(theta)):
band[:,0] = theta[i]
band[:,1] = pfi[i]
qarray = numpy.r_[qarray,self.hopf2quat(band)]
return qarray
@staticmethod
def sampleR3lattice(averagedist,boxdims):
"""low-discrepancy lattice sampling in using the roots of x^3-3x+1.
The samples are evenly distributed with an average distance of averagedist inside the box with extents boxextents.
Algorithim from "Geometric Discrepancy: An Illustrated Guide" by <NAME>"""
roots = numpy.array([2.8793852415718155,0.65270364466613917,-0.53208888623795614])
bases = numpy.c_[numpy.ones(3),roots,roots**2]
tbases = numpy.transpose(bases)
boxextents = 0.5*numpy.array(boxdims)
# determine the input bounds, which can be very large and inefficient...
bounds = numpy.array(((boxextents[0],boxextents[1],boxextents[2]),
(boxextents[0],boxextents[1],-boxextents[2]),
(boxextents[0],-boxextents[1],boxextents[2]),
(boxextents[0],-boxextents[1],-boxextents[2]),
(-boxextents[0],boxextents[1],boxextents[2]),
(-boxextents[0],boxextents[1],-boxextents[2]),
(-boxextents[0],-boxextents[1],boxextents[2]),
(-boxextents[0],-boxextents[1],-boxextents[2])))
inputbounds = numpy.max(numpy.dot(bounds,numpy.linalg.inv(tbases)),0)
scale = averagedist/numpy.sqrt(3.0)
X,Y,Z = numpy.mgrid[-inputbounds[0]:inputbounds[0]:scale,-inputbounds[1]:inputbounds[1]:scale,-inputbounds[2]:inputbounds[2]:scale]
p = numpy.c_[X.flat,Y.flat,Z.flat]
pts = numpy.dot(p,tbases)
ptsabs = numpy.abs(pts)
newpts = pts[numpy.logical_and(ptsabs[:,0]<=boxextents[0],numpy.logical_and(ptsabs[:,1]<=boxextents[1] ,ptsabs[:,2]<=boxextents[2]))]
newpts[:,0] += boxextents[0]
newpts[:,1] += boxextents[1]
newpts[:,2] += boxextents[2]
return newpts
@staticmethod
def sampleR3(averagedist,boxdims):
"""low-discrepancy sampling using primes.
The samples are evenly distributed with an average distance of averagedist inside the box with dimensions boxdims.
Algorithm from "Geometric Discrepancy: An Illustrated Guide" by <NAME>"""
minaxis = numpy.argmin(boxdims)
maxaxis = numpy.argmax(boxdims)
meddimdist = numpy.sort(boxdims)[1]
# convert average distance to number of samples.... do simple 3rd degree polynomial fitting...
x = meddimdist/averagedist
if x < 25.6:
N = int(numpy.polyval([ -3.50181522e-01, 2.70202333e+01, -3.10449514e+02, 1.07887093e+03],x))
elif x < 36.8:
N = int(numpy.polyval([ 4.39770585e-03, 1.10961031e+01, -1.40066591e+02, 1.24563464e+03],x))
else:
N = int(numpy.polyval([5.60147111e-01, -8.77459988e+01, 7.34286834e+03, -1.67779452e+05],x))
pts = numpy.zeros((N,3))
pts[:,0] = numpy.linspace(0.0,meddimdist,N)
pts[:,1] = meddimdist*numpy.mod(0.5+0.5*numpy.sqrt(numpy.arange(0,5.0*N,5.0)),1.0)
pts[:,2] = meddimdist*numpy.mod(0.5+3*numpy.sqrt(numpy.arange(0,13.0*N,13.0)),1.0)
if boxdims[minaxis] < meddimdist:
pts = pts[pts[:,minaxis]<=boxdims[minaxis],:]
if boxdims[maxaxis] > meddimdist:
# have to copy across the max dimension
numfullcopies = numpy.floor(boxdims[maxaxis]/meddimdist)
if len(pts) > 0:
oldpts = pts
pts = numpy.array(oldpts)
for i in range(int(numfullcopies)-1):
oldpts[:,maxaxis] += meddimdist
pts = numpy.r_[pts,oldpts]
if boxdims[maxaxis]/meddimdist > numfullcopies:
oldpts[:,maxaxis] += meddimdist
pts = numpy.r_[pts,oldpts[oldpts[:,maxaxis]<=boxdims[maxaxis],:]]
else:
# sample the center
pts = numpy.array([[0.0,0.0,0.0]])
return pts
def CompareBodies(body0,body1,comparegeometries=True,comparesensors=True,comparemanipulators=True,comparegrabbed=True,comparephysics=True,computeadjacent=True,epsilon=1e-10):
"""Compares that two bodies are structurally and positionally equivalent without hashes, used for debug checking.
"""
def transdist(list0,list1):
assert(len(list0)==len(list1))
return numpy.sum([numpy.sum(abs(item0-item1)) for item0, item1 in izip(list0,list1)])
assert(body0.IsRobot() == body1.IsRobot())
assert(len(body0.GetJoints())==len(body1.GetJoints()))
assert(len(body0.GetPassiveJoints()) == len(body1.GetPassiveJoints()))
assert(body0.GetDOF()==body1.GetDOF())
assert(body1.GetDescription()==body0.GetDescription())
assert(transdist(body0.GetTransform(), body1.GetTransform()) <= epsilon)
with body1:
body1.SetTransform(body0.GetTransform()) # in case
body1.SetDOFValues(body0.GetDOFValues()) # in case
joints0 = body0.GetJoints()+body0.GetPassiveJoints()
joints1 = body1.GetJoints()+body1.GetPassiveJoints()
for j0 in joints0:
assert( len(j0.GetName()) > 0 )
if j0.GetJointIndex() >= 0:
# if not passive, indices should match
j1 = joints1[j0.GetJointIndex()]
assert(j1.GetJointIndex()==j0.GetJointIndex() and j1.GetDOFIndex() == j0.GetDOFIndex())
else:
j1s = [j1 for j1 in joints1 if j0.GetName() == j1.GetName()]
assert( len(j1s) == 1 )
j1 = j1s[0]
assert( transdist(j0.GetAnchor(),j1.GetAnchor()) <= epsilon )
assert( j0.GetDOF() == j1.GetDOF() and j0.GetType() == j1.GetType() )
# todo, once physics is complete, uncomment
#assert( j0.GetHierarchyParentLink().GetName() == j1.GetHierarchyParentLink().GetName() )
#assert( j0.GetHierarchyChildLink().GetName() == j1.GetHierarchyChildLink().GetName() )
# cannot compare individual j0.GetInternalHierarchyXTransform() since representation is ambiguous
# compare product instead
assert( transdist(numpy.dot(j0.GetInternalHierarchyLeftTransform(),j0.GetInternalHierarchyRightTransform()), numpy.dot(j1.GetInternalHierarchyLeftTransform(), j1.GetInternalHierarchyRightTransform())) <= epsilon )
assert( j0.IsStatic() == j1.IsStatic() )
assert( transdist(j0.GetLimits(),j1.GetLimits()) <= epsilon )
assert( transdist(j0.GetWeights(),j1.GetWeights()) <= epsilon )
assert( transdist(j0.GetResolutions(),j1.GetResolutions()) <= epsilon )
for idof in range(j0.GetDOF()):
if not j0.IsStatic():
assert( abs(j0.GetMaxVel(idof)-j1.GetMaxVel(idof)) <= epsilon )
assert( abs(j0.GetMaxAccel(idof)-j1.GetMaxAccel(idof)) <= epsilon )
assert( abs(j0.GetWeight(idof)-j1.GetWeight(idof)) <= epsilon )
assert( abs(j0.GetResolution(idof)-j1.GetResolution(idof)) <= epsilon )
assert( j0.IsCircular(idof) == j1.IsCircular(idof) )
assert( j0.IsRevolute(idof) == j1.IsRevolute(idof) )
assert( j0.IsPrismatic(idof) == j1.IsPrismatic(idof) )
assert( transdist(j0.GetInternalHierarchyAxis(idof),j1.GetInternalHierarchyAxis(idof)) <= epsilon )
assert( j0.IsMimic(idof) == j1.IsMimic(idof) )
if j0.IsMimic(idof):
mimicjoints0 = [body0.GetJointFromDOFIndex(index).GetName() for index in j0.GetMimicDOFIndices(idof)]
mimicjoints1 = [body1.GetJointFromDOFIndex(index).GetName() for index in j1.GetMimicDOFIndices(idof)]
assert( mimicjoints0 == mimicjoints1 )
# is it possible to compare equations? perhaps just set random values and see if both robots behave the same?
# assert( j0.GetMimicEquation(idof) == j1.GetMimicEquation(idof) )
assert(len(body0.GetLinks())==len(body1.GetLinks()))
indexmap = []
for link0 in body0.GetLinks():
if len(link0.GetName()) == 0:
# skip
continue
link1s = [link1 for link1 in body1.GetLinks() if link0.GetName() == link1.GetName()]
assert( len(link1s) == 1 )
link1 = link1s[0]
indexmap.append(link1.GetIndex())
assert( transdist(link0.GetTransform(),link1.GetTransform()) <= epsilon )
assert( link0.IsEnabled() == link1.IsEnabled() )
#assert( link0.IsStatic() == link1.IsStatic() )
assert( len(link0.GetParentLinks()) == len(link1.GetParentLinks()) )
assert( all([lp0.GetName()==lp1.GetName() for lp0, lp1 in izip(link0.GetParentLinks(),link1.GetParentLinks())]) )
if comparephysics:
assert(abs(link0.GetMass()-link1.GetMass()) <= epsilon)
assert(transdist(link0.GetLocalMassFrame(),link1.GetLocalMassFrame()) <= epsilon)
assert(transdist(link0.GetGlobalCOM(),link1.GetGlobalCOM()) <= epsilon) # redundant
assert(transdist(link0.GetPrincipalMomentsOfInertia(),link1.GetPrincipalMomentsOfInertia()) <= epsilon)
if comparegeometries:
assert( len(link0.GetGeometries()) == len(link1.GetGeometries()) )
ab0=link0.ComputeAABB()
ab1=link1.ComputeAABB()
assert(transdist(ab0.pos(),ab1.pos()) <= epsilon*200) # tessellation
assert(transdist(ab0.extents(),ab1.extents()) <= epsilon*200) # tessellation
for ig,g0 in enumerate(link0.GetGeometries()):
g1=link1.GetGeometries()[ig]
assert(g0.GetType()==g1.GetType())
assert(transdist(g0.GetTransform(),g1.GetTransform()) <= epsilon)
assert(transdist(g0.GetBoxExtents(),g1.GetBoxExtents()) <= epsilon)
assert(transdist(g0.GetDiffuseColor(),g1.GetDiffuseColor()) <= epsilon)
assert(transdist(g0.GetAmbientColor(),g1.GetAmbientColor()) <= epsilon)
assert(g0.IsVisible()==g1.IsVisible())
if computeadjacent:
# the geometry and initial configuration determine adjacent links
adjacentlinks = set([tuple(sorted((indexmap[index0],indexmap[index1]))) for index0,index1 in body0.GetAdjacentLinks()])
assert(adjacentlinks == set(body1.GetAdjacentLinks()))
if body0.IsRobot():
robot0 = body0.GetEnv().GetRobot(body0.GetName())
robot1 = body1.GetEnv().GetRobot(body1.GetName())
if comparemanipulators:
assert(len(robot0.GetManipulators()) == len(robot1.GetManipulators()))
for manip0 in robot0.GetManipulators():
manip1 = robot1.GetManipulator(manip0.GetName())
assert(transdist(manip0.GetLocalToolTransform(),manip1.GetLocalToolTransform()) <= epsilon)
assert(manip0.GetBase().GetName() == manip1.GetBase().GetName())
assert(manip0.GetEndEffector().GetName() == manip1.GetEndEffector().GetName())
assert(all(manip0.GetArmIndices() == manip1.GetArmIndices()))
assert(all(manip0.GetGripperIndices() == manip1.GetGripperIndices()))
if comparegrabbed:
grabbed0 = robot0.GetGrabbed()
grabbed1 = robot1.GetGrabbed()
assert( set([body.GetName() for body in grabbed0]) == set([body.GetName() for body in grabbed1]) )
for g0 in grabbed0:
g1 = robot1.GetEnv().GetKinBody(g0.GetName())
grabbedlink0 = robot0.IsGrabbing(g0)
grabbedlink1 = robot1.IsGrabbing(g1)
assert(grabbedlink0.GetName()==grabbedlink1.GetName())
# compare the positions
if comparesensors:
pass
#assert(len(robot0.GetAttachedSensors()) == len(robot1.GetAttachedSensors()))
def CompareEnvironments(env,env2,options=openravepy_int.CloningOptions.Bodies,epsilon=1e-10):
"""compares two state of two environments and raises exceptions if anything is different, used for debugging.
Structural information of bodies is compared with hashes.
"""
if options & openravepy_int.CloningOptions.Bodies:
def transdist(list0,list1):
assert(len(list0)==len(list1))
return numpy.sum([numpy.sum(abs(item0-item1)) for item0, item1 in izip(list0,list1)])
bodies=env.GetBodies()
bodies2=env2.GetBodies()
assert(len(bodies)==len(bodies2))
for body in bodies:
body2 = env2.GetKinBody(body.GetName())
assert(body.GetKinematicsGeometryHash()==body2.GetKinematicsGeometryHash())
assert(transdist(body.GetLinkTransformations(),body2.GetLinkTransformations()) <= epsilon)
assert(transdist(body.GetLinkVelocities(),body2.GetLinkVelocities()) <= epsilon)
if body.GetDOF() > 0:
assert(transdist(body.GetDOFValues(),body2.GetDOFValues()) <= epsilon)
assert(transdist(body.GetDOFVelocities(),body2.GetDOFVelocities()) <= epsilon)
if body.IsRobot():
robot=env.GetRobot(body.GetName())
robot2=env2.GetRobot(body2.GetName())
grabbed = robot.GetGrabbed()
grabbed2 = robot2.GetGrabbed()
assert( set([body.GetName() for body in grabbed]) == set([body.GetName() for body in grabbed2]) )
assert( transdist(robot.GetActiveDOFIndices(),robot2.GetActiveDOFIndices()) == 0)
assert( robot.GetActiveManipulator().GetName() == robot2.GetActiveManipulator().GetName())
```
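A short sketch of how the comparison helpers above are typically exercised, assuming openravepy is installed, that CloningOptions is exposed at the openravepy top level as usual, and that one of the sample scenes shipped with OpenRAVE is available.
```python
from openravepy import Environment, CloningOptions, RaveDestroy
from openravepy.misc import CompareEnvironments

env = Environment()
try:
    env.Load('data/lab1.env.xml')  # any sample scene; this path is an assumption
    env2 = env.CloneSelf(CloningOptions.Bodies)
    # raises AssertionError if the clone differs from the original in any checked property
    CompareEnvironments(env, env2, options=CloningOptions.Bodies)
finally:
    RaveDestroy()
```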
#### File: openrave/release/generate_installer_windows.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2009-2010 <NAME> (<EMAIL>)'
__license__ = 'Apache License, Version 2.0'
from optparse import OptionParser
import os, sys, re, shutil, urllib
import numpy
import sympy
from types import ModuleType
from subprocess import Popen, PIPE
from distutils.sysconfig import get_python_lib
EnvVarUpdate = """
/**
* EnvVarUpdate.nsh
* : Environmental Variables: append, prepend, and remove entries
*
* WARNING: If you use StrFunc.nsh header then include it before this file
* with all required definitions. This is to avoid conflicts
*
* Usage:
* ${EnvVarUpdate} "ResultVar" "EnvVarName" "Action" "RegLoc" "PathString"
*
* Credits:
* Version 1.0
* * <NAME> (turnec2)
* * <NAME> (KiCHiK) and e-circ for developing the forerunners of this
* function: AddToPath, un.RemoveFromPath, AddToEnvVar, un.RemoveFromEnvVar,
* WriteEnvStr, and un.DeleteEnvStr
* * <NAME> (deguix) for StrTok
* * <NAME> (kenglish_hi) for StrContains
* * <NAME> (Smile2Me), <NAME> (deguix), and <NAME>
* (dandaman32) for StrReplace
*
* Version 1.1 (compatibility with StrFunc.nsh)
* * techtonik
*
* http://nsis.sourceforge.net/Environmental_Variables:_append%%2C_prepend%%2C_and_remove_entries
*
*/
!ifndef ENVVARUPDATE_FUNCTION
!define ENVVARUPDATE_FUNCTION
!verbose push
!verbose 3
!include "LogicLib.nsh"
!include "WinMessages.NSH"
!include "StrFunc.nsh"
; ---- Fix for conflict if StrFunc.nsh is already includes in main file -----------------------
!macro _IncludeStrFunction StrFuncName
!ifndef ${StrFuncName}_INCLUDED
${${StrFuncName}}
!endif
!ifndef Un${StrFuncName}_INCLUDED
${Un${StrFuncName}}
!endif
!define un.${StrFuncName} "${Un${StrFuncName}}"
!macroend
!insertmacro _IncludeStrFunction StrTok
!insertmacro _IncludeStrFunction StrStr
!insertmacro _IncludeStrFunction StrRep
; ---------------------------------- Macro Definitions ----------------------------------------
!macro _EnvVarUpdateConstructor ResultVar EnvVarName Action Regloc PathString
Push "${EnvVarName}"
Push "${Action}"
Push "${RegLoc}"
Push "${PathString}"
Call EnvVarUpdate
Pop "${ResultVar}"
!macroend
!define EnvVarUpdate '!insertmacro "_EnvVarUpdateConstructor"'
!macro _unEnvVarUpdateConstructor ResultVar EnvVarName Action Regloc PathString
Push "${EnvVarName}"
Push "${Action}"
Push "${RegLoc}"
Push "${PathString}"
Call un.EnvVarUpdate
Pop "${ResultVar}"
!macroend
!define un.EnvVarUpdate '!insertmacro "_unEnvVarUpdateConstructor"'
; ---------------------------------- Macro Definitions end-------------------------------------
;----------------------------------- EnvVarUpdate start----------------------------------------
!define hklm_all_users 'HKLM "SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment"'
!define hkcu_current_user 'HKCU "Environment"'
!macro EnvVarUpdate UN
Function ${UN}EnvVarUpdate
Push $0
Exch 4
Exch $1
Exch 3
Exch $2
Exch 2
Exch $3
Exch
Exch $4
Push $5
Push $6
Push $7
Push $8
Push $9
Push $R0
/* After this point:
-------------------------
$0 = ResultVar (returned)
$1 = EnvVarName (input)
$2 = Action (input)
$3 = RegLoc (input)
$4 = PathString (input)
$5 = Orig EnvVar (read from registry)
$6 = Len of $0 (temp)
$7 = tempstr1 (temp)
$8 = Entry counter (temp)
$9 = tempstr2 (temp)
$R0 = tempChar (temp) */
; Step 1: Read contents of EnvVarName from RegLoc
;
; Check for empty EnvVarName
${If} $1 == ""
SetErrors
DetailPrint "ERROR: EnvVarName is blank"
Goto EnvVarUpdate_Restore_Vars
${EndIf}
; Check for valid Action
${If} $2 != "A"
${AndIf} $2 != "P"
${AndIf} $2 != "R"
SetErrors
DetailPrint "ERROR: Invalid Action - must be A, P, or R"
Goto EnvVarUpdate_Restore_Vars
${EndIf}
${If} $3 == HKLM
ReadRegStr $5 ${hklm_all_users} $1 ; Get EnvVarName from all users into $5
${ElseIf} $3 == HKCU
ReadRegStr $5 ${hkcu_current_user} $1 ; Read EnvVarName from current user into $5
${Else}
SetErrors
DetailPrint 'ERROR: RegLoc is [$3] but must be "HKLM" or "HKCU"'
Goto EnvVarUpdate_Restore_Vars
${EndIf}
; Check for empty PathString
${If} $4 == ""
SetErrors
DetailPrint "ERROR: PathString is blank"
Goto EnvVarUpdate_Restore_Vars
${EndIf}
; Make sure we've got some work to do
${If} $5 == ""
${AndIf} $2 == "R"
SetErrors
DetailPrint "$1 is empty - Nothing to remove"
Goto EnvVarUpdate_Restore_Vars
${EndIf}
; Step 2: Scrub EnvVar
;
StrCpy $0 $5 ; Copy the contents to $0
; Remove spaces around semicolons (NOTE: spaces before the 1st entry or
; after the last one are not removed here but instead in Step 3)
${If} $0 != "" ; If EnvVar is not empty ...
${Do}
${${UN}StrStr} $7 $0 " ;"
${If} $7 == ""
${ExitDo}
${EndIf}
${${UN}StrRep} $0 $0 " ;" ";" ; Remove '<space>;'
${Loop}
${Do}
${${UN}StrStr} $7 $0 "; "
${If} $7 == ""
${ExitDo}
${EndIf}
${${UN}StrRep} $0 $0 "; " ";" ; Remove ';<space>'
${Loop}
${Do}
${${UN}StrStr} $7 $0 ";;"
${If} $7 == ""
${ExitDo}
${EndIf}
${${UN}StrRep} $0 $0 ";;" ";"
${Loop}
; Remove a leading or trailing semicolon from EnvVar
StrCpy $7 $0 1 0
${If} $7 == ";"
StrCpy $0 $0 "" 1 ; Change ';<EnvVar>' to '<EnvVar>'
${EndIf}
StrLen $6 $0
IntOp $6 $6 - 1
StrCpy $7 $0 1 $6
${If} $7 == ";"
StrCpy $0 $0 $6 ; Change ';<EnvVar>' to '<EnvVar>'
${EndIf}
; DetailPrint "Scrubbed $1: [$0]" ; Uncomment to debug
${EndIf}
/* Step 3. Remove all instances of the target path/string (even if "A" or "P")
$6 = bool flag (1 = found and removed PathString)
$7 = a string (e.g. path) delimited by semicolon(s)
$8 = entry counter starting at 0
$9 = copy of $0
$R0 = tempChar */
${If} $5 != "" ; If EnvVar is not empty ...
StrCpy $9 $0
StrCpy $0 ""
StrCpy $8 0
StrCpy $6 0
${Do}
${${UN}StrTok} $7 $9 ";" $8 "0" ; $7 = next entry, $8 = entry counter
${If} $7 == "" ; If we've run out of entries,
${ExitDo} ; were done
${EndIf} ;
; Remove leading and trailing spaces from this entry (critical step for Action=Remove)
${Do}
StrCpy $R0 $7 1
${If} $R0 != " "
${ExitDo}
${EndIf}
StrCpy $7 $7 "" 1 ; Remove leading space
${Loop}
${Do}
StrCpy $R0 $7 1 -1
${If} $R0 != " "
${ExitDo}
${EndIf}
StrCpy $7 $7 -1 ; Remove trailing space
${Loop}
${If} $7 == $4 ; If string matches, remove it by not appending it
StrCpy $6 1 ; Set 'found' flag
${ElseIf} $7 != $4 ; If string does NOT match
${AndIf} $0 == "" ; and the 1st string being added to $0,
StrCpy $0 $7 ; copy it to $0 without a prepended semicolon
${ElseIf} $7 != $4 ; If string does NOT match
${AndIf} $0 != "" ; and this is NOT the 1st string to be added to $0,
StrCpy $0 $0;$7 ; append path to $0 with a prepended semicolon
${EndIf} ;
IntOp $8 $8 + 1 ; Bump counter
${Loop} ; Check for duplicates until we run out of paths
${EndIf}
; Step 4: Perform the requested Action
;
${If} $2 != "R" ; If Append or Prepend
${If} $6 == 1 ; And if we found the target
DetailPrint "Target is already present in $1. It will be removed and"
${EndIf}
${If} $0 == "" ; If EnvVar is (now) empty
StrCpy $0 $4 ; just copy PathString to EnvVar
${If} $6 == 0 ; If found flag is either 0
${OrIf} $6 == "" ; or blank (if EnvVarName is empty)
DetailPrint "$1 was empty and has been updated with the target"
${EndIf}
${ElseIf} $2 == "A" ; If Append (and EnvVar is not empty),
StrCpy $0 $0;$4 ; append PathString
${If} $6 == 1
DetailPrint "appended to $1"
${Else}
DetailPrint "Target was appended to $1"
${EndIf}
${Else} ; If Prepend (and EnvVar is not empty),
StrCpy $0 $4;$0 ; prepend PathString
${If} $6 == 1
DetailPrint "prepended to $1"
${Else}
DetailPrint "Target was prepended to $1"
${EndIf}
${EndIf}
${Else} ; If Action = Remove
${If} $6 == 1 ; and we found the target
DetailPrint "Target was found and removed from $1"
${Else}
DetailPrint "Target was NOT found in $1 (nothing to remove)"
${EndIf}
${If} $0 == ""
DetailPrint "$1 is now empty"
${EndIf}
${EndIf}
; Step 5: Update the registry at RegLoc with the updated EnvVar and announce the change
;
ClearErrors
${If} $3 == HKLM
WriteRegExpandStr ${hklm_all_users} $1 $0 ; Write it in all users section
${ElseIf} $3 == HKCU
WriteRegExpandStr ${hkcu_current_user} $1 $0 ; Write it to current user section
${EndIf}
IfErrors 0 +4
MessageBox MB_OK|MB_ICONEXCLAMATION "Could not write updated $1 to $3" /SD IDOK
DetailPrint "Could not write updated $1 to $3"
Goto EnvVarUpdate_Restore_Vars
; "Export" our change
SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000
EnvVarUpdate_Restore_Vars:
;
; Restore the user's variables and return ResultVar
Pop $R0
Pop $9
Pop $8
Pop $7
Pop $6
Pop $5
Pop $4
Pop $3
Pop $2
Pop $1
Push $0 ; Push my $0 (ResultVar)
Exch
Pop $0 ; Restore his $0
FunctionEnd
!macroend ; EnvVarUpdate UN
!insertmacro EnvVarUpdate ""
!insertmacro EnvVarUpdate "un."
;----------------------------------- EnvVarUpdate end----------------------------------------
!verbose pop
!endif
"""
nsiscript = """
# name the installer
!include "MUI2.nsh"
!include "StrFunc.nsh"
!include "Library.nsh"
!include "StrFunc.nsh"
%(EnvVarUpdate)s
Name "OpenRAVE %(openrave_version)s"
Caption "Open Robotics Automation Virtual Environment %(openrave_version_full)s for vc%(vcversion)s"
outFile "%(output_name)s.exe"
SetDateSave on
SetDatablockOptimize on
CRCCheck on
SilentInstall normal
BGGradient ecf8fa ffffff 112255
#InstallColors FF8080 000030
XPStyle on
SetCompress auto
#SetCompressor lzma
InstallDir "$PROGRAMFILES\\OpenRAVE-%(openrave_version)s"
AutoCloseWindow false
SetOverwrite on
InstallDirRegKey HKLM "Software\\OpenRAVE" "InstallRoot"
RequestExecutionLevel admin
!define MUI_WELCOMEPAGE_TEXT "http://www.openrave.org$\\n$\\nGit Commit %(openrave_commit)s$\\n$\\nC++ Developers: All DLLs are compiled with Multithreaded DLL Runtime Library.$\\n$\\nMost examples are written in Python and can be directly executed from the Start Menu."
!define MUI_ABORTWARNING
!insertmacro MUI_PAGE_WELCOME
!insertmacro MUI_PAGE_LICENSE "%(license)s"
!insertmacro MUI_PAGE_COMPONENTS
!insertmacro MUI_PAGE_DIRECTORY
;Start Menu Folder Page Configuration
Var StartMenuFolder
!define MUI_STARTMENUPAGE_REGISTRY_ROOT "HKCU"
!define MUI_STARTMENUPAGE_REGISTRY_KEY "Software\\OpenRAVE\\%(openrave_version)s"
!define MUI_STARTMENUPAGE_REGISTRY_VALUENAME "Start Menu Folder"
!insertmacro MUI_PAGE_STARTMENU Application $StartMenuFolder
!insertmacro MUI_PAGE_INSTFILES
!insertmacro MUI_UNPAGE_CONFIRM
!insertmacro MUI_UNPAGE_INSTFILES
!insertmacro MUI_LANGUAGE "English"
${StrTrimNewLines}
Function GetVCRedist
# check for the visual studio runtime
MessageBox MB_YESNO "Need to install Microsoft Visual Studio Runtime Redistributable (x86) for vc%(vcversion)s. Continue with auto-download and install?" /SD IDYES IDNO done
nsisdl::download /TIMEOUT=30000 "%(vcredist_url)s" $TEMP\\vcredist.exe
Pop $R0 ;Get the return value
StrCmp $R0 "success" install
MessageBox MB_OK "Download failed: $R0" /SD IDOK
Quit
install:
ExecWait "$TEMP\\vcredist.exe"
Delete "$TEMP\\vcredist.exe"
done:
FunctionEnd
Function DetectVCRedist
GetDLLVersion "MSVCR%(vcversion)s" $R0 $R1
StrCmp $R0 "" 0 done
Call GetVCRedist
done:
FunctionEnd
# check for boost installation
Function GetBoost
MessageBox MB_YESNO "Need to install boost %(boost_version)s. Select 'Multithreaded, DLL' and make sure the installed DLLs are added to 'Path'. Continue with auto-download and install?" /SD IDYES IDNO done
File "installers\\%(boost_installer)s"
ExecWait '"$INSTDIR\\%(boost_installer)s"' $1
Delete "$INSTDIR\\%(boost_installer)s"
DetailPrint $1
ClearErrors
ReadRegStr $0 HKLM "SOFTWARE\\boostpro.com\\%(boost_version)s" InstallRoot
IfErrors 0 done
MessageBox MB_OK "Failed to find boost %(boost_version)s" /SD IDOK
Abort "Cannot install"
Quit
done:
FunctionEnd
Function DetectBoost
ClearErrors
ReadRegStr $0 HKLM "SOFTWARE\\boostpro.com\\%(boost_version)s" InstallRoot
IfErrors 0 done0
Call GetBoost
Goto done1
done0:
IfFileExists $0\\lib\\*.dll done1
Call GetBoost
done1:
DetailPrint "boost installation at: $0, copying necessary DLLs"
CopyFiles $0\\lib\\boost*vc%(vcversion)s*.dll $INSTDIR\\bin
FunctionEnd
Function GetQt4
MessageBox MB_YESNO "Need to install Qt %(qt_version)s in 'C:\\Qt\\%(qt_version)s'. Continue with auto-download and install?" /SD IDYES IDNO done
nsisdl::download /TIMEOUT=30000 "%(qt_url)s" $TEMP\\qt-installer.exe
Pop $R0 ;Get the return value
StrCmp $R0 "success" install
MessageBox MB_OK "Download failed: $R0" /SD IDOK
Quit
install:
ExecWait "$TEMP\\qt-installer.exe"
Delete "$TEMP\\qt-installer.exe"
done:
FunctionEnd
Function DetectQt4
GetDLLVersion "C:\\Qt\\%(qt_version)s\\bin\\QtCore4.dll" $R0 $R1
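# Note (added for clarity): GetDLLVersion returns the file version as two packed DWORDs,
# $R0 = major<<16 | minor and $R1 = release<<16 | build; the IntOp lines below unpack
# them and rebuild a "major.minor.release" string for comparison against the Qt version.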
IntOp $R2 $R0 >> 16
IntOp $R2 $R2 & 0x0000ffff
IntOp $R3 $R0 & 0x0000ffff
IntOp $R4 $R1 >> 16
IntOp $R4 $R4 & 0x0000ffff
IntOp $R5 $R1 & 0x0000ffff
StrCpy $0 "$R2.$R3.$R4"
StrCmp $0 "%(qt_version)s" done 0
Call GetQt4
done:
CopyFiles C:\\Qt\\%(qt_version)s\\bin\\QtCore4.dll $INSTDIR\\bin
CopyFiles C:\\Qt\\%(qt_version)s\\bin\\QtGui4.dll $INSTDIR\\bin
CopyFiles C:\\Qt\\%(qt_version)s\\bin\\QtOpenGL4.dll $INSTDIR\\bin
CopyFiles C:\\Qt\\%(qt_version)s\\bin\\Qt3Support4.dll $INSTDIR\\bin
FunctionEnd
Section
SetOutPath $INSTDIR
CreateDirectory $INSTDIR\\bin # for copying DLLs
CreateDirectory $INSTDIR\\share
CreateDirectory $INSTDIR\\share\\openrave-%(openrave_soversion)s
Call DetectVCRedist
Call DetectBoost
Call DetectQt4
# start menu
!insertmacro MUI_STARTMENU_WRITE_BEGIN Application
CreateDirectory "$SMPROGRAMS\\$StartMenuFolder"
#CreateDirectory "$SMPROGRAMS\\$StartMenuFolder\\databases"
!insertmacro MUI_STARTMENU_WRITE_END
SectionEnd
Function GetPython
MessageBox MB_YESNO "Need to install Python %(python_version)s. Continue with auto-download and install?" /SD IDYES IDNO done
StrCpy $2 "$TEMP\\python-%(python_version_full)s.msi"
nsisdl::download /TIMEOUT=30000 %(python_url)s $2
Pop $R0 ;Get the return value
StrCmp $R0 "success" install
MessageBox MB_OK "Download failed: $R0" /SD IDOK
Quit
install:
ExecWait '"msiexec" /i $2'
Delete $2
done:
FunctionEnd
Function DetectPython
ClearErrors
ReadRegStr $0 HKLM "SOFTWARE\\Python\\PythonCore\\%(python_version)s\\InstallPath" ""
IfErrors 0 done
Call GetPython
done:
FunctionEnd
Function GetNumPy
MessageBox MB_YESNO "Need to install Python NumPy %(numpy_version)s Library. Continue with auto-download and install?" /SD IDYES IDNO done
StrCpy $2 "numpy-%(numpy_version)s-win32-superpack-python%(python_version)s.exe"
nsisdl::download /TIMEOUT=30000 %(python_numpy_url)s $TEMP\\$2
Pop $R0 ;Get the return value
StrCmp $R0 "success" install
MessageBox MB_OK "Download failed: $R0" /SD IDOK
Quit
install:
ExecWait "$TEMP\\$2"
Delete "$TEMP\\$2"
done:
FunctionEnd
Function DetectNumPy
ClearErrors
ReadRegStr $1 HKLM "SOFTWARE\\Python\\PythonCore\\%(python_version)s\\InstallPath" ""
IfErrors 0 start
MessageBox MB_OK "Failed to find python installation" /SD IDOK
Quit
start:
ExecWait '"$1\\python.exe" -c "import numpy; assert(numpy.version.version==\\"%(numpy_version)s\\")"' $0
StrCmp $0 "0" done
Call GetNumPy
done:
FunctionEnd
Function GetPythonSetupTools
MessageBox MB_YESNO "Need to install Python SetupTools 0.6c11 Library. Continue with auto-download and install?" /SD IDYES IDNO done
StrCpy $2 "setuptools-0.6c11.win32-py%(python_version)s.exe"
nsisdl::download /TIMEOUT=30000 %(python_setuptools_url)s $TEMP\\$2
Pop $R0 ;Get the return value
StrCmp $R0 "success" install
MessageBox MB_OK "Download failed: $R0" /SD IDOK
Quit
install:
ExecWait "$TEMP\\$2"
Delete "$TEMP\\$2"
done:
FunctionEnd
Function DetectPythonSetupTools
ClearErrors
ReadRegStr $1 HKLM "SOFTWARE\\Python\\PythonCore\\%(python_version)s\\InstallPath" ""
IfErrors 0 start
MessageBox MB_OK "Failed to find python installation" /SD IDOK
Quit
start:
ExecWait '"$1\\python.exe" -c "from pkg_resources import resource_filename"' $0
StrCmp $0 "0" done
Call GetPythonSetupTools
done:
FunctionEnd
Function GetSymPy
MessageBox MB_YESNO "Need to install Python SymPy %(sympy_version)s Library. Continue with auto-download and install?" /SD IDYES IDNO done
StrCpy $2 "sympy-%(sympy_version)s.win32.exe"
nsisdl::download /TIMEOUT=30000 %(python_sympy_url)s $TEMP\\$2
Pop $R0 ;Get the return value
StrCmp $R0 "success" install
MessageBox MB_OK "Download failed: $R0" /SD IDOK
Quit
install:
ExecWait "$TEMP\\$2"
Delete "$TEMP\\$2"
done:
FunctionEnd
Function DetectSymPy
ClearErrors
ReadRegStr $1 HKLM "SOFTWARE\\Python\\PythonCore\\%(python_version)s\\InstallPath" ""
IfErrors 0 start
MessageBox MB_OK "Failed to find python installation" /SD IDOK
Quit
start:
ExecWait '"$1\\python.exe" -c "import sympy; assert(sympy.__version__ == \\"%(sympy_version)s\\")"' $0
StrCmp $0 "0" done
Call GetSymPy
done:
FunctionEnd
SectionGroup /e "Python Bindings" secpython
Section
SetOutPath $INSTDIR
Call DetectPython
Call DetectNumPy
Call DetectSymPy
Call DetectPythonSetupTools
SetOutPath $INSTDIR\\bin
File /r %(installdir)s\\bin\\*.py
CreateDirectory $INSTDIR\\%(openravepy_reldir)s\\openravepy
SetOutPath $INSTDIR\\%(openravepy_reldir)s
File /r /x *.pyd %(installdir)s\\%(openravepy_reldir)s\\openravepy
%(install_python_dll)s
!insertmacro MUI_STARTMENU_WRITE_BEGIN Application
CreateShortCut "$SMPROGRAMS\\$StartMenuFolder\\openravepy ipython.lnk" "$INSTDIR\\bin\\openrave.py" "-i" "$INSTDIR\\bin\\openrave.py" 0
CreateDirectory "$SMPROGRAMS\\$StartMenuFolder\\Python Examples"
%(openrave_python_shortcuts)s
!insertmacro MUI_STARTMENU_WRITE_END
SectionEnd
Section "Add to PYTHONPATH"
${EnvVarUpdate} $0 "PYTHONPATH" "A" "HKLM" "$INSTDIR\\%(openravepy_reldir)s"
SectionEnd
SectionGroupEnd
SectionGroup /e "Octave Bindings" secoctave
Section
SetOutPath $INSTDIR\\share\\openrave-%(openrave_soversion)s
File /r %(installdir)s\\share\\openrave-%(openrave_soversion)s\\octave
!insertmacro MUI_STARTMENU_WRITE_BEGIN Application
CreateShortCut "$SMPROGRAMS\\$StartMenuFolder\\Octave Examples.lnk" "$INSTDIR\\share\\openrave-%(openrave_soversion)s\\octave" "" "$INSTDIR\\share\\openrave-%(openrave_soversion)s\\octave" 0
!insertmacro MUI_STARTMENU_WRITE_END
SectionEnd
Section "Add to OCTAVE_PATH"
${EnvVarUpdate} $0 "OCTAVE_PATH" "A" "HKLM" "$INSTDIR\\share\\openrave-%(openrave_soversion)s\\octave"
SectionEnd
SectionGroupEnd
Section
SetOutPath $INSTDIR
WriteRegStr HKLM SOFTWARE\\OpenRAVE "" "%(openrave_version)s"
WriteRegStr HKLM "SOFTWARE\\OpenRAVE\\%(openrave_version)s" "InstallRoot" "$INSTDIR"
# register with cmake installer
WriteRegStr HKLM "SOFTWARE\\Kitware\\CMake\\Packages\\OpenRAVE" "%(openrave_version)s" "$INSTDIR\\lib\\cmake\\openrave-%(openrave_soversion)s"
File /r /x *.dll /x *.py %(installdir)s\\bin
File /r %(installdir)s\\include
SetOutPath $INSTDIR\\lib
File /r %(installdir)s\\lib\\*.lib
File /r %(installdir)s\\lib\\cmake
SetOutPath $INSTDIR\\share\\openrave-%(openrave_soversion)s
File /r %(installdir)s\\share\\openrave-%(openrave_soversion)s\\cppexamples
File /r %(installdir)s\\share\\openrave-%(openrave_soversion)s\\data
File /r %(installdir)s\\share\\openrave-%(openrave_soversion)s\\matlab
File /r %(installdir)s\\share\\openrave-%(openrave_soversion)s\\models
File /r %(installdir)s\\share\\openrave-%(openrave_soversion)s\\plugins
File /r %(installdir)s\\share\\openrave-%(openrave_soversion)s\\robots
File /r %(installdir)s\\share\\openrave-%(openrave_soversion)s\\LICENSE*
File /r %(installdir)s\\share\\openrave-%(openrave_soversion)s\\COPYING
SetOutPath $INSTDIR
%(install_dll)s
FileOpen $0 $INSTDIR\\include\\openrave-%(openrave_soversion)s\\openrave\\config.h w
${StrRep} $2 "$INSTDIR" "\\" "\\\\"
${StrRep} $1 "%(openrave_config)s" "__INSTDIR__" $2
FileWrite $0 $1
FileClose $0
WriteUninstaller $INSTDIR\\uninstall.exe
!insertmacro MUI_STARTMENU_WRITE_BEGIN Application
CreateShortCut "$SMPROGRAMS\\$StartMenuFolder\\openrave.lnk" "$INSTDIR\\bin\\openrave.exe" "" "$INSTDIR\\bin\\openrave.exe" 0
CreateShortCut "$SMPROGRAMS\\$StartMenuFolder\\C++ Examples.lnk" "$INSTDIR\\share\\openrave-%(openrave_soversion)s\\cppexamples" "" "$INSTDIR\\share\\openrave-%(openrave_soversion)s\\cppexamples" 0
CreateShortCut "$SMPROGRAMS\\$StartMenuFolder\\Robots.lnk" "$INSTDIR\\share\\openrave-%(openrave_soversion)s\\robots" "" "$INSTDIR\\share\\openrave-%(openrave_soversion)s\\robots" 0
CreateShortCut "$SMPROGRAMS\\$StartMenuFolder\\Uninstall.lnk" "$INSTDIR\\uninstall.exe" "" "$INSTDIR\\uninstall.exe" 0
%(openrave_shortcuts)s
!insertmacro MUI_STARTMENU_WRITE_END
SectionEnd
Section /o "Extra Robots" secrobots
DetailPrint "Getting robot list"
nsisdl::download /TIMEOUT=30000 https://openrave.svn.sourceforge.net/svnroot/openrave/data/robots/manipulatorlist $TEMP\\manipulatorlist
Pop $R0 ;Get the return value
StrCmp $R0 "success" getrobots
MessageBox MB_OK "Robot List download failed: $R0" /SD IDOK
Goto done
getrobots:
ClearErrors
FileOpen $2 $TEMP\\manipulatorlist r
readrobot:
FileRead $2 $1
IfErrors donerobot
${StrTrimNewLines} $3 "$INSTDIR\\share\\openrave-%(openrave_soversion)s\\robots\\$1"
${StrTrimNewLines} $4 https://openrave.svn.sourceforge.net/svnroot/openrave/data/robots/$1
DetailPrint "Robot '$1' from $4"
nsisdl::download /TIMEOUT=30000 "$4" "$TEMP\\robot"
Pop $R0 ;Get the return value
StrCmp $R0 "success" copyrobot
DetailPrint "$1: $R0"
Goto readrobot
copyrobot:
CopyFiles "$TEMP\\robot" "$3"
Goto readrobot
donerobot:
FileClose $2
done:
SectionEnd
Section "Add to Path" secpath
${EnvVarUpdate} $0 "Path" "A" "HKLM" "$INSTDIR\\bin"
SectionEnd
#Language strings
LangString desc_secpython ${LANG_ENGLISH} "Installs Python bindings."
LangString desc_secoctave ${LANG_ENGLISH} "Installs Octave bindings."
LangString desc_secpath ${LANG_ENGLISH} "Sets the environment path so OpenRAVE DLLs can be found."
LangString desc_secrobots ${LANG_ENGLISH} "Downloads and installs all extra COLLADA robots."
#Assign language strings to sections
!insertmacro MUI_FUNCTION_DESCRIPTION_BEGIN
!insertmacro MUI_DESCRIPTION_TEXT ${secpython} $(desc_secpython)
!insertmacro MUI_DESCRIPTION_TEXT ${secoctave} $(desc_secoctave)
!insertmacro MUI_DESCRIPTION_TEXT ${secpath} $(desc_secpath)
!insertmacro MUI_DESCRIPTION_TEXT ${secrobots} $(desc_secrobots)
!insertmacro MUI_FUNCTION_DESCRIPTION_END
# create a section to define what the uninstaller does.
# the section will always be named "Uninstall"
Section "Uninstall"
DeleteRegKey HKLM SOFTWARE\\OpenRAVE\\%(openrave_version)s
DeleteRegValue HKLM "SOFTWARE\\Kitware\\CMake\\Packages\\OpenRAVE" "%(openrave_version)s"
ReadRegStr $0 HKLM "SOFTWARE\\OpenRAVE" ""
StrCmp %(openrave_version)s $0 0 noremove
DeleteRegValue HKLM SOFTWARE\\OpenRAVE ""
noremove:
${un.EnvVarUpdate} $0 "PYTHONPATH" "R" "HKLM" "$INSTDIR\\%(openravepy_reldir)s"
${un.EnvVarUpdate} $0 "OCTAVE_PATH" "R" "HKLM" "$INSTDIR\\share\\openrave-%(openrave_soversion)s\\octave"
${un.EnvVarUpdate} $0 "Path" "R" "HKLM" "$INSTDIR\\bin"
# have to store install dir since it gets wiped out somewhere
StrCpy $1 "$INSTDIR"
# Always delete uninstaller first?
Delete "$INSTDIR\\uninstall.exe"
%(uninstall_dll)s
RMDir /r "$SMPROGRAMS\\$StartMenuFolder"
# have to set current path outside of installation dir
SetOutPath "$1\\.."
RMDir /r "$1\\bin"
RMDir /r "$1\\include"
RMDir /r "$1\\lib"
RMDir /r "$1\\share"
Delete "$1\\%(boost_installer)s"
RMDir "$1"
SectionEnd
"""
vcredist_urls = {'100':'http://www.microsoft.com/downloads/info.aspx?na=41&SrcFamilyId=A7B7A05E-6DE6-4D3A-A423-37BF0912DB84&SrcDisplayLang=en&u=http%3a%2f%2fdownload.microsoft.com%2fdownload%2f5%2fB%2fC%2f5BC5DBB3-652D-4DCE-B14A-475AB85EEF6E%2fvcredist_x86.exe',
'90':'http://www.microsoft.com/downloads/info.aspx?na=41&SrcFamilyId=A5C84275-3B97-4AB7-A40D-3802B2AF5FC2&SrcDisplayLang=en&u=http%3a%2f%2fdownload.microsoft.com%2fdownload%2fd%2fd%2f9%2fdd9a82d0-52ef-40db-8dab-795376989c03%2fvcredist_x86.exe'}
qt_urls = {'100':'http://qt-msvc-installer.googlecode.com/files/qt-win32-opensource-%s-vs2008.exe',
'90':'http://qt-msvc-installer.googlecode.com/files/qt-win32-opensource-%s-vs2008.exe'}
if __name__ == "__main__":
parser = OptionParser(description='Creates a NSI installer for windows')
parser.add_option('--lang',action="store",type='string',dest='lang',default='en',
help='Language folder.')
parser.add_option('--installdir',action="store",type='string',dest='installdir',default=None,
help='Directory of the cmake installation')
parser.add_option('--commit',action="store",type='string',dest='commit',default=None,
help='Git commit hash.')
(options,args) = parser.parse_args()
python_installdir = 'lib\\site-packages\\'
os.environ['Path'] = os.path.join(os.path.abspath(options.installdir),'bin')+';'+os.environ['PATH']
qt_version = Popen(['openrave-config','--qt-version'],stdout=PIPE).communicate()[0].strip()
version = Popen(['openrave-config','--version'],stdout=PIPE).communicate()[0].strip()
soversion = '.'.join(version.split('.')[0:2])
_soversion = '_'.join(version.split('.')[0:2])
openravepy_dir = os.path.abspath(get_python_lib(1,prefix=options.installdir))
openravepy_reldir = os.path.relpath(openravepy_dir,os.path.abspath(options.installdir))
sys.path.insert(0,openravepy_dir)
openravepy = __import__('openravepy')
openravepy.examples = __import__('openravepy.examples',fromlist=['openravepy'])
assert(openravepy.__version__==version)
args = dict()
args['openrave_version'] = openravepy.__version__
args['openrave_version_full'] = openravepy.__version__
args['openrave_soversion'] = soversion
args['openrave_commit'] = ''
if options.commit is not None:
args['openrave_version_full'] += '-'+options.commit[0:6]
args['openrave_commit'] = options.commit
args['vcversion'] = os.path.split(options.installdir)[1][2:]
args['vcredist_url'] = vcredist_urls[args['vcversion']]
args['qt_version'] = qt_version
args['qt_url'] = qt_urls[args['vcversion']]%args['qt_version']
args['openrave_shortcuts'] = ''
args['openrave_python_shortcuts'] = ''
args['output_name'] = 'openrave-%(openrave_version_full)s-win32-vc%(vcversion)s-setup'%args
args['installdir'] = os.path.abspath(options.installdir)
args['install_dll'] = ''
args['install_python_dll'] = ''
args['openravepy_reldir'] = openravepy_reldir
args['uninstall_dll'] = ''
args['EnvVarUpdate'] = EnvVarUpdate
args['license'] = os.path.join(options.installdir,'share','openrave-'+soversion,'COPYING')
# install the dlls (allows us to use them without modifying the path)
for dllname in os.listdir(os.path.join(options.installdir,'bin')):
if os.path.splitext(dllname)[1] == '.dll':
args['install_dll'] += '!insertmacro InstallLib DLL NOTSHARED NOREBOOT_PROTECTED %s\\bin\\%s $INSTDIR\\bin\\%s $INSTDIR\n'%(args['installdir'],dllname,dllname)
args['uninstall_dll'] += '!insertmacro UninstallLib DLL NOTSHARED NOREBOOT_PROTECTED $INSTDIR\\bin\\%s\n'%(dllname)
# python dlls
_soversionpy = ''
for dllname in os.listdir(os.path.join(openravepy_dir,'openravepy','_openravepy_'+_soversionpy)):
if os.path.splitext(dllname)[1] == '.pyd':
args['install_python_dll'] += '!insertmacro InstallLib DLL NOTSHARED NOREBOOT_PROTECTED %s\\openravepy\\_openravepy_%s\\%s $INSTDIR\\%s\\openravepy\\_openravepy_%s\\%s $INSTDIR\n'%(openravepy_dir,_soversionpy,dllname,openravepy_reldir,_soversionpy,dllname)
args['uninstall_dll'] += '!insertmacro UninstallLib DLL NOTSHARED NOREBOOT_PROTECTED $INSTDIR\\%s\\openravepy\\_openravepy_%s\\%s\n'%(openravepy_reldir,_soversionpy,dllname)
# add the runable examples
for name in dir(openravepy.examples):
if not name.startswith('__'):
try:
m=__import__('openravepy.examples.'+name)
if type(m) is ModuleType:
path = '$INSTDIR\\%s\\openravepy\\_openravepy_%s\\examples\\%s.py'%(openravepy_reldir,_soversionpy,name)
args['openrave_python_shortcuts'] += 'CreateShortCut "$SMPROGRAMS\\$StartMenuFolder\\Python Examples\\%s.lnk" "%s" "" "%s" 0\n'%(name,path,path)
args['openrave_python_shortcuts'] += 'CreateShortCut "$SMPROGRAMS\\$StartMenuFolder\\Python Examples\\%s Documentation.lnk" "http://openrave.org/en/main/openravepy/examples.%s.html" "" "C:\\WINDOWS\\system32\\shell32.dll" 979\n'%(name,name)
except ImportError:
pass
# not sure how useful this would be, perhaps a database generator GUI would help?
for name in []:#dir(openravepy.databases):
if not name.startswith('__'):
try:
m=__import__('openravepy.databases.'+name)
if type(m) is ModuleType:
path = '$INSTDIR\\%s\\openravepy\\_openravepy_%s\\databases\\%s.py'%(openravepy_reldir,_soversionpy,name)
args['openrave_python_shortcuts'] += 'CreateShortCut "$SMPROGRAMS\\$StartMenuFolder\\Databases\\%s.lnk" "%s" "" "%s" 0\n'%(name,path,path)
except ImportError:
pass
# edit the config.h
config = open(os.path.join(options.installdir,'include','openrave-'+soversion,'openrave','config.h'),'r').read()
pattern=re.compile(args['installdir'].replace('\\','/'),re.IGNORECASE)
args['openrave_config'] = pattern.sub('__INSTDIR__',config).replace('\n','$\\n').replace('"','$\\"').replace('\r','$\\r')
open(os.path.join(options.installdir,'include','openrave-'+soversion,'openrave','config.h'),'w').write(config)
# boost installation
boostversion = Popen(['openrave-config','--boost-version'],stdout=PIPE).communicate()[0].strip()
boostversionsep = boostversion.split('.')
if len(boostversionsep) == 2:
boostversionsep.append('0')
for boost_version in ['%s.%s.%s'%tuple(boostversionsep),'%s.%s'%tuple(boostversionsep[0:2])]:
boost_installer = 'boost_%s_setup.exe'%boost_version.replace('.','_')
if not os.path.exists(os.path.join('installers',boost_installer)):
try:
boosturl = 'http://www.boostpro.com/download/'+boost_installer
localfile, headers = urllib.urlretrieve(boosturl)
if headers['content-type'].find('application') < 0:
continue
try:
os.mkdir('installers')
except OSError:
pass
shutil.copyfile(localfile,os.path.join('installers',boost_installer))
except IOError:
continue # website down?
args['boost_installer'] = boost_installer
args['boost_version'] = boost_version
break
if not 'boost_version' in args:
raise ValueError('failed to find boost installer for version %s'%boostversionsep)
# python installation
args['python_version'] = '%s.%s'%(sys.version_info[0:2])
args['python_version_full'] = '%s.%s.%s'%(sys.version_info[0:3])
args['python_architecture'] = ''
args['numpy_version'] = numpy.version.version
args['sympy_version'] = sympy.__version__
python_url = 'http://www.python.org/ftp/python/%(python_version_full)s/python-%(python_version_full)s%(python_architecture)s.msi'%args
args['python_url'] = python_url
args['python_numpy_url'] = 'http://downloads.sourceforge.net/project/numpy/NumPy/%(numpy_version)s/numpy-%(numpy_version)s-win32-superpack-python%(python_version)s.exe'%args
args['python_setuptools_url'] = 'http://pypi.python.org/packages/%(python_version)s/s/setuptools/setuptools-0.6c11.win32-py%(python_version)s.exe'%args
args['python_sympy_url'] = 'http://sympy.googlecode.com/files/sympy-%(sympy_version)s.win32.exe'%args
open(args['output_name']+'.nsi','w').write(nsiscript%args)
os.system('"C:\\Program Files\\NSIS\\makensis.exe" %s.nsi'%args['output_name'])
def test():
class empty: pass
options = empty()
options.installdir = 'C:\\jenkins\\workspace\\openrave_windows\\install\\vc10'
```
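The script above generates the final `.nsi` by substituting a dictionary of values into the `nsiscript` template with Python's `%(name)s` formatting and then running `makensis.exe` on the result. A minimal sketch of that pattern, with hypothetical version and output names standing in for the real `args` dictionary:
```python
# Minimal sketch of the templating step used above; the version and output
# name are hypothetical placeholders, not values taken from a real build.
nsi_template = 'Name "OpenRAVE %(openrave_version)s"\noutFile "%(output_name)s.exe"\n'
args = {'openrave_version': '0.9.0',
        'output_name': 'openrave-0.9.0-win32-vc100-setup'}
with open(args['output_name'] + '.nsi', 'w') as f:
    f.write(nsi_template % args)
# the real script then runs: "C:\Program Files\NSIS\makensis.exe" <output_name>.nsi
```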
#### File: openrave/sandbox/evaluateplanning.py
```python
from __future__ import with_statement # for python 2.5
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2009-2010'
__license__ = 'Apache License, Version 2.0'
from openravepy import *
from openravepy.examples import mobilemanipulation,graspplanning
from openravepy.databases import inversereachability,linkstatistics
from numpy import *
import numpy,time,os,pickle
from itertools import izip
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from scipy import stats
class StatisticsData:
pass
class OpenRAVEEvaluator(metaclass.AutoReloader):
@staticmethod
def fntimer(f, *args, **kwargs):
starttime=time.time()
res = f(*args,**kwargs)
return time.time()-starttime,res
@staticmethod
def getdatafiles(dataprefix):
robotdir,prefix = os.path.split(dataprefix)
allnames = os.listdir(robotdir)
return [os.path.join(robotdir,name) for name in allnames if name.startswith(prefix)]
@staticmethod
def drawbarcomparison(statdata,stattitle,statylabel,datalabels=('Inverse Reachability', 'Random')):
fig = plt.figure()
ax = fig.add_subplot(111)
ind = arange(len(statdata)) # the x locations for the groups
width = 0.35 # the width of the bars
rects1 = ax.bar(ind, [mean(d[1]) for d in statdata], width, color='r', yerr=[std(d[1]) for d in statdata])
rects2 = ax.bar(ind+width, [mean(d[2]) for d in statdata], width, color='y', yerr=[std(d[2]) for d in statdata])
ax.set_ylabel(statylabel)
ax.set_title(stattitle)
ax.set_xticks(ind+width)
ax.set_ylim(0,1.3*numpy.max([max(mean(d[1]),mean(d[2])) for d in statdata]))
ax.set_xticklabels([d[0] for d in statdata])
ax.legend( (rects1[0], rects2[0]), datalabels, loc='upper left' )
for i,rects in enumerate((rects1,rects2)):
for j,rect in enumerate(rects):
height = rect.get_height()
ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%.3f'%height, ha='center', va='bottom')
# draw median
m = median(statdata[j][i+1])
ax.plot([rect.get_x(),rect.get_x()+rect.get_width()], [m,m],color='k')
ax.text(rect.get_x()+rect.get_width()/2., 0.95*m, 'median: %.3f'%m, ha='center', va='top')
plt.show()
return fig
class EvaluateGrasping(OpenRAVEEvaluator):
pass
class EvaluateInverseKinematics(OpenRAVEEvaluator):
pass
class EvaluateReachability(OpenRAVEEvaluator):
pass
class EvaluateInverseReachability(OpenRAVEEvaluator):
def __init__(self,env,scenename):
self.env = env
self.env.Reset()
#self.env.StopSimulation()
self.scenename = scenename
self.env.Load(scenename)
self.robot = self.env.GetRobots()[0]
self.planning = graspplanning.GraspPlanning(self.robot,dests=[])
sceneprefix = os.path.split(self.scenename)[1]
if sceneprefix.find('.') >= 0:
sceneprefix = sceneprefix[0:sceneprefix.find('.')]
self.dataprefix = self.getdataprefix(self.robot)+sceneprefix
@staticmethod
def getdataprefix(robot):
return os.path.join(robot.GetEnv().GetHomeDirectory(),'robot.'+robot.GetKinematicsGeometryHash(),'irstats.')
def testgraspables(self,Nsamples=1000,logllthresh=2.4,weight=1.0):
for gmodel,dests in self.planning.graspables:
self.robot.SetActiveManipulator(gmodel.manip)
irmodel=databases.inversereachability.InverseReachabilityModel(self.robot)
if not irmodel.load():
irmodel.autogenerate()
gr = mobilemanipulation.GraspReachability(robot=self.robot,irgmodels=[(irmodel,gmodel)])
starttime = time.time()
densityfn,samplerfn,bounds = gr.computeGraspDistribution(logllthresh=logllthresh)
print 'time to build distribution: %fs'%(time.time()-starttime)
#h = gr.irmodel.showBaseDistribution(densityfn,bounds,self.target.GetTransform()[2,3],thresh=1.0)
data = StatisticsData()
data.Nsamples = Nsamples
data.samplingavg = array(())
data.samplingfailures = array(())
for i in range(10):
starttime = time.time()
goals,numfailures = gr.sampleGoals(lambda N: samplerfn(N=N,weight=weight),N=Nsamples,timeout=Nsamples)
data.samplingavg = r_[data.samplingavg,min(Nsamples,(time.time()-starttime)/(len(goals)+1e-8))]
data.samplingfailures = r_[data.samplingfailures,numfailures/float(len(goals)+numfailures)]
data.randomavg = array(())
data.randomfailures = array(())
Trobot = self.robot.GetTransform()
validgrasps,validindices = gmodel.computeValidGrasps(checkik=False)
Tgrasps = [(gmodel.getGlobalGraspTransform(grasp),i) for i,grasp in enumerate(validgrasps)]
bounds = array(((0,-1.2,-1.2),(2*pi,1.2,1.2)))
def randomsampler(N,weight=1.0):
indices = random.randint(0,len(Tgrasps),N)
angles = 0.5*random.rand(N)*(bounds[1,0]-bounds[0,0])+bounds[0,0]
XY = [Tgrasps[i][0][0:2,3]+random.rand(2)*(bounds[1,1:3]-bounds[0,1:3])+bounds[0,1:3] for i in indices]
return c_[cos(angles),zeros((N,2)),sin(angles),array(XY),tile(Trobot[2,3],N)],[(gmodel,i) for i in indices],([],[])
for i in range(10):
starttime = time.time()
goals,numfailures = gr.sampleGoals(randomsampler,N=Nsamples,timeout=Nsamples)
data.randomavg = r_[data.randomavg,min(Nsamples,(time.time()-starttime)/(len(goals)+1e-8))]
data.randomfailures = r_[data.randomfailures,numfailures/float(len(goals)+numfailures)]
with self.env:
data.samplingtimes = [self.fntimer(gr.sampleValidPlacementIterator(weight=weight,logllthresh=logllthresh,randomgrasps=True,randomplacement=False).next)[0] for i in range(Nsamples)]
# drop the slowest 1% of samples
data.samplingtimes = sort(data.samplingtimes)[0:int(floor(Nsamples*0.99))]
data.randomtimes = [self.fntimer(gr.sampleValidPlacementIterator(weight=weight,logllthresh=logllthresh,randomgrasps=True,randomplacement=True).next)[0] for i in range(Nsamples)]
data.randomtimes = sort(data.randomtimes)[0:int(floor(Nsamples*0.99))]
data.robotname = self.robot.GetName()
data.targetname = gmodel.target.GetName()
datafilename = self.dataprefix+'.'+gmodel.target.GetName()+'.pp'
pickle.dump(data,open(datafilename,'w'))
print 'finished inversereachability'
@staticmethod
def gatherdata(robotname):
env = Environment()
robot = env.ReadRobotXMLFile(robotname)
env.AddRobot(robot)
dataprefix = EvaluateInverseReachability.getdataprefix(robot)
datafiles = EvaluateInverseReachability.getdatafiles(dataprefix)
allavg = []
allfailures = []
alltimes = []
for datafile in datafiles:
try:
data = pickle.load(open(datafile,'r'))
except:
continue
allavg.append((data.targetname,data.samplingavg,data.randomavg))
allfailures.append((data.targetname,data.samplingfailures,data.randomfailures))
alltimes.append((data.targetname,data.samplingtimes,data.randomtimes))
# find all files starting with dataprefix and combine their data
fig = EvaluateInverseReachability.drawbarcomparison(allavg,'Robot %s\nAverage Valid Configuration Sampling Time'%robot.GetName(),'seconds')
fig.savefig(dataprefix+'average.pdf',format='pdf')
plt.close(fig)
fig = EvaluateInverseReachability.drawbarcomparison(allfailures,'Robot %s\nSample Failure Probability'%robot.GetName(),'samples')
fig.savefig(dataprefix+'failures.pdf',format='pdf')
plt.close(fig)
fig = EvaluateInverseReachability.drawbarcomparison(alltimes,'Robot %s\nTime to First Valid Configuration in New Scene'%robot.GetName(),'seconds')
fig.savefig(dataprefix+'times.pdf',format='pdf')
plt.close(fig)
robot = None
env.Destroy()
@staticmethod
def plot(avg,firsttimes):
"""histogram plotting"""
fig = plt.figure()
ax = fig.add_subplot(111)
delta = 0.2
maxtime = 20.0
n, bins, patches = ax.hist(firsttimes, bins=maxtime/delta,range=[0,maxtime], normed=1, facecolor='green', alpha=0.75)
# add a 'best fit' line
# params = stats.genexpon.fit(firsttimes)
# bincenters = 0.5*(bins[1:]+bins[:-1])
# y = stats.genexpon.pdf( bincenters, *params)
# l = ax.plot(bincenters, y, 'r--', linewidth=1)
ax.set_xlabel('Time to First Solution (Average: %f)'%mean(firsttimes))
ax.set_ylabel('Probability')
#ax.set_title(r'$\mathrm{Histogram\ of\ IQ:}\ \mu=100,\ \sigma=15$')
ax.set_xlim(0.0,maxtime)
ax.set_ylim(0,1/delta)
ax.grid(True)
plt.show()
return fig
@staticmethod
def run():
import evaluateplanning
logllthresh = 2.4
Nsamples = 100
weight = 0.5
env = Environment()
self = evaluateplanning.EvaluateInverseReachability(env,'data/wamtest1.env.xml')
self.testgraspables(Nsamples=Nsamples,logllthresh=logllthresh)
self = evaluateplanning.EvaluateInverseReachability(env,'data/wamtest2.env.xml')
self.testgraspables(Nsamples=Nsamples,logllthresh=logllthresh)
env.Destroy()
# save data
evaluateplanning.EvaluateInverseReachability.gatherdata('robots/barrettsegway.robot.xml')
class EvaluateDistanceMetric(OpenRAVEEvaluator):
def __init__(self,env,scenename):
self.env = env
self.env.Reset()
self.env.StopSimulation()
self.scenename = scenename
self.env.Load(scenename)
self.robot = self.env.GetRobots()[0]
self.planning = graspplanning.GraspPlanning(self.robot,dests=[])
sceneprefix = os.path.split(self.scenename)[1]
if sceneprefix.find('.') >= 0:
sceneprefix = sceneprefix[0:sceneprefix.find('.')]
self.dataprefix = self.getdataprefix(self.robot)+sceneprefix
self.lsmodel = linkstatistics.LinkStatisticsModel(self.robot)
if not self.lsmodel.load():
self.lsmodel.autogenerate()
@staticmethod
def getdataprefix(robot):
return os.path.join(robot.GetEnv().GetHomeDirectory(),'robot.'+robot.GetKinematicsGeometryHash(),'diststats.')
def testgraspables(self,weightexp=0.005,N=10):
gmodel,dests = self.planning.graspables[0]
validgrasps,validindices = gmodel.computeValidGrasps()
Tgrasps = [gmodel.getGlobalGraspTransform(grasp) for grasp in validgrasps]
data = StatisticsData()
data.robotname = self.robot.GetName()
data.catimes = []
for type in range(2):
self.lsmodel.setRobotWeights(weightexp=weightexp,type=type)
data.catimes.append([self.fntimer(self.planning.basemanip.MoveToHandPosition,matrices=Tgrasps[0:1],maxtries=1,seedik=4,maxiter=10000,execute=False)[0] for i in range(N)])
datafilename = self.dataprefix+'.pp'
pickle.dump(data,open(datafilename,'w'))
@staticmethod
def gatherdata(robotnames):
env = Environment()
alltimes = []
for robotname in robotnames:
env.Reset()
robot = env.ReadRobotXMLFile(robotname)
env.AddRobot(robot)
dataprefix = EvaluateDistanceMetric.getdataprefix(robot)
datafiles = EvaluateDistanceMetric.getdatafiles(dataprefix)
catimes = [[],[]]
for datafile in datafiles:
try:
data = pickle.load(open(datafile,'r'))
except:
continue
catimes[0] += data.catimes[0]
catimes[1] += data.catimes[1]
alltimes.append((robot.GetName(),array(catimes[0]),array(catimes[1])))
# find all files starting with dataprefix and combine their data
fig = EvaluateInverseReachability.drawbarcomparison(alltimes,'Robot %s\nPlanning Times (Weights)'%robot.GetName(),'seconds',datalabels=('Volume-Dependent Weights', 'Uniform Weight (%f)'))
fig.savefig(dataprefix+'pdf',format='pdf')
plt.close(fig)
env.Destroy()
@staticmethod
def run():
import evaluateplanning
env = Environment()
self = evaluateplanning.EvaluateDistanceMetric(env,'data/wamtest1.env.xml')
self.testgraspables(N=100)
env.Destroy()
# save data
evaluateplanning.EvaluateDistanceMetric.gatherdata(['robots/barrettsegway.robot.xml'])
class EvaluateResolutions(OpenRAVEEvaluator):
def __init__(self,env,scenename):
self.env = env
self.env.Reset()
self.env.StopSimulation()
self.scenename = scenename
self.env.Load(scenename)
self.robot = self.env.GetRobots()[0]
self.planning = graspplanning.GraspPlanning(self.robot,dests=[])
sceneprefix = os.path.split(self.scenename)[1]
if sceneprefix.find('.') >= 0:
sceneprefix = sceneprefix[0:sceneprefix.find('.')]
self.dataprefix = self.getdataprefix(self.robot)+sceneprefix
self.lsmodel = linkstatistics.LinkStatisticsModel(self.robot)
if not self.lsmodel.load():
self.lsmodel.autogenerate()
@staticmethod
def getdataprefix(robot):
return os.path.join(robot.GetEnv().GetHomeDirectory(),'robot.'+robot.GetKinematicsGeometryHash(),'resstats.')
def testgraspables(self,xyzdelta=0.005,N=10):
gmodel,dests = self.planning.graspables[0]
validgrasps,validindices = gmodel.computeValidGrasps()
Tgrasps = [gmodel.getGlobalGraspTransform(grasp) for grasp in validgrasps]
data = StatisticsData()
data.robotname = self.robot.GetName()
self.lsmodel.setRobotResolution(xyzdelta)
resolutions1 = [self.robot.GetJointResolutions(),self.robot.GetAffineTranslationResolution(),self.robot.GetAffineRotationAxisResolution()]
minrotres = min(r_[resolutions1[0], resolutions1[2]])
resolutions2 = [tile(minrotres,len(self.robot.GetJoints())),[xyzdelta,xyzdelta,xyzdelta],tile(minrotres,4)]
data.resolutions = (resolutions1,resolutions2)
data.catimes = []
for resolutions in data.resolutions:
for r,j in izip(resolutions[0],self.robot.GetJoints()):
j.SetResolution(r)
self.robot.SetAffineTranslationResolution(resolutions[1])
self.robot.SetAffineRotationAxisResolution(resolutions[2])
data.catimes.append([self.fntimer(self.planning.basemanip.MoveToHandPosition,matrices=Tgrasps[0:1],maxtries=1,seedik=4,maxiter=10000,execute=False)[0] for i in range(N)])
datafilename = self.dataprefix+'.pp'
pickle.dump(data,open(datafilename,'w'))
@staticmethod
def gatherdata(robotnames):
env = Environment()
alltimes = []
for robotname in robotnames:
env.Reset()
robot = env.ReadRobotXMLFile(robotname)
env.AddRobot(robot)
dataprefix = EvaluateResolutions.getdataprefix(robot)
datafiles = EvaluateResolutions.getdatafiles(dataprefix)
catimes = [[],[]]
for datafile in datafiles:
try:
data = pickle.load(open(datafile,'r'))
except:
continue
catimes[0] += data.catimes[0]
catimes[1] += data.catimes[1]
alltimes.append((robot.GetName(),array(catimes[0]),array(catimes[1])))
# find all files starting with dataprefix and combine their data
fig = EvaluateInverseReachability.drawbarcomparison(alltimes,'Robot %s\nPlanning Times (Resolution)'%robot.GetName(),'seconds',datalabels=('Swept Volume Resolutions', 'Minimum Resolution (%f)'%data.resolutions[1][0][0]))
fig.savefig(dataprefix+'pdf',format='pdf')
plt.close(fig)
env.Destroy()
@staticmethod
def run():
import evaluateplanning
env = Environment()
self = evaluateplanning.EvaluateResolutions(env,'data/wamtest1.env.xml')
self.testgraspables(N=100)
env.Destroy()
# save data
evaluateplanning.EvaluateResolutions.gatherdata(['robots/barrettsegway.robot.xml'])
class EvaluateManipulation(OpenRAVEEvaluator):
pass
class EvaluateMobileManipulation(OpenRAVEEvaluator):
pass
if __name__ == "__main__":
pass
```
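`OpenRAVEEvaluator.drawbarcomparison` above expects a list of `(label, samples_a, samples_b)` tuples and draws paired bars of the sample means with error bars and per-bar median markers. A minimal sketch of that input layout with synthetic timing data (the labels and numbers are made up, and the call itself is left commented out since importing the module requires an OpenRAVE installation):
```python
# Synthetic example of the statdata layout consumed by drawbarcomparison:
# one tuple per target, holding a label and two arrays of timing samples.
import numpy as np
statdata = [('mug', np.random.rand(20) + 0.5, np.random.rand(20) + 1.0),
            ('box', np.random.rand(20) + 0.3, np.random.rand(20) + 0.8)]
# from evaluateplanning import OpenRAVEEvaluator   # requires openravepy
# fig = OpenRAVEEvaluator.drawbarcomparison(statdata,
#     'Average Valid Configuration Sampling Time', 'seconds')
```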
#### File: sandbox/mintime/ZMP.py
```python
from openravepy import *
from numpy import *
import numpy # for numpy.linalg.norm used in ComputeJacobians
def v2t(v):
T=eye(4)
T[0:3,3]=v
return T
##########################################################################
def ComputeJacobians(config,delta,i,params):
base_T=config[0]
base_vel=config[1] # Not used here
base_acc=config[2] # Not used here
q=config[3]
qd=config[4]
qdd=config[5]
robot=params['robot']
base_link=robot.GetLinks()[0]
with robot:
base_link.SetTransform(v2t(base_T))
robot.SetDOFValues(q)
rpq_0=robot.CalculateJacobian(i,robot.GetLinks()[i].GetGlobalCOM())
ro_0=robot.CalculateAngularVelocityJacobian(i)
n=len(q)
norm_qd=numpy.linalg.norm(qd)
if norm_qd<1e-10:
rpqqfs=zeros((3,n))
roqfs=zeros((3,n))
else:
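# Clarifying note (added): the block below is a finite-difference approximation of
# (dJ/dq)*qd; it steps a distance delta along the unit velocity direction, differences
# the Jacobians, and rescales by |qd|/delta.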
qdunit=delta/norm_qd*qd
with robot:
robot.SetDOFValues(q+qdunit)
rpqqfs=norm_qd/delta*(robot.CalculateJacobian(i,robot.GetLinks()[i].GetGlobalCOM())-rpq_0)
roqfs=norm_qd/delta*(robot.CalculateAngularVelocityJacobian(i)-ro_0)
return [rpq_0,ro_0,rpqqfs,roqfs]
##########################################################################
def ComputeCOM(config,params):
base_T=config[0]
q=config[1]
robot=params['robot']
exclude_list=params['exclude_list']
base_link=robot.GetLinks()[0]
n=len(q)
with robot:
base_link.SetTransform(v2t(base_T))
robot.SetDOFValues(q)
com_pos=array([k.GetGlobalCOM() for k in robot.GetLinks()])
masses=[k.GetMass() for k in robot.GetLinks()]
M=sum(masses)
weighted_com=zeros(3)
for i in range(n+1):
if i in exclude_list:
continue
weighted_com+=masses[i]*array(com_pos[i])
res=weighted_com/M
return res
def ComputeZMP(config,params):
base_T=config[0]
base_vel=config[1]
base_acc=config[2]
q=config[3]
qd=config[4]
qdd=config[5]
n=len(q)
robot=params['robot']
g=params['gravity']
moment_coef=params['moment_coef'] # Usually =1, sometimes =0 for testing purpose
exclude_list=params['exclude_list']
base_link=robot.GetLinks()[0]
with robot:
base_link.SetTransform(v2t(base_T))
robot.SetDOFValues(q)
robot.SetDOFVelocities(qd)
com_pos=array([k.GetGlobalCOM() for k in robot.GetLinks()])
vel=robot.GetLinkVelocities()
acc=robot.GetLinkAccelerations(qdd) # Includes gravity term
for i in range(n):
vel[i,0:3]=vel[i,0:3]+base_vel
acc[i,0:3]=acc[i,0:3]+base_acc
transforms=[k.GetTransform()[0:3,0:3] for k in robot.GetLinks()]
masses=[k.GetMass() for k in robot.GetLinks()]
inertiae=[k.GetLocalInertia() for k in robot.GetLinks()]
localCOM=[k.GetLocalCOM() for k in robot.GetLinks()]
xnum=0
ynum=0
denum=0
for i in range(n+1):
if i in exclude_list:
continue
# Compute the inertia matrix in the global frame
R=transforms[i]
Ii=dot(R,dot(inertiae[i],transpose(R)))
ri=dot(R,localCOM[i])
# Compute the inertia moment
omegai=vel[i,3:6]
omegadi=acc[i,3:6]
Mi=moment_coef*(dot(Ii,omegadi)+cross(omegai,dot(Ii,omegai)))
com_vel=vel[i,0:3]+cross(omegai,ri)
com_acc=acc[i,0:3]+cross(omegai,cross(omegai,ri))+cross(omegadi,ri)
# Extract the position and accelerations
xi=com_pos[i,0]
yi=com_pos[i,1]
zi=com_pos[i,2]
xddi=com_acc[0]
yddi=com_acc[1]
zddi=com_acc[2]
# Testing purpose
# print '-------'
# print i
# print xddi
# print yddi
# print zddi
# Compute the numerators and denominator
xnum+=masses[i]*(zddi*xi-xddi*zi)-Mi[1]
ynum+=masses[i]*(zddi*yi-yddi*zi)-Mi[0]
denum+=masses[i]*zddi
return array([xnum/denum,ynum/denum])
def ComputeZMPTraj(traj,params_init):
n_steps=traj.n_steps
t_vect=traj.t_vect
q_vect=traj.q_vect
qd_vect=traj.qd_vect
qdd_vect=traj.qdd_vect
zmp_vect=zeros((2,n_steps))
for i in range(n_steps):
q=q_vect[:,i]
qd=qd_vect[:,i]
qdd=qdd_vect[:,i]
# Here we assume there is no base link rotation
zmp=ComputeZMP([q[0:3],qd[0:3],qdd[0:3],q[6:len(q)],qd[6:len(q)],qdd[6:len(q)]],params_init)
zmp_vect[:,i]=zmp
return zmp_vect
def ComputeCOMTraj(traj,params_init):
n_steps=traj.n_steps
t_vect=traj.t_vect
q_vect=traj.q_vect
com_vect=zeros((3,n_steps))
for i in range(n_steps):
q=q_vect[:,i]
# Here we assume there is no base link rotation
com_vect[:,i]=ComputeCOM([q[0:3],q[6:len(q)]],params_init)
return com_vect
##########################################################################
def ComputeCoefsFractionZMP(config_pure,params):
base_T=config_pure[0]
base_s=config_pure[1]
base_ss=config_pure[2]
q=config_pure[3]
qs=config_pure[4]
qss=config_pure[5]
n=len(q)
robot=params['robot']
g=params['gravity']
exclude_list=params['exclude_list']
moment_coef=params['moment_coef']
base_link=robot.GetLinks()[0]
#Initialize the coefficients
ax,bx,cx=0,0,0
ay,by,cy=0,0,0
d,e,f=0,0,0
for i in range(n+1):
if i in exclude_list:
continue
params={'linkindex':i,'robot':robot}
with robot:
base_link.SetTransform(v2t(base_T))
robot.SetDOFValues(q)
LocalI=robot.GetLinks()[i].GetLocalInertia()
R=robot.GetLinks()[i].GetTransform()[0:3,0:3]
I=dot(R,dot(LocalI,transpose(R)))
m=robot.GetLinks()[i].GetMass()
rp=robot.GetLinks()[i].GetGlobalCOM()
# Terms in x,y,z
x=rp[0]
y=rp[1]
z=rp[2]
# Compute the Jacobians
delta=1e-10
[rp_q,romega,rp_qqXqs,romega_qXqs]=ComputeJacobians(config_pure,delta,i,params)
# Terms in xdd, ydd, zdd
apdd=dot(rp_q,qs)+base_s
bpdd=dot(rp_q,qss)+dot(qs,transpose(rp_qqXqs))+base_ss
axdd=apdd[0]
bxdd=bpdd[0]
aydd=apdd[1]
bydd=bpdd[1]
azdd=apdd[2]
bzdd=bpdd[2]
# Testing purpose
# sd=1
# sdd=1
# print '-------'
# print i
# print axdd*sdd+bxdd*sd*sd
# print aydd*sdd+bydd*sd*sd
# print azdd*sdd+bzdd*sd*sd
# Terms in omega
if moment_coef>0:
romegaXqs=dot(romega,qs)
aM=dot(dot(I,romega),qs)
bM=dot(I,dot(romega,qss)+dot(qs,transpose(romega_qXqs)))+cross(romegaXqs,dot(I,romegaXqs))
aMx=moment_coef*aM[0]
bMx=moment_coef*bM[0]
aMy=moment_coef*aM[1]
bMy=moment_coef*bM[1]
else:
[aMx,bMx,aMy,bMy]=[0,0,0,0]
# Computations of the coefficients of the numerators and denominator
ax+=m*(azdd*x-axdd*z)-aMy
bx+=m*(bzdd*x-bxdd*z)-bMy
cx+=m*g*x
ay+=m*(azdd*y-aydd*z)-aMx
by+=m*(bzdd*y-bydd*z)-bMx
cy+=m*g*y
d+=m*azdd
e+=m*bzdd
f+=m*g
return [ax,bx,cx,ay,by,cy,d,e,f]
```
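`ComputeZMP` above accumulates, per link, the moments of the inertial forces about the ground plane and divides by the total vertical force, with gravity already folded into the link accelerations. A minimal standalone sketch of the same numerator/denominator structure for two point masses, with made-up positions and accelerations and the angular-momentum terms dropped:
```python
# Minimal sketch of the ZMP sums used in ComputeZMP above, for two point masses.
# Positions and accelerations are hypothetical; the moment terms are omitted.
import numpy as np

g = 9.81
masses = [1.0, 2.0]
com_pos = np.array([[0.10, 0.00, 0.80],
                    [-0.05, 0.10, 1.20]])        # (x, y, z) of each mass
com_acc = np.array([[0.20, 0.00, g],
                    [-0.10, 0.30, g + 0.5]])     # accelerations with gravity included

xnum = ynum = denum = 0.0
for m, (x, y, z), (xdd, ydd, zdd) in zip(masses, com_pos, com_acc):
    xnum += m * (zdd * x - xdd * z)
    ynum += m * (zdd * y - ydd * z)
    denum += m * zdd
print(np.array([xnum / denum, ynum / denum]))    # [x_zmp, y_zmp]
```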
#### File: sympy/core/numbers.py
```python
from core import C
from sympify import converter, sympify, _sympify, SympifyError
from basic import Basic
from singleton import S, Singleton
from expr import Expr, AtomicExpr
from decorators import _sympifyit, deprecated
from cache import cacheit, clear_cache
import sympy.mpmath as mpmath
import sympy.mpmath.libmp as mlib
from sympy.mpmath.libmp import mpf_pow, mpf_pi, mpf_e, phi_fixed
from sympy.mpmath.ctx_mp import mpnumeric
import decimal
rnd = mlib.round_nearest
# TODO: we should use the warnings module
_errdict = {"divide": False}
def seterr(divide=False):
"""
Should sympy raise an exception on 0/0 or return a nan?
divide == True .... raise an exception
divide == False ... return nan
"""
if _errdict["divide"] != divide:
clear_cache()
_errdict["divide"] = divide
# (a,b) -> gcd(a,b)
_gcdcache = {}
# TODO caching with decorator, but not to degrade performance
def igcd(a, b):
"""Computes positive, integer greatest common divisor of two numbers.
The algorithm is based on the well known Euclid's algorithm. To
improve speed, igcd() has its own caching mechanism implemented.
"""
try:
return _gcdcache[(a,b)]
except KeyError:
if a and b:
if b < 0:
b = -b
while b:
a, b = b, a % b
else:
a = abs(a or b)
_gcdcache[(a,b)] = a
return a
def ilcm(a, b):
"""Computes integer least common multiple of two numbers. """
if a == 0 and b == 0:
return 0
else:
return a * b // igcd(a, b)
def igcdex(a, b):
"""Returns x, y, g such that g = x*a + y*b = gcd(a, b).
>>> from sympy.core.numbers import igcdex
>>> igcdex(2, 3)
(-1, 1, 1)
>>> igcdex(10, 12)
(-1, 1, 2)
>>> x, y, g = igcdex(100, 2004)
>>> x, y, g
(-20, 1, 4)
>>> x*100 + y*2004
4
"""
if (not a) and (not b):
return (0, 1, 0)
if not a:
return (0, b//abs(b), abs(b))
if not b:
return (a//abs(a), 0, abs(a))
if a < 0:
a, x_sign = -a, -1
else:
x_sign = 1
if b < 0:
b, y_sign = -b, -1
else:
y_sign = 1
x, y, r, s = 1, 0, 0, 1
while b:
(c, q) = (a % b, a // b)
(a, b, r, s, x, y) = (b, c, x-q*r, y-q*s, r, s)
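# Clarifying note (added): the loop keeps a == x*|a_in| + y*|b_in| and
# b == r*|a_in| + s*|b_in| as invariants, so once b reaches 0 the tuple
# (x, y, a) holds the Bezout coefficients and the gcd, up to the sign fixes below.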
return (x*x_sign, y*y_sign, a)
class Number(AtomicExpr):
"""
Represents any kind of number in sympy.
Floating point numbers are represented by the Float class.
Integer numbers (of any size), together with rational numbers (again, there
is no limit on their size) are represented by the Rational class.
If you want to represent, for example, ``1+sqrt(2)``, then you need to do::
Rational(1) + sqrt(Rational(2))
"""
is_commutative = True
is_comparable = True
is_bounded = True
is_finite = True
is_number = True
__slots__ = []
# Used to make max(x._prec, y._prec) return x._prec when only x is a float
_prec = -1
is_Number = True
def __new__(cls, *obj):
if len(obj)==1:
obj=obj[0]
if isinstance(obj, (int, long)):
return Integer(obj)
if isinstance(obj, tuple) and len(obj) == 2:
return Rational(*obj)
if isinstance(obj, (float, mpmath.mpf, decimal.Decimal)):
return Float(obj)
if isinstance(obj, str):
val = sympify(obj)
if isinstance(val, Number):
return val
else:
raise ValueError('String "%s" does not denote a Number'%obj)
if isinstance(obj, Number):
return obj
raise TypeError("expected str|int|long|float|Decimal|Number object but got %r" % (obj))
def _as_mpf_val(self, prec):
"""Evaluation of mpf tuple accurate to at least prec bits."""
raise NotImplementedError('%s needs ._as_mpf_val() method' % \
(self.__class__.__name__))
def _eval_evalf(self, prec):
return Float._new(self._as_mpf_val(prec), prec)
def _as_mpf_op(self, prec):
prec = max(prec, self._prec)
return self._as_mpf_val(prec), prec
def __float__(self):
return mlib.to_float(self._as_mpf_val(53))
def _eval_conjugate(self):
return self
def _eval_order(self, *symbols):
# Order(5, x, y) -> Order(1,x,y)
return C.Order(S.One, *symbols)
@classmethod
def class_key(cls):
return 1, 0, 'Number'
def sort_key(self, order=None):
return self.class_key(), (0, ()), (), self
def __eq__(self, other):
raise NotImplementedError('%s needs .__eq__() method' % (self.__class__.__name__))
def __ne__(self, other):
raise NotImplementedError('%s needs .__ne__() method' % (self.__class__.__name__))
def __lt__(self, other):
raise NotImplementedError('%s needs .__lt__() method' % (self.__class__.__name__))
def __le__(self, other):
raise NotImplementedError('%s needs .__le__() method' % (self.__class__.__name__))
def __gt__(self, other):
return _sympify(other).__lt__(self)
def __ge__(self, other):
return _sympify(other).__le__(self)
def __hash__(self):
return super(Number, self).__hash__()
@property
def is_number(self):
return True
def as_coeff_mul(self, *deps):
# a -> c * t
if self.is_Rational:
return self, tuple()
elif self.is_negative:
return S.NegativeOne, (-self,)
return S.One, (self,)
def as_coeff_add(self, *deps):
# a -> c + t
if self.is_Rational:
return self, tuple()
return S.Zero, (self,)
def gcd(self, other):
"""Compute greatest common divisor of input arguments. """
_ = _sympify(other)
return S.One
def lcm(self, other):
"""Compute least common multiple of input arguments. """
other = _sympify(other)
return self*other
def cofactors(self, other):
"""Compute GCD and cofactors of input arguments. """
other = _sympify(other)
return S.One, self, other
def as_coeff_Mul(self):
"""Efficiently extract the coefficient of a product. """
return self, S.One
class Float(Number):
"""
Represents a floating point number. It is capable of representing
arbitrary-precision floating-point numbers
**Usage**
::
Float(3.5)
3.5 # (the 3.5 was converted from a python float)
Float("3.0000000000000005")
>>> from sympy import Float
>>> Float((1,3,0,2)) # mpmath tuple: (-1)**1 * 3 * 2**0; 3 has 2 bits
-3.00000000000000
**Notes**
- Float(x) with x being a Python int/long will return Integer(x)
"""
is_real = True
is_irrational = False
is_integer = False
__slots__ = ['_mpf_', '_prec']
# mpz can't be pickled
def __getnewargs__(self):
return (mlib.to_pickable(self._mpf_),)
def __getstate__(self):
d = Expr.__getstate__(self).copy()
del d["_mpf_"]
return mlib.to_pickable(self._mpf_), d
def __setstate__(self, state):
_mpf_, d = state
_mpf_ = mlib.from_pickable(_mpf_)
self._mpf_ = _mpf_
Expr.__setstate__(self, d)
is_Float = True
def floor(self):
return C.Integer(int(mlib.to_int(mlib.mpf_floor(self._mpf_, self._prec))))
def ceiling(self):
return C.Integer(int(mlib.to_int(mlib.mpf_ceil(self._mpf_, self._prec))))
@property
def num(self):
return mpmath.mpf(self._mpf_)
def _as_mpf_val(self, prec):
return self._mpf_
def _as_mpf_op(self, prec):
return self._mpf_, max(prec, self._prec)
def __new__(cls, num, prec=15):
prec = mlib.libmpf.dps_to_prec(prec)
if isinstance(num, (int, long)):
return Integer(num)
if isinstance(num, (str, decimal.Decimal)):
_mpf_ = mlib.from_str(str(num), prec, rnd)
elif isinstance(num, tuple) and len(num) == 4:
if type(num[1]) is str:
# it's a hexadecimal (coming from a pickled object)
# assume that it is in standard form
num = list(num)
num[1] = long(num[1], 16)
_mpf_ = tuple(num)
else:
_mpf_ = mpmath.mpf(
S.NegativeOne ** num[0] * num[1] * 2 ** num[2])._mpf_
else:
_mpf_ = mpmath.mpf(num)._mpf_
if not num:
return C.Zero()
obj = Expr.__new__(cls)
obj._mpf_ = _mpf_
obj._prec = prec
return obj
@classmethod
def _new(cls, _mpf_, _prec):
if _mpf_ == mlib.fzero:
return S.Zero
obj = Expr.__new__(cls)
obj._mpf_ = _mpf_
obj._prec = _prec
return obj
def _hashable_content(self):
return (self._mpf_, self._prec)
def _eval_is_positive(self):
return self.num > 0
def _eval_is_negative(self):
return self.num < 0
def __neg__(self):
return Float._new(mlib.mpf_neg(self._mpf_), self._prec)
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if isinstance(other, Number):
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_mul(self._mpf_, rhs, prec, rnd), prec)
return Number.__mul__(self, other)
@_sympifyit('other', NotImplemented)
def __mod__(self, other):
if isinstance(other, Number):
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_mod(self._mpf_, rhs, prec, rnd), prec)
return Number.__mod__(self, other)
@_sympifyit('other', NotImplemented)
def __rmod__(self, other):
if isinstance(other, Number):
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_mod(rhs, self._mpf_, prec, rnd), prec)
return Number.__rmod__(self, other)
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if (other is S.NaN) or (self is S.NaN):
return S.NaN
if isinstance(other, Number):
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_add(self._mpf_, rhs, prec, rnd), prec)
return Number.__add__(self, other)
def _eval_power(self, e):
"""
e is symbolic object but not equal to 0, 1
(-p) ** r -> exp(r * log(-p)) -> exp(r * (log(p) + I*Pi)) ->
-> p ** r * (sin(Pi*r) + cos(Pi*r) * I)
"""
if isinstance(e, Number):
if isinstance(e, Integer):
prec = self._prec
return Float._new(mlib.mpf_pow_int(self._mpf_, e.p, prec, rnd), prec)
e, prec = e._as_mpf_op(self._prec)
b = self._mpf_
try:
y = mpf_pow(b, e, prec, rnd)
return Float._new(y, prec)
except mlib.ComplexResult:
re, im = mlib.mpc_pow((b, mlib.fzero), (e, mlib.fzero), prec, rnd)
return Float._new(re, prec) + Float._new(im, prec) * S.ImaginaryUnit
def __abs__(self):
return Float._new(mlib.mpf_abs(self._mpf_), self._prec)
def __int__(self):
return int(mlib.to_int(self._mpf_))
def __eq__(self, other):
try:
other = _sympify(other)
except SympifyError:
return False # sympy != other --> not ==
if isinstance(other, NumberSymbol):
if other.is_irrational: return False
return other.__eq__(self)
if isinstance(other, FunctionClass): #cos as opposed to cos(x)
return False
if isinstance(other, Number):
return bool(mlib.mpf_eq(self._mpf_, other._as_mpf_val(self._prec)))
return False # Float != non-Number
def __ne__(self, other):
try:
other = _sympify(other)
except SympifyError:
return True # sympy != other
if isinstance(other, NumberSymbol):
if other.is_irrational: return True
return other.__ne__(self)
if isinstance(other, FunctionClass): #cos as opposed to cos(x)
return True
if isinstance(other, Number):
return bool(not mlib.mpf_eq(self._mpf_, other._as_mpf_val(self._prec)))
return True # Float != non-Number
def __lt__(self, other):
try:
other = _sympify(other)
except SympifyError:
return False # sympy > other
if isinstance(other, NumberSymbol):
return other.__ge__(self)
if other.is_comparable: other = other.evalf()
if isinstance(other, Number):
return bool(mlib.mpf_lt(self._mpf_, other._as_mpf_val(self._prec)))
return Expr.__lt__(self, other)
def __le__(self, other):
try:
other = _sympify(other)
except SympifyError:
return False # sympy > other --> ! <=
if isinstance(other, NumberSymbol):
return other.__gt__(self)
if other.is_comparable: other = other.evalf()
if isinstance(other, Number):
return bool(mlib.mpf_le(self._mpf_, other._as_mpf_val(self._prec)))
return Expr.__le__(self, other)
def __hash__(self):
return super(Float, self).__hash__()
def epsilon_eq(self, other, epsilon="10e-16"):
return abs(self - other) < Float(epsilon)
def _sage_(self):
import sage.all as sage
return sage.RealNumber(str(self))
# Add sympify converters
converter[float] = converter[decimal.Decimal] = Float
# this is here to work nicely in Sage
RealNumber = Float
@deprecated
def Real(*args, **kwargs): # pragma: no cover
"""Deprecated alias for the Float constructor."""
return Float(*args, **kwargs)
class Rational(Number):
"""Represents integers and rational numbers (p/q) of any size.
**Examples**
>>> from sympy import Rational
>>> from sympy.abc import x, y
>>> Rational(3)
3
>>> Rational(1,2)
1/2
>>> Rational(1.5)
1
Rational can also accept strings that are valid literals for reals:
>>> Rational("1.23")
123/100
>>> Rational('1e-2')
1/100
>>> Rational(".1")
1/10
Parsing needs for any other type of string for which a Rational is desired
can be handled with the rational=True option in sympify() which produces
rationals from strings like '.[3]' (=1/3) and '3/10' (=3/10).
**Low-level**
Access numerator and denominator as .p and .q:
>>> r = Rational(3,4)
>>> r
3/4
>>> r.p
3
>>> r.q
4
Note that p and q return integers (not sympy Integers) so some care
is needed when using them in expressions:
>>> r.p/r.q
0
"""
is_real = True
is_integer = False
is_rational = True
__slots__ = ['p', 'q']
is_Rational = True
@cacheit
def __new__(cls, p, q=None):
if q is None:
if isinstance(p, Rational):
return p
if isinstance(p, basestring):
try:
# we might have a Float
neg_pow, digits, expt = decimal.Decimal(p).as_tuple()
p = [1, -1][neg_pow] * int("".join(str(x) for x in digits))
if expt > 0:
# TODO: this branch needs a test
return Rational(p*Pow(10, expt), 1)
return Rational(p, Pow(10, -expt))
except decimal.InvalidOperation:
import re
f = re.match('^([-+]?[0-9]+)/([0-9]+)$', p.replace(' ',''))
if f:
n, d = f.groups()
return Rational(int(n), int(d))
raise ValueError('invalid literal: %s' % p)
elif not isinstance(p, Basic):
return Rational(S(p))
q = S.One
if isinstance(q, Rational):
p *= q.q
q = q.p
if isinstance(p, Rational):
q *= p.q
p = p.p
p = int(p)
q = int(q)
if q == 0:
if p == 0:
if _errdict["divide"]:
raise ValueError("Indeterminate 0/0")
else:
return S.NaN
if p < 0:
return S.NegativeInfinity
return S.Infinity
if q < 0:
q = -q
p = -p
n = igcd(abs(p), q)
if n > 1:
p //= n
q //= n
if q == 1:
return Integer(p)
if p == 1 and q == 2:
return S.Half
obj = Expr.__new__(cls)
obj.p = p
obj.q = q
#obj._args = (p, q)
return obj
def limit_denominator(self, max_denominator=1000000):
"""Closest Rational to self with denominator at most max_denominator.
>>> from sympy import Rational
>>> Rational('3.141592653589793').limit_denominator(10)
22/7
>>> Rational('3.141592653589793').limit_denominator(100)
311/99
"""
# Algorithm notes: For any real number x, define a *best upper
# approximation* to x to be a rational number p/q such that:
#
# (1) p/q >= x, and
# (2) if p/q > r/s >= x then s > q, for any rational r/s.
#
# Define *best lower approximation* similarly. Then it can be
# proved that a rational number is a best upper or lower
# approximation to x if, and only if, it is a convergent or
# semiconvergent of the (unique shortest) continued fraction
# associated to x.
#
# To find a best rational approximation with denominator <= M,
# we find the best upper and lower approximations with
# denominator <= M and take whichever of these is closer to x.
# In the event of a tie, the bound with smaller denominator is
# chosen. If both denominators are equal (which can happen
# only when max_denominator == 1 and self is midway between
# two integers) the lower bound---i.e., the floor of self, is
# taken.
if max_denominator < 1:
raise ValueError("max_denominator should be at least 1")
if self.q <= max_denominator:
return self
p0, q0, p1, q1 = 0, 1, 1, 0
n, d = self.p, self.q
while True:
a = n//d
q2 = q0+a*q1
if q2 > max_denominator:
break
p0, q0, p1, q1 = p1, q1, p0+a*p1, q2
n, d = d, n-a*d
k = (max_denominator-q0)//q1
bound1 = Rational(p0+k*p1, q0+k*q1)
bound2 = Rational(p1, q1)
if abs(bound2 - self) <= abs(bound1-self):
return bound2
else:
return bound1
def __getnewargs__(self):
return (self.p, self.q)
def _hashable_content(self):
return (self.p, self.q)
def _eval_is_positive(self):
return self.p > 0
def _eval_is_zero(self):
return self.p == 0
def __neg__(self):
return Rational(-self.p, self.q)
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if (other is S.NaN) or (self is S.NaN):
return S.NaN
if isinstance(other, Float):
return other * self
if isinstance(other, Rational):
return Rational(self.p * other.p, self.q * other.q)
return Number.__mul__(self, other)
@_sympifyit('other', NotImplemented)
def __mod__(self, other):
if isinstance(other, Rational):
n = (self.p*other.q) // (other.p*self.q)
return Rational(self.p*other.q - n*other.p*self.q, self.q*other.q)
if isinstance(other, Float):
return self.evalf() % other
return Number.__mod__(self, other)
@_sympifyit('other', NotImplemented)
def __rmod__(self, other):
if isinstance(other, Rational):
return Rational.__mod__(other, self)
if isinstance(other, Float):
return other % self.evalf()
return Number.__rmod__(self, other)
# TODO reorder
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if (other is S.NaN) or (self is S.NaN):
return S.NaN
if isinstance(other, Float):
return other + self
if isinstance(other, Rational):
if self.is_unbounded:
if other.is_bounded:
return self
elif self==other:
return self
else:
if other.is_unbounded:
return other
return Rational(self.p * other.q + self.q * other.p, self.q * other.q)
return Number.__add__(self, other)
def _eval_power(b, e):
if (e is S.NaN): return S.NaN
if isinstance(e, Number):
if isinstance(e, Float):
return b._eval_evalf(e._prec) ** e
if e.is_negative:
# (3/4)**-2 -> (4/3)**2
ne = -e
if (ne is S.One):
return Rational(b.q, b.p)
if b < 0:
if e.q != 1:
return -(S.NegativeOne) ** ((e.p % e.q) / S(e.q)) * Rational(b.q, -b.p) ** ne
else:
return S.NegativeOne ** ne * Rational(b.q, -b.p) ** ne
else:
return Rational(b.q, b.p) ** ne
if (e is S.Infinity):
if b.p > b.q:
# (3/2)**oo -> oo
return S.Infinity
if b.p < -b.q:
# (-3/2)**oo -> oo + I*oo
return S.Infinity + S.Infinity * S.ImaginaryUnit
return S.Zero
if isinstance(e, Integer):
# (4/3)**2 -> 4**2 / 3**2
return Rational(b.p ** e.p, b.q ** e.p)
if isinstance(e, Rational):
if b.p != 1:
# (4/3)**(5/6) -> 4**(5/6) * 3**(-5/6)
return Integer(b.p) ** e * Integer(b.q) ** (-e)
if b >= 0:
return Integer(b.q)**Rational(e.p * (e.q-1), e.q) / ( Integer(b.q) ** Integer(e.p))
else:
return (-1)**e * (-b)**e
c, t = b.as_coeff_mul()
if e.is_even and isinstance(c, Number) and c < 0:
return (-c * Mul(*t)) ** e
return
def _as_mpf_val(self, prec):
return mlib.from_rational(self.p, self.q, prec, rnd)
def _mpmath_(self, prec, rnd):
return mpmath.make_mpf(mlib.from_rational(self.p, self.q, prec, rnd))
def __abs__(self):
return Rational(abs(self.p), self.q)
def __int__(self):
return int(float(self.p)/self.q)
def __eq__(self, other):
try:
other = _sympify(other)
except SympifyError:
return False # sympy != other --> not ==
if isinstance(other, NumberSymbol):
if other.is_irrational: return False
return other.__eq__(self)
if isinstance(other, FunctionClass): #cos as opposed to cos(x)
return False
if other.is_comparable and not isinstance(other, Rational):
other = other.evalf()
if isinstance(other, Number):
if isinstance(other, Float):
return bool(mlib.mpf_eq(self._as_mpf_val(other._prec), other._mpf_))
return bool(self.p==other.p and self.q==other.q)
return False # Rational != non-Number
def __ne__(self, other):
try:
other = _sympify(other)
except SympifyError:
return True # sympy != other
if isinstance(other, NumberSymbol):
if other.is_irrational: return True
return other.__ne__(self)
if isinstance(other, FunctionClass): #cos as opposed to cos(x)
return True
if other.is_comparable and not isinstance(other, Rational):
other = other.evalf()
if isinstance(other, Number):
if isinstance(other, Float):
return bool(not mlib.mpf_eq(self._as_mpf_val(other._prec), other._mpf_))
return bool(self.p!=other.p or self.q!=other.q)
return True # Rational != non-Number
def __lt__(self, other):
try:
other = _sympify(other)
except SympifyError:
return False # sympy > other --> not <
if isinstance(other, NumberSymbol):
return other.__ge__(self)
if other.is_comparable and not isinstance(other, Rational):
other = other.evalf()
if isinstance(other, Number):
if isinstance(other, Float):
return bool(mlib.mpf_lt(self._as_mpf_val(other._prec), other._mpf_))
return bool(self.p * other.q < self.q * other.p)
return Expr.__lt__(self, other)
def __le__(self, other):
try:
other = _sympify(other)
except SympifyError:
return False # sympy > other --> not <=
if isinstance(other, NumberSymbol):
return other.__gt__(self)
if other.is_comparable and not isinstance(other, Rational):
other = other.evalf()
if isinstance(other, Number):
if isinstance(other, Float):
return bool(mlib.mpf_le(self._as_mpf_val(other._prec), other._mpf_))
return bool(self.p * other.q <= self.q * other.p)
return Expr.__le__(self, other)
def __hash__(self):
return super(Rational, self).__hash__()
def factors(self, limit=None, use_trial=True,
use_rho=False,
use_pm1=False,
verbose=False):
"""A wrapper to factorint which return factors of self that are
smaller than limit (or cheap to compute). Special methods of
factoring are disabled by default so that only trial division is used.
"""
from sympy.ntheory import factorint
f = factorint(self.p, limit=limit,
use_trial=use_trial,
use_rho=use_rho,
use_pm1=use_pm1,
verbose=verbose).copy()
for p, e in factorint(self.q, limit=limit,
use_trial=use_trial,
use_rho=use_rho,
use_pm1=use_pm1,
verbose=verbose).items():
try: f[p] += -e
except KeyError: f[p] = -e
if len(f)>1 and 1 in f: del f[1]
return f
def gcd(self, other):
"""Compute greatest common divisor of input arguments. """
if type(other) in (int, long):
p = igcd(self.p, other)
if self.is_Integer:
return Integer(p)
else:
return Rational(p, self.q)
else:
other = _sympify(other)
if other.is_Rational:
p = igcd(self.p, other.p)
if other.is_Integer:
if self.is_Integer:
return Integer(p)
else:
return Rational(p, self.q)
else:
if self.is_Integer:
return Rational(p, other.q)
else:
return Rational(p, ilcm(self.q, other.q))
elif other.is_Number:
return S.One
else:
raise TypeError("expected an integer or rational, got %s" % other)
def lcm(self, other):
"""Compute least common multiple of input arguments. """
if type(other) in (int, long):
return Integer(ilcm(self.p, other))
else:
other = _sympify(other)
if other.is_Rational:
p = ilcm(self.p, other.p)
if self.is_Integer or other.is_Integer:
return Integer(p)
else:
return Rational(p, igcd(self.q, other.q))
elif other.is_Number:
return self*other
else:
raise TypeError("expected an integer or rational, got %s" % other)
def cofactors(self, other):
"""Compute GCD and cofactors of input arguments. """
other = _sympify(other)
gcd = self.gcd(other)
if gcd is S.One:
return gcd, self, other
else:
return gcd, self/gcd, other/gcd
def as_numer_denom(self):
return Integer(self.p), Integer(self.q)
def _sage_(self):
import sage.all as sage
return sage.Integer(self.p)/sage.Integer(self.q)
# int -> Integer
_intcache = {}
# TODO move this tracing facility to sympy/core/trace.py ?
def _intcache_printinfo():
ints = sorted(_intcache.keys())
nhit = _intcache_hits
nmiss= _intcache_misses
if nhit == 0 and nmiss == 0:
print
print 'Integer cache statistic was not collected'
return
miss_ratio = float(nmiss) / (nhit+nmiss)
print
print 'Integer cache statistic'
print '-----------------------'
print
print '#items: %i' % len(ints)
print
print ' #hit #miss #total'
print
print '%5i %5i (%7.5f %%) %5i' % (nhit, nmiss, miss_ratio*100, nhit+nmiss)
print
print ints
_intcache_hits = 0
_intcache_misses = 0
def int_trace(f):
import os
if os.getenv('SYMPY_TRACE_INT', 'no').lower() != 'yes':
return f
def Integer_tracer(cls, i):
global _intcache_hits, _intcache_misses
try:
_intcache_hits += 1
return _intcache[i]
except KeyError:
_intcache_hits -= 1
_intcache_misses += 1
return f(cls, i)
# also we want to hook our _intcache_printinfo into sys.atexit
import atexit
atexit.register(_intcache_printinfo)
return Integer_tracer
class Integer(Rational):
q = 1
is_integer = True
is_Integer = True
__slots__ = ['p']
def _as_mpf_val(self, prec):
return mlib.from_int(self.p)
def _mpmath_(self, prec, rnd):
return mpmath.make_mpf(self._as_mpf_val(prec))
# TODO caching with decorator, but not to degrade performance
@int_trace
def __new__(cls, i):
ival = int(i)
try:
return _intcache[ival]
except KeyError:
# We only work with well-behaved integer types. This converts, for
# example, numpy.int32 instances.
if ival == 0: obj = S.Zero
elif ival == 1: obj = S.One
elif ival == -1: obj = S.NegativeOne
else:
obj = Expr.__new__(cls)
obj.p = ival
_intcache[ival] = obj
return obj
def __getnewargs__(self):
return (self.p,)
# Arithmetic operations are here for efficiency
def __int__(self):
return self.p
def __neg__(self):
return Integer(-self.p)
def __abs__(self):
if self.p >= 0:
return self
else:
return Integer(-self.p)
def __divmod__(self, other):
return divmod(self.p, other.p)
# TODO make it decorator + bytecodehacks?
def __add__(a, b):
if isinstance(b, (int, long)):
return Integer(a.p + b)
elif isinstance(b, Integer):
return Integer(a.p + b.p)
return Rational.__add__(a, b) # a,b -not- b,a
def __radd__(a, b):
if isinstance(b, (int, long)):
return Integer(b + a.p)
elif isinstance(b, Integer):
return Integer(b.p + a.p)
return Rational.__add__(a, b)
def __sub__(a, b):
if isinstance(b, (int, long)):
return Integer(a.p - b)
elif isinstance(b, Integer):
return Integer(a.p - b.p)
return Rational.__sub__(a, b)
def __rsub__(a, b):
if isinstance(b, (int, long)):
return Integer(b - a.p)
elif isinstance(b, Integer):
return Integer(b.p - a.p)
return Rational.__rsub__(a, b)
def __mul__(a, b):
if isinstance(b, (int, long)):
return Integer(a.p * b)
elif isinstance(b, Integer):
return Integer(a.p * b.p)
return Rational.__mul__(a, b)
def __rmul__(a, b):
if isinstance(b, (int, long)):
return Integer(b * a.p)
elif isinstance(b, Integer):
return Integer(b.p * a.p)
return Rational.__mul__(a, b)
def __mod__(a, b):
if isinstance(b, (int, long)):
return Integer(a.p % b)
elif isinstance(b, Integer):
return Integer(a.p % b.p)
return Rational.__mod__(a, b)
def __rmod__(a, b):
if isinstance(b, (int, long)):
return Integer(b % a.p)
elif isinstance(b, Integer):
return Integer(b.p % a.p)
return Rational.__rmod__(a, b)
def __eq__(a, b):
if isinstance(b, (int, long)):
return (a.p == b)
elif isinstance(b, Integer):
return (a.p == b.p)
return Rational.__eq__(a, b)
def __ne__(a, b):
if isinstance(b, (int, long)):
return (a.p != b)
elif isinstance(b, Integer):
return (a.p != b.p)
return Rational.__ne__(a, b)
def __gt__(a, b):
if isinstance(b, (int, long)):
return (a.p > b)
elif isinstance(b, Integer):
return (a.p > b.p)
return Rational.__gt__(a, b)
def __lt__(a, b):
if isinstance(b, (int, long)):
return (a.p < b)
elif isinstance(b, Integer):
return (a.p < b.p)
return Rational.__lt__(a, b)
def __ge__(a, b):
if isinstance(b, (int, long)):
return (a.p >= b)
elif isinstance(b, Integer):
return (a.p >= b.p)
return Rational.__ge__(a, b)
def __le__(a, b):
if isinstance(b, (int, long)):
return (a.p <= b)
elif isinstance(b, Integer):
return (a.p <= b.p)
return Rational.__le__(a, b)
def __hash__(self):
return super(Integer, self).__hash__()
def __index__(self):
return self.p
########################################
def _eval_is_odd(self):
return bool(self.p % 2)
def _eval_power(b, e):
"""
Tries to do some simplifications on b ** e, where b is
an instance of Integer
Returns None if no further simplifications can be done
When exponent is a fraction (so we have for example a square root),
we try to find a simpler representation by factoring the argument
up to factors of 2**15, e.g.
- 4**Rational(1,2) becomes 2
- (-4)**Rational(1,2) becomes 2*I
- (2**(3+7)*3**(6+7))**Rational(1,7) becomes 6*18**(3/7)
Further simplification would require a special call to factorint on
the argument which is not done here for sake of speed.
"""
from sympy import perfect_power
if e is S.NaN:
return S.NaN
if b is S.One:
return S.One
if b is S.NegativeOne:
return
if e is S.Infinity:
if b > S.One:
return S.Infinity
if b is S.NegativeOne:
return S.NaN
# cases for 0 and 1 are done in their respective classes
return S.Infinity + S.ImaginaryUnit * S.Infinity
if not isinstance(e, Number):
# simplify when exp is even
# (-2) ** k --> 2 ** k
c, t = b.as_coeff_mul()
if e.is_even and isinstance(c, Number) and c < 0:
return (-c*Mul(*t))**e
if not isinstance(e, Rational):
return
if e is S.Half and b < 0:
# we extract I for this special case since everyone is doing so
return S.ImaginaryUnit*Pow(-b, e)
if e < 0:
# invert base and change sign on exponent
ne = -e
if b < 0:
if e.q != 1:
return -(S.NegativeOne)**((e.p % e.q) /
S(e.q)) * Rational(1, -b)**ne
else:
return (S.NegativeOne)**ne*Rational(1, -b)**ne
else:
return Rational(1, b)**ne
# see if base is a perfect root, sqrt(4) --> 2
b_pos = int(abs(b))
x, xexact = integer_nthroot(b_pos, e.q)
if xexact:
# if it's a perfect root we've finished
result = Integer(x ** abs(e.p))
if b < 0:
result *= (-1)**e
return result
# The following is an algorithm where we collect perfect roots
# from the factors of base.
# if it's not an nth root, it still might be a perfect power
p = perfect_power(b_pos)
if p is not False:
dict = {p[0]: p[1]}
else:
dict = Integer(b_pos).factors(limit=2**15)
# now process the dict of factors
if b.is_negative:
dict[-1] = 1
out_int = 1 # integer part
out_rad = 1 # extracted radicals
sqr_int = 1
sqr_gcd = 0
sqr_dict = {}
for prime, exponent in dict.items():
exponent *= e.p
# remove multiples of e.q, e.g. (2**12)**(1/10) -> 2*(2**2)**(1/10)
div_e, div_m = divmod(exponent, e.q)
if div_e > 0:
out_int *= prime**div_e
if div_m > 0:
# see if the reduced exponent shares a gcd with e.q
# (2**2)**(1/10) -> 2**(1/5)
g = igcd(div_m, e.q)
if g != 1:
out_rad *= Pow(prime, Rational(div_m//g, e.q//g))
else:
sqr_dict[prime] = div_m
# identify gcd of remaining powers
for p, ex in sqr_dict.iteritems():
if sqr_gcd == 0:
sqr_gcd = ex
else:
sqr_gcd = igcd(sqr_gcd, ex)
if sqr_gcd == 1:
break
for k, v in sqr_dict.iteritems():
sqr_int *= k**(v//sqr_gcd)
if sqr_int == b and out_int == 1 and out_rad == 1:
result = None
else:
result = out_int*out_rad*Pow(sqr_int, Rational(sqr_gcd, e.q))
return result
def _eval_is_prime(self):
if self.p < 0:
return False
def as_numer_denom(self):
return self, S.One
def __floordiv__(self, other):
return Integer(self.p // Integer(other).p)
def __rfloordiv__(self, other):
return Integer(Integer(other).p // self.p)
def factorial(a):
"""Compute factorial of `a`. """
from sympy.functions.combinatorial.factorials import factorial
return Integer(factorial(int(a)))
def isqrt(a):
"""Compute integer square root of `a`. """
return Integer(mlib.isqrt(int(a)))
def half_gcdex(a, b):
"""Half Extended Euclidean Algorithm. """
s, _, h = a.gcdex(b)
return s, h
def gcdex(a, b):
"""Extended Euclidean Algorithm. """
if isinstance(b, (int, long)):
return tuple(map(Integer, igcdex(int(a), b)))
else:
b = _sympify(b)
if b.is_Integer:
return tuple(map(Integer, igcdex(int(a), int(b))))
else:
raise ValueError("expected an integer, got %s" % b)
def invert(a, b):
"""Invert `a` modulo `b`, if possible. """
if isinstance(b, (int, long)):
a = int(a)
else:
b = _sympify(b)
if b.is_Integer:
a, b = int(a), int(b)
else:
raise ValueError("expected an integer, got %s" % b)
s, _, h = igcdex(a, b)
if h == 1:
return Integer(s % b)
else:
raise ZeroDivisionError("zero divisor")
# Add sympify converters
converter[int] = converter[long] = Integer
class RationalConstant(Rational):
"""
Abstract base class for rationals with specific behaviors
Derived classes must define class attributes p and q and should probably all
be singletons.
"""
__slots__ = []
def __new__(cls):
return AtomicExpr.__new__(cls)
class IntegerConstant(Integer):
__slots__ = []
def __new__(cls):
return AtomicExpr.__new__(cls)
class Zero(IntegerConstant):
__metaclass__ = Singleton
p = 0
q = 1
is_positive = False
is_negative = False
is_finite = False
is_zero = True
is_prime = False
is_composite = False
__slots__ = []
@staticmethod
def __abs__():
return S.Zero
@staticmethod
def __neg__():
return S.Zero
def _eval_power(b, e):
if e.is_negative:
return S.Infinity
if e.is_positive:
return b
d = e.evalf()
if isinstance(d, Number):
if d.is_negative:
return S.Infinity
return b
coeff, terms = e.as_coeff_mul()
if coeff.is_negative:
return S.Infinity ** Mul(*terms)
if coeff is not S.One:
return b ** Mul(*terms)
def _eval_order(self, *symbols):
# Order(0,x) -> 0
return self
def __nonzero__(self):
return False
class One(IntegerConstant):
__metaclass__ = Singleton
p = 1
q = 1
is_prime = True
__slots__ = []
def _eval_evalf(self, prec):
return self
@staticmethod
def __abs__():
return S.One
@staticmethod
def __neg__():
return S.NegativeOne
def _eval_order(self, *symbols):
return
@staticmethod
def factors():
return {1: 1}
class NegativeOne(IntegerConstant):
__metaclass__ = Singleton
p = -1
q = 1
__slots__ = []
def _eval_evalf(self, prec):
return self
@staticmethod
def __abs__():
return S.One
@staticmethod
def __neg__():
return S.One
def _eval_power(b, e):
if e.is_odd: return S.NegativeOne
if e.is_even: return S.One
if isinstance(e, Number):
if isinstance(e, Float):
return Float(-1.0) ** e
if e is S.NaN:
return S.NaN
if e is S.Infinity or e is S.NegativeInfinity:
return S.NaN
if e is S.Half:
return S.ImaginaryUnit
if isinstance(e, Rational):
if e.q == 2:
return S.ImaginaryUnit ** Integer(e.p)
q = Float(e).floor()
if q:
q = Integer(q)
return b ** q * b ** (e - q)
return
class Half(RationalConstant):
__metaclass__ = Singleton
p = 1
q = 2
__slots__ = []
@staticmethod
def __abs__():
return S.Half
class Infinity(RationalConstant):
__metaclass__ = Singleton
p = 1
q = 0
__slots__ = []
is_commutative = True
is_positive = True
is_bounded = False
is_finite = False
is_infinitesimal = False
is_integer = None
is_rational = None
is_odd = None
@staticmethod
def __abs__():
return S.Infinity
@staticmethod
def __neg__():
return S.NegativeInfinity
def _eval_power(b, e):
"""
        e is a symbolic object, not equal to 0 or 1
oo ** nan -> nan
oo ** (-p) -> 0, p is number, oo
"""
if e.is_positive:
return S.Infinity
if e.is_negative:
return S.Zero
if isinstance(e, Number):
if e is S.NaN:
return S.NaN
d = e.evalf()
if isinstance(d, Number):
return b ** d
return
def _as_mpf_val(self, prec):
return mlib.finf
def _sage_(self):
import sage.all as sage
return sage.oo
def __gt__(a, b):
if b is S.Infinity:
return False
return True
def __lt__(a, b):
return False
def __ge__(a, b):
return True
def __le__(a, b):
if b is S.Infinity:
return True
return False
def __mod__(self, other):
return S.NaN
__rmod__ = __mod__
oo = S.Infinity
class NegativeInfinity(RationalConstant):
__metaclass__ = Singleton
p = -1
q = 0
__slots__ = []
is_commutative = True
is_real = True
is_positive = False
is_bounded = False
is_finite = False
is_infinitesimal = False
is_integer = None
is_rational = None
@staticmethod
def __abs__():
return S.Infinity
@staticmethod
def __neg__():
return S.Infinity
def _eval_power(b, e):
"""
        e is a symbolic object, not equal to 0 or 1
(-oo) ** nan -> nan
(-oo) ** oo -> nan
(-oo) ** (-oo) -> nan
(-oo) ** e -> oo, e is positive even integer
(-oo) ** o -> -oo, o is positive odd integer
"""
if isinstance(e, Number):
if (e is S.NaN) or (e is S.Infinity) or (e is S.NegativeInfinity):
return S.NaN
if isinstance(e, Integer):
if e.is_positive:
if e.is_odd:
return S.NegativeInfinity
return S.Infinity
return S.NegativeOne**e * S.Infinity ** e
return
def _as_mpf_val(self, prec):
return mlib.fninf
def _sage_(self):
import sage.all as sage
return -(sage.oo)
def __gt__(a, b):
return False
def __lt__(a, b):
if b is S.NegativeInfinity:
return False
return True
def __ge__(a, b):
if b is S.NegativeInfinity:
return True
return False
def __le__(a, b):
return True
class NaN(RationalConstant):
__metaclass__ = Singleton
p = 0
q = 0
is_commutative = True
is_real = None
is_rational = None
is_integer = None
is_comparable = False
is_finite = None
is_bounded = None
#is_unbounded = False
is_zero = None
is_prime = None
is_positive = None
__slots__ = []
def _as_mpf_val(self, prec):
return mlib.fnan
def _eval_power(b, e):
if e is S.Zero:
return S.One
return b
def _sage_(self):
import sage.all as sage
return sage.NaN
nan = S.NaN
class ComplexInfinity(AtomicExpr):
__metaclass__ = Singleton
is_commutative = True
is_comparable = None
is_bounded = False
is_real = None
is_number = True
__slots__ = []
def __new__(cls):
return AtomicExpr.__new__(cls)
@staticmethod
def __abs__():
return S.Infinity
@staticmethod
def __neg__():
return S.ComplexInfinity
def _eval_power(b, e):
if e is S.ComplexInfinity:
return S.NaN
if isinstance(e, Number):
if e is S.Zero:
return S.NaN
else:
if e.is_positive:
return S.ComplexInfinity
else:
return S.Zero
zoo = S.ComplexInfinity
class NumberSymbol(AtomicExpr):
__metaclass__ = Singleton
is_commutative = True
is_comparable = True
is_bounded = True
is_finite = True
is_number = True
__slots__ = []
is_NumberSymbol = True
def __new__(cls):
return AtomicExpr.__new__(cls)
def approximation(self, number_cls):
""" Return an interval with number_cls endpoints
that contains the value of NumberSymbol.
If not implemented, then return None.
"""
def _eval_evalf(self, prec):
return Float._new(self._as_mpf_val(prec), prec)
def __eq__(self, other):
try:
other = _sympify(other)
except SympifyError:
return False # sympy != other --> not ==
if self is other:
return True
if isinstance(other, Number) and self.is_irrational:
return False
return False # NumberSymbol != non-(Number|self)
def __ne__(self, other):
try:
other = _sympify(other)
except SympifyError:
return True # sympy != other
if self is other:
return False
if isinstance(other, Number) and self.is_irrational:
return True
return True # NumberSymbol != non(Number|self)
def __lt__(self, other):
try:
other = _sympify(other)
except SympifyError:
return False # sympy > other --> not <
if self is other:
return False
if isinstance(other, Number):
approx = self.approximation_interval(other.__class__)
if approx is not None:
l,u = approx
if other < l:
return False
if other > u:
return True
return self.evalf()<other
if other.is_comparable:
other = other.evalf()
return self.evalf()<other
return Expr.__lt__(self, other)
def __le__(self, other):
try:
other = _sympify(other)
except SympifyError:
return False # sympy > other --> not <=
if self is other:
return True
if other.is_comparable:
other = other.evalf()
if isinstance(other, Number):
return self.evalf()<=other
return Expr.__le__(self, other)
def __gt__(self, other):
return (-self) < (-other)
def __ge__(self, other):
return (-self) <= (-other)
def __int__(self):
return int(self.evalf(0))
def __hash__(self):
return super(NumberSymbol, self).__hash__()
class Exp1(NumberSymbol):
__metaclass__ = Singleton
is_real = True
is_positive = True
is_negative = False # XXX Forces is_negative/is_nonnegative
is_irrational = True
__slots__ = []
@staticmethod
def __abs__():
return S.Exp1
def _as_mpf_val(self, prec):
return mpf_e(prec)
def approximation_interval(self, number_cls):
if issubclass(number_cls,Integer):
return (Integer(2),Integer(3))
elif issubclass(number_cls,Rational):
pass
def _eval_power(self, exp):
return C.exp(exp)
def _sage_(self):
import sage.all as sage
return sage.e
E = S.Exp1
class Pi(NumberSymbol):
__metaclass__ = Singleton
is_real = True
is_positive = True
is_negative = False
is_irrational = True
__slots__ = []
@staticmethod
def __abs__():
return S.Pi
def _as_mpf_val(self, prec):
return mpf_pi(prec)
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (Integer(3), Integer(4))
elif issubclass(number_cls, Rational):
return (Rational(223,71), Rational(22,7))
def _sage_(self):
import sage.all as sage
return sage.pi
pi = S.Pi
class GoldenRatio(NumberSymbol):
__metaclass__ = Singleton
is_real = True
is_positive = True
is_negative = False
is_irrational = True
__slots__ = []
def _as_mpf_val(self, prec):
return mlib.from_man_exp(phi_fixed(prec+10), -prec-10)
def _eval_expand_func(self, deep=True, **hints):
        return S.Half + S.Half*Integer(5)**S.Half  # (1 + sqrt(5))/2
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (S.One, Rational(2))
elif issubclass(number_cls, Rational):
pass
def _sage_(self):
import sage.all as sage
return sage.golden_ratio
class EulerGamma(NumberSymbol):
__metaclass__ = Singleton
is_real = True
is_positive = True
is_negative = False
is_irrational = None
__slots__ = []
def _as_mpf_val(self, prec):
return mlib.from_man_exp(mlib.libhyper.euler_fixed(
prec+10), -prec-10)
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (S.Zero, S.One)
elif issubclass(number_cls, Rational):
return (S.Half, Rational(3, 5))
def _sage_(self):
import sage.all as sage
return sage.euler_gamma
class Catalan(NumberSymbol):
__metaclass__ = Singleton
is_real = True
is_positive = True
is_negative = False
is_irrational = None
__slots__ = []
def _as_mpf_val(self, prec):
return mlib.from_man_exp(mlib.catalan_fixed(prec+10), -prec-10)
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (S.Zero, S.One)
elif issubclass(number_cls, Rational):
return (Rational(9, 10), S.One)
def _sage_(self):
import sage.all as sage
return sage.catalan
class ImaginaryUnit(AtomicExpr):
__metaclass__ = Singleton
is_commutative = True
is_imaginary = True
is_bounded = True
is_finite = True
is_number = True
__slots__ = []
@staticmethod
def __abs__():
return S.One
def _eval_evalf(self, prec):
return self
def _eval_conjugate(self):
return -S.ImaginaryUnit
def _eval_power(b, e):
"""
b is I = sqrt(-1)
        e is a symbolic object, not equal to 0 or 1
        I ** r -> (-1)**(r/2) -> exp(r/2 * Pi * I) -> cos(Pi*r/2) + I*sin(Pi*r/2), r is decimal
I ** 0 mod 4 -> 1
I ** 1 mod 4 -> I
I ** 2 mod 4 -> -1
I ** 3 mod 4 -> -I
"""
if isinstance(e, Number):
if isinstance(e, Integer):
ei = e.p % 4
if ei == 0:
return S.One
if ei == 1:
return S.ImaginaryUnit
if ei == 2:
return -S.One
return -S.ImaginaryUnit
return (S.NegativeOne) ** (e * S.Half)
return
def as_base_exp(self):
return S.NegativeOne, S.Half
def _sage_(self):
import sage.all as sage
return sage.I
I = S.ImaginaryUnit
try:
# fractions is only available for python 2.6+
import fractions
def sympify_fractions(f):
return Rational(f.numerator, f.denominator)
converter[fractions.Fraction] = sympify_fractions
except ImportError:
pass
try:
import gmpy
def sympify_mpz(x):
return Integer(long(x))
def sympify_mpq(x):
return Rational(long(x.numer()), long(x.denom()))
converter[type(gmpy.mpz(1))] = sympify_mpz
converter[type(gmpy.mpq(1, 2))] = sympify_mpq
except ImportError:
pass
def sympify_mpmath(x):
return Expr._from_mpmath(x, x.context.prec)
converter[mpnumeric] = sympify_mpmath
def sympify_complex(a):
real, imag = map(sympify, (a.real, a.imag))
return real + S.ImaginaryUnit * imag
converter[complex] = sympify_complex
_intcache[0] = S.Zero
_intcache[1] = S.One
_intcache[-1]= S.NegativeOne
from function import FunctionClass
from power import Pow, integer_nthroot
from mul import Mul
Mul.identity = One()
from add import Add
Add.identity = Zero()
```
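The number classes above are easiest to see through the public sympy interface. The snippet below is an illustrative sketch added by the editor (it is not part of the original file) and assumes a sympy version matching the code above:
```python
from sympy import Rational, Integer, S, I

# Exact rational arithmetic with automatic normalization.
x = Rational(3, 4) + Rational(1, 6)                            # 11/12
approx = Rational('3.141592653589793').limit_denominator(10)   # 22/7

# Fractional powers of integers extract perfect-power factors.
root = Integer(24) ** S.Half                                   # 2*sqrt(6)

# Singleton constants behave as ordinary numbers under arithmetic.
assert S.Half * 2 == S.One
assert I ** 2 == S.NegativeOne
```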
#### File: test/noseplugins/capture.py
```python
__author__ = "<NAME> (<EMAIL>)"
import nose
import nose.plugins.capture
import sys
class Capture(nose.plugins.capture.Capture):
def addSuccess(self,test):
test.capturedOutput = self.buffer
def addError(self,test, err):
if not hasattr(test,'capturedOutput'):
test.capturedOutput = self.buffer
sys.stderr.write('error: %s\n'%self.buffer)
def addFailure(self,test, err):
if not hasattr(test,'capturedOutput'):
test.capturedOutput = self.buffer
sys.stderr.write('failure: %s\n'%self.buffer)
def addCaptureToErr(self, ev, output):
return ev
```
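A minimal sketch of wiring this plugin into a nose run, added by the editor for illustration; the import path and the use of nose's `addplugins` hook are assumptions, not part of the original test suite:
```python
import nose
from capture import Capture  # assumed import path for the module above

if __name__ == '__main__':
    # Run nose with the customized Capture plugin registered alongside
    # the built-in plugins.
    nose.main(addplugins=[Capture()])
```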
#### File: openrave/test/test_collision.py
```python
from common_test_openrave import *
class RunCollision(EnvironmentSetup):
def __init__(self,collisioncheckername):
self.collisioncheckername = collisioncheckername
def setup(self):
EnvironmentSetup.setup(self)
self.env.SetCollisionChecker(RaveCreateCollisionChecker(self.env,self.collisioncheckername))
def test_basic(self):
env=self.env
with env:
self.LoadEnv('data/hironxtable.env.xml')
robot=env.GetRobots()[0]
env.CheckCollision(robot)
newobject=env.ReadKinBodyURI('data/mug1.kinbody.xml')
env.Add(newobject,True)
box=RaveCreateKinBody(env,'')
env.CheckCollision(box)
box.InitFromBoxes(array([[0,0,0,1,1,1]]),True)
box.SetName('box')
env.Add(box,True)
def test_collisioncaching(self):
filenames = ['robots/barrettwam.robot.xml']
env=self.env
for filename in filenames:
env.Reset()
robot=env.ReadRobotURI(filename)
for i in range(10):
env.Add(robot)
lower,upper = robot.GetDOFLimits()
v = random.rand()*(upper-lower)+lower
robot.SetDOFValues(v)
check=robot.CheckSelfCollision()
robot.SetDOFValues(v)
assert(check==robot.CheckSelfCollision())
env.Remove(robot)
def test_selfcollision(self):
with self.env:
self.LoadEnv('data/lab1.env.xml')
target1 = self.env.GetKinBody('mug1')
target2 = self.env.GetKinBody('mug2')
target2.SetTransform(target1.GetTransform())
assert(self.env.CheckCollision(target1))
robot = self.env.GetRobots()[0]
report = CollisionReport()
assert(not robot.CheckSelfCollision(report))
robot.Grab(target1)
assert(not robot.CheckSelfCollision(report))
assert(not target1.CheckSelfCollision())
assert(self.env.CheckCollision(target1,report))
def test_selfcollision_joinxml(self):
testrobot_xml="""<Robot>
<KinBody>
<!-- add a segway model to the base link -->
<Body name="segway">
<translation>0 0 0.305</translation>
<Geom type="box">
<diffusecolor>1.0 0 0</diffusecolor>
<extents>0.33 0.255 0.305</extents>
</Geom>
<Geom type="cylinder">
<translation>0.18 0.32 -0.195</translation>
<diffusecolor>0.3 0.3 0.3</diffusecolor>
<radius>0.11</radius>
<height>0.13</height>
</Geom>
<Geom type="cylinder">
<diffusecolor>0.3 0.3 0.3</diffusecolor>
<translation>-0.18 0.32 -0.195</translation>
<radius>0.11</radius>
<height>0.13</height>
</Geom>
<Geom type="cylinder">
<diffusecolor>0.3 0.3 0.3</diffusecolor>
<translation>0.18 -0.32 -0.195</translation>
<radius>0.11</radius>
<height>0.13</height>
</Geom>
<Geom type="cylinder">
<diffusecolor>0.3 0.3 0.3</diffusecolor>
<translation>-0.18 -0.32 -0.195</translation>
<radius>0.11</radius>
<height>0.13</height>
</Geom>
<Geom type="box">
<diffusecolor>0 0 1.0</diffusecolor>
<translation>-0.31 0 0.635</translation>
<extents>0.05 0.255 0.33</extents>
</Geom>
<Geom type="box">
<diffusecolor>0 0 1.0</diffusecolor>
<translation>-0.31 0.205 1.085</translation>
<extents>0.05 0.05 0.17</extents>
</Geom>
<Geom type="box">
<diffusecolor>0 0 1.0</diffusecolor>
<translation>-0.31 -0.205 1.085</translation>
<extents>0.05 0.05 0.17</extents>
</Geom>
<Geom type="box">
<diffusecolor>0 0 1.0</diffusecolor>
<translation>-0.31 0 1.305</translation>
<extents>0.05 0.255 0.05</extents>
</Geom>
<Geom type="box">
<diffusecolor>0 0 0</diffusecolor>
<translation>-0.615 0 0.635</translation>
<extents>0.255 0.255 0.01</extents>
</Geom>
<Geom type="box">
<diffusecolor>0 0 0</diffusecolor>
<translation>-0.361 0 1.085</translation>
<extents>0.001 0.155 0.17</extents>
</Geom>
<mass type="custom">
<total>40</total>
</mass>
</Body>
</KinBody>
<Robot file="robots/barrettwam.robot.xml"></Robot>
<KinBody>
<body name="wam0">
<!-- shift wam0 to align correctly with segway base -->
<translation>0.22 0.14 0.346</translation>
<translation>-0.099 -0.14 0.61</translation>
</body>
<joint name="joint4" type="hinge" enable="false">
<body>segway</body>
<body>wam0</body>
<limits>0 0</limits>
</joint>
</KinBody>
</Robot>
"""
with self.env:
robot=self.LoadRobotData(testrobot_xml)
robot.SetDOFValues([-0.91,2.05],[1,3])
assert(robot.CheckSelfCollision())
def test_known_collisions(self):
env=self.env
self.LoadEnv('data/lab1.env.xml')
robot=env.GetRobots()[0]
robot.SetDOFValues([ -8.44575603e-02, 1.48528347e+00, -5.09108824e-08, 6.48108822e-01, -4.57571203e-09, -1.04008750e-08, 7.26855048e-10, 5.50807826e-08, 5.50807826e-08, -1.90689327e-08, 0.00000000e+00])
assert(env.CheckCollision(robot))
def test_collisioncallbacks(self):
env=self.env
self.LoadEnv('data/lab1.env.xml')
robot=env.GetRobots()[0]
reports = []
def collisioncallback(report,fromphysics):
assert(not fromphysics)
reports.append(report)
return CollisionAction.DefaultAction
handle = env.RegisterCollisionCallback(collisioncallback)
assert(not env.CheckCollision(robot))
assert(len(reports)==0)
assert(env.CheckCollision(env.GetKinBody('mug1')))
assert(len(reports)==1)
def collisioncallback2(report,fromphysics):
return CollisionAction.Ignore
handle2 = env.RegisterCollisionCallback(collisioncallback2)
assert(not env.CheckCollision(env.GetKinBody('mug1')))
del reports[:]
handle2.Close()
assert(env.CheckCollision(env.GetKinBody('mug1')))
assert(len(reports)==1)
handle.Close()
assert(env.CheckCollision(env.GetKinBody('mug1')))
assert(len(reports)==1)
def test_activedofdistance(self):
self.log.debug('test distance computation with active dofs')
env=self.env
self.LoadEnv('data/lab1.env.xml')
with env:
pqp = RaveCreateCollisionChecker(env,'pqp')
pqp.InitEnvironment()
robot=env.GetRobots()[0]
manip=robot.GetActiveManipulator()
report = CollisionReport()
pqp.SetCollisionOptions(CollisionOptions.Contacts|CollisionOptions.Distance)
pqp.CheckCollision(manip.GetEndEffector(),report=report)
assert(abs(report.minDistance-0.38737) < 0.01 )
assert(report.plink1 == manip.GetEndEffector())
assert(report.plink2 == env.GetKinBody('pole').GetLinks()[0])
pqp.CheckCollision(robot,report=report)
assert(abs(report.minDistance-0.0027169) < 0.01 )
assert(report.plink1 == robot.GetLink('segway'))
assert(report.plink2 == env.GetKinBody('floorwalls').GetLinks()[0])
pqp.SetCollisionOptions(CollisionOptions.Contacts|CollisionOptions.Distance|CollisionOptions.ActiveDOFs)
robot.SetActiveDOFs(manip.GetArmIndices())
pqp.CheckCollision(robot,report=report)
assert(abs(report.minDistance-0.29193971893003506) < 0.01 )
assert(report.plink1 == robot.GetLink('wam1'))
assert(report.plink2 == env.GetKinBody('pole').GetLinks()[0])
def test_multiplecontacts(self):
env=self.env
env.GetCollisionChecker().SetCollisionOptions(CollisionOptions.AllLinkCollisions)
self.LoadEnv('data/lab1.env.xml')
robot = env.GetRobots()[0]
manip = robot.GetManipulators()[0]
body1 = env.GetKinBody('mug1')
body2 = env.GetKinBody('mug2')
body1.SetTransform(manip.GetEndEffector().GetTransform())
body2.SetTransform(manip.GetEndEffector().GetTransform())
report = CollisionReport()
env.CheckCollision(robot,report=report)
assert(len(report.vLinkColliding)==8)
manip.CheckEndEffectorCollision(report)
assert(len(report.vLinkColliding)==4)
#generate_classes(RunCollision, globals(), [('ode','ode'),('bullet','bullet')])
class test_ode(RunCollision):
def __init__(self):
RunCollision.__init__(self, 'ode')
# class test_bullet(RunCollision):
# def __init__(self):
# RunCollision.__init__(self, 'bullet')
#
```
#### File: openrave/test/test_configurationcache.py
```python
from common_test_openrave import *
import imp
class TestConfigurationCache(EnvironmentSetup):
def setup(self):
EnvironmentSetup.setup(self)
# find out where configurationcache is installed
cachepath = None
for path, info in RaveGetPluginInfo():
pathdir, pathname = os.path.split(path)
if pathname.find('openravepy_configurationcache') >= 0:
cachepath = path
break
assert(cachepath is not None)
self.openravepy_configurationcache = imp.load_dynamic('openravepy_configurationcache',cachepath)
def test_insertandquery(self):
self.LoadEnv('data/lab1.env.xml')
env=self.env
robot=env.GetRobots()[0]
robot.SetActiveDOFs(range(7))
cache=self.openravepy_configurationcache.ConfigurationCache(robot)
values = robot.GetActiveDOFValues()
inserted = cache.InsertConfiguration(values, None)
assert(inserted and cache.GetNumNodes()==1)
values[1] = pi/2
inserted=cache.InsertConfiguration(values, None)
assert(inserted and cache.GetNumNodes()>=2)
assert(cache.Validate())
values[1] = pi/2-0.0001
robot.SetActiveDOFValues(values)
ret, closestdist, collisioninfo = cache.CheckCollision(values)
assert(ret==0)
originalvalues = array([0,pi/2,0,pi/6,0,0,0])
sampler = RaveCreateSpaceSampler(env, u'MT19937')
sampler.SetSpaceDOF(robot.GetActiveDOF())
report=CollisionReport()
with env:
for iter in range(0, 10000):
robot.SetActiveDOFValues(originalvalues + 0.05*(sampler.SampleSequence(SampleDataType.Real,1)-0.5))
samplevalues = robot.GetActiveDOFValues()
incollision = env.CheckCollision(robot, report=report)
inserted = cache.InsertConfiguration(samplevalues, report if incollision else None)
self.log.info('cache has %d nodes', cache.GetNumNodes())
assert(cache.Validate())
with env:
numspurious = 0
nummisses = 0
numtests = 1000
collisiontimes = []
cachetimes = []
for iter in range(0, numtests):
robot.SetActiveDOFValues(originalvalues + 0.05*(sampler.SampleSequence(SampleDataType.Real,1)-0.5))
samplevalues = robot.GetActiveDOFValues()
starttime=time.time()
ret, closestdist, collisioninfo = cache.CheckCollision(samplevalues)
midtime=time.time()
incollision = env.CheckCollision(robot, report=report)
endtime=time.time()
cachetimes.append(midtime-starttime)
collisiontimes.append(endtime-midtime)
if ret != -1:
assert(closestdist <= 1)
# might give spurious collision since cache is being conservative
if incollision != ret:
if ret == 1:
numspurious += 1
else:
# unexpected freespace
assert(0)
else:
nummisses += 1
            self.log.info('num spurious collisions=%d/%d, num misses = %d/%d, meancache=%fs, meancollision=%fs', numspurious, numtests, nummisses, numtests, mean(cachetimes), mean(collisiontimes))
assert(float(numspurious)/float(numtests)<=0.06)
assert(float(nummisses)/float(numtests)>0.1) # space is pretty big
assert(mean(cachetimes) < mean(collisiontimes)) # caching should be faster
def test_io(self):
env = self.env
with env:
self.LoadEnv('data/hironxtable.env.xml')
robot = env.GetRobots()[0]
manip = robot.SetActiveManipulator('leftarm_torso')
lmodel=databases.linkstatistics.LinkStatisticsModel(robot)
if not lmodel.load():
lmodel.autogenerate()
lmodel.setRobotWeights()
lmodel.setRobotResolutions(xyzdelta=0.04)
basemanip = interfaces.BaseManipulation(robot)
robot.SetActiveDOFs(manip.GetArmIndices())
goal = robot.GetActiveDOFValues()
goal[0] = -0.556
goal[3] = -1.86
oldchecker = env.GetCollisionChecker()
cachechecker = RaveCreateCollisionChecker(self.env,'CacheChecker')
success=cachechecker.SendCommand('TrackRobotState %s'%robot.GetName())
assert(success is not None)
env.SetCollisionChecker(cachechecker)
robot.SetSelfCollisionChecker(cachechecker)
sampler = RaveCreateSpaceSampler(env, u'RobotConfiguration %s'%robot.GetName())
sampler.SampleSequence(SampleDataType.Real,1)
report=CollisionReport()
cachechecker.SendCommand('ResetSelfCache')
stime = time.time()
confs = []
for iter in range(0, 500):
robot.SetActiveDOFValues(sampler.SampleSequence(SampleDataType.Real,1))
samplevalues = robot.GetActiveDOFValues()
confs.append(samplevalues)
if (iter%10==0):
self.log.info('checking self collisions %s...',iter)
self.log.info('writing cache to file...')
cachechecker.SendCommand('SaveCache')
env.GetCollisionChecker().CheckSelfCollision(robot, report=report)
rawtime = time.time()-stime
selfcachedcollisions, selfcachedcollisionhits, selfcachedfreehits, selfcachesize = cachechecker.SendCommand('GetSelfCacheStatistics').split()
self.log.info('selfcollisionhits=%s selffreehits=%s selfcachesize=%s in %ss', selfcachedcollisionhits, selfcachedfreehits, selfcachesize, rawtime)
self.log.info('writing cache to file...')
cachechecker.SendCommand('SaveCache')
def test_find_insert(self):
self.LoadEnv('data/lab1.env.xml')
env=self.env
robot=env.GetRobots()[0]
robot.SetActiveDOFs(range(7))
cache=self.openravepy_configurationcache.ConfigurationCache(robot)
cache.SetFreeSpaceThresh(1)
values = robot.GetActiveDOFValues()
inserted = cache.InsertConfiguration(values, None)
sampler = RaveCreateSpaceSampler(env, u'MT19937')
sampler.SetSpaceDOF(robot.GetActiveDOF())
with env:
self.log.info('testing exhaustive insertion...')
for iter in range(0, 10000):
if iter%1000==0:
self.log.info('%d valid insertions %d nodes...',iter,cache.GetNumNodes())
samplevalues = 0.3*(sampler.SampleSequence(SampleDataType.Real,1)-0.5)
nn = cache.FindNearestNode(samplevalues, 4)
if nn is None:
cache.SetFreeSpaceThresh(8)
inserted = cache.InsertConfigurationDist(samplevalues, None, 1)
assert(inserted == 1)
cache.SetFreeSpaceThresh(1)
self.log.info('exhaustive insertion test passed')
def test_updates(self):
env = self.env
with env:
self.LoadEnv('data/hironxtable.env.xml')
robot = env.GetRobots()[0]
manip = robot.SetActiveManipulator('leftarm_torso')
lmodel=databases.linkstatistics.LinkStatisticsModel(robot)
if not lmodel.load():
lmodel.autogenerate()
lmodel.setRobotWeights()
lmodel.setRobotResolutions(xyzdelta=0.01)
basemanip = interfaces.BaseManipulation(robot)
robot.SetActiveDOFs(manip.GetArmIndices())
goal = robot.GetActiveDOFValues()
goal[0] = -0.556
goal[3] = -1.86
self.log.info('testing cache updates...')
oldchecker = env.GetCollisionChecker()
cachechecker = RaveCreateCollisionChecker(self.env,'CacheChecker')
success=cachechecker.SendCommand('TrackRobotState %s'%robot.GetName())
assert(success is not None)
env.SetCollisionChecker(cachechecker)
robot.SetSelfCollisionChecker(cachechecker)
traj = basemanip.MoveActiveJoints(goal=goal,maxiter=5000,steplength=0.01,maxtries=1,execute=False,outputtrajobj=True)
cachedcollisions, cachedcollisionhits, cachedfreehits, oldcachesize = cachechecker.SendCommand('GetCacheStatistics').split()
self.env.Remove(self.env.GetBodies()[1])
cachedcollisions, cachedcollisionhits, cachedfreehits, cachesize = cachechecker.SendCommand('GetCacheStatistics').split()
assert(oldcachesize>cachesize)
self.log.info('environment removebody test passed (%s/%s)',cachesize,oldcachesize)
assert(int(cachechecker.SendCommand('ValidateCache')) == 1)
assert(int(cachechecker.SendCommand('ValidateSelfCache')) == 1)
self.log.info('valid tests passed')
traj = basemanip.MoveActiveJoints(goal=goal,maxiter=5000,steplength=0.01,maxtries=1,execute=False,outputtrajobj=True)
cachedcollisions, cachedcollisionhits, cachedfreehits, cachesize = cachechecker.SendCommand('GetCacheStatistics').split()
self.env.Reset()
cachedcollisions, cachedcollisionhits, cachedfreehits, addcachesize = cachechecker.SendCommand('GetCacheStatistics').split()
assert(cachesize>addcachesize)
self.log.info('environment addbody test passed (%s/%s)',addcachesize,cachesize)
assert(int(cachechecker.SendCommand('ValidateCache')) == 1)
assert(int(cachechecker.SendCommand('ValidateSelfCache')) == 1)
self.log.info('valid tests passed')
def test_planning(self):
env = self.env
with env:
self.LoadEnv('data/hironxtable.env.xml')
robot = env.GetRobots()[0]
manip = robot.SetActiveManipulator('leftarm_torso')
lmodel=databases.linkstatistics.LinkStatisticsModel(robot)
if not lmodel.load():
lmodel.autogenerate()
lmodel.setRobotWeights()
lmodel.setRobotResolutions(xyzdelta=0.004)
basemanip = interfaces.BaseManipulation(robot)
robot.SetActiveDOFs(manip.GetArmIndices())
goal = robot.GetActiveDOFValues()
goal[0] = -0.556
goal[3] = -1.86
self.log.info('testing planning...')
oldchecker = env.GetCollisionChecker()
cachechecker = RaveCreateCollisionChecker(self.env,'CacheChecker')
success=cachechecker.SendCommand('TrackRobotState %s'%robot.GetName())
assert(success is not None)
starttime = time.time()
traj = basemanip.MoveActiveJoints(goal=goal,maxiter=5000,steplength=0.01,maxtries=1,execute=False,outputtrajobj=True)
regtime = time.time()-starttime
self.log.info('time without cache %s',regtime)
cachechecker.SendCommand('ResetSelfCache')
env.SetCollisionChecker(cachechecker)
robot.SetSelfCollisionChecker(cachechecker)
cachedtimes = []
prevtime = float('Inf')
for runs in range(5):
starttime = time.time()
traj = basemanip.MoveActiveJoints(goal=goal,maxiter=5000,steplength=0.01,maxtries=1,execute=True,outputtrajobj=True)
cachetime = time.time()-starttime
cachedtimes.append(cachetime)
cachedcollisions, cachedcollisionhits, cachedfreehits, cachesize = cachechecker.SendCommand('GetCacheStatistics').split()
cacherate = float((float(cachedfreehits)+float(cachedcollisionhits))/float(cachedcollisions))
selfcachedcollisions, selfcachedcollisionhits, selfcachedfreehits, selfcachesize = cachechecker.SendCommand('GetSelfCacheStatistics').split()
selfcacherate = float((float(selfcachedfreehits)+float(selfcachedcollisionhits))/float(selfcachedcollisions))
self.log.info('planning time=%fs collisionhits=%s/%s freehits=%s/%s cachesize=%s selfcollisionhits=%s/%s selffreehits=%s/%s selfcachesize=%s', cachetime, cachedcollisionhits, cachedcollisions, cachedfreehits, cachedcollisions, cachesize, selfcachedcollisionhits, selfcachedcollisions, selfcachedfreehits, selfcachedcollisions, selfcachesize)
self.log.info('cacherate=%f selfcacherate=%f',cacherate,selfcacherate)
self.log.info('%s',cachechecker.SendCommand('GetCacheTimes'))
self.log.info('run %s', runs)
with robot:
parameters = Planner.PlannerParameters()
parameters.SetRobotActiveJoints(robot)
planningutils.VerifyTrajectory(parameters,traj,samplingstep=0.001)
self.log.info('trajectory test passed')
assert(cachetime < prevtime*1.5)
self.log.info('monotonic decrease test passed (%fs/%fs)',cachetime, prevtime)
prevtime = cachetime
assert(cacherate > 0.9 and selfcacherate > 0.9)
self.log.info('hitrate test passed (%f)(%f)',cacherate,selfcacherate)
env.SetCollisionChecker(oldchecker)
starttime = time.time()
traj2 = basemanip.MoveActiveJoints(goal=goal,maxiter=5000,steplength=0.01,maxtries=1,execute=False,outputtrajobj=True)
originaltime = time.time()-starttime
assert(originaltime*1.5 > cachedtimes[0])
self.log.info('speedup test passed (%fs/%fs)',originaltime, cachedtimes[0])
with robot:
parameters = Planner.PlannerParameters()
parameters.SetRobotActiveJoints(robot)
planningutils.VerifyTrajectory(parameters,traj2,samplingstep=0.002)
spec = manip.GetArmConfigurationSpecification()
usedbodies = spec.ExtractUsedBodies(env)
assert(len(usedbodies) == 1 and usedbodies[0] == robot)
useddofindices, usedconfigindices = spec.ExtractUsedIndices(robot)
assert(sorted(useddofindices) == sorted(manip.GetArmIndices()))
cachechecker.SendCommand('ResetCache')
cachedcollisions, cachedcollisionhits, cachedfreehits, cachesize = cachechecker.SendCommand('GetCacheStatistics').split()
assert(int(cachesize)==0)
self.log.info('cache reset test passed')
cachechecker.SendCommand('ResetSelfCache')
cachedcollisions, cachedcollisionhits, cachedfreehits, cachesize = cachechecker.SendCommand('GetSelfCacheStatistics').split()
assert(int(cachesize)==0)
self.log.info('self cache reset test passed')
```
#### File: openrave/test/test_geometry.py
```python
from common_test_openrave import *
def test_transformations():
log.info('tests basic math transformations')
for i in range(20):
axisangle0 = (random.rand(3)-0.5)*1.99*pi/sqrt(3) # cannot have mag more than pi
trans = random.rand(3)-0.5
R0 = rotationMatrixFromAxisAngle(axisangle0)
quat0 = quatFromAxisAngle(axisangle0)
axisangle1 = axisAngleFromQuat(quat0)
axisangle2 = axisAngleFromRotationMatrix(R0)
R1 = rotationMatrixFromQuat(quat0)
quat1 = quatFromRotationMatrix(R0)
T0 = matrixFromAxisAngle(axisangle1)
T0[0:3,3] = trans
pose0 = poseFromMatrix(T0)
T1 = matrixFromPose(pose0)
poses = poseFromMatrices([T0,T1])
T2,T3 = matrixFromPoses(poses)
assert(sum(abs(R0-R1)) <= g_epsilon)
assert(abs(sum(quat0**2)-1) <= g_epsilon)
        assert(sum(abs(linalg.inv(R0)-R0.transpose())) <= g_epsilon)
        assert(sum(abs(linalg.inv(T0[0:3,0:3])-R0.transpose())) <= g_epsilon)
assert(sum(abs(T0-T1)) <= g_epsilon and sum(abs(T0-T2)) <= g_epsilon and sum(abs(T0-T3)) <= g_epsilon)
assert(abs(abs(dot(quat0,quat1))-1) <= g_epsilon)
assert(sum(abs(axisangle0-axisangle1)) <= g_epsilon and sum(abs(axisangle0-axisangle2)) <= g_epsilon)
# test multiplication
X = random.rand(10,3)-0.5
Xnew = quatRotateArrayT(quat0,X)
assert( sum(abs(Xnew-dot(X,R0.transpose()))) <= g_epsilon )
assert( sum(abs(quatRotate(quat0,X[0]) - Xnew[0])) <= g_epsilon )
assert( sum(abs(transformPoints(T0,X)-Xnew-tile(trans,(len(X),1)))) <= g_epsilon )
assert( sum(abs(rotationMatrixFromQuat(quatMult(quat0,quat0)) - dot(R0,R0))) <= g_epsilon )
assert( sum(abs(dot(T0,T0)-matrixFromPose(poseMult(pose0,pose0)))) <= g_epsilon )
qarray0 = randquat(5)
qarray1 = quatArrayTMult(qarray0,quat0)
assert( sum(abs(quatArrayTRotate(qarray0,Xnew[0])-quatArrayTRotate(qarray1,X[0]))) <= g_epsilon )
assert( sum(abs(quatArrayRotate(qarray0.transpose(),Xnew[0])-quatArrayRotate(qarray1.transpose(),X[0]))) <= g_epsilon )
qarray2 = quatMultArrayT(quat0,qarray0)
assert( sum(abs(quatRotateArrayT(quat0,quatArrayTRotate(qarray0,X[0]))-quatArrayTRotate(qarray2,X[0]))) <= g_epsilon )
dists = quatArrayTDist(qarray0[0],qarray0)
assert( all(dists>=0) and sum(dists)>0 and dists[0] <= g_epsilon )
posearray0 = randpose(5)
posearray1 = poseMultArrayT(pose0,posearray0)
assert( sum(abs(poseMult(pose0,posearray0[0])-posearray1[0])) <= g_epsilon )
for j in range(len(posearray0)):
poseTransformPoints(pose0,poseTransformPoints(posearray0[j],X))
poseTransformPoints(posearray1[j],X)
assert( sum(abs(poseTransformPoints(pose0,poseTransformPoints(posearray0[j],X)) - poseTransformPoints(posearray1[j],X))) <= g_epsilon )
# inverses
matrices = matrixFromPoses(posearray0)
posearrayinv0 = invertPoses(posearray0)
for j in range(len(posearray0)):
assert( sum(abs(transformInversePoints(matrices[j],X) - poseTransformPoints(posearrayinv0[j],X))) <= g_epsilon )
# slightly unnormalized pose
T = matrixFromPose([ 0.00422863, 0.00522595, 0.707, 0.707182, 0.204229, 0.628939, 1.40061])
assert(abs(linalg.det(T[0:3,0:3])-1) <= g_epsilon )
def test_quatRotateDirection():
pairs = [ [[1,0,0], [0,1,0]], [[1,0,0], [1,0,0]], [[1,1,0], [0,1,1]] ]
for sourcedir, targetdir in pairs:
T = matrixFromQuat(quatRotateDirection(sourcedir,targetdir))
assert( transdist(dot(T[0:3,0:3],sourcedir),targetdir) <= g_epsilon )
```
#### File: openrave/test/test_planning.py
```python
from common_test_openrave import *
class RunPlanning(EnvironmentSetup):
def __init__(self,collisioncheckername):
self.collisioncheckername = collisioncheckername
def setup(self):
EnvironmentSetup.setup(self)
self.env.SetCollisionChecker(RaveCreateCollisionChecker(self.env,self.collisioncheckername))
def test_basicplanning(self):
env = self.env
with env:
self.LoadEnv('data/hironxtable.env.xml')
robot = env.GetRobots()[0]
manip = robot.SetActiveManipulator('leftarm_torso')
# need the linkstatistics model or else robot will get into collision
lmodel=databases.linkstatistics.LinkStatisticsModel(robot)
if not lmodel.load():
lmodel.autogenerate()
lmodel.setRobotWeights()
lmodel.setRobotResolutions(xyzdelta=0.002)
basemanip = interfaces.BaseManipulation(robot)
robot.SetActiveDOFs(manip.GetArmIndices())
goal = robot.GetActiveDOFValues()
goal[0] = -0.556
goal[3] = -1.86
traj = basemanip.MoveActiveJoints(goal=goal,maxiter=5000,steplength=0.01,maxtries=2,execute=False,outputtrajobj=True)
with robot:
parameters = Planner.PlannerParameters()
parameters.SetRobotActiveJoints(robot)
planningutils.VerifyTrajectory(parameters,traj,samplingstep=0.002)
self.RunTrajectory(robot,traj)
spec = manip.GetArmConfigurationSpecification()
usedbodies = spec.ExtractUsedBodies(env)
assert(len(usedbodies) == 1 and usedbodies[0] == robot)
useddofindices, usedconfigindices = spec.ExtractUsedIndices(robot)
assert(sorted(useddofindices) == sorted(manip.GetArmIndices()))
def test_ikplanning(self):
env = self.env
self.LoadEnv('data/lab1.env.xml')
robot = env.GetRobots()[0]
ikmodel = databases.inversekinematics.InverseKinematicsModel(robot, iktype=IkParameterization.Type.Transform6D)
if not ikmodel.load():
ikmodel.autogenerate()
with env:
Tee = ikmodel.manip.GetEndEffectorTransform()
Tee[0:3,3] -= 0.4
solutions = ikmodel.manip.FindIKSolutions(Tee,IkFilterOptions.CheckEnvCollisions)
assert(len(solutions)>1)
basemanip = interfaces.BaseManipulation(robot)
with robot:
for sol in solutions:
robot.SetDOFValues(sol,ikmodel.manip.GetArmIndices())
assert(transdist(Tee,ikmodel.manip.GetEndEffectorTransform()) <= g_epsilon)
traj=basemanip.MoveManipulator(goals=solutions,execute=True,outputtrajobj=True)
# check that last point is accurate
lastvalues = traj.GetConfigurationSpecification().ExtractJointValues(traj.GetWaypoint(-1), robot, ikmodel.manip.GetArmIndices(), 0)
with robot:
robot.SetDOFValues(lastvalues,ikmodel.manip.GetArmIndices())
assert(min([sum(abs(s-lastvalues)) for s in solutions]) <= g_epsilon)
assert(transdist(Tee,ikmodel.manip.GetEndEffectorTransform()) <= g_epsilon)
env.StartSimulation(0.01,False)
robot.WaitForController(0)
with env:
self.log.info('Tee dist=%f',transdist(Tee,ikmodel.manip.GetEndEffectorTransform()))
assert(transdist(Tee,ikmodel.manip.GetEndEffectorTransform()) <= g_epsilon)
def test_constraintpr2(self):
env = self.env
robot = self.LoadRobot('robots/pr2-beta-static.zae')
with env:
manip=robot.SetActiveManipulator('leftarm_torso')
basemanip = interfaces.BaseManipulation(robot)
robot.SetDOFValues([.31],[robot.GetJoint('torso_lift_joint').GetDOFIndex()])
T=array([[0,0,1,.6], [0,1,0,.1], [-1,0,0,.73], [0,0,0,1]])
robot.SetDOFValues(manip.FindIKSolution(T,IkFilterOptions.CheckEnvCollisions),manip.GetArmIndices())
Tgoal=array([[0,0,1,.6], [0,1,0,.3], [-1,0,0,.73], [0,0,0,1]])
constraintfreedoms=array([1,1,0,1,0,0]) # can rotate along z, translate along y
constraintmatrix=array([[1,0,0,0], [0,1,0,0], [0,0,1,0], [0,0,0,1]])
for constrainterrorthresh in [0.002,0.01]:
traj = basemanip.MoveToHandPosition(matrices=[Tgoal],maxiter=6000,maxtries=2,seedik=16, constraintfreedoms=constraintfreedoms, constraintmatrix=constraintmatrix, constrainterrorthresh=constrainterrorthresh,execute=False,outputtrajobj=True,steplength=0.001)
self.RunTrajectory(robot,traj)
def test_constraintwam(self):
env = self.env
self.LoadEnv('data/lab1.env.xml')
robot=env.GetRobots()[0]
self.log.debug('generating ikmodel')
ikmodel = databases.inversekinematics.InverseKinematicsModel(robot=robot,iktype=IkParameterization.Type.Transform6D)
if not ikmodel.load():
ikmodel.autogenerate()
self.log.debug('generating gmodel')
gmodel = databases.grasping.GraspingModel(robot=robot,target=env.GetKinBody('mug1'))
if not gmodel.load():
            # don't do multithreading yet since ode on some ubuntu distros does not support it
gmodel.numthreads = 2 # at least two threads
gmodel.generate(approachrays=gmodel.computeBoxApproachRays(delta=0.04))
gmodel.save()
self.log.debug('planning')
with env:
basemanip = interfaces.BaseManipulation(robot)
robot.SetActiveDOFs(ikmodel.manip.GetArmIndices())
validgrasps,validindices = gmodel.computeValidGrasps(returnnum=1)
validgrasp=validgrasps[0]
gmodel.setPreshape(validgrasp)
T = gmodel.getGlobalGraspTransform(validgrasp,collisionfree=True)
sol = gmodel.manip.FindIKSolution(T,IkFilterOptions.CheckEnvCollisions)
robot.SetActiveDOFValues(sol)
robot.Grab(gmodel.target)
xyzconstraints = array([1,2],int)
constraintfreedoms = array([1,0,1,1,0,0]) # rotation xyz, translation xyz
localrotaxis = array([0,1,0])
constrainterrorthresh = 0.005
constrainttaskmatrix=dot(linalg.inv(T),gmodel.target.GetTransform())
constraintmatrix = linalg.inv(gmodel.target.GetTransform())
Tgoal = ikmodel.manip.GetTransform()
Tgoal[0:3,3] += [0,0.1,0.2]
solgoal = ikmodel.manip.FindIKSolution(Tgoal,IkFilterOptions.CheckEnvCollisions)
assert(solgoal is not None)
traj = basemanip.MoveToHandPosition(matrices=[Tgoal],maxiter=3000,maxtries=1,seedik=40,constraintfreedoms=constraintfreedoms,constraintmatrix=constraintmatrix,constrainttaskmatrix=constrainttaskmatrix,constrainterrorthresh=constrainterrorthresh,steplength=0.002,outputtrajobj=True,execute=False,jitter=0.05)
soltraj = traj.Sample(0,robot.GetActiveConfigurationSpecification())
self.log.debug('make sure it starts at the initial configuration')
assert(transdist(soltraj,sol) <= g_epsilon)
self.RunTrajectory(robot,traj)
# try another goal
Tlocal = matrixFromAxisAngle([0,1,0])
Tlocal[0:3,3] = [0,0.1,-0.2]
Tnewtarget = dot(gmodel.target.GetTransform(),Tlocal)
Tgoal = dot(Tnewtarget, dot(linalg.inv(gmodel.target.GetTransform()), ikmodel.manip.GetTransform()))
traj = basemanip.MoveToHandPosition(matrices=[Tgoal],maxiter=3000,maxtries=1,seedik=40,constraintfreedoms=constraintfreedoms,constraintmatrix=constraintmatrix,constrainttaskmatrix=constrainttaskmatrix,constrainterrorthresh=constrainterrorthresh,steplength=0.002,outputtrajobj=True,execute=False,jitter=0.05)
self.RunTrajectory(robot,traj)
def test_wamgraspfromcollision(self):
env = self.env
self.LoadEnv('data/lab1.env.xml')
robot=env.GetRobots()[0]
ikmodel = databases.inversekinematics.InverseKinematicsModel(robot=robot,iktype=IkParameterization.Type.Transform6D)
if not ikmodel.load():
ikmodel.autogenerate()
gmodel = databases.grasping.GraspingModel(robot=robot,target=env.GetKinBody('mug1'))
if not gmodel.load():
            # don't do multithreading yet since ode on some ubuntu distros does not support it
gmodel.numthreads = 2 # at least two threads
gmodel.generate(approachrays=gmodel.computeBoxApproachRays(delta=0.04))
gmodel.save()
with env:
basemanip = interfaces.BaseManipulation(robot)
robot.SetActiveDOFs(ikmodel.manip.GetArmIndices())
orgvalues = robot.GetActiveDOFValues()
validgrasps,validindices = gmodel.computeValidGrasps(returnnum=1)
validgrasp=validgrasps[0]
gmodel.setPreshape(validgrasp)
T = gmodel.getGlobalGraspTransform(validgrasp,collisionfree=True)
traj = basemanip.MoveToHandPosition(matrices=[T],outputtrajobj=True,execute=False)
self.RunTrajectory(robot,traj)
robot.Grab(gmodel.target)
sol = robot.GetActiveDOFValues()
traj = basemanip.MoveManipulator(orgvalues,outputtrajobj=True,execute=False,jitter=0.05)
soltraj = traj.Sample(0,robot.GetActiveConfigurationSpecification())
self.log.debug('make sure it starts at the initial configuration')
assert(transdist(soltraj,sol) <= g_epsilon)
self.RunTrajectory(robot,traj)
# check if cloning works after model is grabbed
robot.SetDOFVelocities(ones(robot.GetDOF()))
env2 = Environment()
env2.Clone(env,CloningOptions.Bodies|CloningOptions.Simulation)
misc.CompareEnvironments(env,env2,epsilon=g_epsilon)
def test_movehandstraight(self):
env = self.env
with env:
self.LoadEnv('data/lab1.env.xml')
robot = env.GetRobots()[0]
ikmodel = databases.inversekinematics.InverseKinematicsModel(robot=robot,iktype=IkParameterization.Type.Transform6D)
if not ikmodel.load():
ikmodel.autogenerate()
basemanip = interfaces.BaseManipulation(robot)
testvalues = [[array([ -2.83686683e-01, 1.40828054e+00, 0.00000000e+00, 5.26754682e-01, -3.14159265e+00, -1.20655743e+00, -1.85448301e+00, 1.66533454e-16, 1.66533454e-16, 1.66533454e-16, 0.00000000e+00]), array([ -9.22429319e-16, -3.90560499e-15, -1.00000000e+00])],
[array([-0.26085414, 1.37967815, 0. , 0.60871186, -3.14159265, -1.15320264, -0.26085414, 0. , 0. , 0. , 0. ]), array([ -7.21644966e-16, -3.28903571e-15, -1.00000000e+00])],
[array([ -5.90848599e-02, 9.54294051e-01, 0.00000000e+00, 2.22628339e+00, 9.99200722e-15, -3.89847865e-02, 1.51171147e+00, 1.66533454e-16, 1.66533454e-16, 1.66533454e-16, 0.00000000e+00]), array([ -1.03626102e-14, 7.85046229e-16, -1.00000000e+00])],
[array([-0.53374407, 0.9960226 , 0. , 1.91409838, -3.14159265, -0.23147168, -2.10454039, 0. , 0. , 0. , 0. ]), array([ 6.86915451e-16, -1.35420475e-15, -1.00000000e+00])]]
for dofvalues, direction in testvalues:
robot.SetDOFValues(dofvalues)
assert( not env.CheckCollision(robot) )
ret = basemanip.MoveHandStraight(direction=direction, ignorefirstcollision=False,stepsize=0.001,minsteps=19,maxsteps=20, execute=False)
assert(ret is not None)
# try another environment with grabbing
env.Reset()
self.LoadEnv('data/puma_tabletop.env.xml')
robot = env.GetRobots()[0]
ikmodel = databases.inversekinematics.InverseKinematicsModel(robot=robot,iktype=IkParameterization.Type.Transform6D)
if not ikmodel.load():
ikmodel.autogenerate()
basemanip = interfaces.BaseManipulation(robot)
taskmanip = interfaces.TaskManipulation(robot)
traj=taskmanip.ReleaseFingers(execute=False,outputtrajobj=True)[1]
self.RunTrajectory(robot,traj)
Tstart = array([[ -1, 0, 0, 2.00000000e-01], [ 0,0, 1, 6.30000000e-01], [ 0, 1 , 0, 5.50000000e-02], [ 0,0,0,1]])
traj=basemanip.MoveToHandPosition(matrices=[Tstart],execute=False,outputtrajobj=True)
self.RunTrajectory(robot,traj)
traj=taskmanip.CloseFingers(execute=False,outputtrajobj=True)[1]
self.RunTrajectory(robot,traj)
target = env.GetKinBody('cylinder_green_3')
robot.Grab(target)
updir = array((0,0,1))
try:
traj = basemanip.MoveHandStraight(direction=updir,stepsize=0.01,minsteps=0,maxsteps=0,execute=False,outputtrajobj=True)
raise ValueError('test_movehandstraight: should not succeed')
except planning_error:
pass
traj = basemanip.MoveHandStraight(direction=updir,stepsize=0.01,minsteps=1,maxsteps=40,execute=False,outputtrajobj=True)
self.RunTrajectory(robot,traj)
traj = basemanip.MoveHandStraight(direction=-updir,stepsize=0.01,minsteps=1,maxsteps=40,execute=False,outputtrajobj=True)
self.RunTrajectory(robot,traj)
Tee = array([[ 0.99502802, 0.07738446, 0.06269684, -0.42132618], [-0.07583651, 0.99676253, -0.02670751, -0.3924502 ], [-0.06456061, 0.02182001, 0.99767521, 0.95401548], [ 0. , 0. , 0. , 1. ]])
traj = basemanip.MoveHandStraight(direction=array([ 0.78915764, 0.13771766, 0.59855163]),starteematrix=Tee,stepsize=0.01,minsteps=60,maxsteps=80,execute=False,outputtrajobj=True)
self.RunTrajectory(robot,traj)
def test_movetohandpositiongrab(self):
env=self.env
self.LoadEnv('data/hanoi_complex2.env.xml')
robot = env.GetRobots()[0]
basemanip = interfaces.BaseManipulation(robot)
with env:
resolutions = [ 0.00292825, 0.00303916, 0.01520142, 0.0163279, 0.03591959, 0.03591959, 0.08129367]
weights = [ 1.61903856, 1.11858069, 0.20061367, 0.15267405, 0.05951496, 0.04199751, 0.01950391]
for j in robot.GetJoints():
j.SetWeights(weights[j.GetDOFIndex():(j.GetDOFIndex()+j.GetDOF())])
j.SetResolution(resolutions[j.GetDOFIndex()])
Tdisk2 = array([[-0.9152146 , 0.40084098, -0.04133701, -0.21687754],
[-0.39826911, -0.88415551, 0.24423505, -0.19242871],
[ 0.06135107, 0.23999073, 0.96883461, 1.12189841],
[ 0. , 0. , 0. , 1. ]])
env.GetKinBody('disk2').SetTransform(Tdisk2)
disk1 = env.GetKinBody('disk1')
robot.SetDOFValues([ 0.4620151 , -2.67934022, 0.29334635, -1.45878047, -1.2220377 , -1.83725485, -0.25900999])
robot.Grab(disk1)
Tgoal = array([[-0.7912808 , 0.25088882, 0.55761053, -0.1646556 ],
[-0.3734526 , 0.52379002, -0.76562208, 0.02972558],
[-0.48415685, -0.81406315, -0.32076991, 1.38703197],
[ 0. , 0. , 0. , 1. ]])
for i in range(4):
# have to execute several times to bring out the bug
out = basemanip.MoveToHandPosition(matrices=[Tgoal],execute=False)
def test_navigationmanip(self):
env=self.env
self.LoadEnv('data/pr2test2.env.xml')
robot = env.GetRobots()[0]
manip = robot.SetActiveManipulator('leftarm_torso')
ikmodel = databases.inversekinematics.InverseKinematicsModel(robot,iktype=IkParameterization.Type.Transform6D)
if not ikmodel.load():
ikmodel.autogenerate()
nonadjlinks = array(robot.GetNonAdjacentLinks(KinBody.AdjacentOptions.Enabled))
basemanip = interfaces.BaseManipulation(robot)
taskmanip = interfaces.TaskManipulation(robot)
target=env.GetKinBody('TibitsBox1')
with env:
targetcollision = env.CheckCollision(target)
jointnames = ['l_shoulder_lift_joint','l_elbow_flex_joint','l_wrist_flex_joint','r_shoulder_lift_joint','r_elbow_flex_joint','r_wrist_flex_joint']
armindices = [robot.GetJoint(name).GetDOFIndex() for name in jointnames]
armgoal = [1.29023451,-2.32099996,-0.69800004,1.27843491,-2.32100002,-0.69799996]
robot.SetActiveDOFs(armindices)
traj=basemanip.MoveActiveJoints(goal=armgoal,execute=False,outputtrajobj=True)
self.RunTrajectory(robot,traj)
assert( transdist(robot.GetActiveDOFValues(),armgoal) <= g_epsilon )
robot.SetActiveDOFs([],Robot.DOFAffine.X|Robot.DOFAffine.Y|Robot.DOFAffine.RotationAxis,[0,0,1])
traj = basemanip.MoveActiveJoints(goal=[2.8,-1.3,0],maxiter=1000,steplength=0.15,maxtries=2,execute=False,outputtrajobj=True)
assert( transdist(nonadjlinks,array(robot.GetNonAdjacentLinks(KinBody.AdjacentOptions.Enabled))) == 0 )
self.RunTrajectory(robot,traj)
traj = taskmanip.ReleaseFingers(execute=False,outputtrajobj=True)[1]
assert( transdist(robot.GetDOFValues(armindices),armgoal) <= g_epsilon )
assert( transdist(nonadjlinks,array(robot.GetNonAdjacentLinks(KinBody.AdjacentOptions.Enabled))) == 0 )
self.RunTrajectory(robot,traj)
Tgoal = array([[0,-1,0,3.5],[-1,0,0,-1.3],[0,0,-1,0.842],[0,0,0,1]])
traj = basemanip.MoveToHandPosition(matrices=[Tgoal],seedik=16,execute=False,outputtrajobj=True)
assert( transdist(nonadjlinks,array(robot.GetNonAdjacentLinks(KinBody.AdjacentOptions.Enabled))) == 0 )
self.RunTrajectory(robot,traj)
traj = taskmanip.CloseFingers(execute=False,outputtrajobj=True)[1]
assert( transdist(nonadjlinks,array(robot.GetNonAdjacentLinks(KinBody.AdjacentOptions.Enabled))) == 0 )
self.RunTrajectory(robot,traj)
robot.Grab(target)
assert( transdist(nonadjlinks,array(robot.GetNonAdjacentLinks(KinBody.AdjacentOptions.Enabled))) == 0 )
assert( not targetcollision or env.CheckCollision(robot) )
traj = basemanip.MoveManipulator(goal=[0, 0, 1.29023451, 0, -2.32099996, 0, -0.69800004, 0],execute=False,outputtrajobj=True)
self.RunTrajectory(robot,traj)
def test_planwithcollision(self):
env=self.env
self.LoadEnv('data/pr2test1.env.xml')
robot=env.GetRobots()[0]
with env:
defaultvalues = robot.GetDOFValues()
manip = robot.SetActiveManipulator('rightarm')
basemanip = interfaces.BaseManipulation(robot)
robot.SetDOFValues([0.187],[robot.GetJoint('l_shoulder_lift_joint').GetDOFIndex()])
assert(env.CheckCollision(robot))
self.log.debug('environment collision')
ikmodel = databases.inversekinematics.InverseKinematicsModel(robot,iktype=IkParameterization.Type.Transform6D)
if not ikmodel.load():
ikmodel.autogenerate()
Tdelta = eye(4)
Tdelta[0,3] = -0.2
Tdelta[2,3] = -0.2
Tnew = dot(manip.GetEndEffectorTransform(),Tdelta)
ret = basemanip.MoveToHandPosition([Tnew],execute=False)
assert(ret is not None)
self.log.debug('self collision')
robot.SetDOFValues(defaultvalues)
robot.SetDOFValues([ 1.34046301, 0.94535038, 3.03934583, -1.30743665, 0 , 0 , 0], robot.GetManipulator('leftarm').GetArmIndices())
assert(robot.CheckSelfCollision())
ret = basemanip.MoveToHandPosition([Tnew],execute=False)
assert(ret is not None)
manip = robot.SetActiveManipulator('rightarm_torso')
ikmodel = databases.inversekinematics.InverseKinematicsModel(robot,iktype=IkParameterization.Type.Transform6D)
if not ikmodel.load():
ikmodel.autogenerate()
try:
ret = basemanip.MoveToHandPosition([Tnew],execute=False)
except planning_error:
ret = None
assert(ret==None)
robot.SetDOFValues([ 1.34046301, -0.52360053, 0.03541482, -2.32130534, 0, 0, 0], robot.GetManipulator('leftarm').GetArmIndices())
assert(robot.CheckSelfCollision())
ret = basemanip.MoveToHandPosition([Tnew],execute=False)
assert(ret is not None)
def test_movebase(self):
env=self.env
xml = """
<robot name="diffdrive_caster">
<kinbody>
<body name="base" type="static">
<mass type="box">
<total>50</total>
<extents>1 1 1</extents>
</mass>
<geom type="box">
<extents>0.1 0.2 0.25</extents>
<translation>0 0 0.35</translation>
</geom>
</body>
<body name="wheel_left">
<geom type="cylinder">
<radius>0.2</radius>
<height>0.05</height>
<translation>0 0.26 0.2</translation>
</geom>
</body>
<body name="wheel_right">s
<geom type="cylinder">
<radius>0.2</radius>
<height>0.05</height>
<translation>0 -0.26 0.2</translation>
</geom>
</body>
</kinbody>
</robot>
"""
robot=self.LoadRobotData(xml)
robot.SetActiveDOFs([], DOFAffine.X | DOFAffine.Y |DOFAffine.RotationAxis, [0,0,1])
basemanip = interfaces.BaseManipulation(robot)
traj=basemanip.MoveActiveJoints([1,1,1],outputtrajobj=True)
self.RunTrajectory(robot,traj)
def test_wamtaskplanwithgoal(self):
env = self.env
self.LoadEnv('data/lab1.env.xml')
robot=env.GetRobots()[0]
def ComputeDestinations(target, table,transdelta=0.1,zoffset=0.01):
with table:
Ttable = table.GetTransform()
table.SetTransform(eye(4))
ab = table.ComputeAABB()
table.SetTransform(Ttable)
p = ab.pos()
e = ab.extents()
Nx = floor(2*e[0]/transdelta)
Ny = floor(2*e[1]/transdelta)
X = []
Y = []
for x in arange(Nx):
X = r_[X, tile((x+1)/(Nx+1),Ny)]
Y = r_[Y, arange(0.5,Ny,1.0)/(Ny+1)]
translations = c_[p[0]-e[0]+2*e[0]*X,p[1]-e[1]+2*e[1]*Y,tile(p[2]+e[2]+zoffset,len(X))]
Trolls = [matrixFromAxisAngle(array((0,0,1)),roll) for roll in arange(0,2*pi,pi/2)] + [matrixFromAxisAngle(array((1,0,0)),roll) for roll in [pi/2,pi,1.5*pi]]
dests = []
Torg = eye(4)
with target:
dests = []
for translation in translations:
for Troll in Trolls:
Troll = array(Troll)
Troll[0:3,3] = translation
target.SetTransform(dot(Ttable, dot(Troll, Torg)))
if not table.GetEnv().CheckCollision(target):
dests.append(target.GetTransform())
return dests
ikmodel = databases.inversekinematics.InverseKinematicsModel(robot=robot,iktype=IkParameterization.Type.Transform6D)
if not ikmodel.load():
ikmodel.autogenerate()
gmodel = databases.grasping.GraspingModel(robot=robot,target=env.GetKinBody('mug4'))
if not gmodel.load():
# don't do multithreaded yet since ode on some ubuntu distros does not support it
gmodel.numthreads = 2 # at least two threads
gmodel.generate(approachrays=gmodel.computeBoxApproachRays(delta=0.04))
gmodel.save()
with env:
basemanip = interfaces.BaseManipulation(robot)
taskmanip = interfaces.TaskManipulation(robot,graspername=gmodel.grasper.plannername)
robot.SetDOFValues(array([-1.04058615, -1.66533689, 1.38425976, 2.01136615, -0.60557912, -1.19215041, 1.96465159, 0. , 0. , 0. , 1.57079633]))
approachoffset = 0.02
dests = ComputeDestinations(gmodel.target,env.GetKinBody('table'))
Ttarget = gmodel.target.GetTransform()
goals,graspindex,searchtime,traj = taskmanip.GraspPlanning(gmodel=gmodel,approachoffset=approachoffset,destposes=dests, seedgrasps = 3,seeddests=8,seedik=1,maxiter=1000, randomgrasps=False,randomdests=False,execute=False,outputtrajobj=True)
assert(transdist(Ttarget,gmodel.target.GetTransform()) <= g_epsilon)
self.RunTrajectory(robot,traj)
assert(transdist(Ttarget,gmodel.target.GetTransform()) <= g_epsilon)
grasp = gmodel.grasps[graspindex]
direction=gmodel.getGlobalApproachDir(grasp)
Tcurgrasp = gmodel.manip.GetTransform()
Tgoalgrasp = gmodel.getGlobalGraspTransform(grasp,collisionfree=False)
assert(transdist(Tcurgrasp[0:3,0:3],Tgoalgrasp[0:3,0:3]) <= g_epsilon)
assert(transdist(Tcurgrasp[0:3,3]+direction*approachoffset, Tgoalgrasp[0:3,3]) <= g_epsilon)
stepsize=0.001
expectedsteps = floor(approachoffset/stepsize)
with gmodel.target:
gmodel.target.Enable(False)
traj = basemanip.MoveHandStraight(direction=direction, ignorefirstcollision=0,stepsize=stepsize,minsteps=expectedsteps,maxsteps=expectedsteps,execute=False,outputtrajobj=True)
with robot:
robot.SetActiveDOFValues(traj.GetWaypoint(-1,robot.GetActiveConfigurationSpecification()))
Tfinal = gmodel.manip.GetTransform()
assert( transdist(Tfinal[0:3,3]-Tcurgrasp[0:3,3],direction*stepsize*expectedsteps) <= g_epsilon )
self.RunTrajectory(robot,traj)
Tcurgrasp = gmodel.manip.GetTransform()
assert(transdist(Tgoalgrasp,Tcurgrasp) <= g_epsilon )
# grasp, and try to lift up
robot.Grab(gmodel.target)
trajup = basemanip.MoveHandStraight(direction=[0,0,1], stepsize=0.003,minsteps=1,maxsteps=60,execute=False,outputtrajobj=True)
robot.Release(gmodel.target)
gmodel.target.Enable(False)
# try another
gmodel = databases.grasping.GraspingModel(robot=robot,target=env.GetKinBody('mug2'))
gmodel.load()
goals,graspindex,searchtime,traj = taskmanip.GraspPlanning(gmodel=gmodel,approachoffset=approachoffset,destposes=dests, seedgrasps = 3,seeddests=8,seedik=1,maxiter=1000, randomgrasps=False,randomdests=False,execute=False,outputtrajobj=True)
self.RunTrajectory(robot,traj)
# should be able to solve it again
goals,graspindex,searchtime,traj = taskmanip.GraspPlanning(gmodel=gmodel,approachoffset=approachoffset,destposes=dests, seedgrasps = 3,seeddests=8,seedik=1,maxiter=1000, randomgrasps=False,randomdests=False,execute=False,outputtrajobj=True)
self.RunTrajectory(robot,traj)
def test_releasefingers(self):
env=self.env
self.LoadEnv('data/katanatable.env.xml')
with env:
robot=env.GetRobots()[0]
m=robot.SetActiveManipulator('arm')
body=env.GetKinBody('mug2')
T = eye(4)
T[0:3,3] = [-0.053,0.39,1.58643]
body.SetTransform(T)
initialvalues = tile(-0.43,len(m.GetGripperIndices()))
robot.SetDOFValues(initialvalues,m.GetGripperIndices())
taskmanip=interfaces.TaskManipulation(robot)
traj=taskmanip.ReleaseFingers(execute=False,outputtrajobj=True)[1]
assert(traj.GetDuration()>0)
newvalues=traj.GetConfigurationSpecification().ExtractJointValues(traj.GetWaypoint(-1),robot,m.GetGripperIndices(),0)
assert(transdist(initialvalues,newvalues) > 0.1 )
self.RunTrajectory(robot,traj)
def test_releasefingerscollision(self):
env=self.env
self.LoadEnv('data/lab1.env.xml')
with env:
robot=env.GetRobots()[0]
# move the robot until in collision
Trobot=robot.GetTransform()
Trobot[2,3] -= 1.5
robot.SetTransform(Trobot)
assert(env.CheckCollision(robot))
m=robot.SetActiveManipulator('arm')
Tmanip = m.GetTransform()
assert( not m.CheckEndEffectorCollision(Tmanip) )
taskmanip=interfaces.TaskManipulation(robot)
traj=taskmanip.CloseFingers(execute=False,outputtrajobj=True)[1]
self.RunTrajectory(robot,traj)
traj=taskmanip.ReleaseFingers(execute=False,outputtrajobj=True)[1]
self.RunTrajectory(robot,traj)
body=env.GetKinBody('mug2')
body.SetTransform(Tmanip)
initialvalues = tile(-0.43,len(m.GetGripperIndices()))
robot.SetDOFValues(initialvalues,m.GetGripperIndices())
taskmanip=interfaces.TaskManipulation(robot)
traj=taskmanip.CloseFingers(execute=False,outputtrajobj=True)[1]
self.RunTrajectory(robot,traj)
assert( m.CheckEndEffectorCollision(Tmanip) )
traj=taskmanip.ReleaseFingers(execute=False,outputtrajobj=True)[1]
self.RunTrajectory(robot,traj)
def test_handgoal_collision(self):
env=self.env
self.LoadEnv('data/lab1.env.xml')
robot=env.GetRobots()[0]
robot.SetDOFValues(array([ -8.44575603e-02, 1.48528347e+00, -5.09108824e-08, 6.48108822e-01, -4.57571203e-09, -1.04008750e-08, 7.26855048e-10, 5.50807826e-08, 5.50807826e-08, -1.90689327e-08, 0.00000000e+00]))
assert(env.CheckCollision(robot))
def test_invalidactive(self):
env=self.env
self.LoadEnv('data/lab1.env.xml')
robot=env.GetRobots()[0]
with env:
manip=robot.GetActiveManipulator()
robot.SetActiveDOFs(manip.GetArmIndices())
manip.GetEndEffector().SetStatic(True)
basemanip=interfaces.BaseManipulation(robot)
try:
basemanip.MoveActiveJoints(goal=robot.GetActiveDOFValues())
raise ValueError('let static link pass')
except openrave_exception as ex:
assert(ex.GetCode()==ErrorCode.InvalidState)
try:
params=Planner.PlannerParameters()
params.SetRobotActiveJoints(robot)
raise ValueError('let static link pass')
except openrave_exception as ex:
assert(ex.GetCode()==ErrorCode.InvalidState)
def test_multipath(self):
env = self.env
self.LoadEnv('data/lab1.env.xml')
robot = env.GetRobots()[0]
with env:
manip = robot.GetActiveManipulator()
basemanip = interfaces.BaseManipulation(robot)
basemanip.prob.SendCommand('SetMinimumGoalPaths 6')
robot.SetActiveDOFs(manip.GetArmIndices())
ikmodel = databases.inversekinematics.InverseKinematicsModel(robot, iktype=IkParameterization.Type.Transform6D)
if not ikmodel.load():
ikmodel.autogenerate()
startpose = array([ 4.75570553e-01, -3.09601285e-16, 8.79677582e-01, -5.55111505e-17, 2.80273561e-01, 1.40000001e-01, 8.88603999e-01])
sol = manip.FindIKSolution(startpose, IkFilterOptions.CheckEnvCollisions)
robot.SetActiveDOFValues(sol)
goalpose = array([ 0.42565319, -0.30998409, 0.60514354, -0.59710177, 0.06460554, 0.386792 , 1.22894527])
ikgoal = IkParameterization(matrixFromPose(goalpose),IkParameterizationType.Transform6D)
basemanip.prob.SendCommand('SetMinimumGoalPaths 1')
traj1 = basemanip.MoveToHandPosition(ikparams=[ikgoal],maxiter=5000,steplength=0.01,maxtries=1,execute=False,outputtrajobj=True)
traj2 = basemanip.MoveToHandPosition(ikparams=[ikgoal],maxiter=5000,steplength=0.01,maxtries=1,execute=False,outputtrajobj=True,goalsampleprob=0.4,minimumgoalpaths=40)
# there is a small probability that this check will fail since the planners are stochastic; sampling more goal paths usually yields a shorter trajectory
assert(traj2.GetDuration() < traj1.GetDuration())
self.RunTrajectory(robot,traj1)
self.RunTrajectory(robot,traj2)
def test_jittertransform(self):
env=self.env
self.LoadEnv('data/lab1.env.xml')
with env:
collisionbody = env.GetKinBody('mug1')
T0 = collisionbody.GetTransform()
assert(env.CheckCollision(collisionbody))
success = planningutils.JitterTransform(collisionbody,0.005)
assert(not success)
assert(transdist(T0, collisionbody.GetTransform()) <= 1e-7)
success = planningutils.JitterTransform(collisionbody,0.02)
assert(success)
assert(not env.CheckCollision(collisionbody))
#generate_classes(RunPlanning, globals(), [('ode','ode'),('bullet','bullet')])
class test_ode(RunPlanning):
def __init__(self):
RunPlanning.__init__(self, 'ode')
#
# class test_bullet(RunPlanning):
# def __init__(self):
# RunPlanning.__init__(self, 'bullet')
#
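# Note: generate_classes (commented above) would normally create one test class
# per collision checker; here only the ODE variant is instantiated by hand and
# the bullet variant is left commented out.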
```
#### File: openrave/test/test_robot.py
```python
from common_test_openrave import *
class RunRobot(EnvironmentSetup):
def __init__(self,collisioncheckername):
self.collisioncheckername = collisioncheckername
def setup(self):
EnvironmentSetup.setup(self)
self.env.SetCollisionChecker(RaveCreateCollisionChecker(self.env,self.collisioncheckername))
def test_dualarm_grabbing(self):
with self.env:
robot = self.LoadRobot('robots/schunk-lwa3-dual.robot.xml')
body = self.env.ReadKinBodyXMLFile('data/box3.kinbody.xml')
self.env.Add(body)
T = eye(4)
T[1,3] = -1.18
T[2,3] = 0.712
body.SetTransform(T)
robot.SetActiveManipulator('leftarm')
assert(self.env.CheckCollision(robot))
robot.Grab(body)
assert(not self.env.CheckCollision(robot))
robot.SetDOFValues(array([ 0.00000000e+00, -1.43329144e+00, -3.99190831e-15, -1.86732388e+00, 5.77239752e-01, -3.37631690e-07, 6.67713991e-08, 0.00000000e+00, -1.70089030e+00, -6.42544150e-01, -1.25030589e+00, -3.33493233e-08, -5.58212676e-08, 1.60115015e-08]))
assert(robot.CheckSelfCollision())
def test_basic(self):
with self.env:
for robotfile in g_robotfiles:
self.env.Reset()
robot = self.LoadRobot(robotfile)
assert(robot.GetDOF() == robot.GetActiveDOF())
assert(robot.GetLinks()[0].GetParent().GetActiveDOF() == robot.GetActiveDOF())
def test_collisionmaprobot(self):
env=self.env
xml = """<environment>
<robot file="robots/collisionmap.robot.xml">
</robot>
</environment>
"""
self.LoadDataEnv(xml)
with env:
robot=env.GetRobots()[0]
assert(robot.GetXMLId().lower()=='collisionmaprobot')
robot.SetDOFValues([9/180.0*pi,1/180.0*pi],[1,2])
assert(robot.CheckSelfCollision())
robot.SetDOFValues([0/180.0*pi,1/180.0*pi],[1,2])
assert(not robot.CheckSelfCollision())
env.Reset()
robot=self.LoadRobot('robots/collisionmap.robot.xml')
assert(robot.GetXMLId().lower()=='collisionmaprobot')
def test_grabcollision(self):
env=self.env
self.LoadEnv('robots/man1.zae') # load a simple scene
with env:
robot = env.GetRobots()[0] # get the first robot
leftarm = robot.GetManipulator('leftarm')
rightarm = robot.GetManipulator('rightarm')
self.LoadEnv('data/mug1.kinbody.xml');
leftmug = env.GetKinBody('mug')
self.LoadEnv('data/mug2.kinbody.xml')
rightmug = env.GetKinBody('mug2')
env.StopSimulation()
leftMugGrabPose = array([[ 0.99516672, -0.0976999 , 0.00989374, 0.14321238],
[ 0.09786028, 0.99505007, -0.01728364, 0.94120538],
[-0.00815616, 0.01816831, 0.9998017 , 0.38686624],
[ 0. , 0. , 0. , 1. ]])
leftmug.SetTransform(leftMugGrabPose)
rightMugGrabPose = array([[ 9.99964535e-01, -1.53668225e-08, 8.41848925e-03, -1.92047462e-01],
[ -8.40134174e-03, -6.37951940e-02, 9.97927606e-01, 9.22815084e-01],
[ 5.37044369e-04, -9.97963011e-01, -6.37929291e-02, 4.16847348e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
rightmug.SetTransform(rightMugGrabPose);
assert(not env.CheckCollision(leftmug,rightmug))
grabJointAngles = array([ -3.57627869e-07, 0.00000000e+00, -1.46997878e-15, -1.65528119e+00, -1.23030146e-08, -8.41909389e-11, 0.00000000e+00], dtype=float32)
robot.SetDOFValues(grabJointAngles,rightarm.GetArmIndices())
robot.SetDOFValues(grabJointAngles,leftarm.GetArmIndices())
robot.SetActiveManipulator(rightarm)
robot.Grab(rightmug)
robot.SetActiveManipulator(leftarm)
robot.Grab(leftmug)
assert(not robot.CheckSelfCollision())
assert(not env.CheckCollision(robot))
self.log.debug('Now changing arm joint angles so that the two grabbed mugs collide; CheckSelfCollision should now return True')
collisionJointAngles = array([ -2.38418579e-07, 0.00000000e+00, -2.96873480e-01, -1.65527940e+00, -3.82479293e-08, -1.23165381e-10, 1.35525272e-20]);
robot.SetDOFValues(collisionJointAngles,rightarm.GetArmIndices())
robot.SetDOFValues(collisionJointAngles,leftarm.GetArmIndices())
assert(robot.CheckSelfCollision())
assert(not env.CheckCollision(robot))
grabbedinfos = robot.GetGrabbedInfo()
grabbedbodies = robot.GetGrabbed()
# try saving the grabbed state
robot.ReleaseAllGrabbed()
robot.ResetGrabbed(grabbedinfos)
grabbedinfo2 = robot.GetGrabbedInfo()
assert(set([g._grabbedname for g in grabbedinfo2]) == set([b.GetName() for b in grabbedbodies]))
robot.ReleaseAllGrabbed()
assert(env.CheckCollision(leftmug,rightmug))
def test_grabcollision_dynamic(self):
self.log.info('test if can handle grabbed bodies being enabled/disabled')
env=self.env
robot = self.LoadRobot('robots/barrettwam.robot.xml')
with env:
target = env.ReadKinBodyURI('data/mug1.kinbody.xml')
env.Add(target,True)
manip=robot.GetActiveManipulator()
target.SetTransform(manip.GetEndEffector().GetTransform())
assert(env.CheckCollision(robot,target))
self.log.info('check disabling target')
target.Enable(False)
robot.Grab(target,manip.GetEndEffector())
assert(not robot.CheckSelfCollision())
target.Enable(True)
assert(not robot.CheckSelfCollision())
target.Enable(False)
assert(not robot.CheckSelfCollision())
target.GetLinks()[0].Enable(True)
assert(not robot.CheckSelfCollision())
self.log.info('check disabling links')
robot.Enable(False)
assert(not robot.CheckSelfCollision())
robot.RegrabAll()
assert(not robot.CheckSelfCollision())
robot.Enable(True)
assert(not robot.CheckSelfCollision())
def test_grabcollision_dynamic2(self):
self.log.info('more tests for dynamic bodies and self-collisions')
env=self.env
with env:
robot = self.LoadRobot('robots/barrettwam.robot.xml')
b=RaveCreateKinBody(env,'')
b.InitFromBoxes(array([[0,0,0,0.05,1,0.05]]),True)
b.SetName('obstacle')
env.Add(b)
Tbody=eye(4)
Tbody[2,3] = 1
b.SetTransform(Tbody)
b2=RaveCreateKinBody(env,'')
b2.InitFromBoxes(array([[0,0,0,0.2,0.2,0.2]]),True)
b2.SetName('obstacle2')
b2.GetLinks()[0].GetGeometries()[0].SetDiffuseColor([0,1,0])
env.Add(b2)
Tbody2=eye(4)
Tbody2[0:3,3] = [0.7,0,0.3]
b2.SetTransform(Tbody2)
manip=robot.GetActiveManipulator()
ikmodel = databases.inversekinematics.InverseKinematicsModel(robot,IkParameterizationType.Transform6D)
if not ikmodel.load():
ikmodel.autogenerate()
robot.Grab(b)
robot.SetActiveDOFs(manip.GetArmIndices())
posegoal = array([ 1.03713883e-02, 7.52075143e-01, 6.58889422e-01, 1.18381978e-02, 3.04044037e-01, -5.96046308e-10, 1.61406347e-01])
b2.Enable(False)
sols = manip.FindIKSolutions(posegoal,IkFilterOptions.CheckEnvCollisions)
assert(len(sols)>0)
# test the solution
with robot:
for sol in sols:
robot.SetActiveDOFValues(sol)
assert(not robot.CheckSelfCollision())
assert(not env.CheckCollision(robot))
sols = manip.FindIKSolutions(posegoal,IkFilterOptions.IgnoreSelfCollisions)
assert(len(sols)>0)
# test the solution
with robot:
# make sure there is at least one self-collision
hasself = False
for sol in sols:
robot.SetActiveDOFValues(sol)
if robot.CheckSelfCollision():
hasself = True
assert(hasself)
b2.Enable(True)
sols = manip.FindIKSolutions(posegoal,IkFilterOptions.CheckEnvCollisions|IkFilterOptions.IgnoreEndEffectorCollisions)
assert(len(sols)>0)
with robot:
for sol in sols:
robot.SetActiveDOFValues(sol)
assert(not robot.CheckSelfCollision())
b.Enable(False)
manip.GetEndEffector().Enable(False)
sols = manip.FindIKSolutions(posegoal,IkFilterOptions.CheckEnvCollisions)
assert(len(sols)>0)
with robot:
for sol in sols:
robot.SetActiveDOFValues(sol)
assert(not robot.CheckSelfCollision())
assert(not env.CheckCollision(robot))
b2.Enable(True)
b.Enable(True)
manip.GetEndEffector().Enable(True)
sols = manip.FindIKSolutions(posegoal,IkFilterOptions.CheckEnvCollisions|IkFilterOptions.IgnoreEndEffectorCollisions)
assert(len(sols)>0)
with robot:
for sol in sols:
robot.SetActiveDOFValues(sol)
assert(not robot.CheckSelfCollision())
def test_ikcollision(self):
self.log.info('test if can solve IK during collisions')
env=self.env
with env:
robot = self.LoadRobot('robots/pr2-beta-static.zae')
target = env.ReadKinBodyURI('data/mug1.kinbody.xml')
env.Add(target,True)
T=target.GetTransform()
T[0:3,3] = [-0.342,0,0.8]
target.SetTransform(T)
floor = RaveCreateKinBody(env,'')
floor.InitFromBoxes(array([[0,0,0,2,2,0.01]]),True)
floor.SetName('floor')
env.Add(floor,True)
assert(env.CheckCollision(robot))
manip=robot.SetActiveManipulator('leftarm')
manip2 = robot.GetManipulator('rightarm')
robot.SetActiveDOFs(manip.GetArmIndices())
assert(not manip.CheckEndEffectorCollision(manip.GetTransform()))
assert(not manip2.CheckEndEffectorCollision(manip2.GetTransform()))
assert(not manip.CheckEndEffectorCollision(manip.GetIkParameterization(IkParameterizationType.Transform6D)))
assert(not manip2.CheckEndEffectorCollision(manip2.GetIkParameterization(IkParameterizationType.Transform6D)))
# with bullet, robot gets into self-collision when first angle reaches 0.5
robot.SetActiveDOFValues([0.678, 0, 1.75604762, -1.74228108, 0, 0, 0])
assert(not robot.CheckSelfCollision())
Tmanip = manip.GetTransform()
robot.SetActiveDOFValues(zeros(robot.GetActiveDOF()))
assert(manip.FindIKSolution(Tmanip,IkFilterOptions.CheckEnvCollisions) is not None)
basemanip = interfaces.BaseManipulation(robot)
out=basemanip.MoveToHandPosition(matrices=[Tmanip],execute=False)
assert(out is not None)
# self colliding
robot.SetActiveDOFValues([ 2.20622614e-01, 0.00000000e+00, 1.75604762e+00, -1.74228108e+00, 0.00000000e+00, -9.56775092e-16, 0.00000000e+00])
assert(robot.CheckSelfCollision())
Tmanip = manip.GetTransform()
robot.SetActiveDOFValues(zeros(robot.GetActiveDOF()))
assert(manip.FindIKSolution(Tmanip,IkFilterOptions.CheckEnvCollisions) is None)
assert(manip.FindIKSolution(Tmanip,IkFilterOptions.CheckEnvCollisions|IkFilterOptions.IgnoreEndEffectorCollisions) is None)
assert(manip.FindIKSolution(Tmanip,IkFilterOptions.CheckEnvCollisions|IkFilterOptions.IgnoreEndEffectorEnvCollisions|IkFilterOptions.IgnoreEndEffectorSelfCollisions) is not None)
assert(not manip.CheckEndEffectorCollision(Tmanip))
box = RaveCreateKinBody(env,'')
box.InitFromBoxes(array([[0,0,0,0.05,0.05,0.2]]),True)
box.SetName('box')
env.Add(box,True)
box.SetTransform(manip.GetTransform())
robot.Grab(box)
robot.SetActiveDOFValues([ 0.5, 0.00000000e+00, 1.57, -1.74228108e+00, 3.23831570e-16, 0.00000000e+00, 0.00000000e+00])
assert(robot.CheckSelfCollision())
Tmanip = manip.GetTransform()
robot.SetActiveDOFValues(zeros(robot.GetActiveDOF()))
assert(not robot.CheckSelfCollision())
assert(manip.FindIKSolution(Tmanip,IkFilterOptions.CheckEnvCollisions|IkFilterOptions.IgnoreEndEffectorEnvCollisions) is None)
assert(manip.FindIKSolution(Tmanip,IkFilterOptions.CheckEnvCollisions|IkFilterOptions.IgnoreEndEffectorSelfCollisions) is not None)
assert(not robot.CheckSelfCollision())
assert(not manip.CheckEndEffectorCollision(Tmanip))
robot.SetActiveDOFValues([ 0.00000000e+00, 0.858, 2.95911693e+00, -0.1, 0.00000000e+00, -3.14018492e-16, 0.00000000e+00])
Tmanip = manip.GetTransform()
assert(manip.FindIKSolution(Tmanip,IkFilterOptions.CheckEnvCollisions|IkFilterOptions.IgnoreEndEffectorCollisions) is not None)
# test if initial colliding attachments are handled correctly
robot.SetActiveDOFValues(zeros(robot.GetActiveDOF()))
T = manip.GetTransform()
T[0,3] += 0.2
target.SetTransform(T)
assert(not robot.CheckSelfCollision())
assert(env.CheckCollision(box,target))
assert(manip.CheckEndEffectorCollision(manip.GetTransform()))
assert(not manip2.CheckEndEffectorCollision(manip2.GetTransform()))
robot.Grab(target)
assert(robot.IsGrabbing(target))
assert(not robot.CheckSelfCollision())
robot.RegrabAll()
assert(not robot.CheckSelfCollision())
robot.Release(target)
assert(not robot.IsGrabbing(target))
box2 = RaveCreateKinBody(env,'')
box2.InitFromBoxes(array([[0,0,0,0.05,0.05,0.2]]),True)
box2.SetName('box2')
env.Add(box2,True)
box2.SetTransform(manip2.GetTransform())
robot.Grab(box2,grablink=manip2.GetEndEffector())
assert(not manip2.CheckEndEffectorCollision(manip2.GetTransform()))
robot.Grab(target)
Tmanip = manip.GetTransform()
assert(not manip.CheckEndEffectorCollision(Tmanip))
robot.SetActiveDOFValues([ 0.00000000e+00, 0.858, 2.95911693e+00, -1.57009246e-16, 0.00000000e+00, -3.14018492e-16, 0.00000000e+00])
assert(not manip.CheckEndEffectorCollision(Tmanip))
def test_checkendeffector(self):
self.log.info('test if can check end effector collisions with ik params')
env=self.env
self.LoadEnv('data/katanatable.env.xml')
robot=env.GetRobots()[0]
ikmodel = databases.inversekinematics.InverseKinematicsModel(robot, iktype=IkParameterization.Type.TranslationDirection5D)
if not ikmodel.load():
ikmodel.autogenerate()
with env:
robot.SetActiveDOFs(ikmodel.manip.GetArmIndices())
robot.SetActiveDOFValues([ 0, 0.89098841, 0.92174268, -1.32022237, 0])
ikparam=ikmodel.manip.GetIkParameterization(IkParameterizationType.TranslationDirection5D)
assert(not env.CheckCollision(robot))
robot.SetActiveDOFValues(zeros(robot.GetActiveDOF()))
assert(not ikmodel.manip.CheckEndEffectorCollision(ikparam))
T = eye(4)
T[2,3] = -0.1
ikparam2 = IkParameterization(ikparam)
ikparam2.MultiplyTransform(T)
assert(ikmodel.manip.FindIKSolution(ikparam2,0) is not None)
assert(ikmodel.manip.FindIKSolution(ikparam2,IkFilterOptions.CheckEnvCollisions) is None)
assert(ikmodel.manip.CheckEndEffectorCollision(ikparam2))
assert(ikmodel.manip.FindIKSolution(ikparam2,IkFilterOptions.CheckEnvCollisions|IkFilterOptions.IgnoreEndEffectorCollisions) is not None)
ikmodel = databases.inversekinematics.InverseKinematicsModel(robot, iktype=IkParameterization.Type.Translation3D)
if not ikmodel.load():
ikmodel.autogenerate()
with env:
robot.SetActiveDOFs(ikmodel.manip.GetArmIndices())
robot.SetActiveDOFValues([ 0, 0.89098841, 0.92174268, -1.32022237, 0])
ikparam=ikmodel.manip.GetIkParameterization(IkParameterizationType.Translation3D)
robot.SetActiveDOFValues(zeros(robot.GetActiveDOF()))
try:
ikmodel.manip.CheckEndEffectorCollision(ikparam)
raise ValueError('expected exception')
except openrave_exception:
pass
T = eye(4)
T[2,3] = -0.1
ikparam2 = IkParameterization(ikparam)
ikparam2.MultiplyTransform(T)
assert(ikmodel.manip.FindIKSolution(ikparam2,0) is not None)
assert(ikmodel.manip.FindIKSolution(ikparam2,IkFilterOptions.CheckEnvCollisions) is None)
assert(ikmodel.manip.FindIKSolution(ikparam2,IkFilterOptions.CheckEnvCollisions|IkFilterOptions.IgnoreEndEffectorCollisions) is not None)
def test_badtrajectory(self):
self.log.info('create a discontinuous trajectory and check if robot throws exception')
env=self.env
robot=self.LoadRobot('robots/mitsubishi-pa10.zae')
with env:
orgvalues = robot.GetActiveDOFValues()
lower,upper = robot.GetDOFLimits()
traj=RaveCreateTrajectory(env,'')
traj.Init(robot.GetActiveConfigurationSpecification())
traj.Insert(0,r_[orgvalues,upper+0.1])
assert(traj.GetNumWaypoints()==2)
try:
ret=planningutils.RetimeActiveDOFTrajectory(traj,robot,False)
assert(ret==PlannerStatus.HasSolution)
self.RunTrajectory(robot,traj)
raise ValueError('controller did not throw the expected joint limit exception!')
except Exception as e:
pass
traj.Init(robot.GetActiveConfigurationSpecification())
traj.Insert(0,r_[lower,upper])
assert(traj.GetNumWaypoints()==2)
try:
ret=planningutils.RetimeActiveDOFTrajectory(traj,robot,False,maxvelmult=10)
assert(ret==PlannerStatus.HasSolution)
self.RunTrajectory(robot,traj)
raise ValueError('controller did not throw the expected velocity limit exception!')
except Exception as e:
pass
def test_bigrange(self):
env=self.env
robot=self.LoadRobot('robots/kuka-kr5-r650.zae')
ikmodel = databases.inversekinematics.InverseKinematicsModel(robot, iktype=IkParameterization.Type.Transform6D)
if not ikmodel.load():
ikmodel.autogenerate()
with env:
j=robot.GetJointFromDOFIndex(ikmodel.manip.GetArmIndices()[-1])
lower,upper = j.GetLimits()
assert( upper-lower > 3*pi )
robot.SetDOFValues(lower+0.1,[j.GetDOFIndex()])
assert(transdist(robot.GetDOFValues([j.GetDOFIndex()]),lower+0.1) <= g_epsilon)
robot.SetDOFValues(ones(len(ikmodel.manip.GetArmIndices())),ikmodel.manip.GetArmIndices(),True)
ikparam = ikmodel.manip.GetIkParameterization(IkParameterization.Type.Transform6D)
sols = ikmodel.manip.FindIKSolutions(ikparam,IkFilterOptions.CheckEnvCollisions)
assert(len(sols)==8)
# add a filter
numrepeats = [0]
indices = []
def customfilter(solution, manip, ikparam):
out = manip.GetIkSolver().SendCommand('GetRobotLinkStateRepeatCount')
if out=='1':
numrepeats[0] += 1
out = manip.GetIkSolver().SendCommand('GetSolutionIndices')
for index in out.split()[1:]:
indices.append(int(index))
return IkReturnAction.Success
handle = ikmodel.manip.GetIkSolver().RegisterCustomFilter(0,customfilter)
sols = ikmodel.manip.FindIKSolutions(ikparam,IkFilterOptions.CheckEnvCollisions)
assert(len(sols)==8)
assert(numrepeats[0]==4)
indices.sort()
assert(indices == [0,3,4,7,0x20000,0x20003,0x20004,0x20007])
handle.Close()
# customfilter shouldn't be executed anymore
sols = ikmodel.manip.FindIKSolutions(ikparam,IkFilterOptions.CheckEnvCollisions)
assert(numrepeats[0]==4)
def test_manipulators(self):
env=self.env
robot=self.LoadRobot('robots/pr2-beta-static.zae')
manip=robot.GetManipulator('leftarm_torso')
links = manip.GetChildLinks()
assert(all([l.GetName().startswith('l_gripper') or l.GetName() == 'l_wrist_roll_link' for l in links]))
ilinks = manip.GetIndependentLinks()
expectednames = set([u'base_footprint', u'base_link', u'base_bellow_link', u'base_laser_link', u'bl_caster_rotation_link', u'bl_caster_l_wheel_link', u'bl_caster_r_wheel_link', u'br_caster_rotation_link', u'br_caster_l_wheel_link', u'br_caster_r_wheel_link', u'fl_caster_rotation_link', u'fl_caster_l_wheel_link', u'fl_caster_r_wheel_link', u'fr_caster_rotation_link', u'fr_caster_l_wheel_link', u'fr_caster_r_wheel_link', u'torso_lift_motor_screw_link'])
curnames = set([l.GetName() for l in ilinks])
assert(expectednames==curnames)
cjoints = manip.GetChildJoints()
assert(len(cjoints)==4)
assert(all([j.GetName().startswith('l_') for j in cjoints]))
cdofs = manip.GetChildDOFIndices()
assert(cdofs == [22,23,24,25])
# test if manipulator can be created
manip = robot.GetManipulator('leftarm')
manipinfo = Robot.ManipulatorInfo()
manipinfo._name = 'testmanip'
manipinfo._sBaseLinkName = manip.GetBase().GetName()
manipinfo._sEffectorLinkName = manip.GetEndEffector().GetName()
manipinfo._tLocalTool = eye(4)
manipinfo._tLocalTool[2,3] = 1.0
manipinfo._vGripperJointNames = ['l_gripper_l_finger_joint']
manipinfo._vdirection = [0,1,0]
manipinfo._vClosingDirection = [1.0]
newmanip = robot.AddManipulator(manipinfo)
assert(newmanip.GetBase().GetName() == manip.GetBase().GetName())
assert(newmanip.GetEndEffector().GetName() == manip.GetEndEffector().GetName())
assert(robot.GetManipulator('testmanip')==newmanip)
assert(transdist(newmanip.GetLocalToolTransform(),manipinfo._tLocalTool) <= g_epsilon)
robot.SetActiveManipulator(newmanip)
ikmodel = databases.inversekinematics.InverseKinematicsModel(robot)
if not ikmodel.load():
ikmodel.autogenerate()
def test_grabdynamics(self):
self.log.info('test if grabbed bodies have the correct velocities')
env=self.env
with env:
robot=self.LoadRobot('robots/pr2-beta-static.zae')
body = env.ReadKinBodyURI('data/mug1.kinbody.xml')
env.Add(body)
manip=robot.SetActiveManipulator('leftarm')
velocities = zeros(robot.GetDOF())
velocities[manip.GetArmIndices()] = ones(len(manip.GetArmIndices()))
robot.SetDOFVelocities(velocities)
Tmanip = manip.GetTransform()
Tbody = array(Tmanip)
Tbody[0,3] += 0.1
body.SetTransform(Tbody)
robot.Grab(body)
diff = Tbody[0:3,3] - Tmanip[0:3,3]
bodyvelocity = body.GetLinkVelocities()[0]
manipvelocity = manip.GetVelocity()
assert(transdist(manipvelocity[0:3] + cross(manipvelocity[3:6],diff),bodyvelocity[0:3]) <= g_epsilon)
assert(transdist(manipvelocity[3:6],bodyvelocity[3:6]) <= g_epsilon)
# change velocity and try again
velocities[manip.GetArmIndices()] = -ones(len(manip.GetArmIndices()))
robot.SetDOFVelocities(velocities)
bodyvelocity = body.GetLinkVelocities()[0]
manipvelocity = manip.GetVelocity()
assert(transdist(manipvelocity[0:3] + cross(manipvelocity[3:6],diff),bodyvelocity[0:3]) <= g_epsilon)
assert(transdist(manipvelocity[3:6],bodyvelocity[3:6]) <= g_epsilon)
# set robot base velocity
robot.SetVelocity([1,2,3],[4,5,6])
bodyvelocity = body.GetLinkVelocities()[0]
manipvelocity = manip.GetVelocity()
assert(transdist(manipvelocity[0:3] + cross(manipvelocity[3:6],diff),bodyvelocity[0:3]) <= g_epsilon)
assert(transdist(manipvelocity[3:6],bodyvelocity[3:6]) <= g_epsilon)
def test_quaternionjacobian(self):
self.log.info('test quaternion jacobians')
env=self.env
with env:
affine = DOFAffine.Transform
self.LoadEnv('robots/pr2-beta-static.zae')
robot=env.GetRobots()[0]
robot.SetActiveDOFs(range(robot.GetDOF()), affine, [0,0,1])
lowerlimit, upperlimit = robot.GetActiveDOFLimits()
deltastep = 0.0001
for itry in range(20):
# set dofs to random values
offset_local = random.rand(3)-0.5
dofvalues = randlimits(numpy.minimum(lowerlimit+5*deltastep,upperlimit), numpy.maximum(upperlimit-5*deltastep,lowerlimit))
robot.SetActiveDOFValues(dofvalues)
for link in robot.GetLinks():
link_trans = link.GetTransform()
offset = dot(link_trans[0:3,0:3], offset_local) + link_trans[0:3,3]
J = robot.CalculateActiveJacobian(link.GetIndex(), offset)
with robot.CreateKinBodyStateSaver():
dofvals = robot.GetActiveDOFValues()
pert_dofvals = array(dofvals)
numerical_jac = zeros((3,robot.GetActiveDOF()))
for idof in range(robot.GetActiveDOF()):
pert_dofvals[idof] = dofvals[idof] + deltastep
robot.SetActiveDOFValues(pert_dofvals,0)
pert_link_trans = link.GetTransform()
pert_offset = dot(pert_link_trans[0:3,0:3],offset_local) + pert_link_trans[0:3,3]
for j in range(3):
numerical_jac[j,idof] = (pert_offset[j]-offset[j])/deltastep
pert_dofvals[idof] = dofvals[idof]
assert(all(abs(numerical_jac - J) <= 5*deltastep))
def test_getlinkjointinfo(self):
env=self.env
with env:
robot=self.LoadRobot('robots/barrettwam.robot.xml')
robot.SetTransform(eye(4))
Trobot = robot.GetTransform()
linkinfos = [link.UpdateAndGetInfo() for link in robot.GetLinks()]
jointinfos = [joint.UpdateAndGetInfo() for joint in robot.GetJoints()]
jointinfos += [joint.UpdateAndGetInfo() for joint in robot.GetPassiveJoints()]
manipinfos = [manip.GetInfo() for manip in robot.GetManipulators()]
# try to re-create the robot
env2=Environment()
robot2=RaveCreateRobot(env2,'')
robot2.Init(linkinfos,jointinfos,manipinfos,[])
robot2.SetName(robot.GetName())
env2.Add(robot2)
robot2.SetTransform(Trobot)
misc.CompareBodies(robot,robot2,computeadjacent=False,comparemanipulators=True,comparesensors=False)
def test_distancechecking(self):
env=self.env
robot = self.LoadRobot('robots/barrettwam.robot.xml')
manip = robot.GetActiveManipulator()
report = CollisionReport()
env.SetCollisionChecker(RaveCreateCollisionChecker(env,'ode'))
distancechecker = RaveCreateCollisionChecker(env,'pqp')
distancechecker.SetCollisionOptions(CollisionOptions.Contacts|CollisionOptions.Distance)
target = env.ReadKinBodyURI('data/mug1.kinbody.xml')
env.Add(target)
T = manip.GetTransform()
target.SetTransform(T)
coltarget = env.ReadKinBodyURI('data/mug1.kinbody.xml')
coltarget.SetName('collision')
env.Add(coltarget)
T = eye(4)
T[0:3,3] = [0.5,0,0.5]
coltarget.SetTransform(T)
#assert(not robot.CheckSelfCollision())
distancechecker.CheckCollision(robot,report=report)
assert(abs(report.minDistance - 0.02014762095143412) <= 1e-7)
robot.CheckSelfCollision(report=report,collisionchecker=distancechecker)
assert(abs(report.minDistance - 0.22146797060110873) <= 1e-7)
robot.Grab(target)
robot.CheckSelfCollision(report=report,collisionchecker=distancechecker)
assert(abs(report.minDistance - 0.0244822362795) <= 1e-7)
robot.SetDOFValues([-4.42034423e-01,2.02136660e+00],[1,3])
distancechecker.CheckCollision(robot,report=report)
assert(abs(report.minDistance - 0.077989158061126093) <= 1e-7)
assert(report.plink1.GetParent() == target)
assert(report.plink2.GetParent() == coltarget)
def test_cloneselfcollision(self):
# check to make sure cloned robots respect self-collision properties.
env=self.env
robot=self.LoadRobot('robots/barrettwam.robot.xml')
robot.SetDOFValues([3],[3])
cloned_robot = RaveCreateRobot(env, robot.GetXMLId())
cloned_robot.Clone(robot, 0)
env.Add(cloned_robot, True)
assert robot.CheckSelfCollision() # succeeds
assert cloned_robot.CheckSelfCollision() # fails
#generate_classes(RunRobot, globals(), [('ode','ode'),('bullet','bullet')])
class test_ode(RunRobot):
def __init__(self):
RunRobot.__init__(self, 'ode')
# class test_bullet(RunRobot):
# def __init__(self):
# RunRobot.__init__(self, 'bullet')
#
```
#### File: holy_controllers/scripts/holy_joint_state_publisher.py
```python
import rospy
from sensor_msgs.msg import JointState as JointStateMoveIt
from dynamixel_msgs.msg import JointState as JointStateDynamixel
class JointStatePublisher():
def __init__(self):
rospy.init_node('holy_joint_state_publisher')
joint_names = ['R_SAA', 'R_SFE', 'R_EB', 'R_HAA', 'R_HR', 'R_HFE', 'R_KFE', 'R_AFE', 'R_AR', 'L_SAA', 'L_SFE', 'L_EB', 'L_HAA', 'L_HR', 'L_HFE', 'L_KFE', 'L_AFE', 'L_AR']
self.joints = {name: {'pos': 0, 'vel': 0, 'load': 0} for name in joint_names}
rate = 20 # 20Hz
r = rospy.Rate(rate)
# Start controller state subscribers
for joint in self.joints:
rospy.Subscriber('/'+joint+'_controller/state', JointStateDynamixel, self.controller_state_handler)
# Start publisher
self.joint_states_pub = rospy.Publisher('/joint_states', JointStateMoveIt)
rospy.loginfo("Publishing joint_state at " + str(rate) + "Hz")
while not rospy.is_shutdown():
self.publish_joint_states()
r.sleep()
def controller_state_handler(self, msg):
self.joints[msg.name]['pos']=msg.current_pos
self.joints[msg.name]['vel']=msg.velocity
self.joints[msg.name]['load']=msg.load
def publish_joint_states(self):
# Construct message & publish joint states
msg = JointStateMoveIt()
msg.name = []
msg.position = []
msg.velocity = []
msg.effort = []
for joint in self.joints:
msg.name.append(joint)
msg.position.append(self.joints[joint]['pos'])
msg.velocity.append(self.joints[joint]['vel'])
msg.effort.append(self.joints[joint]['load'])
msg.header.stamp = rospy.Time.now()
msg.header.frame_id = 'base_link'
self.joint_states_pub.publish(msg)
if __name__ == '__main__':
try:
s = JointStatePublisher()
rospy.spin()
except rospy.ROSInterruptException:
pass
```
|
{
"source": "jdsims/Refresh_Tool",
"score": 2
}
|
#### File: jdsims/Refresh_Tool/controller.py
```python
from subprocess import call
from shutil import rmtree, copytree
from os import mkdir, listdir
from simple_salesforce import Salesforce
from datetime import datetime
import xml.etree.ElementTree as ET
import string
import time
import getpass
import sys
# "login_information" Holds the login information for use during the
# Tooling API calls.
login_information = {}
# "selected_refresh_scripts" Holds the position of the selected scripts
# in get_refresh_options()
selected_refresh_scripts = []
# Declaring namespace to make ElementTree lines more readable
ET.register_namespace('', 'http://soap.sforce.com/2006/04/metadata')
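# Registering the default namespace keeps ElementTree from emitting "ns0:"
# prefixes when the edited metadata XML is written back out, so a workflow rule
# serializes as <rules> rather than <ns0:rules> (illustrative example).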
class RefreshMethods():
"""The methods used to manipulate, download, and upload the refresh
data.
"""
# ElementTree namespace string; keeps the later find()/findall() calls readable.
nsPrePend = '{http://soap.sforce.com/2006/04/metadata}'
# Initialize constants for folder references
RETRIEVED = 'retrieved'
TOGGLE = 'toggle'
SETTING = 'setting'
COPY_TOGGLE = 'copyToggle'
WORKFLOW_PATH = 'toggle/workflows/'
OBJECT_PATH = 'toggle/objects/'
TRIGGER_PATH = 'toggle/triggers/'
FLOW_PATH = 'toggle/flowDefinitions/'
RETRIEVED_OBJECT_PATH = 'retrieved/objects/'
RETRIEVED_WORKFLOW_PATH = 'retrieved/workflows/'
RETRIEVED_EMAIL_PATH = 'retrieved/email/'
# List out the triggers that need to be disabled
triggers_to_toggle = (
'contactTrigger',
'accountTrigger'
)
# List out the objects that need their workflows toggled off
object_workflows_to_toggle = (
'Account',
'Contact'
)
@staticmethod
def on_complete_refresh(org_name, server):
RefreshMethods().save_connection_information(org_name, server)
refresh_options = RefreshOptions()
refresh_options.refresh_main_refresh_steps()
@staticmethod
def run_specified_steps(org_name, server, steps):
refresh_options = RefreshOptions()
RefreshMethods().save_connection_information(org_name, server)
method_dict = {}
for func in dir(RefreshOptions):
if func.startswith('refresh_'):
method_dict[string.capwords(func.replace('refresh_','').replace('_', ' '))] = func
for step in steps:
method = getattr(refresh_options, method_dict[step])
method()
@staticmethod
def save_connection_information(org_name, server):
admin_username = '<EMAIL>.' + org_name
admin_password = '<PASSWORD>'
server = server
RefreshMethods.save_login_info(admin_username, admin_password, "", 'https://' + server + '.salesforce.com')
@staticmethod
def save_login_info(username, password, security_token, server):
"""Save the user login information in the build.properties file
for the Salesforce Migration Tool and add it to the
login_information dictionary.
"""
global login_information
login_information['sf.username'] = username
login_information['sf.passwordWithToken'] = password + security_token
login_information['sf.password'] = password
login_information['sf.securityToken'] = security_token
login_information['sf.serverurl'] = server
if not server:
login_information['sf.serverurl'] = 'https://test.salesforce.com'
else:
login_information['sf.serverurl'] = server
updated_lines = []
with open('build.properties') as file:
for line in file:
splitter = line.split(' ')
if line.strip().startswith('sf.username'):
line = splitter[0] + ' = ' + username + '\n'
elif line.strip().startswith('sf.passwordWithToken'):
line = splitter[0] + ' = ' + password + security_token + '\n'
elif line.strip().startswith('sf.password'):
line = splitter[0] + ' = ' + password + '\n'
elif line.strip().startswith('sf.securityToken'):
line = splitter[0] + ' = ' + security_token + '\n'
elif line.strip().startswith('sf.serverurl'):
if not server:
line = splitter[0] + ' = https://test.salesforce.com/'
else:
line = splitter[0] + ' = ' + server
updated_lines.append(line)
with open('build.properties', 'w') as file:
for line in updated_lines:
file.write(line)
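# The build.properties file is assumed to hold "key = value" lines such as
# "sf.username = user"; the loop above rewrites those lines in place with the
# credentials supplied by the caller.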
@staticmethod
def remove_login_information():
"""Remove the saved user information from the build.properties
file.
"""
updated_lines = []
with open('build.properties') as file:
for line in file:
splitter = line.split(' ')
if line.strip().startswith('sf.username'):
line = splitter[0] + ' = username\n'
elif line.strip().startswith('sf.passwordWithToken'):
line = splitter[0] + ' = passwordSecurityToken\n'
elif line.strip().startswith('sf.password'):
line = splitter[0] + ' = password\n'
elif line.strip().startswith('sf.securityToken'):
line = splitter[0] + ' = securityToken\n'
elif line.strip().startswith('sf.serverurl'):
line = splitter[0] + ' = url'
updated_lines.append(line)
with open('build.properties', 'w') as file:
for line in updated_lines:
file.write(line)
@staticmethod
def connect_to_simplesalesforce():
"""Initialize the SimpleSalesforce connection. Returns
the active connection.
"""
global login_information
print('Connecting with SimpleSalesforce...')
salesf = Salesforce(
username=login_information['sf.username'],
password=login_information['sf.password'],
security_token=login_information['sf.securityToken'],
custom_url=login_information['sf.serverurl']
)
return salesf
@staticmethod
def end_of_script_reminders():
"""Return a string containing the end of refresh reminders.
"""
reminder_string = 'Please finish the remaining manual steps:'
reminder_string += '\nUpdate the stored Secret and Key for connected apps.'
return reminder_string
@staticmethod
def before_execution_reminders():
"""Return a string containing the before execution reminders.
"""
reminder_string = 'Prior to refreshing:'
reminder_string += '\nEnsure email deliverability is set to "System Only".'
return reminder_string
@staticmethod
def url_example_text():
"""Return a string containing the valid url examples.
"""
url_example = 'Use a full url like:'
url_example += '\nhttps://example.my.salesforce.com'
url_example += '\nhttps://cs91.salesforce.com'
return url_example
@staticmethod
def get_refresh_options_display():
"""Return the list of method display names in class RefreshOptions that
are prepended with "refresh_".
"""
method_list = [string.capwords(func.replace('refresh_','').replace('_', ' ')) for func in dir(RefreshOptions) if func.startswith('refresh_')]
method_list.sort()
return method_list
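# For example, the method "refresh_fix_admin_emails" defined below is shown to
# the user as "Fix Admin Emails".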
@staticmethod
def get_refresh_options():
"""Return the list of method display names in class RefreshOptions that
are prepended with "refresh_".
"""
method_list = [func for func in dir(RefreshOptions) if func.startswith('refresh_')]
method_list.sort()
return method_list
@staticmethod
def refresh_the_environment():
"""Begin the refresh of the logged in environment.
"""
global selected_refresh_scripts
refresh = RefreshOptions()
additional_refresh_methods = RefreshMethods().get_refresh_options()
for pos in selected_refresh_scripts:
method = getattr(refresh, additional_refresh_methods[pos])
method()
@staticmethod
def validate_credentials(username, password, url):
"""Validate the username is not blank, password is not blank,
and url is a valid url. Returns an error message that is
blank if no errors are found.
"""
error_message = ''
if not username:
error_message += 'A username is required.\n'
if not password:
error_message += 'A password is required.\n'
if not url:
error_message += 'A login URL is required.\n'
if not url.startswith('https://'):
error_message += 'The login URL must start with "https://".\n'
return error_message
def call_shell(self, args):
"""Call the shell with the specified arguement.
"""
print('Calling shell with ' + args)
return call(args, shell=True)
def clean_directory(self, directory):
"""Delete an existing directory and remake it as an empty
directory.
"""
print('Cleaning ' + directory + ' directory...')
rmtree(directory)
mkdir(directory)
print(directory + ' directory deleted.')
def replace_values(self, directory, tag_name, original, replacement):
"""Replace the "original" value with the "replacement" value in
the specified "directory" between all instances of the
"tag_name". "tag_name" does not include brackets.
"""
print('Looking for replacements...')
for fileName in listdir(directory):
updatedLines = []
with open(directory + fileName) as file:
for line in file:
if line.strip().startswith('<' + tag_name + '>'):
print('Found a matching <' + tag_name + '> in ' + fileName)
line = line.replace(original, replacement)
updatedLines.append(line)
with open(directory + fileName, 'w') as file:
for line in updatedLines:
file.write(line)
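# Hypothetical usage (paths and values are illustrative only):
# RefreshMethods().replace_values('retrieved/workflows/', 'url',
# 'https://prod.example.com', 'https://test.example.com')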
def disable_triggers(self):
"""Disable all triggers in the triggers_to_toggle list.
"""
print('Disabling triggers...')
for trigger in self.triggers_to_toggle:
tree = ET.parse(self.TRIGGER_PATH + trigger + '.trigger-meta.xml')
root = tree.getroot()
for status in root.findall(self.nsPrePend + 'status'):
status.text = 'Inactive'
tree.write(self.TRIGGER_PATH + trigger + '.trigger-meta.xml',
encoding='UTF-8',
xml_declaration = True)
def disable_active_workflows(self):
"""Disable all workflows on the objects listed in the
object_workflows_to_toggle list.
"""
print('Disabling workflows...')
for object in self.object_workflows_to_toggle:
tree = ET.parse(self.WORKFLOW_PATH + object + '.workflow')
root = tree.getroot()
for rule in root.findall(self.nsPrePend + 'rules'):
node = rule.find(self.nsPrePend + 'active')
if node.text == 'true':
node.text = 'false'
tree.write(self.WORKFLOW_PATH + object + '.workflow',
encoding='UTF-8',
xml_declaration = True)
def copy_toggle_directory(self):
"""Copy the TOGGLE directory into the COPY_TOGGLE directory.
"""
rmtree(self.COPY_TOGGLE)
copytree(self.TOGGLE, self.COPY_TOGGLE)
def queue_apex_batch(self, class_name):
"""Queue a queueable apex batch for execution.
"""
print('Queueing apex batch ' + class_name + '...')
salesf = self.connect_to_simplesalesforce()
tooling_access = 'executeAnonymous'
script = 'ID jobID = System.enqueueJob(new ' + class_name + '());'
parameters = {'anonymousBody': script}
salesf.tooling(path=tooling_access, params=parameters)
def update_formula_field_on_object(self, object_api_name, field_api_name,
original_value, new_value):
"""Update the formula field "field_api_name" on the object
"object_api_name". Replace the "original_value" with the
"new_value".
"""
print('Updating custom links on Contact...')
tree = ET.parse(self.RETRIEVED_OBJECT_PATH + object_api_name +'.object')
root = tree.getroot()
for field in root.findall(self.nsPrePend + 'fields'):
node = field.find(self.nsPrePend + 'fullName')
link = field.find(self.nsPrePend + 'formula')
if node.text == field_api_name:
link.text = link.text.replace(original_value, new_value)
tree.write(self.RETRIEVED_OBJECT_PATH + object_api_name + '.object',
encoding = "UTF-8",
xml_declaration = True)
def disable_additional_send_to_emails_in_alerts(self):
"""Disable all emails listed as "Additional Send To Emails"
on email alerts by appending ".off" to them.
"""
print('Disabling additional send to emails...')
for file_name in listdir(self.RETRIEVED_WORKFLOW_PATH):
tree = ET.parse(self.RETRIEVED_WORKFLOW_PATH + file_name)
root = tree.getroot()
for node in root.findall(self.nsPrePend + 'alerts'):
for email in node.findall(self.nsPrePend + 'ccEmails'):
email.text = email.text + '.off'
tree.write(self.RETRIEVED_WORKFLOW_PATH + file_name,
encoding = "UTF-8",
xml_declaration = True)
def update_hardcoded_email_template(self, email_api_name, original_value,
replacement_value, line_identifier):
"""Update the URL that is hard coded in an email template.
"""
print('Updating hardcoded email templates...')
updated_lines = []
with open(self.RETRIEVED_EMAIL_PATH + 'VF/' + email_api_name + '.email') as file:
for line in file:
if line_identifier in line:
line = line.replace(original_value, replacement_value)
updated_lines.append(line)
with open(self.RETRIEVED_EMAIL_PATH + 'VF/' + email_api_name + '.email', 'w') as file:
for line in updated_lines:
file.write(line)
# Begin the update script steps ###############################################
class RefreshOptions():
"""The different refresh options available.
"""
methods = RefreshMethods()
def refresh_main_refresh_steps(self):
"""Execute the main refresh steps.
"""
self.methods.clean_directory(self.methods.TOGGLE)
self.methods.call_shell('ant pullToggle')
self.methods.copy_toggle_directory()
self.methods.disable_triggers()
self.methods.disable_active_workflows()
self.methods.call_shell('ant deployToggle')
self.methods.clean_directory(self.methods.RETRIEVED)
self.methods.call_shell('ant pull')
self.methods.update_formula_field_on_object('Contact', 'Custom_Formula__c', '||', '&&')
self.methods.disable_additional_send_to_emails_in_alerts()
self.methods.update_hardcoded_email_template('Custom_VF_Email', 'login', 'test', 'https://')
self.methods.call_shell('ant deploy')
self.methods.call_shell('ant deployCopyToggle')
def refresh_fix_admin_emails(self):
"""Fix emails on admin users.
"""
self.methods.call_shell('ant deployFixAdminEmails')
self.methods.queue_apex_batch('RefreshApex1')
def refresh_create_test_records(self):
"""Create test data records.
"""
self.methods.call_shell('ant deployCreateTestRecords')
self.methods.queue_apex_batch('RefreshApex2')
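# Illustrative usage sketch (an assumption, not part of the original script): the refresh
# options above would typically be driven like this, provided the referenced ant targets
# and apex classes exist in the target org.
#
#     options = RefreshOptions()
#     options.refresh_main_refresh_steps()
#     options.refresh_fix_admin_emails()
#     options.refresh_create_test_records()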
# End the update script steps #################################################
```
|
{
"source": "jdsjdk/PythonSportBoxScoresNCAA",
"score": 4
}
|
#### File: jdsjdk/PythonSportBoxScoresNCAA/sportsref_boxscores.py
```python
from sportsreference.ncaab.boxscore import Boxscores
from datetime import datetime as dt
from pathlib import Path as fp
from datetime import date as d
import copy
import csv
class SportsRefGetBoxScores:
def __init__(self):
return None
def main(self):
def save_file_format():
# A simple function that builds the correct file name format
t = d.today()
mm = str(t.month)
dd = str(t.day)
yy = str(t.year)
tmdy = "_" + mm + "_" + dd + "_" + yy
fname = "sports_reference_ncaa_boxscore" + tmdy + ".csv"
return fname
def output_csvfile(fname, fheaders=[], boxscore_dict={}):
# Get the file path, write the folder and the file name, output the CSV file
p = fp("output/")
p.mkdir(parents=True, exist_ok=True)
fpath = p / fname
# Output the CSV file
with fpath.open("w", encoding="utf-8") as f:
fwriter = csv.DictWriter(
f, dialect="excel", fieldnames=fheaders, delimiter='\t')
# Write the headers
fwriter.writeheader()
# Write the elements out to a file
fwriter.writerow(boxscore_dict)
return None
box_games_dict = {}
# get the boxscore and the boxscore range
ncaa_boxscore_rge = Boxscores(dt(2020, 11, 1), dt.today())
# get the file name
file_name = save_file_format()
# Create a list of headers for the csv file
file_keys_list = list(ncaa_boxscore_rge.games.keys())
# Do a deep copy of the boxscore games
box_games_dict = copy.deepcopy(ncaa_boxscore_rge.games)
output_csvfile(file_name, file_keys_list, box_games_dict)
print("Success!")
return None
sprt_ref_box = SportsRefGetBoxScores()
sprt_ref_box.main()
```
|
{
"source": "jdsjdk/PythonSportWebScrappingNCAA_2",
"score": 3
}
|
#### File: jdsjdk/PythonSportWebScrappingNCAA_2/sports_wscraper.py
```python
import sports_fout as spf, requests as req
from collections import defaultdict as dd
from bs4 import BeautifulSoup as bsoup
class SportsWebScraper:
def __init__(self):
return None
def main(self):
# Get the main URL
rurl = "https://www.teamrankings.com"
burl = "https://www.teamrankings.com/ncb/team-stats/"
wsrc = req.get(burl)
# Setup xpaths
bsoupy = bsoup(wsrc.content, 'lxml')
# setup all my functions
def get_team_names(soupy, root):
# Find all the team names, and save them to a new list.
teams = []
new_url = ""
expanded = soupy.find_all("ul", {"class": "expand-content hidden"})
for e in expanded:
tnames = e.find("a", href=True).get_text()
if tnames == "Points per Game":
turl = e.find("a", href=True)
new_url = root + turl.get('href')
break
nreq = req.get(new_url)
nnsoupy = bsoup(nreq.content, 'lxml')
nno_wrap = nnsoupy.find_all("td", {"class", "text-left nowrap"})
for n in nno_wrap:
tname = n.find("a", href=True).get_text()
teams.append(tname)
return teams
def get_key_names(soupy):
# Get all of the Titles from all the stats
key_names = []
choosey = soupy.find("ul", {"class", "chooser-list"})
expanded = choosey.find_all("li")
for e in expanded:
ehyper = e.find("a", href=True)
if ehyper.get('href') != '#':
tnames = ehyper.get_text()
key_names.append(tnames)
return key_names
def get_stat_urls(soupy, root):
# Find all of the stat URLs
surls = []
choosey = soupy.find("ul", {"class", "chooser-list"})
expanded = choosey.find_all("li")
for e in expanded:
ehyper = e.find("a", href=True)
if ehyper.get('href') != '#':
url = root + ehyper.get('href')
surls.append(url)
return surls
def req_get_wrapper(l, urlsl = []):
# get the request from the url list, and then save the first column of data
surl = urlsl[l]
rsrc = req.get(surl)
# get a new request from a new url
ssoup = bsoup(rsrc.content, 'lxml')
nwrapy = ssoup.find_all("td", {"class", "text-left nowrap"})
return nwrapy
key_list, urlstat_list, wrappers, sdict_list, team_list = [], [], [], [], []
key1 = "<NAME>"
team_list = get_team_names(bsoupy, rurl)
key_list = get_key_names(bsoupy)
urlstat_list = get_stat_urls(bsoupy, rurl)
data_dict = dd(dict)
for t in team_list:
data_dict[t][key1] = t
wrappers = []
for idx in range(0, len(urlstat_list)):
wrappers.append(req_get_wrapper(idx, urlstat_list))
for wr in wrappers[idx]:
team_name = wr.find("a", href=True).get_text()
data = wr.find_next("td").contents[0]
colname = key_list[idx]
data_dict[team_name][colname] = data
for key, val in data_dict.items():
sdict_list.append(val)
fname = spf.sfout.save_file_format()
spf.sfout.output_csvfile(fname, key1, key_list, sdict_list)
return None
s_scrap = SportsWebScraper()
s_scrap.main()
```
|
{
"source": "Jdsleppy/broker-parse",
"score": 3
}
|
#### File: broker-parse/brokerparse/fidelity.py
```python
import csv
import itertools
from typing import Dict, List, Union
from brokerparse.common import Holding
def parse_csv(csv_content: str) -> List[Holding]:
csv_lines = csv_content.splitlines()
# take consecutive lines, ignore disclaimer and metadata at bottom of file
holdings_lines = itertools.takewhile(lambda line: line, csv_lines)
# if the CSV is read with the wrong encoding, this byte-order mark character
# leads the first line
stripped_lines = (line.lstrip("\ufeff") for line in holdings_lines)
reader = csv.DictReader(stripped_lines)
return [_parse_reader_row(row) for row in reader]
def _parse_reader_row(row: Dict[str, Union[str, float]]) -> Holding:
current_value = float(row["Current Value"].lstrip("$").replace(",", ""))
# your "core position" is decorated with asterisks
symbol = row["Symbol"].rstrip("*")
return Holding(
account=row["Account Name/Number"],
symbol=symbol,
verbose_name=row["Description"],
dollar_value=current_value,
)
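# Minimal usage sketch (assumption: "Portfolio_Positions.csv" is a hypothetical Fidelity
# positions export; the file name is not referenced anywhere in this module):
#
#     with open("Portfolio_Positions.csv") as f:
#         holdings = parse_csv(f.read())
#     total = sum(h.dollar_value for h in holdings)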
```
|
{
"source": "Jdsleppy/django_bread",
"score": 3
}
|
#### File: django_bread/tests/test_edit.py
```python
from django import forms
from django.urls import reverse
from bread.bread import Bread, EditView
from .base import BreadTestCase
from .models import BreadTestModel
class BreadEditTest(BreadTestCase):
def setUp(self):
super(BreadEditTest, self).setUp()
self.set_urls(self.bread)
def test_edit_item(self):
item = self.model_factory()
url = reverse(self.bread.get_url_name("edit"), kwargs={"pk": item.pk})
request = self.request_factory.post(
url, data={"name": "<NAME>", "age": "19"}
)
request.user = self.user
self.give_permission("change")
view = self.bread.get_edit_view()
rsp = view(request, pk=item.pk)
self.assertEqual(302, rsp.status_code)
self.assertEqual(reverse(self.bread.get_url_name("browse")), rsp["Location"])
item = self.model.objects.get(pk=item.pk)
self.assertEqual("<NAME>", item.name)
def test_fail_validation(self):
item = self.model_factory()
url = reverse(self.bread.get_url_name("edit"), kwargs={"pk": item.pk})
request = self.request_factory.post(
url, data={"name": "this name is too much long yeah", "age": "19"}
)
request.user = self.user
self.give_permission("change")
view = self.bread.get_edit_view()
rsp = view(request, pk=item.pk)
self.assertEqual(400, rsp.status_code)
self.assertTrue(rsp.context_data["bread_test_class"])
context = rsp.context_data
form = context["form"]
errors = form.errors
self.assertIn("name", errors)
def test_get(self):
# Get should give you a form with the item filled in
item = self.model_factory()
url = reverse(self.bread.get_url_name("edit"), kwargs={"pk": item.pk})
request = self.request_factory.get(url)
request.user = self.user
self.give_permission("change")
view = self.bread.get_edit_view()
rsp = view(request, pk=item.pk)
self.assertEqual(200, rsp.status_code)
form = rsp.context_data["form"]
self.assertFalse(form.is_bound)
self.assertEqual(item.pk, form.initial["id"])
self.assertEqual(item.name, form.initial["name"])
rsp.render()
body = rsp.content.decode("utf-8")
self.assertIn('method="POST"', body)
def test_setting_form_class(self):
class DummyForm(forms.Form):
pass
glob = {}
class TestView(EditView):
form_class = DummyForm
# To get hold of a reference to the actual view object created by
# bread, use a fake dispatch method that saves 'self' into a
# dictionary we can access in the test.
def dispatch(self, *args, **kwargs):
glob["view_object"] = self
class BreadTest(Bread):
model = BreadTestModel
edit_view = TestView
bread = BreadTest()
view_function = bread.get_edit_view()
# Call the view function to invoke dispatch so we can get to the view itself
view_function(None, None, None)
self.assertEqual(DummyForm, glob["view_object"].form_class)
```
|
{
"source": "Jdsleppy/django-security",
"score": 2
}
|
#### File: django-security/security/views.py
```python
import json
from django.http import HttpResponseForbidden, HttpResponse
from django.views.decorators.csrf import csrf_exempt
import logging
log = logging.getLogger(__name__)
def require_ajax(view):
"""
A view decorator which ensures that the request being processed
by view is an AJAX request. We return a 403 error if the request
is not an AJAX request.
"""
def check_ajax(request, *args, **kwargs):
if request.is_ajax():
return view(request, *args, **kwargs)
else:
return HttpResponseForbidden()
return check_ajax
@csrf_exempt
def csp_report(request, csp_save=False, csp_log=True):
"""
.. _csp_report:
Collect Content Security Policy reports from browsers. This view has
two optional keyword arguments:
``csp_save`` if True, reports will be saved as ``CspReport`` objects
in database; this table is registered with Django
Admin, so they can be later viewed in admin console.
``csp_log`` if True, reports will be logged through Django logging
facility under ``security`` class
By default only logging is enabled. To collect reports, this view needs to
be added to project's urls.py. Examples:
Default mode, only logger enable, no database logging:
``url(r'^csp-report/$', security.views.csp_report),``
Logger and database enabled:
``url(r'^csp-report/$', security.views.csp_report,
kwargs={'csp_save':True,'csp_log':True}),``
"""
# http://www.w3.org/TR/CSP/#sample-violation-report
if not request.method == 'POST':
log.debug('Unexpected CSP report method %s', request.method)
return HttpResponseForbidden()
if (
'CONTENT_TYPE' not in request.META or
request.META['CONTENT_TYPE'] != 'application/json'
):
log.debug('Missing CSP report Content-Type %s', request.META)
return HttpResponseForbidden()
try:
csp_dict = json.loads(request.body)
except ValueError:
log.debug('Cannot JSON decode CSP report %s', request.body)
return HttpResponseForbidden()
if 'csp-report' not in csp_dict:
log.debug('Invalid CSP report structure %s', csp_dict)
return HttpResponseForbidden()
report = csp_dict['csp-report']
reporting_ip = request.META['REMOTE_ADDR']
reporting_ua = request.META['HTTP_USER_AGENT']
# log message about received CSP violation to Django log
if csp_log:
log.warn(
'Content Security Policy violation: '
'%s, reporting IP %s, user agent %s',
report, reporting_ip, reporting_ua
)
# save received CSP violation to database
if csp_save:
from security.models import CspReport
csp_report = CspReport(
document_uri=report.get('document-uri'),
referrer=report.get('referrer'),
blocked_uri=report.get('blocked-uri'),
violated_directive=report.get('violated-directive'),
original_policy=report.get('original-policy'),
sender_ip=reporting_ip,
user_agent=reporting_ua,
)
csp_report.save()
# return 204 No Content to the client
# per http://www.w3.org/TR/CSP/#report-uri
# "Note: The user agent ignores the fetched resource"
resp = HttpResponse()
resp.status_code = 204
return resp
```
|
{
"source": "Jdsleppy/mockextras",
"score": 3
}
|
#### File: mockextras/mockextras/_matchers.py
```python
__all__ = ['Any', 'Contains', 'AnyOf']
class Any(object):
"""Matchers act as wildcards when defining a stub or when asserting call arguments.
The Any matcher will match any object.
>>> whatever = Any()
>>> assert whatever == 'hello'
>>> assert whatever == 100
>>> assert whatever == range(10)
You can optionally specify a type so that Any only matches objects of that type.
>>> anystring = Any(basestring)
>>> assert anystring == 'hello'
>>> assert anystring == 'monkey'
>>> assert anystring == u'bonjour'
>>> assert anystring != ['hello', 'world']
Test additional predicates as needed:
>>> loud_short_string = (
... Any(str)
... .such_that(lambda s: len(s) < 6)
... .such_that(lambda s: s.upper() == s)
... )
>>> assert loud_short_string == "HI"
>>> assert loud_short_string != "TOO LONG"
>>> assert loud_short_string != "quiet"
Any can be used when specifying stubs:
>>> try:
... from unittest.mock import Mock, call
... except ImportError:
... from mock import Mock, call
>>>
>>> from mockextras import stub
>>> mock = Mock()
>>> mock.side_effect = stub((call("hello", "world"), 100),
... (call("bye bye", Any()), 200))
>>> mock("bye bye", "world")
200
>>> mock("bye bye", "Fred")
200
>>> mock("bye bye", range(100))
200
>>> mock("bye bye", { 'a' : 1000, 'b' : 2000})
200
or when asserting call arguments:
>>> try:
... from unittest.mock import Mock
... except ImportError:
... from mock import Mock
>>>
>>> mock = Mock()
>>> mock("bye bye", "world")
<Mock name='mock()' id='...'>
>>> mock.assert_called_once_with("bye bye", Any())
>>> mock("bye bye", "Fred")
<Mock name='mock()' id='...'>
>>> assert mock.call_args_list == [call("bye bye", "world"),
... call("bye bye", Any())]
"""
def __init__(self, cls=object, predicates=None):
self._cls = cls
self._predicates = predicates or []
def __eq__(self, other):
return (
isinstance(other, self._cls) and
all(predicate(other) for predicate in self._predicates)
)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
base = 'Any(%s)' % ('' if self._cls is object else self._cls)
such_thats = (
'.' +
'.'.join('such_that(%s)' % getattr(p, "__name__", p) for p in self._predicates)
) if self._predicates else ''
return base + such_thats
def such_that(self, predicate):
return Any(cls=self._cls, predicates=self._predicates + [predicate])
class Contains(object):
"""Matchers act as wildcards when defining a stub or when asserting call arguments.
The Contains matcher will match objects that contain the given value or substring.
>>> contains_five = Contains(5)
>>> assert contains_five == range(10)
>>> assert contains_five != range(4)
>>> contains_ello = Contains('ello')
>>> assert contains_ello == "hello"
>>> assert contains_ello != "bye bye"
Contains can be used when specifying stubs:
>>> try:
... from unittest.mock Mock, call
... except ImportError:
... from mock import Mock, call
>>>
>>> from mockextras import stub
>>> mock = Mock()
>>> mock.side_effect = stub((call("hello", "world"), 100),
... (call("bye bye", Contains('monkey')), 200))
>>> mock("bye bye", "uncle monkey")
200
or when asserting call arguments:
>>> try:
... from unittest.mock Mock
... except ImportError:
... from mock import Mock
>>>
>>> mock = Mock()
>>> mock("bye bye", "world")
<Mock name='mock()' id='...'>
>>> mock.assert_called_once_with("bye bye", Contains('or'))
>>> mock("bye bye", "Fred")
<Mock name='mock()' id='...'>
>>> assert mock.call_args_list == [call("bye bye", "world"),
... call("bye bye", Contains('red'))]
"""
def __init__(self, value):
self._value = value
def __eq__(self, other):
return self._value in other
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return 'Contains(%r)' % self._value
class AnyOf(object):
"""Matchers act as wildcards when defining a stub or when asserting call arguments.
The AnyOf matcher will match any object that is equal to one of the given values.
>>> is_a_small_prime = AnyOf(2,3,5,7,11,13)
>>> assert is_a_small_prime == 3
>>> assert is_a_small_prime != 4
AnyOf can be used when specifying stubs:
>>> try:
... from unittest.mock Mock, call
... except ImportError:
... from mock import Mock, call
>>>
>>> from mockextras import stub
>>> mock = Mock()
>>> mock.side_effect = stub((call("hello"), 100),
... (call(AnyOf('monkey', 'donkey', 'badger')), 200))
>>> mock("monkey")
200
or when asserting call arguments:
>>> try:
... from unittest.mock Mock
... except ImportError:
... from mock import Mock
>>>
>>> mock = Mock()
>>> mock("donkey")
<Mock name='mock()' id='...'>
>>> mock.assert_called_once_with(AnyOf('monkey', 'donkey', 'badger'))
>>> mock("monkey")
<Mock name='mock()' id='...'>
>>> assert mock.call_args_list == [call("donkey"),
... call(AnyOf('monkey', 'donkey', 'badger'))]
"""
def __init__(self, *args):
self._set = set(args)
def __eq__(self, other):
return other in self._set
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return 'AnyOf(%s)' % ', '.join(map(repr, self._set))
```
#### File: mockextras/mockextras/_stub.py
```python
from ._matchers import __all__ as matchers_all
try:
from unittest.mock import _is_exception, call
except ImportError:
try:
from mock.mock import call, _is_exception
except ImportError:
from mock import call, _is_exception
from os import linesep
__all__ = ['seq', 'stub', 'UnexpectedStubCall']
class _Sequence(object):
def __init__(self, iterable):
self._iterator = iter(iterable)
def __call__(self):
retval = next(self._iterator)
if _is_exception(retval):
raise retval
return retval
def seq(iterable):
"""Used to define a sequence of return values for a stub based on an iterable, such as a container:
>>> try:
... from unittest.mock import Mock, call
... except ImportError:
... from mock import Mock, call
>>>
>>> l = range(1, 5)
>>> fn = stub((call(), seq(l)))
>>> fn()
1
>>> fn()
2
>>> fn()
3
or a generator:
>>> i = xrange(1, 5)
>>> fn = stub((call(), seq(i)))
>>> fn()
1
>>> fn()
2
>>> fn()
3
"""
return _Sequence(iterable)
class UnexpectedStubCall(Exception):
pass
def _one_per_line_indented(results, indent=4):
return ("\n" + " " * indent).join(str(k) for k, _ in results)
class _Stub(object):
def __init__(self, *args):
self._results = tuple((conf[0], seq(conf[1:])) if len(conf) > 2 else conf for conf in args)
def _lookup(self, k):
for key, value in self._results:
# Some classes don't play by the rules so try the equals both ways around
if key == k or k == key:
return value
if self._results:
raise UnexpectedStubCall("""Unexpected stub call:
%s
The following calls are configured:
%s
""" % (k, _one_per_line_indented(self._results)))
else:
raise UnexpectedStubCall("Unexpected call of an unconfigured stub")
def __call__(self, *args, **kwargs):
obj = self._lookup(call(*args, **kwargs))
if _is_exception(obj):
raise obj
if isinstance(obj, _Sequence):
return obj()
return obj
def stub(*args):
return _Stub(*args)
stub.__doc__ = """Makes stubs that can be used stand-alone or with mock.
Stubs are dumb functions, used in testing, they do no processing but they can take arguments and return
predefined results.
A stub is configured so it returns different values depending on the arguments passed to it. You configure
it with one or more pairs of call arguments and results then when the stub is called with a given set of call
arguments the corresponding result is returned. If the result is an Exception the result is raised. If more
than one result is specified the results will be returned/raised one at a time over successive calls to the
stub. If you wish to specify successive results using an iterable you must wrap it with seq().
You can use a stub in place of a function, for example:
>>> try:
... from unittest.mock import call
... except ImportError:
... from mock import call
>>>
>>> fn = stub((call("hello"), "world"),
... (call("foo"), 1, 2, 4, 8),
... (call("bar"), seq(xrange(100))),
... (call("baz"), KeyError('baz')),
... (call("boom"), 100, RuntimeError, 200, ValueError("boom")))
>>> fn("hello")
'world'
>>> fn("foo")
1
>>> fn("foo")
2
>>> fn("foo")
4
Or you can combine it with a mock by setting it as the side_effect. This has the advantage that you can later
verify the function was called as expected.
>>> try:
... from unittest.mock import Mock, call
... except ImportError:
... from mock import Mock, call
>>>
>>> mock = Mock()
>>> mock.side_effect = stub((call("hello"), "world"),
... (call("foo"), 1,2,4,8))
>>> mock("hello")
'world'
>>> mock("foo")
1
>>> mock("foo")
2
>>> mock("foo")
4
>>> assert mock.call_args_list == [call("hello"), call("foo"), call("foo"), call("foo")]
Also you can use stubs as methods on Mock objects. Whether you use them directly as the methods or as the
side_effect of a mock method depends on whether you want to verify the method calls.
>>> mock_obj = Mock(my_first_method=stub((call(50), 100), (call(100), 200)))
>>> mock_obj.my_second_method = stub((call("a"), "aa"), (call("b"), "bb"))
>>> mock_obj.my_third_method.side_effect = stub((call(123), 456), (call(789), 54321))
>>> mock_obj.my_first_method(50)
100
>>> mock_obj.my_second_method('b')
'bb'
>>> mock_obj.my_third_method(123)
456
>>> assert mock_obj.mock_calls == [call.my_third_method(123)] # only the mocked call is recorded
You can use matchers, such as Any(), as wild-card arguments when matching call arguments. The stub's
configuration is searched in the order it was specified so you can put more specific call argument
specifications ahead of more general ones.
For example:
>>> from mockextras import Any
>>> fn = stub((call(100, 200), "monkey"),
... (call(100, Any()), "hello"))
>>> fn(100, 200)
'monkey'
>>> fn(100, 300)
'hello'
>>> fn(100, "monkey")
'hello'
>>> fn(100, { "key" : 1000 })
'hello'
The following matchers are available in mockextras:
%s
See their documentation for more info.
""" % linesep.join(' * %s' % m for m in matchers_all)
```
#### File: mockextras/test/test_fluent.py
```python
from mockextras import when, Any, UnexpectedStubCall
try:
from unittest.mock import Mock, MagicMock, sentinel
except ImportError:
from mock import Mock, MagicMock, sentinel
import pytest
import sys
def test_when_with_mock():
mock_fn = Mock()
when(mock_fn).called_with(sentinel.arg).then(sentinel.result)
assert mock_fn(sentinel.arg) == sentinel.result
def test_when_with_magic_mock():
mock_fn = MagicMock()
when(mock_fn).called_with(sentinel.arg).then(sentinel.result)
assert mock_fn(sentinel.arg) == sentinel.result
def test_can_not_use_when_with_non_mock():
mock_fn = lambda _ : 10
with pytest.raises(RuntimeError):
when(mock_fn)
def test_can_not_use_when_with_mock_that_has_already_got_a_side_effect():
with pytest.raises(RuntimeError):
when(Mock(side_effect=lambda _ : 10))
with pytest.raises(RuntimeError):
when(Mock(side_effect=[1, 2, 3, 4]))
def test_when_with_any():
mock_fn = Mock()
when(mock_fn).called_with(Any()).then(sentinel.result1)
when(mock_fn).called_with(sentinel.arg1, Any()).then(sentinel.result2)
when(mock_fn).called_with(sentinel.arg2, Any(list)).then(sentinel.result3)
when(mock_fn).called_with(sentinel.arg2, Any(str)).then(sentinel.result4)
assert mock_fn(sentinel.arg1) == sentinel.result1
assert mock_fn(sentinel.arg2) == sentinel.result1
assert mock_fn("hello") == sentinel.result1
assert mock_fn(100) == sentinel.result1
assert mock_fn(sentinel.arg1, "hello") == sentinel.result2
assert mock_fn(sentinel.arg1, "world") == sentinel.result2
assert mock_fn(sentinel.arg2, []) == sentinel.result3
assert mock_fn(sentinel.arg2, [1, 2, 3]) == sentinel.result3
assert mock_fn(sentinel.arg2, ["hello", "world"]) == sentinel.result3
assert mock_fn(sentinel.arg2, "world") == sentinel.result4
def test_when_call_then_return():
mock_fn = Mock()
when(mock_fn).called_with().then(sentinel.result0)
when(mock_fn).called_with(sentinel.arg1).then(sentinel.result1)
when(mock_fn).called_with(sentinel.arg2).then(sentinel.result2)
when(mock_fn).called_with(sentinel.arg1, sentinel.arg2).then(sentinel.result3)
when(mock_fn).called_with(sentinel.arg1, sentinel.arg1).then(sentinel.result4)
when(mock_fn).called_with(sentinel.arg1, other=sentinel.other).then(sentinel.result5)
when(mock_fn).called_with(x=sentinel.x, y=sentinel.y).then(sentinel.result6)
assert mock_fn() == sentinel.result0
assert mock_fn(sentinel.arg1) == sentinel.result1
assert mock_fn(sentinel.arg2) == sentinel.result2
assert mock_fn(sentinel.arg1, sentinel.arg2) == sentinel.result3
assert mock_fn(sentinel.arg1, sentinel.arg1) == sentinel.result4
assert mock_fn(sentinel.arg1, other=sentinel.other) == sentinel.result5
assert mock_fn(x=sentinel.x, y=sentinel.y) == sentinel.result6
def test_when_call_then__return_single():
mock_fn = Mock()
when(mock_fn).called_with(sentinel.arg1).then(sentinel.result1)
assert mock_fn(sentinel.arg1) == sentinel.result1
assert mock_fn(sentinel.arg1) == sentinel.result1
def test_when_call_then_return_multiple():
mock_fn = Mock()
when(mock_fn).called_with(sentinel.arg1).then(sentinel.result1)\
.then(sentinel.result2)\
.then(sentinel.result3)
assert mock_fn(sentinel.arg1) == sentinel.result1
assert mock_fn(sentinel.arg1) == sentinel.result2
assert mock_fn(sentinel.arg1) == sentinel.result3
assert mock_fn(sentinel.arg1) == sentinel.result3
class TestException(Exception):
pass
def test_when_call_then_raise():
mock_fn = Mock()
when(mock_fn).called_with().then(TestException(sentinel.exception0))
when(mock_fn).called_with(sentinel.arg1).then(TestException(sentinel.exception1))
when(mock_fn).called_with(sentinel.arg2).then(TestException(sentinel.exception2))
when(mock_fn).called_with(sentinel.arg1, sentinel.arg2).then(TestException(sentinel.exception3))
when(mock_fn).called_with(sentinel.arg1, sentinel.arg1).then(TestException(sentinel.exception4))
when(mock_fn).called_with(sentinel.arg1, other=sentinel.other).then(TestException(sentinel.exception5))
when(mock_fn).called_with(x=sentinel.x, y=sentinel.y).then(TestException(sentinel.exception6))
with pytest.raises(TestException) as err:
mock_fn()
assert str(err.value) == str(sentinel.exception0)
with pytest.raises(TestException) as err:
mock_fn(sentinel.arg1)
assert str(err.value) == str(sentinel.exception1)
with pytest.raises(TestException) as err:
mock_fn(sentinel.arg2)
assert str(err.value) == str(sentinel.exception2)
with pytest.raises(TestException) as err:
mock_fn(sentinel.arg1, sentinel.arg2)
assert str(err.value) == str(sentinel.exception3)
with pytest.raises(TestException) as err:
mock_fn(sentinel.arg1, sentinel.arg1)
assert str(err.value) == str(sentinel.exception4)
with pytest.raises(TestException) as err:
mock_fn(sentinel.arg1, other=sentinel.other)
assert str(err.value) == str(sentinel.exception5)
with pytest.raises(TestException) as err:
mock_fn(x=sentinel.x, y=sentinel.y)
assert str(err.value) == str(sentinel.exception6)
def test_when_call_then_raise_single():
mock_fn = Mock()
when(mock_fn).called_with(sentinel.arg1).then(TestException(sentinel.exception1))
with pytest.raises(TestException) as err:
mock_fn(sentinel.arg1)
assert str(err.value) == str(sentinel.exception1)
with pytest.raises(TestException):
mock_fn(sentinel.arg1)
def test_when_call_then_raise_multiple():
mock_fn = Mock()
when(mock_fn).called_with(sentinel.arg1).then(TestException(sentinel.exception1))\
.then(TestException)\
.then(TestException(sentinel.exception3))
with pytest.raises(TestException) as err:
mock_fn(sentinel.arg1)
assert str(err.value) == str(sentinel.exception1)
with pytest.raises(TestException) as err:
mock_fn(sentinel.arg1)
assert str(err.value) == ""
with pytest.raises(TestException) as err:
mock_fn(sentinel.arg1)
assert str(err.value) == str(sentinel.exception3)
with pytest.raises(TestException):
mock_fn(sentinel.arg1)
def test_when_call_then_mixed():
mock_fn = Mock()
when(mock_fn).called_with(sentinel.arg1).then(sentinel.result1)\
.then(TestException(sentinel.exception2))\
.then(TestException)\
.then(sentinel.result3)
assert mock_fn(sentinel.arg1) == sentinel.result1
with pytest.raises(TestException):
mock_fn(sentinel.arg1)
with pytest.raises(TestException):
mock_fn(sentinel.arg1)
assert mock_fn(sentinel.arg1) == sentinel.result3
assert mock_fn(sentinel.arg1) == sentinel.result3
def test_when_missing_case():
mock_fn = Mock()
when(mock_fn).called_with(sentinel.arg1).then(sentinel.result1)
when(mock_fn).called_with(sentinel.arg2, sentinel.arg3).then(sentinel.result2)
with pytest.raises(UnexpectedStubCall) as err:
mock_fn(sentinel.arg4)
assert str(err.value) == """Unexpected stub call:
call(sentinel.arg4)
The following calls are configured:
call(sentinel.arg1)
call(sentinel.arg2, sentinel.arg3)
"""
@pytest.mark.skipif(sys.version_info < (3, 0), reason="Skip if Python 2")
def test_when_missing_unicode_case_py3():
mock_fn = Mock()
when(mock_fn).called_with("hello \u2698").then(sentinel.result1)
with pytest.raises(UnexpectedStubCall) as err:
mock_fn("goodbye \u2698")
assert str(err.value) == """Unexpected stub call:
call('goodbye \u2698')
The following calls are configured:
call('hello \u2698')
"""
@pytest.mark.skipif(sys.version_info >= (3, 0), reason="Skip if Python 3")
def test_when_missing_unicode_case_py2():
mock_fn = Mock()
when(mock_fn).called_with(u"hello \u2698").then(sentinel.result1)
with pytest.raises(UnexpectedStubCall) as err:
mock_fn(u"goodbye \u2698")
assert str(err.value) == """Unexpected stub call:
call(u'goodbye \\u2698')
The following calls are configured:
call(u'hello \\u2698')
"""
def test_duplicate_called_with_statements_second_ignored():
mock = Mock()
when(mock).called_with(100, 200).then("monkey")
when(mock).called_with(100, 200).then("hello")
assert 'monkey' == mock(100, 200)
assert 'hello' != mock(100, 200)
def test_most_general_last():
mock = Mock()
when(mock).called_with(100, 200).then("monkey")
when(mock).called_with(100, Any()).then("hello")
assert 'monkey' == mock(100, 200)
assert 'hello' == mock(100, 300)
assert 'hello' == mock(100, "monkey")
assert 'hello' == mock(100, { "key" : 1000 })
def test_called_with_object_has_empty_string_representation():
mock_fn = MagicMock()
assert repr(when(mock_fn).called_with(sentinel.arg)) == ""
```
|
{
"source": "Jdsleppy/observable-lite",
"score": 3
}
|
#### File: observable-lite/observable_lite/observable.py
```python
from .subscription import Subscription
class Observable(object):
"""
Objects of this type publish data to be consumed by any of its subscribing
observers.
"""
def __init__(self):
self.observers = []
def __call__(self, data=None):
for observer in self.observers:
observer(data)
def subscribe(self, observer):
"""
Subscribe a callback to all future publications.
:param observer: A callable to be called with each publication.
:return: A handle to the subscription, to be used for unsubscribing.
:rtype: observable_lite.subscription.Subscription
"""
self.observers.append(observer)
return Subscription(self._unsubscribe, observer)
def _unsubscribe(self, observer):
self.observers.remove(observer)
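# Illustrative usage sketch (names are examples, not part of the library; it also assumes
# the returned Subscription exposes a way to unsubscribe later):
#
#     on_message = Observable()
#     handle = on_message.subscribe(print)
#     on_message("hello")   # every subscribed observer is called with "hello"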
```
|
{
"source": "jdsmith04/subnetlms",
"score": 4
}
|
#### File: jdsmith04/subnetlms/Bank_functions.py
```python
import ipaddress
import random
# seems to work finding the smallest subnet that also contains all the networks given.
def one_net(_subnets):
"""
Get the one IP network that covers all subnets in input or None is subnets are disjoint.
"""
if len(_subnets) == 0:
return None
minimum_length = min([net.prefixlen for net in _subnets])
while _subnets.count(_subnets[0]) < len(_subnets) and minimum_length > 0:
# all subnets are not (yet) equal
_subnets = [net.supernet(new_prefix=minimum_length) for net in _subnets]
minimum_length -= 1
# 0.0.0.0/? -> no common subnet
if _subnets[0].network_address == ipaddress.ip_address(u'0.0.0.0'):
return None
return _subnets[0]
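# Example (illustrative only, in the style of the commented-out samples below): the smallest
# common supernet of the two halves of 192.168.1.0/25 is the /25 itself.
# nets = [ipaddress.ip_network(u'192.168.1.0/26'), ipaddress.ip_network(u'192.168.1.64/26')]
# print(one_net(nets))  # 192.168.1.0/25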
# Generate a random class A, B, or C IP address. If 10, 172, or 192 is passed, the first octet is set to that
# value and an address of that class is generated. Remember that the info is returned as a tuple and must be
# assigned to a variable/list before it can be indexed.
def random_abc_network(class_octet=-1):
# determine class A, B, or C
classes = [10, 172, 192]
ip_address = ''
number_of_network_bits = 0
if class_octet not in classes:
first_octet = classes[random.randint(0, 2)]
else:
first_octet = class_octet
# Actually do some work (make ip and other info)
if first_octet == 10:
ip_address = f'10.{random.randint(1, 255)}.{random.randint(1, 255)}.{random.randint(1, 255)}'
number_of_network_bits = random.randint(8, 30)
elif first_octet == 172:
ip_address = f'172.{random.randint(16, 31)}.{random.randint(1, 255)}.{random.randint(1, 255)}'
number_of_network_bits = random.randint(12, 30)
elif first_octet == 192:
ip_address = f'192.168.{random.randint(1, 255)}.{random.randint(1, 255)}'
number_of_network_bits = random.randint(16, 30)
cidr_notation = f'/{number_of_network_bits}'
network_id = ipaddress.IPv4Network(ip_address + cidr_notation, False)
broadcast = network_id.broadcast_address
subnet_mask = network_id.netmask
return cidr_notation, network_id, broadcast, subnet_mask, ip_address, number_of_network_bits
# network_info = random_abc_network(192)
# print(f'CIDR notation: {network_info[0]}')
# print(f'Network ID: {network_info[1]}')
# print(f'Broadcast: {network_info[2]}')
# print(f'Subnet Mask: {network_info[3]}')
# print(f'IP Address: {network_info[4]}')
# print(f'Number of network bits: {network_info[5]}')
def ordinal(num):
suffixes = {1: 'st', 2: 'nd', 3: 'rd'}
# I'm checking for 10-20 because those are the digits that don't follow the normal counting scheme.
if 10 <= num % 100 <= 20:
suffix = 'th'
else:
# the second parameter is a default.
suffix = suffixes.get(num % 10, 'th')
return str(num) + suffix
# for i in range(1, 100):
# print(ordinal(i))
def summarization(function):
information = function
parent_network = information[1]
number_of_network_bits = information[5]
if number_of_network_bits <= 27:
new_mask_number = number_of_network_bits + random.randint(1, 3)
elif number_of_network_bits == 28:
new_mask_number = number_of_network_bits + random.randint(1, 2)
elif number_of_network_bits == 29:
new_mask_number = number_of_network_bits + 1
else:
new_mask_number = 30
_subnets = parent_network.subnets(new_prefix=new_mask_number)
return _subnets, new_mask_number, parent_network
def host_requirements(network):
starting_mask_number = int(network[-2:]) + 1
starting_max_hosts = 2 ** (32 - starting_mask_number) - 2
network1 = random.randint(2, starting_max_hosts)
network2 = random.randint(2, network1//2)
network3 = random.randint(2, network2//2)
network4 = random.randint(2, network3)
return network1, network2, network3, network4
def subnets(parent_network, _host_requirements):
network = ipaddress.IPv4Network(parent_network)
subnet_list = [0, 1, 2, 3]
for i in range(4):
if _host_requirements[i] == 2:
subnet_list[i] = list(network.subnets(new_prefix=30))[0]
if i < 3:
subnet_list[i + 1] = list(network.subnets(new_prefix=30))[1]
elif _host_requirements[i] <= 6:
subnet_list[i] = list(network.subnets(new_prefix=29))[0]
if i < 3:
subnet_list[i + 1] = list(network.subnets(new_prefix=29))[1]
elif _host_requirements[i] <= 14:
subnet_list[i] = list(network.subnets(new_prefix=28))[0]
if i < 3:
subnet_list[i + 1] = list(network.subnets(new_prefix=28))[1]
elif _host_requirements[i] <= 30:
subnet_list[i] = list(network.subnets(new_prefix=27))[0]
if i < 3:
subnet_list[i + 1] = list(network.subnets(new_prefix=27))[1]
elif _host_requirements[i] <= 62:
subnet_list[i] = list(network.subnets(new_prefix=26))[0]
if i < 3:
subnet_list[i + 1] = list(network.subnets(new_prefix=26))[1]
elif _host_requirements[i] <= 126:
subnet_list[i] = list(network.subnets(new_prefix=25))[0]
if i < 3:
subnet_list[i + 1] = list(network.subnets(new_prefix=25))[1]
elif _host_requirements[i] <= 254:
subnet_list[i] = list(network.subnets(new_prefix=24))[0]
if i < 3:
subnet_list[i + 1] = list(network.subnets(new_prefix=24))[1]
elif _host_requirements[i] <= 510:
subnet_list[i] = list(network.subnets(new_prefix=23))[0]
if i < 3:
subnet_list[i + 1] = list(network.subnets(new_prefix=23))[1]
elif _host_requirements[i] <= 1022:
subnet_list[i] = list(network.subnets(new_prefix=22))[0]
if i < 3:
subnet_list[i + 1] = list(network.subnets(new_prefix=22))[1]
elif _host_requirements[i] <= 2046:
subnet_list[i] = list(network.subnets(new_prefix=21))[0]
if i < 3:
subnet_list[i + 1] = list(network.subnets(new_prefix=21))[1]
elif _host_requirements[i] <= 4094:
subnet_list[i] = list(network.subnets(new_prefix=20))[0]
if i < 3:
subnet_list[i + 1] = list(network.subnets(new_prefix=20))[1]
elif _host_requirements[i] <= 8190:
subnet_list[i] = list(network.subnets(new_prefix=19))[0]
if i < 3:
subnet_list[i + 1] = list(network.subnets(new_prefix=19))[1]
elif _host_requirements[i] <= 16382:
subnet_list[i] = list(network.subnets(new_prefix=18))[0]
if i < 3:
subnet_list[i + 1] = list(network.subnets(new_prefix=18))[1]
elif _host_requirements[i] <= 32766:
subnet_list[i] = list(network.subnets(new_prefix=17))[0]
if i < 3:
subnet_list[i + 1] = list(network.subnets(new_prefix=17))[1]
elif _host_requirements[i] <= 65534:
subnet_list[i] = list(network.subnets(new_prefix=16))[0]
if i < 3:
subnet_list[i + 1] = list(network.subnets(new_prefix=16))[1]
elif _host_requirements[i] <= 131070:
subnet_list[i] = list(network.subnets(new_prefix=15))[0]
if i < 3:
subnet_list[i + 1] = list(network.subnets(new_prefix=15))[1]
elif _host_requirements[i] <= 262142:
subnet_list[i] = list(network.subnets(new_prefix=14))[0]
if i < 3:
subnet_list[i + 1] = list(network.subnets(new_prefix=14))[1]
elif _host_requirements[i] <= 524286:
subnet_list[i] = list(network.subnets(new_prefix=13))[0]
if i < 3:
subnet_list[i + 1] = list(network.subnets(new_prefix=13))[1]
if i < 3:
network = subnet_list[i + 1]
return subnet_list
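# Hedged aside (not used by subnets() above): the prefix length for a given host requirement
# could also be computed directly instead of via the if/elif ladder, for example:
# import math
# def prefix_for_hosts(hosts):
#     # smallest prefix whose usable host count (2**(32 - p) - 2) covers `hosts`
#     return 32 - math.ceil(math.log2(hosts + 2))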
def greater_16_networks():
classes = [172, 192]
first_octet = classes[random.randint(0, 1)]
if first_octet == 172:
ip_address = f'172.{random.randint(16, 31)}.{random.randint(1, 255)}.{random.randint(1, 255)}'
number_of_network_bits = random.randint(16, 30)
else:
ip_address = f'192.168.{random.randint(1, 255)}.{random.randint(1, 255)}'
number_of_network_bits = random.randint(24, 30)
cidr_notation = f'/{number_of_network_bits}'
network = ipaddress.IPv4Network(ip_address + cidr_notation, False)
return network, cidr_notation
```
|
{
"source": "jdsnape/pyatv",
"score": 3
}
|
#### File: pyatv/companion/__init__.py
```python
import asyncio
import logging
from typing import Dict, List, cast
from pyatv import exceptions
from pyatv.companion.connection import CompanionConnection, FrameType
from pyatv.companion.protocol import CompanionProtocol
from pyatv.conf import AppleTV
from pyatv.const import Protocol
from pyatv.interface import App, Apps
from pyatv.support.hap_srp import SRPAuthHandler
_LOGGER = logging.getLogger(__name__)
async def _connect(loop, config: AppleTV) -> CompanionProtocol:
"""Connect to the device."""
service = config.get_service(Protocol.Companion)
if service is None:
raise exceptions.NoCredentialsError("No Companion credentials loaded")
_LOGGER.debug("Connect to Companion from API")
connection = CompanionConnection(loop, str(config.address), service.port)
protocol = CompanionProtocol(connection, SRPAuthHandler(), service)
await protocol.start()
return protocol
# TODO: Maybe move to separate file?
class CompanionAPI:
"""Implementation of Companion API.
This class implements a simple one-shot request based API. It will connect and
disconnect for every request. Mainly as a workaround for not knowing how to keep
a connection open at all time (as the remote device disconnects after a while).
"""
def __init__(self, config: AppleTV, loop: asyncio.AbstractEventLoop):
"""Initialize a new CompanionAPI instance."""
self.config = config
self.loop = loop
async def _send_command(
self, identifier: str, content: Dict[str, object]
) -> Dict[str, object]:
"""Send a command to the device and return response."""
protocol = None
try:
protocol = await _connect(self.loop, self.config)
resp = await protocol.exchange_opack(
FrameType.E_OPACK,
{
"_i": identifier,
"_x": 12356, # Dummy XID, not sure what to use
"_t": "2", # Request
"_c": content,
},
)
except Exception as ex:
raise exceptions.ProtocolError(f"Command {identifier} failed") from ex
else:
# Check if an error was present and throw exception if that's the case
if "_em" in resp:
raise exceptions.ProtocolError(
f"Command {identifier} failed: {resp['_em']}"
)
finally:
if protocol:
protocol.stop()
return resp
async def launch_app(self, bundle_identifier: str) -> None:
"""Launch an app on the remote device."""
await self._send_command("_launchApp", {"_bundleID": bundle_identifier})
async def app_list(self) -> Dict[str, object]:
"""Return list of launchable apps on remote device."""
return await self._send_command("FetchLaunchableApplicationsEvent", {})
class CompanionApps(Apps):
"""Implementation of API for app handling."""
def __init__(self, api: CompanionAPI):
"""Initialize a new instance of CompanionApps."""
self.api = api
async def app_list(self) -> List[App]:
"""Fetch a list of apps that can be launched."""
app_list = await self.api.app_list()
if "_c" not in app_list:
raise exceptions.ProtocolError("missing content in response")
content = cast(dict, app_list["_c"])
return [App(name, bundle_id) for bundle_id, name in content.items()]
async def launch_app(self, bundle_id: str) -> None:
"""Launch an app based on bundle ID."""
await self.api.launch_app(bundle_id)
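# Rough usage sketch (assumptions: `conf` is an AppleTV configuration that already carries
# Companion credentials, `loop` is the running event loop, the code runs inside a coroutine,
# and the bundle ID below is only an example):
#
#     api = CompanionAPI(conf, loop)
#     apps = CompanionApps(api)
#     for app in await apps.app_list():
#         print(app)
#     await apps.launch_app("com.example.SomeApp")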
```
|
{
"source": "jdsnape/RATDecoders",
"score": 3
}
|
#### File: RATDecoders/decoders/LuminosityLink.py
```python
import re
import hashlib
from base64 import b64decode
from Crypto.Cipher import AES
def config(raw_data):
try:
re_pattern = '[a-zA-Z0-9+/]{60,}={0,2}'
conf_string = re.findall(re_pattern, raw_data)[0]
decoded = decrypt_string('Specify a Password', conf_string)
config_dict = parse_config(decoded.split('|'))
if config_dict["BackUp Domain"] == 'Disabled':
return [config_dict, [config_dict["Domain"]]]
else:
return config_dict
except Exception as e:
return False
#Helper Functions Go Here
def decrypt_string(key_string, coded):
try:
# Derive key
key_hash = hashlib.md5(key_string).hexdigest()
aes_key = key_hash[:30]+key_hash+'00'
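# informational note: 30 + 32 + 2 = 64 hex characters, which decode to 32 raw bytes (an AES-256 key)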
#Crypto
cipher = AES.new(aes_key.decode('hex'))
value = cipher.decrypt(b64decode(coded))
return value
except:
return False
#Turn the strings in to a python config_dict
def parse_config(string_list):
config_dict = {}
config_dict["Domain"] = string_list[0]
config_dict["Port"] = string_list[1]
config_dict["BackUp Domain"] = string_list[2]
config_dict["Install Name"] = string_list[3]
config_dict["Startup Name"] = string_list[4]
config_dict["Campaign ID"] = string_list[5]
return config_dict
```
#### File: RATDecoders/decoders/_SpyGate.py
```python
import os
import sys
import string
import pype32
import database
import re
import createIOC
def run(md5, rawData):
rawconfig = rawData.split("abccba")
if len(rawconfig) > 1:
print "Running Abccba"
conf = oldversions(rawconfig)
else:
print "Running pype32"
pe = pype32.PE(data=rawData)
rawConfig = getStream(pe)
conf = parseConfig(rawConfig)
if not conf:
return None
database.insertDomain(md5, [conf["Domain"]])
return conf
# Confirm if there is Net MetaData in the File
def getStream(pe):
counter = 0
for dir in pe.ntHeaders.optionalHeader.dataDirectory:
if dir.name.value == "NET_METADATA_DIRECTORY":
rawConfig = findUSStream(pe, counter)
else:
counter += 1
return rawConfig
# I only want to extract the User Strings Section
def findUSStream(pe, dir):
for i in range(0,4):
name = pe.ntHeaders.optionalHeader.dataDirectory[dir].info.netMetaDataStreams[i].name.value
if name.startswith("#US"):
return pe.ntHeaders.optionalHeader.dataDirectory[dir].info.netMetaDataStreams[i].info
#Walk the User Strings and create a list of individual strings
def parseConfig(rawConfig):
stringList = []
offset = 1
config = bytearray(rawConfig)
while offset < len(config):
length = int(config[offset])
that = config[offset+1:offset+int(length)]
stringList.append(str(that.replace("\x00", "")))
offset += int(length+1)
print stringList
config = {}
for i in range(0,60):
config["Domain"] = stringList[37]
config["Port"] = stringList[39]
config["CampaignID"] = stringList[38]
config["FolderName"] = stringList[41]
config["StartUpName"] = stringList[40]
config["InstallPath"] = stringList[44]
return config
def oldversions(config):
# collect the parsed values in a separate dict so the raw string list passed in is not shadowed
conf = {}
if len(config) == 48:
conf["Version"] = "V0.2.6"
for i in range(1, len(config)):
conf["Domain"] = config[1] #
conf["Port"] = config[2] #
conf["CampaignID"] = config[3] #
conf["DanOption"] = config[5] #
conf["StartupName"] = config[7] #
conf["Password"] = config[9] #
conf["AntiKillServer"] = config[10] #
conf["USBSpreadlnk"] = config[11]
conf["AntiProcessExplorer"] = config[12]
conf["AntiProcessHacker"] = config[13]
conf["AntiApateDNS"] = config[14]
conf["AntiMalwareBytes"] = config[15]
conf["AntiAntiLogger"] = config[16]
conf["BlockVirusTotal"] = config[17] #
conf["Mutex"] = config[18] #
conf["Persistance"] = config[19] #
conf["SpyGateKey"] = config[20]
conf["StartupFolder"] = config[21] #
conf["AntiAvira"] = config[23]
conf["USBSpread"] = config[24]
# 25 if statement below
conf["InstallPath"] = config[26] #
conf["StartUpName"] = config[27] #
conf["MeltAfterRun"] = config[28] #
conf["HideAfterRun"] = config[29] #
conf["InstallPath2"] = config[33] #
# 34 and 35 in if statement below
conf["InstallPath3"] = config[36]
conf["AntiSbieCtrl"] = config[38]
conf["AntiSpyTheSpy"] = config[39]
conf["AntiSpeedGear"] = config[40]
conf["AntiWireshark"] = config[41]
conf["AntiIPBlocker"] = config[42]
conf["AntiCports"] = config[43]
conf["AntiAVG"] = config[44]
conf["AntiOllyDbg"] = config[45]
conf["AntiXNetstat"] = config[46]
if config[25] == "True":
conf["AppDataFolder"] = "True"
else:
conf["AppDataFolder"] = "False"
if config[34] == "True":
conf["TemplatesFolder"] = "True"
else:
conf["TemplatesFolder"] = "False"
if config[35] == "True":
conf["ProgramsFolder"] = "True"
else:
conf["ProgramsFolder"] = "False"
return conf
elif len(config) == 18:
conf["Version"] = "V2.0"
for i in range(1, len(config)):
print i, config[i]
conf["Domain"] = config[1] #
conf["Port"] = config[2] #
conf["CampaignID"] = config[3] #
conf["DanOption"] = config[5] #
conf["AddToStartup"] = config[5] #
conf["StartupKey"] = config[7] #
conf["Password"] = config[9] #
conf["AntiKillServer"] = config[10] #
conf["USBSpread"] = config[11] #
conf["KillProcessExplorer"] = config[12] #
conf["AntiProcessHacker"] = config[13] #
conf["AntiApateDNS"] = config[14]
conf["AntiMalwareBytes"] = config[15]
conf["AntiAntiLogger"] = config[16]
conf["BlockVirusTotal"] = config[17]
return conf
else:
return None
def snortRule(md5, confDict):
rules = []
domain = confDict["Domain"]
ipPattern = re.compile("\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}")
ipTest = ipPattern.search(domain)
if len(domain) > 1:
if ipTest:
rules.append('''alert tcp any any -> '''+domain+''' any (msg: "Pandora Beacon Domain: '''+domain+'''"; classtype:trojan-activity; sid:5000000; rev:1; priority:1; reference:url,http://malwareconfig.com;)''')
else:
rules.append('''alert udp any any -> any 53 (msg: "Pandora Beacon Domain: '''+domain+'''"; content:"|0e|'''+domain+'''|00|"; nocase; classtype:trojan-activity; sid:5000000; rev:1; priority:1; reference:url,http://malwareconfig.com;)''')
rules.append('''alert tcp any any -> any 53 (msg: "Pandora Beacon Domain: '''+domain+'''"; content:"|0e|'''+domain+'''|00|"; nocase; classtype:trojan-activity; sid:5000000; rev:1; priority:1; reference:url,http://malwareconfig.com;)''')
database.insertSnort(md5, rules)
# IOC Creator Two elements Domain or install
def generateIOC(md5, confDict):
# Create the list for File Artefacts
fileIOC = []
fileIOC.append(('is','FileItem','FileItem/FileName','string',confDict["InstallName"]))
fileIOC.append(('contains','FileItem','FileItem/FilePath','string',confDict["InstallPath"]))
fileIOC.append(('is','FileItem','FileItem/Md5sum','md5',md5))
fileIOC.append(('is','ProcessItem','ProcessItem/HandleList/Handle/Name','string',confDict["Mutex"]))
# Create the list for Registry Artefacts
regIOC = []
regIOC.append(('contains','RegistryItem','RegistryItem/Path','string','HKEY_LOCAL_MACHINE\Software\Microsoft\Active Setup\Installed Components'))
regIOC.append(('is','RegistryItem','RegistryItem/Value','string',confDict["ActiveXKey"]))
regIOC.append(('contains','RegistryItem','RegistryItem/Path','string','HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Run'))
regIOC.append(('is','RegistryItem','RegistryItem/Value','string',confDict["HKLMValue"]))
# add each list to our master list
items = []
items.append(fileIOC)
items.append(regIOC)
domList = []
domains = confDict["Domains"].split("|")
for x in domains:
domain = x.split(":")[0]
domList.append(domain)
database.insertDomain(md5, domList)
for domain in domList:
if domain != '':
items.append([("contains", "Network", "Network/DNS", "string", domain)])
IOC = createIOC.main(items, 'PoisonIvy', md5)
database.insertIOC(md5, IOC)
```
#### File: RATDecoders/decoders/TrickBot.py
```python
import sys
import binascii
import pefile
import struct
import hashlib
from Crypto.Cipher import AES
import xml.etree.ElementTree as ET
def derive_key(n_rounds,input_bf):
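# chains SHA-256 over a growing buffer n_rounds times and returns the final digest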
intermediate = input_bf
for i in range(0, n_rounds):
sha = hashlib.sha256()
sha.update(intermediate)
current = sha.digest()
intermediate += current
return current
#expects a str of binary data open().read()
def trick_decrypt(data):
key = derive_key(128, data[:32])
iv = derive_key(128,data[16:48])[:16]
aes = AES.new(key, AES.MODE_CBC, iv)
mod = len(data[48:]) % 16
if mod != 0:
data += '0' * (16 - mod)
return aes.decrypt(data[48:])[:-(16-mod)]
def get_rsrc(pe):
ret = []
for resource_type in pe.DIRECTORY_ENTRY_RESOURCE.entries:
if resource_type.name is not None:
name = str(resource_type.name)
else:
name = str(pefile.RESOURCE_TYPE.get(resource_type.struct.Id))
if name == None:
name = str(resource_type.struct.name)
if hasattr(resource_type, 'directory'):
for resource_id in resource_type.directory.entries:
if hasattr(resource_id, 'directory'):
for resource_lang in resource_id.directory.entries:
data = pe.get_data(resource_lang.data.struct.OffsetToData,resource_lang.data.struct.Size)
ret.append((name,data,resource_lang.data.struct.Size,resource_type))
return ret
def decode_onboard_config(data):
pe = pefile.PE(data=data)
rsrcs = get_rsrc(pe)
a = rsrcs[0][1]
data = trick_decrypt(a[4:])
length = struct.unpack_from('<I',data)[0]
return data[8:length+8]
def config(data):
xml = decode_onboard_config(data)
root = ET.fromstring(xml)
raw_config = {}
for child in root:
if hasattr(child, 'key'):
tag = child.attrib["key"]
else:
tag = child.tag
if tag == 'autorun':
val = str(map(lambda x: x.items(), child.getchildren()))
elif tag == 'servs':
val = ','.join(map(lambda x: x.text, child.getchildren()))
else:
val = child.text
raw_config[tag] = val
return raw_config
```
#### File: RATDecoders/StandAlone/darkddoser.py
```python
import argparse
import os
import string
import pefile
def decrypt_str(encrypted_str,key_str):
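# informational note: for every output byte the whole key is folded into the rolling value `d`,
# and the plaintext byte is recovered as ((d >> 3) ^ ord(e)) % 256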
d = 0
decrypted = ''
for e in encrypted_str:
for c in key_str:
d = (ord(c)+d) ^ 9
decrypted += chr(((d>>3) ^ ord(e)) % 256)
return decrypted
def load_rsrc(pe):
strs = {}
rcd = pefile.RESOURCE_TYPE['RT_RCDATA']
for entry in pe.DIRECTORY_ENTRY_RESOURCE.entries:
if entry.id == rcd:
for e in entry.directory.entries:
data_rva = e.directory.entries[0].data.struct.OffsetToData
size = e.directory.entries[0].data.struct.Size
data = pe.get_memory_mapped_image()[data_rva:data_rva+size]
strs[str(e.name)] = data
break
return strs
def extract(filename,rsrc_name,key):
decrypted = []
try:
pe = pefile.PE(filename)
rsrc = load_rsrc(pe)
if rsrc.get(rsrc_name,''):
crypted_config = rsrc[rsrc_name]
if crypted_config.find('[{#}]') != -1:
for crypt_str in crypted_config.split('[{#}]'):
crypt_str = ''.join([chr(ord(c)^0xbc) for c in crypt_str])
decrypted.append(decrypt_str(crypt_str,key))
except Exception as e:
print('[+] %s: %s' % (Exception, e))
if decrypted:
try:
int(decrypted[1]) # easiest way to test success, port = int
print('[+] Filename: %s' % filename)
print('[+] CnC: %s:%s' % (decrypted[0],decrypted[1]))
print('[+] Server: %s' % decrypted[2])
print('[+] Version: %s' % decrypted[8])
print('[+] Mutex: %s' % decrypted[4])
print('[+] Install: %s' % decrypted[7])
print('[+] Service Name: %s' % decrypted[6])
print()
except:
print('[+] Filename: %s' % filename)
print('[+] Did not successfully decrypt config')
else:
print('[+] Could not locate encrypted config')
def main():
parser = argparse.ArgumentParser(description='Extract configuration data from DarkDDoser')
parser.add_argument('filenames',nargs='+',help='Executables to extract configuration from')
parser.add_argument('--resource',default='BUBZ',help='Custom resource string name where encrypted config is kept')
parser.add_argument('--key',default='darkddoser',help='Custom encryption key for encrypted config')
args = parser.parse_args()
if args.filenames:
for filename in args.filenames:
extract(filename,args.resource,args.key)
else:
print(args.usage())
if __name__ == "__main__":
main()
```
#### File: RATDecoders/StandAlone/SpyGate.py
```python
__description__ = 'CyberGate Config Extractor'
__author__ = '<NAME> http://techanarchy.net'
__version__ = '0.1'
__date__ = '2014/03/15'
import sys
import string
from optparse import OptionParser
import pype32
def run(rawData):
#try:
rawconfig = rawData.split("abccba")
if len(rawconfig) > 1:
print("Running Abccba")
dict = oldversions(rawconfig)
else:
print("Running pype32")
pe = pype32.PE(data=rawData)
rawConfig = getStream(pe)
if rawConfig.startswith("bute"): # workaround for an error in pype32 will still work when fixed
rawConfig = rawConfig[8:]
dict = parseConfig(rawConfig)
#except:
#return None
print(dict)
# Confirm if there is Net MetaData in the File
def getStream(pe):
counter = 0
for dir in pe.ntHeaders.optionalHeader.dataDirectory:
if dir.name.value == "NET_METADATA_DIRECTORY":
rawConfig = findUSStream(pe, counter)
else:
counter += 1
return rawConfig
# I only want to extract the User Strings Section
def findUSStream(pe, dir):
for i in range(0,4):
name = pe.ntHeaders.optionalHeader.dataDirectory[dir].info.netMetaDataStreams[i].name.value
if name.startswith("#US"):
return pe.ntHeaders.optionalHeader.dataDirectory[dir].info.netMetaDataStreams[i].info
#Walk the User Strings and create a list of individual strings
def parseConfig(rawConfig):
stringList = []
offset = 1
config = bytearray(rawConfig)
while offset < len(config):
length = int(config[offset])
that = config[offset+1:offset+int(length)]
stringList.append(str(that.replace("\x00", "")))
offset += int(length+1)
print(stringList)
dict = {}
for i in range(0,60):
dict["Domain"] = stringList[37]
dict["Port"] = stringList[39]
dict["Campaign Name"] = stringList[38]
dict["FolderName"] = stringList[41]
dict["Exe Name"] = stringList[40]
dict["Install Folder"] = stringList[44]
return dict
def oldversions(config):
dict = {}
if len(config) == 48:
dict["Version"] = "V0.2.6"
for i in range(1, len(config)):
dict["Domain"] = config[1] #
dict["Port"] = config[2] #
dict["Campaign Name"] = config[3] #
dict["Dan Option"] = config[5] #
dict["Startup Name"] = config[7] #
dict["Password"] = config[9] #
dict["Anti Kill Server"] = config[10] #
dict["USB Spread / lnk"] = config[11]
dict["Anti Process Explorer"] = config[12]
dict["Anti Process Hacker"] = config[13]
dict["Anti ApateDNS"] = config[14]
dict["Anti MalwareBytes"] = config[15]
dict["Anti AntiLogger"] = config[16]
dict["Block Virus Total"] = config[17] #
dict["Mutex"] = config[18] #
dict["Persistance"] = config[19] #
dict["SpyGate Key"] = config[20]
dict["Startup Folder"] = config[21] #
dict["Anti Avira"] = config[23]
dict["USB Spread / exe"] = config[24]
# 25 if statement below
dict["Install Folder1"] = config[26] #
dict["StartUp Name"] = config[27] #
dict["Melt After Run"] = config[28] #
dict["Hide After Run"] = config[29] #
#dict[""] = config[30]
#dict[""] = config[31]
#dict[""] = config[32]
dict["Install Folder2"] = config[33] #
# 34 and 35 in if statement below
dict["Install Folder3"] = config[36]
#dict[""] = config[37]
dict["Anti SbieCtrl"] = config[38]
dict["Anti SpyTheSpy"] = config[39]
dict["Anti SpeedGear"] = config[40]
dict["Anti Wireshark"] = config[41]
dict["Anti IPBlocker"] = config[42]
dict["Anti Cports"] = config[43]
dict["Anti AVG"] = config[44]
dict["Anti OllyDbg"] = config[45]
dict["Anti X Netstat"] = config[46]
#dict["Anti Keyscrambler"] = config[47]
if config[25] == "True":
dict["Application Data Folder"] = "True"
else:
dict["Application Data Folder"] = "False"
if config[34] == "True":
dict["Templates Folder"] = "True"
else:
dict["Templates Folder"] = "False"
if config[35] == "True":
dict["Programs Folder"] = "True"
else:
dict["Programs Folder"] = "False"
elif len(config) == 18:
dict["Version"] = "V2.0"
for i in range(1, len(config)):
print(i, config[i])
dict["Domain"] = config[1] #
dict["Port"] = config[2] #
dict["Campaign Name"] = config[3] #
dict["Dan Option"] = config[5] #
dict["Add To Startup"] = config[5] #
dict["Startup Key"] = config[7] #
dict["Password"] = config[9] #
dict["Anti Kill Server"] = config[10] #
dict["USB Spread"] = config[11] #
dict["Kill Process Explorer"] = config[12] #
dict["Anti Process Hacker"] = config[13] #
dict["Anti ApateDNS"] = config[14]
dict["Anti MalwareBytes"] = config[15]
dict["Anti AntiLogger"] = config[16]
dict["Block Virus Total"] = config[17]
else:
return None
return dict
if __name__ == "__main__":
parser = OptionParser(usage='usage: %prog inFile outConfig\n' + __description__, version='%prog ' + __version__)
(options, args) = parser.parse_args()
if len(args) > 0:
pass
else:
parser.print_help()
sys.exit()
try:
print("[+] Reading file")
fileData = open(args[0], 'rb').read()
except:
print("[+] Couldn't Open File {0}".format(args[0]))
sys.exit()
print("[+] Searching for Config")
config = run(fileData)
if config == None:
print("[+] Config not found")
sys.exit()
if len(args) == 2:
print("[+] Writing Config to file {0}".format(args[1]))
with open(args[1], 'a') as outFile:
for key, value in sorted(config.items()):
clean_value = [x for x in value if x in string.printable]
outFile.write("Key: {0}\t Value: {1}\n".format(key,clean_value))
else:
print("[+] Printing Config to screen")
for key, value in sorted(config.items()):
clean_value = [x for x in value if x in string.printable]
print(" [-] Key: {0}\t Value: {1}".format(key,clean_value))
print("[+] End of Config")
```
#### File: RATDecoders/StandAlone/Tapaoux.py
```python
__description__ = 'Tapaoux Config Extractor'
__author__ = '<NAME> http://techanarchy.net http://malwareconfig.com'
__version__ = '0.1'
__date__ = '2015/05/11'
import re
import sys
import string
from operator import xor
from optparse import OptionParser
keys = ['Error Code', 'Last Error', '(R) Microsoft Corporation.']
marker_1 = '\xFF\xC3\x4C\xFF\xFC\xCC\x22\xCC\xAA\xAF\x32\x00\x0A\x7C\x44\x4D'
marker_2 = '\xFF\x3C\xC4\xFF\xFC\xCC\x22\xCC\xAA\xAF\x32\x00\x0A\x7C\x44\x4D'
def string_clean(line):
return ''.join((char for char in line if 32< ord(char) < 127))
def find_config(file_data):
split_data = file_data.split(marker_1)
if len(split_data) < 2:
split_data = file_data.split(marker_2)
if len(split_data) == 2:
return split_data[1][:500]
def config_decrypt(keys, data):
for enc_key in keys:
print(" [-] Testing for Key: {0}".format(enc_key))
key_pointer = 0
decoded = ''
for i in range(len(data)):
if key_pointer >= len(enc_key):
key_pointer = 0
data_slice = ord(data[i])
key_slice = ord(enc_key[key_pointer])
if data_slice == key_slice or data[i].encode('hex') == '00':
decoded += data[i]
else:
decoded += chr(xor(data_slice, key_slice))
key_pointer += 1
conf_test = re.search('/[a-zA-Z0-9-]*\x2ephp', decoded)
if conf_test:
print(" [-] Found Config")
return string_clean(decoded)
if __name__ == "__main__":
parser = OptionParser(usage='usage: %prog inFile\n' + __description__, version='%prog ' + __version__)
(options, args) = parser.parse_args()
if len(args) < 1:
parser.print_help()
sys.exit()
print("[+] Reading File")
file_data = open(args[0], 'rb').read()
print(" [-] Searching for config")
config_section = find_config(file_data)
if config_section == None:
print("[!] Config Not Found")
sys.exit()
dec_config = config_decrypt(keys, config_section)
print("----------------------")
print(dec_config)
print("----------------------")
print("[+] Complete")
```
|
{
"source": "jd/sqlalchemy-utils",
"score": 3
}
|
#### File: sqlalchemy_utils/functions/mock.py
```python
import contextlib
import datetime
import inspect
import re
import six
import sqlalchemy as sa
def create_mock_engine(bind, stream=None):
"""Create a mock SQLAlchemy engine from the passed engine or bind URL.
:param bind: A SQLAlchemy engine or bind URL to mock.
:param stream: Render all DDL operations to the stream.
"""
if not isinstance(bind, six.string_types):
bind_url = str(bind.url)
else:
bind_url = bind
if stream is not None:
def dump(sql, *args, **kwargs):
class Compiler(type(sql._compiler(engine.dialect))):
def visit_bindparam(self, bindparam, *args, **kwargs):
return self.render_literal_value(
bindparam.value, bindparam.type)
def render_literal_value(self, value, type_):
if isinstance(value, six.integer_types):
return str(value)
elif isinstance(value, (datetime.date, datetime.datetime)):
return "'%s'" % value
return super(Compiler, self).render_literal_value(
value, type_)
text = str(Compiler(engine.dialect, sql).process(sql))
text = re.sub(r'\n+', '\n', text)
text = text.strip('\n').strip()
stream.write('\n%s;' % text)
else:
dump = lambda *a, **kw: None
engine = sa.create_engine(bind_url, strategy='mock', executor=dump)
return engine
@contextlib.contextmanager
def mock_engine(engine, stream=None):
"""Mocks out the engine specified in the passed bind expression.
Note this function is meant for convenience and protected usage. Do NOT
blindly pass user input to this function as it uses exec.
:param engine: A python expression that represents the engine to mock.
:param stream: Render all DDL operations to the stream.
"""
# Create a stream if not present.
if stream is None:
stream = six.moves.cStringIO()
# Navigate the stack and find the calling frame that allows the
# expression to execute.
for frame in inspect.stack()[1:]:
try:
frame = frame[0]
expression = '__target = %s' % engine
six.exec_(expression, frame.f_globals, frame.f_locals)
target = frame.f_locals['__target']
break
except:
pass
else:
raise ValueError('Not a valid python expression', engine)
# Evaluate the expression and get the target engine.
frame.f_locals['__mock'] = create_mock_engine(target, stream)
# Replace the target with our mock.
six.exec_('%s = __mock' % engine, frame.f_globals, frame.f_locals)
# Give control back.
yield stream
# Put the target engine back.
frame.f_locals['__target'] = target
six.exec_('%s = __target' % engine, frame.f_globals, frame.f_locals)
six.exec_('del __target', frame.f_globals, frame.f_locals)
six.exec_('del __mock', frame.f_globals, frame.f_locals)
```
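A minimal usage sketch for `create_mock_engine` (not taken from the project's own tests, and it assumes a SQLAlchemy version that still supports `strategy='mock'`, as the module above does): render the DDL for a small table into an in-memory stream instead of executing it. The bind URL is a placeholder.
```python
import six
import sqlalchemy as sa
from sqlalchemy_utils.functions.mock import create_mock_engine

metadata = sa.MetaData()
users = sa.Table(
    'users', metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('name', sa.Unicode(255))
)

stream = six.moves.cStringIO()
# Nothing is actually connected to; the executor only renders DDL into the stream.
engine = create_mock_engine('postgresql://localhost/example', stream)
metadata.create_all(engine)    # DDL is rendered, not executed
print(stream.getvalue())       # e.g. "CREATE TABLE users (...);"
```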
#### File: sqlalchemy-utils/sqlalchemy_utils/i18n.py
```python
from .exceptions import ImproperlyConfigured
try:
from babel.dates import get_day_names
except ImportError:
def get_day_names():
raise ImproperlyConfigured(
'Could not load get_day_names function from babel. Either install '
'babel or make a similar function and override it in this '
'module.'
)
try:
from flask.ext.babel import get_locale
except ImportError:
def get_locale():
raise ImproperlyConfigured(
'Could not load get_locale function from Flask-Babel. Either '
'install babel or make a similar function and override it '
'in this module.'
)
```
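The fallbacks above are written so that an application can override them; a rough sketch of such an override when Flask-Babel is not installed (the hard-coded `'en'` is purely illustrative) could be:
```python
import sqlalchemy_utils.i18n


def get_locale():
    # Return whatever locale object or identifier the calling code expects.
    return 'en'


sqlalchemy_utils.i18n.get_locale = get_locale
```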
#### File: sqlalchemy-utils/sqlalchemy_utils/proxy_dict.py
```python
import sqlalchemy as sa
class ProxyDict(object):
def __init__(self, parent, collection_name, mapping_attr):
self.parent = parent
self.collection_name = collection_name
self.child_class = mapping_attr.class_
self.key_name = mapping_attr.key
self.cache = {}
@property
def collection(self):
return getattr(self.parent, self.collection_name)
def keys(self):
descriptor = getattr(self.child_class, self.key_name)
return [x[0] for x in self.collection.values(descriptor)]
def __contains__(self, key):
if key in self.cache:
return self.cache[key] is not None
return self.fetch(key) is not None
def has_key(self, key):
return self.__contains__(key)
def fetch(self, key):
session = sa.orm.object_session(self.parent)
if session and sa.orm.util.has_identity(self.parent):
obj = self.collection.filter_by(**{self.key_name: key}).first()
self.cache[key] = obj
return obj
def create_new_instance(self, key):
value = self.child_class(**{self.key_name: key})
self.collection.append(value)
self.cache[key] = value
return value
def __getitem__(self, key):
if key in self.cache:
if self.cache[key] is not None:
return self.cache[key]
else:
value = self.fetch(key)
if value:
return value
return self.create_new_instance(key)
def __setitem__(self, key, value):
try:
existing = self[key]
self.collection.remove(existing)
except KeyError:
pass
self.collection.append(value)
self.cache[key] = value
def proxy_dict(parent, collection_name, mapping_attr):
try:
parent._proxy_dicts
except AttributeError:
parent._proxy_dicts = {}
try:
return parent._proxy_dicts[collection_name]
except KeyError:
parent._proxy_dicts[collection_name] = ProxyDict(
parent,
collection_name,
mapping_attr
)
return parent._proxy_dicts[collection_name]
def expire_proxy_dicts(target, context):
if hasattr(target, '_proxy_dicts'):
target._proxy_dicts = {}
sa.event.listen(sa.orm.mapper, 'expire', expire_proxy_dicts)
```
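A rough, hypothetical sketch of how `proxy_dict` is meant to be used with a query-like relationship (the `Article`/`Translation` models and the `locale` key are illustrative only, not part of the library):
```python
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy_utils.proxy_dict import proxy_dict

Base = declarative_base()


class Article(Base):
    __tablename__ = 'article'
    id = sa.Column(sa.Integer, primary_key=True)
    # ProxyDict filters the collection, so a dynamic (query-like) relationship is assumed.
    translations = sa.orm.relationship('Translation', lazy='dynamic')


class Translation(Base):
    __tablename__ = 'translation'
    id = sa.Column(sa.Integer, primary_key=True)
    article_id = sa.Column(sa.Integer, sa.ForeignKey(Article.id))
    locale = sa.Column(sa.String(10))
    title = sa.Column(sa.Unicode(255))


article = Article()
by_locale = proxy_dict(article, 'translations', Translation.locale)
by_locale['en'].title = u'Hello'   # creates a Translation(locale='en') on demand
```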
#### File: sqlalchemy_utils/types/bit.py
```python
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import BIT
class BitType(sa.types.TypeDecorator):
"""
BitType offers a way of saving BITs into the database.
"""
impl = sa.types.BINARY
def __init__(self, length=1, **kwargs):
self.length = length
sa.types.TypeDecorator.__init__(self, **kwargs)
def load_dialect_impl(self, dialect):
# Use the native BIT type for dialects that have it.
if dialect.name == 'postgresql':
return dialect.type_descriptor(BIT(self.length))
elif dialect.name == 'sqlite':
return dialect.type_descriptor(sa.String(self.length))
else:
return dialect.type_descriptor(type(self.impl)(self.length))
```
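A small illustrative declaration (the model name is made up): on PostgreSQL the column below becomes a native `BIT(4)`, while other dialects fall back to the types chosen in `load_dialect_impl`.
```python
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy_utils.types.bit import BitType

Base = declarative_base()


class Flag(Base):
    __tablename__ = 'flag'
    id = sa.Column(sa.Integer, primary_key=True)
    bits = sa.Column(BitType(4))   # four-bit value
```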
#### File: tests/aggregate/test_multiple_aggregates_per_class.py
```python
import sqlalchemy as sa
from sqlalchemy_utils.aggregates import aggregated
from tests import TestCase
class TestAggregateValueGenerationForSimpleModelPaths(TestCase):
def create_models(self):
class Thread(self.Base):
__tablename__ = 'thread'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
@aggregated(
'comments',
sa.Column(sa.Integer, default=0)
)
def comment_count(self):
return sa.func.count('1')
@aggregated('comments', sa.Column(sa.Integer))
def last_comment_id(self):
return sa.func.max(Comment.id)
comments = sa.orm.relationship(
'Comment',
backref='thread'
)
Thread.last_comment = sa.orm.relationship(
'Comment',
primaryjoin='Thread.last_comment_id == Comment.id',
foreign_keys=[Thread.last_comment_id],
viewonly=True
)
class Comment(self.Base):
__tablename__ = 'comment'
id = sa.Column(sa.Integer, primary_key=True)
content = sa.Column(sa.Unicode(255))
thread_id = sa.Column(sa.Integer, sa.ForeignKey('thread.id'))
self.Thread = Thread
self.Comment = Comment
def test_assigns_aggregates_on_insert(self):
thread = self.Thread()
thread.name = u'some article name'
self.session.add(thread)
comment = self.Comment(content=u'Some content', thread=thread)
self.session.add(comment)
self.session.commit()
self.session.refresh(thread)
assert thread.comment_count == 1
assert thread.last_comment_id == comment.id
def test_assigns_aggregates_on_separate_insert(self):
thread = self.Thread()
thread.name = u'some article name'
self.session.add(thread)
self.session.commit()
comment = self.Comment(content=u'Some content', thread=thread)
self.session.add(comment)
self.session.commit()
self.session.refresh(thread)
assert thread.comment_count == 1
assert thread.last_comment_id == 1
def test_assigns_aggregates_on_delete(self):
thread = self.Thread()
thread.name = u'some article name'
self.session.add(thread)
self.session.commit()
comment = self.Comment(content=u'Some content', thread=thread)
self.session.add(comment)
self.session.commit()
self.session.delete(comment)
self.session.commit()
self.session.refresh(thread)
assert thread.comment_count == 0
assert thread.last_comment_id is None
```
#### File: tests/aggregate/test_with_ondelete_cascade.py
```python
import sqlalchemy as sa
from sqlalchemy_utils.aggregates import aggregated
from tests import TestCase
class TestAggregateValueGenerationWithCascadeDelete(TestCase):
dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
def create_models(self):
class Thread(self.Base):
__tablename__ = 'thread'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
@aggregated('comments', sa.Column(sa.Integer, default=0))
def comment_count(self):
return sa.func.count('1')
comments = sa.orm.relationship(
'Comment',
passive_deletes=True,
backref='thread'
)
class Comment(self.Base):
__tablename__ = 'comment'
id = sa.Column(sa.Integer, primary_key=True)
content = sa.Column(sa.Unicode(255))
thread_id = sa.Column(
sa.Integer,
sa.ForeignKey('thread.id', ondelete='CASCADE')
)
self.Thread = Thread
self.Comment = Comment
def test_something(self):
thread = self.Thread()
thread.name = u'some article name'
self.session.add(thread)
comment = self.Comment(content=u'Some content', thread=thread)
self.session.add(comment)
self.session.commit()
self.session.expire_all()
self.session.delete(thread)
self.session.commit()
```
#### File: tests/batch_fetch/test_compound_fetching.py
```python
import sqlalchemy as sa
from sqlalchemy_utils import batch_fetch
from tests import TestCase
class TestCompoundOneToManyBatchFetching(TestCase):
def create_models(self):
class Building(self.Base):
__tablename__ = 'building'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
class BusinessPremise(self.Base):
__tablename__ = 'business_premise'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
building_id = sa.Column(sa.Integer, sa.ForeignKey(Building.id))
building = sa.orm.relationship(
Building,
backref=sa.orm.backref(
'business_premises'
)
)
class Equipment(self.Base):
__tablename__ = 'equipment'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
building_id = sa.Column(sa.Integer, sa.ForeignKey(Building.id))
business_premise_id = sa.Column(
sa.Integer, sa.ForeignKey(BusinessPremise.id)
)
building = sa.orm.relationship(
Building,
backref=sa.orm.backref(
'equipment'
)
)
business_premise = sa.orm.relationship(
BusinessPremise,
backref=sa.orm.backref(
'equipment'
)
)
self.Building = Building
self.BusinessPremise = BusinessPremise
self.Equipment = Equipment
def setup_method(self, method):
TestCase.setup_method(self, method)
self.buildings = [
self.Building(id=12, name=u'B 1'),
self.Building(id=15, name=u'B 2'),
self.Building(id=19, name=u'B 3'),
]
self.business_premises = [
self.BusinessPremise(
id=22, name=u'BP 1', building=self.buildings[0]
),
self.BusinessPremise(
id=33, name=u'BP 2', building=self.buildings[0]
),
self.BusinessPremise(
id=44, name=u'BP 3', building=self.buildings[2]
),
]
self.equipment = [
self.Equipment(
id=2, name=u'E 1', building=self.buildings[0]
),
self.Equipment(
id=4, name=u'E 2', building=self.buildings[2]
),
self.Equipment(
id=6, name=u'E 3', business_premise=self.business_premises[0]
),
self.Equipment(
id=8, name=u'E 4', business_premise=self.business_premises[2]
),
]
self.session.add_all(self.buildings)
self.session.add_all(self.business_premises)
self.session.add_all(self.equipment)
self.session.commit()
def test_compound_fetching(self):
buildings = self.session.query(self.Building).all()
batch_fetch(
buildings,
'business_premises',
(
'equipment',
'business_premises.equipment'
)
)
query_count = self.connection.query_count
assert len(buildings[0].equipment) == 1
assert buildings[0].equipment[0].name == 'E 1'
assert not buildings[1].equipment
assert buildings[0].business_premises[0].equipment
assert self.business_premises[2].equipment
assert self.business_premises[2].equipment[0].name == 'E 4'
assert self.connection.query_count == query_count
class TestCompoundManyToOneBatchFetching(TestCase):
def create_models(self):
class Equipment(self.Base):
__tablename__ = 'equipment'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
class Building(self.Base):
__tablename__ = 'building'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
equipment_id = sa.Column(sa.Integer, sa.ForeignKey(Equipment.id))
equipment = sa.orm.relationship(Equipment)
class BusinessPremise(self.Base):
__tablename__ = 'business_premise'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
building_id = sa.Column(sa.Integer, sa.ForeignKey(Building.id))
building = sa.orm.relationship(
Building,
backref=sa.orm.backref(
'business_premises'
)
)
equipment_id = sa.Column(sa.Integer, sa.ForeignKey(Equipment.id))
equipment = sa.orm.relationship(Equipment)
self.Building = Building
self.BusinessPremise = BusinessPremise
self.Equipment = Equipment
def setup_method(self, method):
TestCase.setup_method(self, method)
self.equipment = [
self.Equipment(
id=2, name=u'E 1',
),
self.Equipment(
id=4, name=u'E 2',
),
self.Equipment(
id=6, name=u'E 3',
),
self.Equipment(
id=8, name=u'E 4',
),
]
self.buildings = [
self.Building(id=12, name=u'B 1', equipment=self.equipment[0]),
self.Building(id=15, name=u'B 2', equipment=self.equipment[1]),
self.Building(id=19, name=u'B 3'),
]
self.business_premises = [
self.BusinessPremise(
id=22,
name=u'BP 1',
building=self.buildings[0]
),
self.BusinessPremise(
id=33,
name=u'BP 2',
building=self.buildings[0],
equipment=self.equipment[2]
),
self.BusinessPremise(
id=44,
name=u'BP 3',
building=self.buildings[2],
equipment=self.equipment[1]
),
]
self.session.add_all(self.buildings)
self.session.add_all(self.business_premises)
self.session.add_all(self.equipment)
self.session.commit()
def test_compound_fetching(self):
buildings = self.session.query(self.Building).all()
batch_fetch(
buildings,
'business_premises',
(
'equipment',
'business_premises.equipment'
)
)
query_count = self.connection.query_count
assert buildings[0].equipment.name == 'E 1'
assert buildings[1].equipment.name == 'E 2'
assert not buildings[2].equipment
assert not buildings[1].business_premises
assert buildings[2].business_premises[0].equipment.name == 'E 2'
assert self.connection.query_count == query_count
```
#### File: tests/batch_fetch/test_many_to_one_relationships.py
```python
import sqlalchemy as sa
from sqlalchemy_utils import batch_fetch
from tests import TestCase
class TestBatchFetchManyToOneRelationships(TestCase):
def create_models(self):
class User(self.Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
name = sa.Column(sa.Unicode(255))
class Article(self.Base):
__tablename__ = 'article'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
author_id = sa.Column(sa.Integer, sa.ForeignKey(User.id))
author = sa.orm.relationship(
User,
backref=sa.orm.backref(
'articles'
)
)
self.User = User
self.Article = Article
def setup_method(self, method):
TestCase.setup_method(self, method)
self.users = [
self.User(id=333, name=u'John'),
self.User(id=334, name=u'Matt')
]
articles = [
self.Article(
id=1,
name=u'Article 1',
author=self.users[0]
),
self.Article(
id=2,
name=u'Article 2',
author=self.users[1]
),
self.Article(
id=3,
name=u'Article 3'
)
]
self.session.add_all(articles)
self.session.commit()
def test_supports_relationship_attributes(self):
articles = self.session.query(self.Article).all()
batch_fetch(
articles,
'author'
)
query_count = self.connection.query_count
assert articles[0].author == self.users[0] # no lazy load should occur
assert articles[1].author == self.users[1] # no lazy load should occur
assert articles[2].author is None # no lazy load should occur
assert self.connection.query_count == query_count
```
#### File: tests/functions/test_get_column_key.py
```python
from pytest import raises
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy_utils import get_column_key
class TestGetColumnKey(object):
def setup_method(self, method):
Base = declarative_base()
class Building(Base):
__tablename__ = 'building'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column('_name', sa.Unicode(255))
self.Building = Building
def test_supports_aliases(self):
assert (
get_column_key(self.Building, self.Building.__table__.c.id)
==
'id'
)
assert (
get_column_key(self.Building, self.Building.__table__.c._name)
==
'name'
)
def test_throws_value_error_for_unknown_column(self):
with raises(ValueError):
get_column_key(self.Building, 'unknown')
```
#### File: tests/functions/test_has_changes.py
```python
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy_utils import has_changes
class HasChangesTestCase(object):
def setup_method(self, method):
Base = declarative_base()
class Article(Base):
__tablename__ = 'article_translation'
id = sa.Column(sa.Integer, primary_key=True)
title = sa.Column(sa.String(100))
self.Article = Article
class TestHasChangesWithStringAttr(HasChangesTestCase):
def test_without_changed_attr(self):
article = self.Article()
assert not has_changes(article, 'title')
def test_with_changed_attr(self):
article = self.Article(title='Some title')
assert has_changes(article, 'title')
class TestHasChangesWithMultipleAttrs(HasChangesTestCase):
def test_without_changed_attr(self):
article = self.Article()
assert not has_changes(article, ['title'])
def test_with_changed_attr(self):
article = self.Article(title='Some title')
assert has_changes(article, ['title', 'id'])
class TestHasChangesWithExclude(HasChangesTestCase):
def test_without_changed_attr(self):
article = self.Article()
assert not has_changes(article, exclude=['id'])
def test_with_changed_attr(self):
article = self.Article(title='Some title')
assert has_changes(article, exclude=['id'])
assert not has_changes(article, exclude=['title'])
```
#### File: tests/generic_relationship/test_single_table_inheritance.py
```python
from __future__ import unicode_literals
import six
import sqlalchemy as sa
from sqlalchemy_utils import generic_relationship
from tests import TestCase
class TestGenericRelationship(TestCase):
def create_models(self):
class Employee(self.Base):
__tablename__ = 'employee'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String(50))
type = sa.Column(sa.String(20))
__mapper_args__ = {
'polymorphic_on': type,
'polymorphic_identity': 'employee'
}
class Manager(Employee):
__mapper_args__ = {
'polymorphic_identity': 'manager'
}
class Engineer(Employee):
__mapper_args__ = {
'polymorphic_identity': 'engineer'
}
class Event(self.Base):
__tablename__ = 'event'
id = sa.Column(sa.Integer, primary_key=True)
object_type = sa.Column(sa.Unicode(255))
object_id = sa.Column(sa.Integer, nullable=False)
object = generic_relationship(object_type, object_id)
self.Employee = Employee
self.Manager = Manager
self.Engineer = Engineer
self.Event = Event
def test_set_as_none(self):
event = self.Event()
event.object = None
assert event.object is None
def test_set_manual_and_get(self):
manager = self.Manager()
self.session.add(manager)
self.session.commit()
event = self.Event()
event.object_id = manager.id
event.object_type = six.text_type(type(manager).__name__)
assert event.object is None
self.session.add(event)
self.session.commit()
assert event.object == manager
def test_set_and_get(self):
manager = self.Manager()
self.session.add(manager)
self.session.commit()
event = self.Event(object=manager)
assert event.object_id == manager.id
assert event.object_type == type(manager).__name__
self.session.add(event)
self.session.commit()
assert event.object == manager
def test_compare_instance(self):
manager1 = self.Manager()
manager2 = self.Manager()
self.session.add_all([manager1, manager2])
self.session.commit()
event = self.Event(object=manager1)
self.session.add(event)
self.session.commit()
assert event.object == manager1
assert event.object != manager2
def test_compare_query(self):
manager1 = self.Manager()
manager2 = self.Manager()
self.session.add_all([manager1, manager2])
self.session.commit()
event = self.Event(object=manager1)
self.session.add(event)
self.session.commit()
q = self.session.query(self.Event)
assert q.filter_by(object=manager1).first() is not None
assert q.filter_by(object=manager2).first() is None
assert q.filter(self.Event.object == manager2).first() is None
def test_compare_not_query(self):
manager1 = self.Manager()
manager2 = self.Manager()
self.session.add_all([manager1, manager2])
self.session.commit()
event = self.Event(object=manager1)
self.session.add(event)
self.session.commit()
q = self.session.query(self.Event)
assert q.filter(self.Event.object != manager2).first() is not None
def test_compare_type(self):
manager1 = self.Manager()
manager2 = self.Manager()
self.session.add_all([manager1, manager2])
self.session.commit()
event1 = self.Event(object=manager1)
event2 = self.Event(object=manager2)
self.session.add_all([event1, event2])
self.session.commit()
statement = self.Event.object.is_type(self.Manager)
q = self.session.query(self.Event).filter(statement)
assert q.first() is not None
def test_compare_super_type(self):
manager1 = self.Manager()
manager2 = self.Manager()
self.session.add_all([manager1, manager2])
self.session.commit()
event1 = self.Event(object=manager1)
event2 = self.Event(object=manager2)
self.session.add_all([event1, event2])
self.session.commit()
statement = self.Event.object.is_type(self.Employee)
q = self.session.query(self.Event).filter(statement)
assert q.first() is not None
```
#### File: sqlalchemy-utils/tests/test_case_insensitive_comparator.py
```python
import sqlalchemy as sa
from sqlalchemy_utils import EmailType
from tests import TestCase
class TestCaseInsensitiveComparator(TestCase):
def create_models(self):
class User(self.Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
email = sa.Column(EmailType)
def __repr__(self):
return 'User(%r)' % self.id
self.User = User
def test_supports_equals(self):
query = (
self.session.query(self.User)
.filter(self.User.email == u'<EMAIL>')
)
assert '"user".email = lower(:lower_1)' in str(query)
def test_supports_in_(self):
query = (
self.session.query(self.User)
.filter(self.User.email.in_([u'<EMAIL>', u'a']))
)
assert (
'"user".email IN (lower(:lower_1), lower(:lower_2))'
in str(query)
)
def test_supports_notin_(self):
query = (
self.session.query(self.User)
.filter(self.User.email.notin_([u'<EMAIL>', u'a']))
)
assert (
'"user".email NOT IN (lower(:lower_1), lower(:lower_2))'
in str(query)
)
```
#### File: sqlalchemy-utils/tests/test_expressions.py
```python
from pytest import raises
import sqlalchemy as sa
from sqlalchemy_utils.types import TSVectorType
from sqlalchemy_utils.expressions import (
tsvector_match,
tsvector_concat,
to_tsquery,
plainto_tsquery
)
from tests import TestCase
class TSVectorTestCase(TestCase):
dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
def create_models(self):
class Article(self.Base):
__tablename__ = 'article'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
content = sa.Column(sa.UnicodeText)
search_vector = sa.Column(TSVectorType)
search_vector2 = sa.Column(TSVectorType)
self.Article = Article
class TestMatchTSVector(TSVectorTestCase):
def test_raises_exception_if_less_than_2_parameters_given(self):
with raises(Exception):
str(
tsvector_match(
self.Article.search_vector,
)
)
def test_supports_postgres(self):
assert str(tsvector_match(
self.Article.search_vector,
to_tsquery('something'),
)) == '(article.search_vector) @@ to_tsquery(:to_tsquery_1)'
class TestToTSQuery(TSVectorTestCase):
def test_requires_atleast_one_parameter(self):
with raises(Exception):
str(to_tsquery())
def test_supports_postgres(self):
assert str(to_tsquery('something')) == 'to_tsquery(:to_tsquery_1)'
class TestPlainToTSQuery(TSVectorTestCase):
def test_requires_atleast_one_parameter(self):
with raises(Exception):
str(plainto_tsquery())
def test_supports_postgres(self):
assert str(plainto_tsquery('s')) == (
'plainto_tsquery(:plainto_tsquery_1)'
)
class TestConcatTSVector(TSVectorTestCase):
def test_concatenate_search_vectors(self):
assert str(tsvector_match(
tsvector_concat(
self.Article.search_vector,
self.Article.search_vector2
),
to_tsquery('finnish', 'something'),
)) == (
'(article.search_vector || article.search_vector2) '
'@@ to_tsquery(:to_tsquery_1, :to_tsquery_2)'
)
```
#### File: sqlalchemy-utils/tests/test_instrumented_list.py
```python
from tests import TestCase
class TestInstrumentedList(TestCase):
def test_any_returns_true_if_member_has_attr_defined(self):
category = self.Category()
category.articles.append(self.Article())
category.articles.append(self.Article(name=u'some name'))
assert category.articles.any('name')
def test_any_returns_false_if_no_member_has_attr_defined(self):
category = self.Category()
category.articles.append(self.Article())
assert not category.articles.any('name')
```
#### File: tests/types/test_tsvector.py
```python
import six
import sqlalchemy as sa
from sqlalchemy_utils import TSVectorType
from tests import TestCase
class TestTSVector(TestCase):
dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
def create_models(self):
class User(self.Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
search_index = sa.Column(TSVectorType())
def __repr__(self):
return 'User(%r)' % self.id
self.User = User
def test_generates_table(self):
assert 'search_index' in self.User.__table__.c
def test_type_autoloading(self):
reflected_metadata = sa.schema.MetaData()
table = sa.schema.Table(
'user',
reflected_metadata,
autoload=True,
autoload_with=self.engine
)
assert isinstance(table.c['search_index'].type, TSVectorType)
def test_catalog_and_columns_as_args(self):
type_ = TSVectorType('name', 'age', catalog='pg_catalog.simple')
assert type_.columns == ('name', 'age')
assert type_.options['catalog'] == 'pg_catalog.simple'
def test_match(self):
expr = self.User.search_index.match_tsquery(u'something')
assert six.text_type(expr) == (
'("user".search_index) @@ to_tsquery(:to_tsquery_1)'
)
def test_match_with_catalog(self):
expr = self.User.search_index.match_tsquery(
u'something', catalog='pg_catalog.simple'
)
assert six.text_type(expr) == (
'("user".search_index) @@ to_tsquery(:to_tsquery_1, :to_tsquery_2)'
)
```
|
{
"source": "jdstar-666/maroon",
"score": 2
}
|
#### File: util/db/dao.py
```python
import os
import time
import random
import logging
import chardet
import MySQLdb
import threading
from dbconn import MySQLConn, MongoDBConn
from core.exceptions import ImproperlyConfigured
from sqlfactory import SQLFactory
class Dao(object):
"""docstring for DBUtil"""
def __init__(self, config):
super(Dao, self).__init__()
self._config = config
self.logger = logging.getLogger(__name__)
self._setup()
self._exec_error_desc_tmpl = 'Error in Dao, see the detail: %s.'
def _setup(self):
desc_tmpl = (
'"%s" in MySQL settings is necessary for Dao.'
'Please make sure it is well configured.'
)
if 'mysql' not in self._config:
desc = desc_tmpl%'mysql'
self.logger.error(desc)
raise ImproperlyConfigured(desc)
mysql = self._config['mysql']
if 'default' not in mysql:
desc = desc_tmpl%'default'
self.logger.error(desc)
raise ImproperlyConfigured(desc)
default = mysql['default']
if 'tables' not in default:
desc = desc_tmpl%'default.tables'
self.logger.error(desc)
raise ImproperlyConfigured(desc)
if 'connection' not in default:
desc = desc_tmpl%'connection'
self.logger.error(desc)
raise ImproperlyConfigured(desc)
tables = default['tables']
self.table_table = {}
table_db = {}
for key in ['checkdup', 'app', 'channel']:
if key not in tables:
desc = desc_tmpl%('default.table.%s'%key)
self.logger.error(desc)
raise ImproperlyConfigured(desc)
else:
self.table_table[key] = tables[key]
self.mysqlconn = MySQLConn(default['connection'])
self.sqls = SQLFactory(self.table_table)
desc_tmpl = (
'"%s" in Mongo settings is necessary for Dao if mongo.use is true.'
'Please make sure it is well configured.'
)
if 'mongo' in self._config:
mongo = self._config['mongo']
if not mongo.get('use', True):
self.logger.info('MongoDB will not be used')
else:
if 'default' not in mongo:
desc = desc_tmpl%'default'
self.logger.error(desc)
raise ImproperlyConfigured(desc)
import pymongo
self.mongoconn = MongoDBConn(mongo['default'])
def try_encode(self, sql):
try:
if type(sql)==unicode:
return sql.encode('utf-8')
return sql
except Exception, e:
return sql
def escape_encode(self, s):
# print str(type(s))
try:
st = type(s)
if st != unicode and st != str:
return s
ns = MySQLdb.escape_string(s.encode('utf-8') if st==unicode else s)
return ns
except Exception, e:
return s
def check_news_in_dup(self, news):
if not news:
return False, 'news is null'
sql =self.sqls['check_news_in_dup']
dbr, r = self.mysqlconn.query(sql, news)
if not dbr:
desc = self._exec_error_desc_tmpl%(
'Error when try to check news in dup database, the wc_gid is: '
'%(wc_gid)s'
)%news
self.logger.error(desc)
return False, desc
return r[0]['exist'] >= 1, None
def insert_news_to_dup(self, news):
if not news:
return False, 'news is null'
sql = self.sqls['insert_news_to_dup'] #self.tryencode(self.sql['insertnews']%kwargs)
dbr, r = self.mysqlconn.execute(sql, news)
if not dbr:
desc = self._exec_error_desc_tmpl%(
'Error when try to insert news to dup database, the title of news is: '
'%s'
)%news.get('wc_title')
self.logger.error(desc)
return False, desc
return r==1, None
def insert_app(self, app):
if not app:
return False, 'app is null'
sql = self.sqls['insert_app']
dbr, r = self.mysqlconn.execute(sql, app)
if not dbr:
desc = self._exec_error_desc_tmpl%(
'Error when try to insert app to config database, the name of app is: '
'%s'
)%app.get('wc_name')
return False, desc
return r==1, None
def insert_channel(self, channel):
if not channel:
return False, 'channel is null'
sql = self.sqls['insert_channel']
dbr, r = self.mysqlconn.execute(sql, channel)
if not dbr:
desc = self._exec_error_desc_tmpl%(
'Error when try to insert channel to config database, the name of channel is: '
'%s'
)%channel.get('wc_name')
return False, desc
return r==1, None
def status_newscount_by_app(self, args=None):
sql = self.sqls['get_news_count_by']
if args is not None:
if args.get('wc_app'):
sql += '`wc_app` = %(wc_app)s AND '
if args.get('begindate'):
sql += '`wc_updatetime` > %(begindate)s AND '
if args.get('enddate'):
sql += '`wc_updatetime` < %(enddate)s AND '
sql+=' 1'
sql += ' GROUP BY `wc_app`'
dbr, r = self.mysqlconn.query(sql, args)
if not dbr:
desc = self._exec_error_desc_tmpl%(
'Error when try to get status of news count by app'
)
return None, desc
return r, None
def delete_dup(self, args=None):
sql = self.sqls['delete_from_dup_by']
if args is not None:
if args.get('before'):
sql += '`wc_updatetime` > %(before)s AND '
if args.get('wc_app'):
sql += '`wc_app` = %(wc_app)s AND '
sql += ' 1'
dbr, r = self.mysqlconn.execute(sql, args)
if not dbr:
desc = self._exec_error_desc_tmpl%(
'Error when try to delete news from dup database'
)
return False, desc
return r, None
def get_app(self, args=None):
sql = self.sqls['get_app_by']
if args is not None:
if args.get('wc_ename'):
sql+='`wc_ename`=%(wc_ename)s AND '
sql+=' 1'
dbr, r = self.mysqlconn.query(sql, args)
if not dbr:
desc = self._exec_error_desc_tmpl%(
'Error when try to get app from config database'
)
return None, desc
return r, None
def delete_app(self, args=None):
sql = self.sqls['delete_app']
dbr, r = self.mysqlconn.execute(sql, args)
if not dbr:
desc = self._exec_error_desc_tmpl%(
'Error when try to delete app from config database'
)
return False, desc
return r==1, None
def delete_channel(self, args = None):
sql = self.sqls['delete_channel_by']
if args is not None:
if args.get('wc_app'):
sql += '`wc_app`=%(wc_app)s AND'
sql += ' 1'
dbr, r = self.mysqlconn.execute(sql, args)
if not dbr:
desc = self._exec_error_desc_tmpl%(
'Error when try to delete channels from config database'
)
return None, desc
return r, None
def get_channel(self, args = None):
sql = self.sqls['get_all_channel_by']
if args is not None:
if args.get('wc_app'):
sql += '`wc_app`=%(wc_app)s AND'
if args.get('wc_ename'):
sql += '`wc_ename`=%(wc_ename)s AND '
if args.get('wc_collect'):
sql += '`wc_collect`=%(wc_collect)s AND '
if args.get('channel_name_kw'):
sql += '`wc_name` like %(channel_name_kw)s AND '
sql += ' 1'
if args is not None:
if args.get('limit'):
sql += ' LIMIT %(limit)s'
dbr, r = self.mysqlconn.query(sql, args)
if not dbr:
desc = self._exec_error_desc_tmpl%(
'Error when try to get channels from config database'
)
return None, desc
return r, None
def set_channel_collect(self, args = None):
sql = self.sqls['set_channel_collect']
if args is not None:
if args.get('wc_id') is None:
return False, 'wc_id is null'
if args.get('wc_collect') is None:
args['wc_collect'] = False
else:
return False, 'args is null'
dbr, r = self.mysqlconn.execute(sql, args)
if not dbr:
desc = self._exec_error_desc_tmpl%(
'Error when try to set channel collect in config database'
)
return False, desc
return r==1, None
def insert_news_to_mongo(self, news):
dbr, r = self.mongoconn.insert(news)
if not dbr:
desc = self._exec_error_desc_tmpl%'Error when try to insert news to mongodb.'
return False, desc
return True, r
def insert_comments_to_mongo(self, comments):
dbr, r = self.mongoconn.insert(comments)
if not dbr:
desc = self._exec_error_desc_tmpl%'Error when try to insert comments to mongodb.'
return False, desc
return True, r
if __name__ == '__main__':
# Minimal smoke-test configuration shaped the way _setup() expects.
# Credentials and table names below are placeholders only.
config = {
'mysql': {
'default': {
'connection': {
'host': 'localhost',
'user': 'wde',
'passwd': '<PASSWORD>',
'db': 'wde_wiesler_checkdup'
},
'tables': {
'checkdup': 'checkdup',
'app': 'app',
'channel': 'channel'
}
}
}
}
dao = Dao(config)
app = {}
```
#### File: util/db/sqlfactory.py
```python
from core.exceptions import ArguementInvalid
class SQLFactory(object):
"""docstring for SQLFactory"""
_sqls_tmpl = {
'check_news_in_dup':'SELECT EXISTS( SELECT 1 FROM `%(checkdup)s` WHERE `wc_id` = %%(wc_gid)s) as exist',
'delete_from_dup_by': 'DELETE FROM `%(checkdup)s` WHERE ',
'insert_news_to_dup':(
'INSERT INTO `%(checkdup)s`(`wc_id`, `wc_app`, `wc_title`) '
'VALUES(%%(wc_gid)s, %%(wc_app)s, %%(wc_title)s)'
),
'insert_app': (
'INSERT INTO `%(app)s`(`wc_ename`, `wc_name`, `wc_alias`, `i_sid`, `i_sn`) '
'VALUES(%%(wc_ename)s, %%(wc_name)s, %%(wc_alias)s, %%(i_sid)s, %%(i_sn)s) '
'ON DUPLICATE KEY UPDATE wc_updatetime=CURRENT_TIMESTAMP'
),
'delete_app': 'DELETE FROM `%(app)s` WHERE `wc_ename`=%%(wc_ename)s',
'insert_channel':(
'INSERT INTO `%(channel)s`(`wc_iid`, `wc_name`, `wc_ename`, `wc_gid`, '
'`wc_collect`, `wc_app`, `i_bid`, `i_bn`) '
'VALUES (%%(wc_iid)s, %%(wc_name)s, %%(wc_ename)s, %%(wc_gid)s, '
'%%(wc_collect)s, %%(wc_app)s, %%(i_bid)s, %%(i_bn)s) '
'ON DUPLICATE KEY UPDATE `wc_updatetime`=CURRENT_TIMESTAMP'
),
'delete_channel_by': 'DELETE FROM `%(channel)s` WHERE ',
'get_app_by':'SELECT * FROM `%(app)s` WHERE ',
'get_all_app':'SELECT * FROM `%(app)s`',
'get_channel':'SELECT * FROM `%(channel)s` WHERE wc_app=%%(wc_ename)s',
'get_all_channel_by':'SELECT * FROM `%(channel)s` WHERE ',
'set_channels_collect_yes':'UPDATE `%(channel)s` SET wc_collect=true WHERE `wc_id` in %%s',
'set_channels_collect_no':'UPDATE `%(channel)s` SET wc_collect=false WHERE `wc_id` in %%s',
'set_channel_collect_yes':'UPDATE `%(channel)s` SET wc_collect=true WHERE `wc_id` = %%(wc_id)s',
'set_channel_collect_no':'UPDATE `%(channel)s` SET wc_collect=false WHERE `wc_id` = %%(wc_id)s',
'set_channel_collect':'UPDATE `%(channel)s` SET `wc_collect`=%%(wc_collect)s WHERE `wc_id` = %%(wc_id)s',
'get_channel_by': 'SELECT * FROM `%(channel)s`',
'get_news_count_by':'SELECT SQL_NO_CACHE `wc_app`, count(*) from `%(checkdup)s` WHERE ',
}
def __init__(self, tables):
super(SQLFactory, self).__init__()
self._tables = tables
self._setup()
def _setup(self):
self._sqls = {}
for k, v in SQLFactory._sqls_tmpl.items():
self._sqls[k] = v%self._tables
def __getattr__(self, sql_name):
return self.__getitem__(sql_name)
def __getitem__(self, sql_name):
if sql_name not in self._sqls:
desc = (
'There is no sql named "%s".'
)%sql_name
raise ArguementInvalid(desc)
return self._sqls[sql_name]
class _SQLFactory(object):
"""docstring for Sqls"""
def __init__(self):
super(_SQLFactory, self).__init__()
_sqls = {
'check_news_in_dup':'SELECT EXISTS( SELECT 1 FROM `%(checkdup)s` WHERE wc_gid = %%s)',
'delete_all_from_dup_by': 'DELETE FROM `%(checkdup)s` WHERE ',
'insert_news_to_dup':(
'INSERT INTO `%(checkdup)s`(`wc_gid`, `wc_app`, `wc_title`) '
'VALUES(%%(wc_gid)s, %%(wc_app)s, %%(wc_title)s)'
),
'insert_app': (
'INSERT INTO `%(app)s`(`wc_ename`, `wc_name`, `wc_alias`, `i_sid`, `i_sn`) '
'VALUES(%%(wc_ename)s, %%(wc_name)s, %%(wc_alias)s, %%(i_sid)s, %%(i_sn)s) '
'ON DUPLICATE KEY UPDATE wc_updatetime=CURRENT_TIMESTAMP'
),
'insert_channel':(
'INSERT INTO `%(channel)s`(`wc_id`, `wc_name`, `wc_ename`, `wc_gid`, '
'`wc_collect`, `wc_app`, `i_bid`, `i_bn`) '
'VALUES (%%(wc_id)s, %%(wc_name)s, %%(wc_ename)s, %%(gchannelid)s, '
'%%(wc_collect)s, %%(wc_app)s, %%(i_bid)s, %%(i_bn)s) '
'ON DUPLICATE KEY UPDATE `wc_updatetime`=CURRENT_TIMESTAMP'
),
'get_app_by':'SELECT * FROM `%(app)s` WHERE ',
'get_all_app':'SELECT * FROM `%(app)s`',
'get_channel':'SELECT * FROM `%(channel)s` WHERE wc_app=%%(ename)s',
'get_all_channel_by':'SELECT * FROM `%(channel)s` WHERE ',
'set_channels_collect_yes':'UPDATE `%(channel)s` SET wc_collect=true WHERE `wc_id` in %%s',
'set_channels_collect_no':'UPDATE `%(channel)s` SET wc_collect=false WHERE `wc_id` in %%s',
'set_channel_collect_yes':'UPDATE `%(channel)s` SET wc_collect=true WHERE `wc_id` = ',
'set_channel_collect_no':'UPDATE `%(channel)s` SET wc_collect=false WHERE `wc_id` = ',
'get_channel_by': 'SELECT * FROM `%(channel)s`',
'get_news_count_by':'SELECT SQL_NO_CACHE `wc_app`, count(*) from `%(checkdup)s` WHERE ',
}
_sql_cache = {}
@staticmethod
def get_sql(sql_name, tables, from_cache=True):
if sql_name not in _SQLFactory._sql_cache or not from_cache:
if sql_name not in _SQLFactory._sqls:
desc = (
'There is no sql named "%s".'
)%sql_name
raise ArguementInvalid(desc)
try:
sql = _SQLFactory._sqls[sql_name]%tables
except Exception as e:
raise e
_SQLFactory._sql_cache[sql_name] = sql
return _SQLFactory._sql_cache[sql_name]
```
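For illustration only, here is how the factory expands its templates once it is given physical table names (the names below are placeholders, not the project's real schema):
```python
tables = {'checkdup': 'wde_checkdup', 'app': 'wde_app', 'channel': 'wde_channel'}
sqls = SQLFactory(tables)

# Templates are expanded once, at construction time.
print(sqls['insert_app'])   # INSERT INTO `wde_app`(`wc_ename`, ...) VALUES(%(wc_ename)s, ...)

# Attribute access falls through __getattr__ to __getitem__.
print(sqls.get_all_app)     # SELECT * FROM `wde_app`
```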
|
{
"source": "jdstemmler/resume",
"score": 4
}
|
#### File: jdstemmler/resume/build.py
```python
from argparse import ArgumentParser
import json
class Resume:
def __init__(self, json_file):
with open(json_file, 'r') as f:
self.raw_data = json.load(f)
for a in ['basics', 'profiles', 'work', 'education', 'skills', 'interests']:
setattr(self, a, self.raw_data.get(a, None))
def __repr__(self):
s = [
f"{self.basics.get('name')}",
f"{self.basics.get('label')}",
"-"*len(self.basics.get('label'))
]
return '\n'.join(s)
def parse_arguments():
parser = ArgumentParser(description = "Process your resume!")
parser.add_argument('input_file', default='resume.json', type=str, help="path to the json input file")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_arguments()
resume = Resume(json_file = args.input_file)
print(resume)
```
|
{
"source": "jdstmporter/midifiles",
"score": 3
}
|
#### File: MIDI/chunks/chunk.py
```python
from MIDI.base import Base
class Chunk(Base):
def __init__(self,data=b''):
super().__init__()
self.data=data
def __len__(self):
return len(self.data)
def __str__(self):
return self.stringify(self.data)
```
#### File: Events/messages/other.py
```python
from MIDI.Events.messages.converters import Converter
class SimpleMessage(object):
def compute(self,data):
return data[0]
def __init__(self,name,data=b''):
self.name=name
self.value=self.compute(data)
def __str__(self):
return f'{self.name} := {self.value}'
def __len__(self):
return 1
class ProgramMessage(SimpleMessage):
def __init__(self,data=b''):
super().__init__('Program',data)
class ChannelPressureMessage(SimpleMessage):
def __init__(self,data=b''):
super().__init__('Pressure',data)
class PitchBendMessage(SimpleMessage):
def compute(self, data):
return Converter.Int16(data)-8192
def __init__(self,data=b''):
super().__init__('Bend',data)
def __len__(self):
return 2
```
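A quick sanity check of the pitch-bend decoding, assuming `Converter.Int16` reads the two data bytes LSB-first (as the similarly named PL converter later in this collection does): the centre position 0x00/0x40 should decode to a bend of 0.
```python
msg = PitchBendMessage(b'\x00\x40')   # LSB = 0x00, MSB = 0x40 -> 14-bit value 8192
print(msg)                            # Bend := 0
print(len(msg))                       # 2
```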
#### File: PL/chunks/event.py
```python
from PL.base import Base
from PL.messagetypes import messageType, UnknownMessage
def printable(c):
if c>30 and c<128 : return chr(c)
return '.'
class Event(Base):
def __init__(self,buffer):
super().__init__(buffer[1:])
self.header=buffer[0]
self.isNumeric=self.header<192
generator = messageType(self.header)
self.payloadLength = generator.payloadLength()
try:
self.message = generator(self.header)
self.name = self.message.name
#print(f'Message and name are {self.message} & {self.name}')
except:
self.message = UnknownMessage.Unknown
self.name = 'Unknown'
#print(f'Message and name are {self.message} & {self.name}')
if self.isNumeric:
self.data=self.getInt(self.payloadLength)
n=0
else:
length, n=self.getVarLengthInt()
self.data=self.getChunk(length)
self.payloadLength=length
self.length=self.payloadLength+n+1
self.code=generator.__name__
def __len__(self):
return self.length
def value(self):
return self.message.asString(self.data)
def __str__(self):
return f'{self.code} {self.header}({self.name}) = {self.value()}'
```
#### File: messages/converters/text.py
```python
from .base import Converter
class String8Converter(Converter):
def __init__(self):
super().__init__(separator='')
def process(self,d):
return chr(d) if d>30 and d<128 else '.'
'''
class String16Converter(String8Converter):
def __init__(self):
super().__init__()
def __call__(self,data):
s=super().__call__(data)
if len(s)>0: return s[0:-1:2]
return ''
'''
class String16Converter(Converter):
def __init__(self):
super().__init__()
def __call__(self,data):
return data.decode('utf16')
```
#### File: PL/messagetypes/converters.py
```python
from MIDI.util import SafeEnum
class Converter(object):
@staticmethod
def Null(_):
return None
@staticmethod
def OnOff127(data):
x = data[0]
return {0: 'OFF', 127: 'ON'}.get(x,'???')
@staticmethod
def Id1(x):
return x[0]
@staticmethod
def Int16(data):
return (data[0]&0x7f) + (data[1]&0x7f)*128
class ConversionEnum(SafeEnum):
def __init__(self,*args):
self.code=args[0]
self.converter=args[1] if len(args)>1 else Converter.Id1
@classmethod
def make(cls, n):
for obj in cls:
if obj.code==n: return obj
return None
def __call__(self,value):
return self.converter(value)
```
#### File: src/PL/partitioner.py
```python
import PL.chunks
from PL.base import Base
class PLFile(object):
def __init__(self,filename):
with open(filename,mode='rb') as file:
self.bytes=file.read()
self.header=None
self.tracks=[]
def readHeader(self):
try:
if len(self.bytes)<14:
raise Exception('FLP file requires at least 14 bytes')
buffer=self.bytes[0:14]
header=buffer[0:4].decode()
length=Base.getInt32(buffer[4:8])
if header=='FLhd' : # header
if length != 6:
raise Exception('Header chunk must have length 6')
self.header = PL.chunks.Header(buffer[8:])
return True
else:
return False
except:
return False
def parse(self):
buffer=self.bytes
while len(buffer)>8:
header=buffer[0:4].decode()
length=Base.getInt32(buffer[4:8])
if header=='FLhd' : # header
if length != 6:
raise Exception(f'Header chunk must have length 6 : got {hex(length)}')
self.header = PL.chunks.Header(buffer[8:])
print(f"Header is {self.header}")
elif header=='FLdt' : # track
self.tracks.append(PL.chunks.Track(buffer[8:]))
else:
print(f'Unknown chunk type {header} - skipping')
buffer = buffer[8+length:]
def __str__(self):
out=[]
if self.header:
out.append(str(self.header))
else:
out.append('No header!')
for idx, track in enumerate(self):
out.append(f'\tTrack {idx} of length {len(track)}')
return '\n'.join(out)
def __iter__(self):
return iter(self.tracks)
def __len__(self):
return len(self.tracks)
def __getitem__(self,index):
return self.tracks[index]
@property
def format(self):
return self.header.format
@property
def division(self):
return self.header.division
```
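The intended call sequence, sketched with a placeholder path (`song.flp`):
```python
flp = PLFile('song.flp')
flp.parse()
print(flp)              # header summary plus one line per FLdt track
for track in flp:       # PLFile is iterable over its tracks
    print(len(track))
```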
|
{
"source": "jdstmporter/python-polled-inotify",
"score": 2
}
|
#### File: tests/actions/generator.py
```python
import random
import collections
import time
import os
import shutil
class EventGenerator(object):
def __init__(self,oBarrier,sBarrier,pipe,duration,path='.',minInterval=10,maxInterval=1000):
self.oBarrier=oBarrier
self.sBarrier=sBarrier
self.sBarrier.acquire()
self.pipe=pipe
self.counts=collections.defaultdict(lambda : 0)
self.path=path
self.duration=duration
self.minInterval=minInterval
self.maxInterval=maxInterval
self.fileSet=[]
self.fileCount=0
def newFile(self):
self.fileCount+=1
name='file%s_%s' % (self.fileCount,random.randint(0,100))
name=os.path.join(self.path,name)
return name
def randomFile(self):
return self.fileSet[random.randrange(0,len(self.fileSet))]
def randomText(self,f):
for _ in range(random.randint(0,100)):
f.write(str(random.random()))
def write(self):
name=self.newFile()
with open(name,'w') as f:
self.randomText(f)
self.fileSet.append(name)
def move(self):
name=self.randomFile()
newName=self.newFile()
os.rename(name, newName)
self.fileSet.append(newName)
self.fileSet.remove(name)
def read(self):
name=self.randomFile()
with open(name,'r') as f:
f.read()
def delete(self):
name=self.randomFile()
os.remove(name)
self.fileSet.remove(name)
def modify(self):
name=self.randomFile()
with open(name,'a') as f:
self.randomText(f)
def touch(self):
name=self.randomFile()
open(name,'a').close()
def init(self):
for _ in range(10):
self.write()
def __call__(self):
self.oBarrier.acquire()
modes=['READ','WRITE','DELETE','MOVE','TOUCH','MODIFY']
functions={
'READ':self.read,
'WRITE':self.write,
'MODIFY':self.modify,
'MOVE':self.move,
'DELETE':self.delete,
'TOUCH':self.touch
}
n=len(modes)
now=time.time()
cls=self.__class__
while time.time()-now<self.duration:
delay=random.randint(self.minInterval,self.maxInterval)
time.sleep(delay/1000.0)
mode=modes[random.randrange(0,n)]
functions[mode]()
self.counts[mode]+=1
self.pipe.send({k:v for k,v in self.counts.items()})
self.pipe.close()
self.sBarrier.release()
def shutdown(self):
shutil.rmtree(self.path)
```
#### File: python-polled-inotify/tests/pollinotify_test.py
```python
import random
import unittest
import multiprocessing
import collections
import tempfile
import time
import os
import sys
from .actions import EventGenerator, EventObserver
class TestInotify(unittest.TestCase):
def __init__(self,methodName='runTest',nTests=10):
super(TestInotify,self).__init__(methodName)
self.nTests=nTests
def setUp(self):
self.duration=random.randint(2,10)
self.path=os.path.join(tempfile.gettempdir(),'inotify')
try:
os.mkdir(self.path)
except:
pass
srcE, self.dstE = multiprocessing.Pipe()
srcO, self.dstO = multiprocessing.Pipe()
observerBarrier=multiprocessing.Semaphore()
sourceBarrier=multiprocessing.Semaphore()
self.observer=EventObserver(observerBarrier,sourceBarrier,srcO,path=self.path)
self.source=EventGenerator(observerBarrier,sourceBarrier,srcE,self.duration,path=self.path)
self.source.init()
self.observer.init()
self.observerTask=multiprocessing.Process(target=self.observer)
self.sourceTask=multiprocessing.Process(target=self.source)
self.observerTask.start()
self.sourceTask.start()
self.wait()
self.events=collections.defaultdict(lambda : 0)
self.events.update(self.dstE.recv())
self.observations=collections.defaultdict(lambda : 0)
self.observations.update(self.dstO.recv())
self.dstE.close()
self.dstO.close()
def tearDown(self):
self.observer.shutdown()
self.source.shutdown()
def wait(self):
for _ in range(self.duration):
print('.', end='')
sys.stdout.flush()
time.sleep(1)
def test_Create(self):
self.assertEqual(self.observations['Create'],self.events['WRITE'])
def test_Read(self):
self.assertEqual(self.observations['CloseOther'],self.events['READ'])
def test_Write(self):
self.assertEqual(self.observations['CloseWrite'],self.events['WRITE']+self.events['TOUCH']+self.events['MODIFY'])
def test_Change(self):
self.assertEqual(self.observations['Modify'],self.events['WRITE']+self.events['MODIFY'])
def test_Delete(self):
self.assertEqual(self.observations['Delete'], self.events['DELETE'])
def test_Move(self):
self.assertEqual(self.observations['MoveFrom'],self.events['MOVE'])
self.assertEqual(self.observations['MoveTo'],self.events['MOVE'])
def test_Open(self):
self.assertEqual(self.observations['Open'],self.events['READ']+self.events['WRITE']+self.events['TOUCH']+self.events['MODIFY'])
def teardown_module():
pass
if __name__=='__main__':
unittest.main(exit=False)
```
|
{
"source": "jdstmporter/SDRAudio",
"score": 3
}
|
#### File: sdr-py/audio/formats.py
```python
import sounddevice
import numpy as np
class PCMFormat(object):
def __init__(self,name,minimum,maximum):
self.name=name
self.min=minimum
self.max=maximum
self.a=2.0/(maximum-minimum)
self.b=-(maximum+minimum)/(maximum-minimum)
self.divisor=np.max(np.abs([self.min,self.max]))
def __str__(self):
return self.name
def __call__(self,values):
# affine map: clip to the format's range, then rescale into [-1, 1]
return np.clip(values,self.min,self.max)*self.a + self.b
class PCMStreamCharacteristics(object):
FORMATS = {
'uint8': PCMFormat('uint8',0,255),
'int8' : PCMFormat('int8',-128,127),
'int16': PCMFormat('int16',-32768,32767),
'int32': PCMFormat('int32',-2147483648,2147483647),
'float': PCMFormat('float',-1,1)
}
def __init__(self,rate=48000,fmt='int16',blocksize=64):
self.rate=rate
self.dtype=fmt
self.blocksize=blocksize
self.format=self.FORMATS.get(self.dtype)
def check(self,dev):
sounddevice.check_input_settings(device=dev.index, dtype=self.dtype, samplerate=self.rate)
```
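A small sketch of the normalisation these format objects perform: raw `int16` samples are clipped and mapped into [-1, 1].
```python
import numpy as np

chars = PCMStreamCharacteristics(rate=48000, fmt='int16', blocksize=64)
raw = np.array([-32768, 0, 32767], dtype=np.int16)
print(chars.format(raw))   # approximately [-1.0, 0.0, 1.0]
```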
#### File: sdr-py/audio/session.py
```python
import sounddevice
import numpy as np
from .device import PCMDeviceSpecification
from .formats import PCMStreamCharacteristics
from util import SYSLOG
class PCMSessionDelegate(object):
def __call__(self,data):
pass
def connect(self,samplerate):
pass
def startListeners(self):
pass
def stopListeners(self):
pass
class PCMSessionHandler(object):
def __init__(self,delegate=PCMSessionDelegate()):
self.pcm=None
self.format=None
self.delegate=delegate
def connect(self,dev):
try:
if self.pcm:
self.stop()
self.pcm=PCMInputSession(dev,delegate=self.delegate)
self.delegate.connect(self.pcm.samplerate)
except Exception as ex:
SYSLOG.error(f'Error connecting to {dev} : {ex}')
def disconnect(self):
try:
if self.pcm:
self.stop()
self.pcm=None
except Exception as ex:
SYSLOG.error(f'Error disconnecting from {self.pcm} : {ex}')
def start(self):
self.delegate.startListeners()
self.pcm.start()
self.format=self.pcm.format
SYSLOG.info(f'Started {self.pcm}')
def stop(self):
if self.pcm:
self.pcm.stop()
self.format=None
self.delegate.stopListeners()
class PCMOutputSession(object):
def __init__(self,specification : PCMDeviceSpecification):
self.specification=specification
self.device=str(specification)
self.name=specification.name
self.index=specification.index
self.pcm = None
self.format=None
def start(self,characteristics = PCMStreamCharacteristics()):
characteristics.check(self.specification)
self.format=characteristics.format
self.pcm=sounddevice.OutputStream(samplerate=characteristics.rate,blocksize=characteristics.blocksize,device=self.index,
dtype=characteristics.dtype,callback=self.callback)
self.pcm.start()
def stop(self):
if self.pcm: self.pcm.stop(True)
self.pcm=None
def kill(self):
if self.pcm: self.pcm.abort(True)
self.pcm=None
def write(self,buffer):
self.pcm.write(buffer)
@property
def samplerate(self):
return self.pcm.samplerate
@property
def active(self):
if self.pcm==None: return None
return self.pcm.active
def callback(self,data,frames,time,status):
if status:
SYSLOG.info(f'{status}; wrote {frames} frames')
class PCMInputSession(object):
def __init__(self,specification : PCMDeviceSpecification, delegate : PCMSessionHandler = PCMSessionHandler()):
self.specification=specification
self.device=str(specification)
self.name=specification.name
self.index=specification.index
self.delegate=delegate
self.pcm = None
self.format=None
@property
def samplerate(self):
return self.pcm.samplerate
def callback(self,indata,frames,time,status):
if status:
SYSLOG.info(f'{status} but got {frames} frames')
if len(indata)>0:
self.delegate(np.mean(indata,axis=1)/self.format.divisor)
@property
def active(self):
if self.pcm==None: return None
return self.pcm.active
def start(self,characteristics = PCMStreamCharacteristics()):
characteristics.check(self.specification)
self.format=characteristics.format
self.pcm=sounddevice.InputStream(samplerate=characteristics.rate,blocksize=characteristics.blocksize,device=self.index,
dtype=characteristics.dtype,callback=self.callback)
self.pcm.start()
def stop(self):
if self.pcm: self.pcm.stop(True)
self.pcm=None
def kill(self):
if self.pcm: self.pcm.abort(True)
self.pcm=None
```
|
{
"source": "jdstmporter/Simple-Python-Oscilloscope",
"score": 2
}
|
#### File: Simple-Python-Oscilloscope/graphs/spectra.py
```python
import numpy as np
import threading
import queue
class Transforms(object):
EPSILON = 1.0e-10
def __init__(self,size=1024,samplerate=48000,average=10):
self.size=size
self.samplerate=samplerate
self.normaliser=10*np.log10(size*samplerate)
self.average=average
def logNorm(self,vector):
return 20*np.log10(np.absolute(vector)+Transforms.EPSILON)-self.normaliser
def powerSpectrum(self,data=[]):
spec=np.fft.rfft(data,self.size)
return self.logNorm(spec)
def rCepstrum(self,data=[]):
reals = self.powerSpectrum(data)
return np.fft.irfft(reals,self.size)
def cxCepstrum(self,data=[]):
spec = np.log2(np.fft.fft(data,self.size))
return np.fft.ifft(spec,self.size)
def powerRCepstrum(self,data=[]):
return self.logNorm(self.rCepstrum(data))
def powerCxCepstrum(self,data=[]):
return self.logNorm(self.cxCepstrum(data))
class SpectralRunner(threading.Thread):
def __init__(self,queue,callback,fft,fftSize,average):
super().__init__()
self.buffer=[]
self.queue=queue
self.callback=callback
self.fft=fft
self.fftSize=fftSize
self.active=False
self.average=average
self.ffts=[]
def run(self):
self.active=True
while self.active:
item=self.queue.get()
self.buffer.extend(np.mean(item,axis=1))
while len(self.buffer)>=self.fftSize:
values = self.buffer[:self.fftSize]
self.buffer=self.buffer[self.fftSize:]
self.callback(self.fft.powerSpectrum(values))
def shutdown(self):
self.active=False
class SpectralBase(object):
def __init__(self,fftSize,average=10,viewers=[]):
self.queue = queue.Queue()
self.thread=None
self.average=average
self.fft =Transforms(fftSize)
self.buffer=[]
self.fftSize=fftSize
self.xflen = 1+fftSize//2
self.viewers=viewers
self.ffts=[]
def setSampleRate(self,rate=48000):
self.fft=Transforms(self.fftSize,rate)
def start(self):
def callback(xf):
self.ffts.append(xf)
for viewer in self.viewers: viewer(xf)
self.thread=SpectralRunner(self.queue,callback,self.fft,self.fftSize,self.average)
self.thread.start()
def stop(self):
if self.thread:
self.thread.shutdown()
self.thread=None
def add(self,values):
self.queue.put(values,block=False)
''' self.buffer.extend(values)
if len(self.buffer)>=self.fftSize:
chunk = self.buffer[:self.fftSize]
self.buffer=self.buffer[self.fftSize:]
# if len(self.buffer)>=self.minpoints:
# values = self.buffer[:self.minpoints]
# self.buffer=self.buffer[self.minpoints:]
# xf=[]
# for n in range(self.average):
# start=n*self.offset
# xf.append(values[start:start+self.fftSize])
# chunk=np.average(xf,axis=0)
xformed=self.fft.powerSpectrum(chunk)
#ma = np.max(xformed)
#mi = np.min(xformed)
#print(f'{mi} <-> {ma}')
for viewer in self.viewers: viewer(xformed)
'''
```
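SpectralBase queues raw sample blocks and lets the worker thread slice them into fixed-size chunks, pushing each chunk's normalised log power spectrum to the registered viewers. The following is a single-threaded sketch of the same chunk-and-transform flow; the function and variable names are invented for illustration.
```python
import numpy as np

SIZE, RATE = 1024, 48000
EPSILON = 1.0e-10
normaliser = 10 * np.log10(SIZE * RATE)

def power_spectrum(chunk):
    """Log power spectrum of one fixed-size chunk, normalised like Transforms.logNorm."""
    spec = np.fft.rfft(chunk, SIZE)
    return 20 * np.log10(np.absolute(spec) + EPSILON) - normaliser

buffer, spectra = [], []
t = np.arange(4096) / RATE
samples = np.sin(2 * np.pi * 1000 * t)   # 1 kHz test tone

buffer.extend(samples)
while len(buffer) >= SIZE:
    chunk, buffer = buffer[:SIZE], buffer[SIZE:]
    spectra.append(power_spectrum(chunk))

print(len(spectra), "spectra of", len(spectra[0]), "bins each")
# The loudest bin should sit near 1000 Hz.
peak_bin = int(np.argmax(spectra[0]))
print("peak at ~", peak_bin * RATE / SIZE, "Hz")
```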
#### File: Simple-Python-Oscilloscope/graphs/spectrogram.py
```python
from .graphic import Graphic, Range
from .gradient import Gradient
import tkinter as tk
class Spectrogram(Graphic):
def __init__(self,root,bounds=Range(-1,1),xscale=1,background='black',line='red',
gradient=Gradient(),xflen=513):
super().__init__(root,bounds,xscale,background,line)
self.graph.config(scrollregion=(0,0,3200,400))
self.graph.pack(side=tk.TOP,fill=tk.BOTH)
self.scroller = tk.Scrollbar(self.root,orient=tk.HORIZONTAL)
self.scroller.pack(side=tk.BOTTOM, fill=tk.X)
self.graph.configure(xscrollcommand=self.scroller.set)
self.scroller.configure(command=self.scroll)
self.gradient=gradient
self.photo=tk.PhotoImage(width=3200,height=400)
self.graph.create_image(400,200,image=self.photo,state='normal')
self.xflen=xflen
self.average=10
self.ffts=[]
def scroll(self,*args):
print(*args)
self.graph.xview(*args)
def __call__(self,xformed):
self.ffts.append(xformed)
h=self.photo.height()
factor=h/self.xflen
x=len(self.ffts)-1
for y in range(h):
f = int(y/factor)
c=str(self.gradient(self.range(xformed[f])))
#print(f'{c} @(0,{y}) with {w} {h} for {value} => {v}')
self.photo.put(c,(x,h-y))
def configure(self,**kwargs):
super().configure(**kwargs)
self.photo.configure(**kwargs)
def clear(self):
self.ffts=[]
```
#### File: Simple-Python-Oscilloscope/graphs/spectrum.py
```python
from .graphic import Graphic, Range
class SpectrumView(Graphic):
def __init__(self,root,bounds=Range(-1,1),xscale=1,background='black',line='green',xflen=513):
super().__init__(root,bounds,xscale,background,line)
self.line = self.graph.create_line(-1,0,-1,0,fill=line)
self.xflen=xflen
self.points= [0]*2*self.xflen
def fixSize(self):
s=self.size
if s.width != self.width:
self.width=s.width
xscale = s.width / self.xflen
for n in range(self.xflen): self.points[2*n]=n*xscale
self.height=s.height
def __call__(self,xformed):
self.fixSize()
for index,value in enumerate(xformed):
y=(1-self.range(value))*self.height
self.points[2*index+1]=y
self.graph.coords(self.line,self.points)
```
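SpectrumView keeps a flat list of alternating x,y canvas coordinates: the x values are fixed whenever the widget is resized (bin index times width divided by xflen), and each incoming spectrum only rewrites the y values after self.range maps a dB value into [0, 1]. Here is a numpy-only sketch of that mapping; the canvas size and the -120..0 dB range are assumptions for illustration.
```python
import numpy as np

def to_canvas_points(xformed, width=800, height=400, lo=-120.0, hi=0.0):
    """Map a power spectrum (dB) onto alternating x,y canvas coordinates."""
    xflen = len(xformed)
    xs = np.arange(xflen) * (width / xflen)
    frac = np.clip((np.asarray(xformed) - lo) / (hi - lo), 0.0, 1.0)  # like self.range
    ys = (1.0 - frac) * height                                        # 0 dB at the top
    points = np.empty(2 * xflen)
    points[0::2], points[1::2] = xs, ys
    return points

print(to_canvas_points([-120, -60, 0], width=300, height=100))
# -> [  0. 100. 100.  50. 200.   0.]
```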
|
{
"source": "jdswalker/Advent-of-Code-2015",
"score": 4
}
|
#### File: Advent-of-Code-2015/advent_of_code/__main__.py
```python
import argparse
import sys
# Application-specific Imports
from advent_of_code.solvers import factory
def get_args():
"""Parses command-line inputs for the script or assigns default values
Args: None
Returns:
Namespace: Values parsed from command-line arguments
"""
parser = argparse.ArgumentParser(
prog='advent_of_code',
epilog=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
'-v',
'--version',
action='version',
version='Advent of Code 2015 Puzzle Solver v1.0.1',
help='Show the version number of this package',
)
parser.add_argument(
'-p',
'--puzzle',
type=int,
choices=range(1, 26),
required=True,
help='Day of the Advent of Code 2015 puzzle',
metavar='DAY',
dest='day',
)
parser.add_argument(
'-f',
'--file',
default=None,
type=str,
help='Input file for the Advent of Code 2015 puzzle',
metavar='PATH',
dest='file_name',
)
return parser.parse_args()
def main():
"""Solves the puzzle input from a file, if given, otherwise runs tests
Args: None
Returns: None
"""
args = get_args()
solver = factory.get_solver(args.day, args.file_name)
if args.file_name:
print(solver.puzzle_title)
print(solver.get_puzzle_solution())
else:
print('No puzzle input was provided')
print('Running test cases for ' + solver.puzzle_title)
solver.run_test_cases()
if __name__ == '__main__':
sys.exit(main())
```
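The solvers can also be driven without the argparse wrapper, since the script only relies on factory.get_solver. A short sketch assuming the package is importable; with no input file the solver falls back to its built-in test cases, mirroring the else branch of main() above.
```python
from advent_of_code.solvers import factory

solver = factory.get_solver(6, None)  # day 6, no puzzle input file
print('Running test cases for ' + solver.puzzle_title)
solver.run_test_cases()
```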
#### File: advent_of_code/solvers/day_06.py
```python
from collections import namedtuple
import re
# Application-specific Imports
from advent_of_code.solvers import solver
# Stores a pair of light grid coordinates
Point = namedtuple('Point', 'x y')
class LightGrid(object):
"""Abstract class for representing a 2D grid of lights
Attributes:
grid_size (Point): Stores maximum x- and y-coordinate of the light grid
light_grid (list): Lights in the grid as a list of rows
"""
def __init__(self, width=1000, height=1000):
self._grid_size = Point(width, height)
self._light_grid = [None] * height
self.reset_grid()
def reset_grid(self):
"""Turns off every light in the grid
Args: None
Returns: None
"""
for row in range(0, self._grid_size.y):
self._light_grid[row] = bytearray(self._grid_size.x)
def count_lights(self):
"""Counts the number or total intensity of turned on lights in the grid
Args: None
Returns:
int: The number or intensity of turned on lights in the grid
"""
return sum(sum(row) for row in self._light_grid)
def set_light_state(self, start, end, light_state):
"""Sets a row of lights to a specific state
Args:
start (int): The grid index for the light to begin setting states
end (int): The grid index for the light to stop setting states
light_state (str): Whether the lights should be 'on' or 'off'
Returns: None
"""
raise NotImplementedError()
class SimpleLightGrid(LightGrid):
"""Represents a 2D grid of lights that can be turned on and off
Attributes:
grid_size (Point): Stores maximum x- and y-coordinate of the light grid
light_grid (list): Lights in the grid as a list of rows
"""
def set_light_state(self, start, end, light_state):
"""Sets a row of lights to a specific state
Args:
start (int): The grid index for the light to begin setting states
end (int): The grid index for the light to stop setting states
light_state (str): Whether the lights should be 'on' or 'off'
Returns: None
"""
state = 1 if light_state == 'on' else 0
for row in range(start.y, end.y + 1):
for column in range(start.x, end.x + 1):
self._light_grid[row][column] = state
def toggle_light_state(self, start, end):
"""Toggles lights between on and off along indices of a grid row
Args:
start (int): The grid index for the light to begin toggling lights
end (int): The grid index for the light to stop toggling lights
Returns: None
"""
for row in range(start.y, end.y + 1):
for column in range(start.x, end.x + 1):
self._light_grid[row][column] ^= 1
class ComplexLightGrid(LightGrid):
"""Represents a 2D grid of lights that can change intensity
Attributes:
grid_size (Point): Stores maximum x- and y-coordinate of the light grid
light_grid (list): Lights in the grid as a list of rows
"""
def set_light_state(self, start, end, light_state):
"""Sets a row of lights to a specific state
Args:
start (int): The grid index for the light to begin setting states
end (int): The grid index for the light to stop setting states
light_state (str): Whether the lights should be 'on' or 'off'
Returns: None
"""
if light_state == 'on':
self.toggle_light_state(start, end, 1)
else:
for row in range(start.y, end.y + 1):
for column in range(start.x, end.x + 1):
if self._light_grid[row][column]:
self._light_grid[row][column] -= 1
def toggle_light_state(self, start, end, increment=2):
"""Increases the intensity for a row of lights between two indices
Args:
start (int): The grid index for the light to begin toggling lights
end (int): The grid index for the light to stop toggling lights
increment (int): The amount to increase each light's intensity
Returns: None
"""
end_x = end.x + 1
for row in range(start.y, end.y + 1):
for column in range(start.x, end_x):
self._light_grid[row][column] += increment
class Solver(solver.AdventOfCodeSolver):
"""Advent of Code 2015 Day 6: Probably a Fire Hazard
Attributes:
puzzle_input (list): A list of instructions for solving the puzzle
puzzle_title (str): Name of the Advent of Code puzzle
solved_output (str): A template string for solution output
toggle (RegexObject): Pattern for matching toggle instructions
turn (RegexObject): Pattern for matching turn instructions
"""
def __init__(self, *args):
solver.AdventOfCodeSolver.__init__(self, *args)
self._solved_output = '\n'.join((
'The first grid had {0} lights lit and',
'the second grid had a total brightness of {1}',
))
toggle_pattern = 'toggle {0}'
turn_pattern = 'turn (?P<state>on|off) {0}'
overlap = r'(?P<x1>\d+),(?P<y1>\d+) through (?P<x2>\d+),(?P<y2>\d+)'
self._toggle = re.compile(toggle_pattern.format(overlap))
self._turn = re.compile(turn_pattern.format(overlap))
@staticmethod
def _parse_points(instruction):
"""Parses start and end points from the instruction
Args:
instruction (dict): Parsed coordinates for which lights to change
Returns:
tuple: Start and end coordinates for the instruction
"""
start = Point(int(instruction['x1']), int(instruction['y1']))
end = Point(int(instruction['x2']), int(instruction['y2']))
return (start, end)
def toggle_lights(self, light_grid, toggle_instr):
"""Changes the given lights based on their state (i.e., on or off)
Args:
light_grid (LightGrid): Represents a 2D grid of lights
toggle_instr (MatchObject): Parsed coordinates for lights to change
Returns: None
"""
start, end = self._parse_points(toggle_instr.groupdict())
light_grid.toggle_light_state(start, end)
def change_light_state(self, light_grid, turn_instr):
"""Sets the given lights to a particular state (i.e., on or off)
Args:
light_grid (LightGrid): Represents a 2D grid of lights
turn_instr (MatchObject): Parsed coordinates for lights to change
Returns: None
"""
light_instr = turn_instr.groupdict()
start, end = self._parse_points(light_instr)
light_grid.set_light_state(start, end, light_instr['state'])
def _solve_puzzle_parts(self):
"""Solves each part of a Advent of Code 2015 puzzle
Args: None
Returns:
tuple: Pair of solutions for the two parts of the puzzle
"""
simple_grid = SimpleLightGrid(width=1000, height=1000)
complex_grid = ComplexLightGrid(width=1000, height=1000)
for instruction in self._puzzle_input.splitlines():
if instruction.startswith('toggle'):
toggle_instr = self._toggle.match(instruction)
self.toggle_lights(simple_grid, toggle_instr)
self.toggle_lights(complex_grid, toggle_instr)
elif instruction.startswith('turn'):
turn_instr = self._turn.match(instruction)
self.change_light_state(simple_grid, turn_instr)
self.change_light_state(complex_grid, turn_instr)
return (simple_grid.count_lights(), complex_grid.count_lights())
def run_test_cases(self):
"""Runs a series of inputs and compares against expected outputs
Args: None
Returns: None
"""
input1 = 'turn on 0,0 through 999,999'
input2 = 'toggle 0,0 through 999,0'
input3 = '\n'.join((input1, input2))
input4 = 'turn off 499,499 through 500,500'
input5 = '\n'.join((input1, input4))
input6 = 'turn on 0,0 through 0,0'
input7 = 'toggle 0,0 through 999,999'
test_cases = (
solver.TestCase(input1, '1000000', '1000000'),
solver.TestCase(input2, '1000', '2000'),
solver.TestCase(input3, '999000', '1002000'),
solver.TestCase(input4, '0', '0'),
solver.TestCase(input5, '999996', '999996'),
solver.TestCase(input6, '1', '1'),
solver.TestCase(input7, '1000000', '2000000'),
)
for test_case in test_cases:
self._run_test_case(test_case)
```
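The day 6 solver combines regex parsing of coordinate ranges with per-cell updates on a bytearray grid. Below is a self-contained sketch of the part-one rules on a 10x10 grid; the real puzzle grid is 1000x1000, and the instruction list here is invented for illustration.
```python
import re

SIZE = 10
grid = [bytearray(SIZE) for _ in range(SIZE)]
pattern = re.compile(
    r'(?P<op>turn on|turn off|toggle) '
    r'(?P<x1>\d+),(?P<y1>\d+) through (?P<x2>\d+),(?P<y2>\d+)'
)

instructions = [
    'turn on 0,0 through 9,9',
    'toggle 0,0 through 9,0',
    'turn off 4,4 through 5,5',
]

for line in instructions:
    m = pattern.match(line).groupdict()
    x1, y1, x2, y2 = (int(m[k]) for k in ('x1', 'y1', 'x2', 'y2'))
    for row in range(y1, y2 + 1):
        for col in range(x1, x2 + 1):
            if m['op'] == 'turn on':
                grid[row][col] = 1
            elif m['op'] == 'turn off':
                grid[row][col] = 0
            else:                       # toggle
                grid[row][col] ^= 1

print(sum(sum(row) for row in grid))   # 100 - 10 - 4 = 86 lights lit
```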
#### File: advent_of_code/solvers/day_12.py
```python
import json
# Application-specific Imports
from advent_of_code.solvers import solver
# Equivalent to <class 'dict_values'>
DICT_VALUES = type({}.values())
class Solver(solver.AdventOfCodeSolver):
"""Advent of Code 2015 Day 12: JSAbacusFramework.io
Attributes:
puzzle_input (list): A list of instructions for solving the puzzle
puzzle_title (str): Name of the Advent of Code puzzle
solved_output (str): A template string for solution output
"""
def __init__(self, *args):
solver.AdventOfCodeSolver.__init__(self, *args)
self._solved_output = '\n'.join((
'The sum of all numbers in the document is {0}',
'The sum using the correct numbers in the document is {1}',
))
@staticmethod
def _get_sum(document, item=None):
"""Recursively sums all numeric fields from the JSON input
Args:
document (mixed): JSON parsed into int, list, dict, and/or str
item (mixed): Dicts storing this value will be ignored (optional)
Returns:
int: Sum of numbers from the JSON input excluding ignored objects
"""
if isinstance(document, int):
total = document
elif isinstance(document, (list, DICT_VALUES)):
total = sum(Solver._get_sum(val, item) for val in document)
elif isinstance(document, dict) and item not in document.values():
total = Solver._get_sum(document.values(), item)
else:
total = 0
return total
def _solve_puzzle_parts(self):
"""Solves each part of a Advent of Code 2015 puzzle
Args: None
Returns:
tuple: Pair of solutions for the two parts of the puzzle
"""
document = json.loads(self.puzzle_input)
return self._get_sum(document), self._get_sum(document, "red")
def run_test_cases(self):
"""Runs a series of inputs and compares against expected outputs
Args: None
Returns: None
"""
test_cases = (
solver.TestCase('[1,2,3]', 6, 6),
solver.TestCase('{"a":2,"b":4}', 6, 6),
solver.TestCase('[[[3]]]', 3, 3),
solver.TestCase('{"a":{"b":4},"c":-1}', 3, 3),
solver.TestCase('{"a":[-1,1]}', 0, 0),
solver.TestCase('[-1,{"a":1}]', 0, 0),
solver.TestCase('[]', 0, 0),
solver.TestCase('{}', 0, 0),
solver.TestCase('[1,{"c":"red","b":2},3]', 6, 4),
solver.TestCase('{"d":"red","e":[1,2,3,4],"f":5}', 15, 0),
solver.TestCase('[1,"red",5]', 6, 6),
solver.TestCase('["a", {"red":1}]', 1, 1), # "red" keys are ok
)
for test_case in test_cases:
self._run_test_case(test_case)
```
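The "ignore anything containing red" rule in part two applies only to JSON objects, never arrays, which is why the recursion checks values() for dicts alone. A standalone sketch of the same recursion, checked against the [1,{"c":"red","b":2},3] test case above:
```python
import json

def total(node, ignore=None):
    """Sum all numbers, skipping any dict that has `ignore` among its values."""
    if isinstance(node, int):
        return node
    if isinstance(node, list):
        return sum(total(child, ignore) for child in node)
    if isinstance(node, dict) and ignore not in node.values():
        return sum(total(child, ignore) for child in node.values())
    return 0   # strings, or dicts containing the ignored value

doc = json.loads('[1,{"c":"red","b":2},3]')
print(total(doc), total(doc, "red"))   # 6 4
```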
#### File: advent_of_code/solvers/day_15.py
```python
from collections import namedtuple
from itertools import combinations_with_replacement as combinations
import re
# Application-specific Imports
from advent_of_code.solvers import solver
# Stores metadata about a recipe ingredient
Ingredient = namedtuple(
typename='Ingredient',
field_names='capacity durability flavor texture calories',
)
class Solver(solver.AdventOfCodeSolver):
"""Advent of Code 2015 Day 15: Science for Hungry People
Attributes:
puzzle_input (list): A list of instructions for solving the puzzle
puzzle_title (str): Name of the Advent of Code puzzle
solved_output (str): A template string for solution output
"""
def __init__(self, *args):
solver.AdventOfCodeSolver.__init__(self, *args)
self._solved_output = '\n'.join((
'The highest-scoring cookie with these ingredients has {0} points.',
'The highest-scoring cookie with 500 calories has {1} points.',
))
def _parse_input(self):
"""Parses recipe ingredients and associated metadata
Args: None
Returns:
dict: Item names mapped to Ingredient namedtuples
"""
ingredient_pattern = r'(\w+)' + ''.join([r'[^\d-]+(-?\d+)'] * 5)
parser = re.compile(ingredient_pattern)
pantry = {}
for line in self.puzzle_input.splitlines():
ingredient = parser.match(line)
if ingredient is None:
continue
item = ingredient.group(1)
pantry[item] = Ingredient(
capacity=int(ingredient.group(2)),
durability=int(ingredient.group(3)),
flavor=int(ingredient.group(4)),
texture=int(ingredient.group(5)),
calories=int(ingredient.group(6)),
)
return pantry
@staticmethod
def _get_max_score(recipe, pantry):
"""Calculates the highest score possible for the recipe
Args:
recipe (list): Combination of ingredients for making the cookie
pantry (dict): Item names mapped to Ingredient namedtuples
Returns:
int: Highest possible score for the given cookie recipe
"""
score = max(0, sum(pantry[item].capacity for item in recipe))
score *= max(0, sum(pantry[item].durability for item in recipe))
score *= max(0, sum(pantry[item].flavor for item in recipe))
score *= max(0, sum(pantry[item].texture for item in recipe))
return score
def _get_alt_score(self, recipe, pantry):
"""Calculates the highest score if the recipe has exactly 500 calories
Args:
recipe (list): Combination of ingredients for making the cookie
pantry (dict): Item names mapped to Ingredient namedtuples
Returns:
int: Highest possible score for a 500 calorie cookie recipe
"""
calories = sum(pantry[ingredient].calories for ingredient in recipe)
return self._get_max_score(recipe, pantry) if calories == 500 else -1
def _solve_puzzle_parts(self):
"""Solves each part of a Advent of Code 2015 puzzle
Args: None
Returns:
tuple: Pair of solutions for the two parts of the puzzle
"""
pantry = self._parse_input()
teaspoons = 100
max_score, alt_score = 0, 0
for recipe in combinations(pantry.keys(), teaspoons):
max_score = max(max_score, self._get_max_score(recipe, pantry))
alt_score = max(alt_score, self._get_alt_score(recipe, pantry))
return max_score, alt_score
def run_test_cases(self):
"""Runs a series of inputs and compares against expected outputs
Args: None
Returns: None
"""
item = '{0}: capacity {1}, durability {2}, flavor {3}'
item += ', texture {4}, calories {5}'
input1 = (
item.format('Butterscotch', -1, -2, 6, 3, 8),
item.format('Cinnamon', 2, 3, -2, -1, 3),
)
input2 = input1 + (item.format('Sugar', 1, 1, 1, 1, 1),)
input3 = input1 + (item.format('Boogers', 2, 2, 2, 2, 2),)
test_cases = (
solver.TestCase('\n'.join(input1), 62842880, 57600000),
solver.TestCase('\n'.join(input2), 105187500, 65014560),
solver.TestCase('\n'.join(input3), 1600000000, 130975000),
)
for test_case in test_cases:
self._run_test_case(test_case)
```
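A cookie's score is the product of the clamped per-property totals, each total being the ingredient property weighted by the teaspoons used. Checking the puzzle's published example split (44 teaspoons of Butterscotch, 56 of Cinnamon) by hand reproduces the 62,842,880 figure the first test case expects:
```python
butterscotch = dict(capacity=-1, durability=-2, flavor=6, texture=3)
cinnamon = dict(capacity=2, durability=3, flavor=-2, texture=-1)
spoons = {'butterscotch': 44, 'cinnamon': 56}

score = 1
for prop in ('capacity', 'durability', 'flavor', 'texture'):
    total = spoons['butterscotch'] * butterscotch[prop] + spoons['cinnamon'] * cinnamon[prop]
    score *= max(0, total)   # a negative property total zeroes the whole score

print(score)   # 62842880
```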
#### File: advent_of_code/solvers/day_16.py
```python
from advent_of_code.solvers import solver
class Solver(solver.AdventOfCodeSolver):
"""Advent of Code 2015 Day 16: Aunt Sue
Attributes:
puzzle_input (list): A list of instructions for solving the puzzle
puzzle_title (str): Name of the Advent of Code puzzle
solved_output (str): A template string for solution output
mfcsam (dict): Output from My First Crime Scene Analysis Machine
"""
def __init__(self, *args):
solver.AdventOfCodeSolver.__init__(self, *args)
self._solved_output = '\n'.join((
'Aunt {0} was thought to have given the gift initially.',
'Aunt {1} was the one that actually sent the gift.',
))
self._mfcsam = {
'children': 3,
'cats': 7,
'samoyeds': 2,
'pomeranians': 3,
'akitas': 0,
'vizslas': 0,
'goldfish': 5,
'trees': 3,
'cars': 2,
'perfumes': 1,
}
def _parse_input(self):
"""Parses input to map memories of each Aunt Sue into a dictionary
Args: None
Returns:
dict: Memories as a dict mapped to each Aunt Sue as a key
"""
aunts = {}
for details in self.puzzle_input.replace(':', ',').splitlines():
sue, key1, val1, key2, val2, key3, val3 = details.split(', ')
aunts[sue] = {key1: int(val1), key2: int(val2), key3: int(val3)}
return aunts
@staticmethod
def _get_aunts_with_detail_eq(aunts, detail, target):
"""Get aunts without the detail or with a value equal to the target
Args:
aunts (dict): Stores remembered details about each Aunt Sue
detail (str): Name of a detail from memory (e.g., cats)
target (int): Exact detail value for the correct Aunt Sue
Returns:
dict: Aunts without the detail or with a detail value == target
"""
return {
aunt: memory for aunt, memory in aunts.items()
if detail not in memory or memory[detail] == target
}
@staticmethod
def _get_aunts_with_detail_lt(aunts, detail, target):
"""Get aunts without the detail or with a value less than the target
Args:
aunts (dict): Stores remembered details about each Aunt Sue
detail (str): Check aunts based on this remembered detail
target (int): Upper limit for the detail value of the correct Aunt
Returns:
dict: Aunts without the detail or with a detail value < target
"""
return {
aunt: memory for aunt, memory in aunts.items()
if detail not in memory or memory[detail] < target
}
@staticmethod
def _get_aunts_with_detail_gt(aunts, detail, target):
"""Get aunts without the detail or with a value greater than the target
Args:
aunts (dict): Stores remembered details about each Aunt Sue
detail (str): Check aunts based on this remembered detail
target (int): Lower limit for the detail value of the correct Aunt
Returns:
dict: Aunts without the detail or with a detail value > target
"""
return {
aunt: memory for aunt, memory in aunts.items()
if detail not in memory or memory[detail] > target
}
def _get_aunt_sue(self, aunts, filters):
"""Solves each part of a Advent of Code 2015 puzzle
Args:
aunts (dict): Stores remembered details about each Aunt Sue
filters (dict): Methods to filter aunts by detail
Returns:
tuple: Pair of solutions for the two parts of the puzzle
"""
aunt_sue = None
for detail, target in self._mfcsam.items():
aunt_filter = filters.get(detail, Solver._get_aunts_with_detail_eq)
aunts = aunt_filter(aunts, detail, target)
if len(aunts) == 1:
aunt_sue, _ = aunts.popitem()
break
return aunt_sue
def _solve_puzzle_parts(self):
"""Solves each part of a Advent of Code 2015 puzzle
Args: None
Returns:
tuple: Pair of solutions for the two parts of the puzzle
"""
aunts = self._parse_input()
return (
self._get_aunt_sue(aunts, filters={}),
self._get_aunt_sue(aunts, filters={
'cats': Solver._get_aunts_with_detail_gt,
'trees': Solver._get_aunts_with_detail_gt,
'goldfish': Solver._get_aunts_with_detail_lt,
'pomeranians': Solver._get_aunts_with_detail_lt,
}),
)
def run_test_cases(self):
"""Runs a series of inputs and compares against expected outputs
Args: None
Returns: None
"""
aunt = 'Sue {0}: {1}: {2}, {3}: {4}, {5}: {6}'
input1 = (aunt.format(1, 'akitas', 0, 'cars', 2, 'cats', 7),)
input2 = (
aunt.format(1, 'akitas', 0, 'cars', 2, 'cats', 8),
aunt.format(2, 'children', 3, 'goldfish', 5, 'perfumes', 1),
)
input3 = (
aunt.format(1, 'cats', 8, 'goldfish', 4, 'pomeranians', 2),
aunt.format(2, 'akitas', 10, 'perfumes', 10, 'children', 5),
aunt.format(3, 'cars', 2, 'pomeranians', 3, 'vizslas', 0),
aunt.format(4, 'goldfish', 5, 'children', 8, 'perfumes', 3),
aunt.format(5, 'vizslas', 2, 'akitas', 7, 'perfumes', 6),
aunt.format(6, 'vizslas', 0, 'akitas', 1, 'perfumes', 2),
aunt.format(7, 'perfumes', 8, 'cars', 4, 'goldfish', 10),
aunt.format(8, 'perfumes', 7, 'children', 2, 'cats', 1),
aunt.format(9, 'pomeranians', 3, 'goldfish', 10, 'trees', 10),
aunt.format(10, 'akitas', 7, 'trees', 8, 'pomeranians', 4),
)
test_cases = (
solver.TestCase('\n'.join(input1), 'Sue 1', 'Sue 1'),
solver.TestCase('\n'.join(input2), 'Sue 2', 'Sue 1'),
solver.TestCase('\n'.join(input3), 'Sue 3', 'Sue 1'),
)
for test_case in test_cases:
self._run_test_case(test_case)
```
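Each MFCSAM reading either rules an aunt out or leaves her in: she survives a filter when she never mentioned that detail or her remembered value satisfies the comparison. A tiny sketch of the equality filter applied by hand, with two invented aunts:
```python
mfcsam = {'cats': 7, 'trees': 3}
aunts = {
    'Sue 1': {'cats': 7, 'cars': 2},
    'Sue 2': {'cats': 4, 'trees': 3},
}

def filter_eq(aunts, detail, target):
    """Keep aunts that never mentioned the detail or whose value matches it exactly."""
    return {name: memory for name, memory in aunts.items()
            if detail not in memory or memory[detail] == target}

for detail, target in mfcsam.items():
    aunts = filter_eq(aunts, detail, target)

print(aunts)   # {'Sue 1': {'cats': 7, 'cars': 2}} - Sue 2 is ruled out by her cats
```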
#### File: advent_of_code/solvers/day_17.py
```python
from itertools import combinations
# Application-specific Imports
from advent_of_code.solvers import solver
class Solver(solver.AdventOfCodeSolver):
"""Advent of Code 2015 Day 17: No Such Thing as Too Much
Attributes:
puzzle_input (list): A list of instructions for solving the puzzle
puzzle_title (str): Name of the Advent of Code puzzle
solved_output (str): A template string for solution output
"""
def __init__(self, *args):
solver.AdventOfCodeSolver.__init__(self, *args)
self._solved_output = '\n'.join((
'The number of 150 litre container combinations is {0}.',
'The number of 150 litre combinations using the fewest containers is {1}.',
))
@staticmethod
def _get_150_litre_combos(cups, min_length_combos=False):
"""
Args:
cups (list):
min_length_combos (bool):
Returns:
list:
"""
cup_combos = []
for length in range(1, len(cups) + 1):
cup_combos.extend((
tuple(combo) for combo in combinations(cups, length)
if sum(combo) == 150
))
if min_length_combos and cup_combos:
break
return cup_combos
def _solve_puzzle_parts(self):
"""Solves each part of a Advent of Code 2015 puzzle
Args: None
Returns:
tuple: Pair of solutions for the two parts of the puzzle
"""
cups = [int(cup) for cup in self.puzzle_input.splitlines()]
count_all_combos = len(self._get_150_litre_combos(cups, False))
count_min_length_combos = len(self._get_150_litre_combos(cups, True))
return (count_all_combos, count_min_length_combos)
def run_test_cases(self):
"""Runs a series of inputs and compares against expected outputs
Args: None
Returns: None
"""
test_input = '\n'.join(('120', '90', '60', '30', '30'))
self._run_test_case(solver.TestCase(test_input, 4, 3))
```
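Part two differs from part one only in stopping at the first combination length that yields any valid fill, which works because combinations are generated in order of increasing length. A standalone check on the same scaled containers the test case uses (120, 90, 60, 30, 30 litres filling 150):
```python
from itertools import combinations

cups = [120, 90, 60, 30, 30]
target = 150

all_combos, min_combos = [], []
for length in range(1, len(cups) + 1):
    hits = [c for c in combinations(cups, length) if sum(c) == target]
    all_combos.extend(hits)
    if hits and not min_combos:
        min_combos = hits          # first (shortest) length with any valid fill

print(len(all_combos), len(min_combos))   # 4 3
```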
#### File: advent_of_code/solvers/day_21.py
```python
from collections import namedtuple
from itertools import combinations
import re
import sys
# Application-specific Imports
from advent_of_code.solvers import solver
# Defines an item from the shop
Item = namedtuple(typename='Item', field_names='cost damage armor')
class Solver(solver.AdventOfCodeSolver):
"""Advent of Code 2015 Day 21: RPG Simulator 20XX
Attributes:
puzzle_input (list): A list of instructions for solving the puzzle
puzzle_title (str): Name of the Advent of Code puzzle
solved_output (str): A template string for solution output
"""
def __init__(self, *args):
solver.AdventOfCodeSolver.__init__(self, *args)
self._solved_output = '\n'.join((
'The least amount of gold needed to win is {0}.',
'The greatest amount of gold spent while still losing is {1}.',
))
def _get_boss(self):
"""Parses attributes for the boss from the input
Args: None
Returns:
dict: Stores hit points, damage, and armor attributes for the boss
"""
parser = re.compile(r'\d+', re.DOTALL)
stats = parser.findall(self.puzzle_input.strip())
return {
'Hit Points': int(stats[0]),
'Damage': int(stats[1]),
'Armor': int(stats[2]),
}
@staticmethod
def _get_weapons():
"""Gets list of items that can be equiped as weapons
Args: None
Returns:
tuple: Every item that can be equiped as a weapon
"""
return (
Item(cost=8, damage=4, armor=0), # Dagger
Item(cost=10, damage=5, armor=0), # Shortsword
Item(cost=25, damage=6, armor=0), # Warhammer
Item(cost=40, damage=7, armor=0), # Longsword
Item(cost=74, damage=8, armor=0), # Greataxe
)
@staticmethod
def _get_armors():
"""Gets list of items that can be equiped as armor (including nothing)
Args: None
Returns:
tuple: Every item that can be equiped as armor
"""
return (
Item(cost=0, damage=0, armor=0), # Nothing
Item(cost=13, damage=0, armor=1), # Leather
Item(cost=31, damage=0, armor=2), # Chainmail
Item(cost=53, damage=0, armor=3), # Splintmail
Item(cost=75, damage=0, armor=4), # Bandedmail
Item(cost=102, damage=0, armor=5), # Platemail
)
@staticmethod
def _get_rings():
"""Gets list of items that can be equiped as rings (including nothing)
Args: None
Returns:
tuple: Every item that can be equiped as a ring
"""
return (
Item(cost=0, damage=0, armor=0), # Damage +0 (Nothing, Ring 1)
Item(cost=25, damage=1, armor=0), # Damage +1
Item(cost=50, damage=2, armor=0), # Damage +2
Item(cost=100, damage=3, armor=0), # Damage +3
Item(cost=0, damage=0, armor=0), # Defense +0 (Nothing, Ring 2)
Item(cost=20, damage=0, armor=1), # Defense +1
Item(cost=40, damage=0, armor=2), # Defense +2
Item(cost=80, damage=0, armor=3), # Defense +3
)
@staticmethod
def _get_damage(attacker, defender):
"""Calculates the net damage inflicted by the attacker on the defender
Args:
attacker (dict): Stores attributes for the attacker
defender (dict): Stores attributes for the defender
Returns:
int: Net damage inflicted by the attacker, with a minimum of 1
"""
net_damage = attacker['Damage'] - defender['Armor']
return net_damage if net_damage > 1 else 1
def _player_wins_battle(self, player, boss):
"""
Args:
player (dict): Stores attributes for the player
boss (dict): Stores attributes for the boss
Returns:
bool: True if the player wins the battle, else False
"""
player_wins = False
player_hp, player_hit = 100, self._get_damage(player, boss)
boss_hp, boss_hit = boss['Hit Points'], self._get_damage(boss, player)
while player_hp > 0:
boss_hp -= player_hit
if boss_hp < 1:
player_wins = True
break
player_hp -= boss_hit
return player_wins
@staticmethod
def _get_player(weapon, armor, ring1, ring2):
"""Calculates the damage and armor for a player with their equipment
Args:
weapon (Item): Stores a weapon's cost and damage attributes
armor (Item): Stores an armor's cost and armor attributes
ring1 (Item): Stores a ring's cost and damage or armor attributes
ring2 (Item): Stores a ring's cost and damage or armor attributes
Returns:
dict: Stores damage and armor attributes for a player
"""
return {
'Damage': weapon.damage + ring1.damage + ring2.damage,
'Armor': armor.armor + ring1.armor + ring2.armor,
}
def _get_player_equipment(self):
"""
Args: None
Yields:
tuple:
"""
for weapon in self._get_weapons():
for armor in self._get_armors():
for ring1, ring2 in combinations(self._get_rings(), 2):
yield (weapon, armor, ring1, ring2)
def _solve_puzzle_parts(self):
"""Solves each part of a Advent of Code 2015 puzzle
Args: None
Returns:
tuple: Pair of solutions for the two parts of the puzzle
"""
boss = self._get_boss()
min_cost = sys.maxsize
max_cost = -1
for weapon, armor, ring1, ring2 in self._get_player_equipment():
cost = weapon.cost + armor.cost + ring1.cost + ring2.cost
player = self._get_player(weapon, armor, ring1, ring2)
if cost < min_cost and self._player_wins_battle(player, boss):
min_cost = cost
if cost > max_cost and not self._player_wins_battle(player, boss):
max_cost = cost
return (min_cost, max_cost)
def run_test_cases(self):
"""Runs a series of inputs and compares against expected outputs
Args: None
Returns: None
"""
test_input1 = '\n'.join((
'Hit Points: 1',
'Damage: 1000',
'Armor: 1',
))
test_input2 = '\n'.join((
'Hit Points: 1',
'Damage: 1',
'Armor: 1000',
))
test_input3 = '\n'.join((
'Hit Points: 1000',
'Damage: 1000',
'Armor: 1000',
))
test_input4 = '\n'.join((
'Hit Points: 200',
'Damage: 0',
'Armor: 3',
))
test_input5 = '\n'.join((
'Hit Points: 150',
'Damage: 7',
'Armor: 2',
))
self._run_test_case(solver.TestCase(test_input1, 8, -1))
self._run_test_case(solver.TestCase(test_input2, 8, -1))
self._run_test_case(solver.TestCase(test_input3, sys.maxsize, 356))
self._run_test_case(solver.TestCase(test_input4, 10, 230))
self._run_test_case(solver.TestCase(test_input5, 101, 189))
```
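The battle itself is deterministic: each side hits for max(attack damage - defender armor, 1) and the player always swings first, so the outcome is fixed by the stats alone. A standalone sketch using the stats from the puzzle's worked example (player: 8 HP, 5 damage, 5 armor; boss: 12 HP, 7 damage, 2 armor), which the player wins:
```python
def hit(damage, armor):
    """Net damage per attack, never less than 1."""
    return max(damage - armor, 1)

def player_wins(player_hp, player_dmg, player_armor, boss_hp, boss_dmg, boss_armor):
    player_hit = hit(player_dmg, boss_armor)   # damage the player deals per turn
    boss_hit = hit(boss_dmg, player_armor)     # damage the boss deals per turn
    while True:
        boss_hp -= player_hit                  # player attacks first
        if boss_hp <= 0:
            return True
        player_hp -= boss_hit
        if player_hp <= 0:
            return False

print(player_wins(8, 5, 5, 12, 7, 2))   # True, the boss falls on the fourth swing
```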
|
{
"source": "jdswinbank/asterogap",
"score": 3
}
|
#### File: asterogap/asterogap/run_plotting.py
```python
import argparse
import textwrap
import numpy as np
import corner
import matplotlib.pyplot as plt
import h5py
from asterogap.GP import GPFit
def calc_periods(data, nperiods=1, period=None, p_range=None, bins=20, width=0.1):
"""
NOTE: Should work for both single-kernel and double-kernel results
Roughly calculates the most likely period(s).
Parameters
----------
data : numpy.ndarray
Results pulled from hdf5 file. Assumes the shape to be [nwalkers, iterations, parameters].
nperiods : int
The number of top periods to look for.
period : float
The period (in hours) around which to calculate the probability.
p_range : list
List of starting and ending values for an interval.
bins : int
The number of bins to calculate with for the numpy histogram. It's best to keep this number very large.
width : float
The distance on either side of the period over which will be integrated. Width will be ignored
if p_range is specified.
Returns
-------
periods : numpy.ndarray
The centres of the `nperiods` most populated histogram bins, i.e. the most likely period(s).
"""
flat_data = data.reshape(data.shape[0] * data.shape[1], data.shape[2])
h, bins = np.histogram(flat_data[:, -1], bins=bins, density=True)
# find the period(s) of hmax
top_h = -np.sort(-h)[0:nperiods]
# collect the indicies for these periods
indices = []
for i, v in enumerate(top_h):
ind = np.where(h == top_h[i])
indices.append(ind[0][0])
indices = np.array(indices)
# find the width of the bins
dx = bins[1] - bins[0]
periods = bins[indices] + dx/2. # add half the bin to center
#
# edges = [periods - width/2., periods + width/2.]
#
# prob_sum = []
#
# for i, v in enumerate(edges[0]):
# # find all the periods that fall within the defined edges
# periods_where = flat_data[:,-1][np.where((flat_data[:,-1]>= edges[0][i]) & (flat_data[:,-1] <= edges[1][i]))]
#
# # find the sum of the number of walkers within the edges and add to list
# prob_sum.append(periods_where.shape[0]/flat_data.shape[0])
#
# return prob_sum, edges
return periods
def plot_corner(data, true_period=None, colours=None, zoom=False, trim=None, fig=None):
"""
NOTE: Should work for both single-kernel and double-kernel results.
Plot a corner plot showing the projections of a data set in multi-dimesional space,
with the different dimensions corresponding to the different kernel parameters.
Parameters
----------
data : numpy.ndarray
Results pulled from hdf5 file. Assumes the shape to be [nwalkers, iterations, parameters].
true_period : float
The true period of the asteroid light curves.
colours : [str, str, str]
List of (up to) three colours. First colour is used for the data, the second
colour for the true underlying data, the third for the models.
Note, for a corner plot, the first colour will always default to black.
Only the second true underlying data color can be changed.
zoom : bool
Toggles whether the corner plot will show a zoomed in version of the histogram,
focusing on the densest region of the previous binned histogram.
trim : [float, float]
Specifies what percentile range of the period is to be used for plotting.
This can be used to eliminate extreme outliers that would otherwise distort the plot.
Returns
-------
figure : matplotlib.figure.Figure
The object with the plot
"""
if colours is None:
colours = ["#000000", "#0072B2", "#E69F00", "#009E73", "#F0E442"]
if trim:
try:
lower, upper = np.percentile(data[:, :, -1], [trim[0], trim[1]])
flat_data = data[(data[:, :, -1] > lower) & (data[:, :, -1] < upper)]
except TypeError:
print(
"ERROR: Values for trim must be formatted as an array (e.g. [5], not 5).\n Plot will be processed without any trimming."
)
flat_data = data.reshape(data.shape[0] * data.shape[1], data.shape[2])
except IndexError:
print(
"WARNING: No upper percentile was specified. Upper percentile was set to 100."
)
trim = np.append(trim, 100)
lower, upper = np.percentile(data[:, :, -1], [trim[0], trim[1]])
flat_data = data[(data[:, :, -1] > lower) & (data[:, :, -1] < upper)]
else:
flat_data = data.reshape(data.shape[0] * data.shape[1], data.shape[2])
if zoom:
periods = calc_periods(data, period=true_period, )
width = 0.5
edges = [periods - width/2., periods + width/2.]
# if np.any(prob == 0):
# raise Exception(
# "WARNING: Probability around period is 0 and therefore cannot display a valid corner plot."
# )
flat_data = data[(data[:, :, -1] > edges[0]) & (data[:, :, -1] < edges[1])]
if data.shape[2] == 6:
labels = ["mean", "log_amp_long", "log_metric", "log_amp", "gamma", "period"]
truths = [None, None, None, None, None, true_period]
else:
labels = ["mean", "log_amp", "gamma", "period"]
truths = [None, None, None, true_period]
figure = corner.corner(
flat_data,
labels=labels,
truths=truths,
show_titles=True,
title_kwargs={"fontsize": 9},
)
return figure
def plot_trace(data, iterations, colours=None):
"""
NOTE: Should work for both single-kernel and double-kernel results
Create a plot of walker movement after the burn-in period.
Parameters
----------
data : numpy.ndarray
Results pulled from hdf5 file. Assumes the shape to be [nwalkers, iterations, parameters].
iterations : int
The number of iterations the MCMC code was run for. Can use data.shape[1].
colours : [str]
One colour. First colour is used for the data.
Returns
-------
fig : matplotlib.figure.Figure
The object with the plot
"""
if colours is None:
colours = ["black"]
# print(int(data.shape[2]/2))
fig, ax = plt.subplots(
2, int(data.shape[2] / 2), figsize=(5 * int(data.shape[2] / 2), 9)
)
fig.subplots_adjust(wspace=0.5, hspace=0.3)
if data.shape[2] == 6:
dims = ["mean", "log_amp_long", "log_metric", "log_amp_periodic", "log_gamma", "period"]
axs = [ax[0, 0], ax[0, 1], ax[0, 2], ax[1, 0], ax[1, 1], ax[1, 2]]
else:
dims = ["mean", "log_amp", "log_gamma", "period"]
axs = [ax[0, 0], ax[0, 1], ax[1, 0], ax[1, 1]]
x = np.arange(iterations)
for i in range(data.shape[2]):
axs[i].set_xlabel("Step Number")
axs[i].set_ylabel("{}".format(dims[i]))
for j in range(data.shape[0]):
param = data[j, :, i]
axs[i].plot(x, param, color=colours[0], alpha=0.3)
return fig
def run_lsp(
time,
flux,
flux_err=None,
data=None,
true_period=None,
true_lightcurve=None,
use_radians=False,
legend=True,
colours=None,
plot=False,
):
"""
NOTE: Should work for both single-kernel and double-kernel results
Determine the Lomb-Scargle periodogram (two-term model) for the light curve data
and optionally plot it along with the light curve folded on each candidate period.
Parameters
----------
time : numpy.ndarray
The time stamps of the periodic light curve
flux : numpy.ndarray
Flux measurements corresponding to the time stamps
flux_err : numpy.ndarray
The flux uncertainties corresponding to the data.
data : numpy.ndarray
Results pulled from hdf5 file. Assumes the shape to be [nwalkers, iterations, parameters].
true_period : float
The period on which to fold in hours
true_lightcurve : iterable containing (true_time, true_flux)
In the case of simulated data, this contains the times and flux values from which the
simulated data was created (could be higher-resolution than the "data"), useful for
comparison between models created e.g. from MCMC samples and the true underlying process
use_radians : bool, default False
If True, the phase will be plotted from (0, 2pi) instead of (0,1), which is the default.
legend : bool, default True
If True, include a legend in the plot
colours : [str, str, str]
List of (up to) three colours. First colour is used for the data, the second
colour for the true underlying data, the third for the models.
plot : bool
If True, plot the periodogram and the folded light curves.
Returns
-------
best_periods : numpy.ndarray
The best-fitting periods (in days) found by the two-term Lomb-Scargle optimizer.
"""
if colours is None:
colours = ["#000000", "#0072B2", "#E69F00", "#009E73", "#F0E442"]
# NEW LSP CODE
#---------------------
from gatspy import periodic
if data is not None:
lower, upper = np.percentile(data[:, :, -1], [5, 95])
masked_data = data[(data[:, :, -1] > lower) & (data[:, :, -1] < upper)]
# set up 2term LSP model
model = periodic.LombScargle(Nterms=2)
max_period=60.0/24.0 # 2.5 days
# make sure the max period isn't longer than the time range of observations
big_period = np.min([max_period, (time.max() - time.min())])
min_period=1.0/24.0 # 1 hour
model.optimizer.period_range = (min_period, big_period)
model.optimizer.first_pass_coverage = 200
# add the obs data
model.fit(time, flux, flux_err)
# find the 5 best periods (in days)
best_periods = model.find_best_periods()
if plot:
fig, ax = plt.subplots(len(best_periods)+1, figsize=(10, 4*(len(best_periods)+1)))
# have range spanning the min period-1 hour (unless that's less than 0) to max period+1 hour
p_range = np.linspace(np.max([0, best_periods.min()-1/24.]), best_periods.max()+1/24., 1000)
# calculate LSP for the range
LSP = model.periodogram(p_range)
ax[0].plot(p_range*24., LSP, color=colours[0], alpha=0.7)
# set xlim same as p_range
ax[0].set_xlim([np.max([0, best_periods.min()-1/24.])*24, best_periods.max()*24.+1])
if data is not None:
#lower, upper = np.percentile(data[:, :, -1], [5, 95])
lower = p_range.min()*24.
upper = p_range.max()*24.
masked_data = data[(data[:, :, -1] > lower) & (data[:, :, -1] < upper)]
if data is not None:
ax[0].hist(
masked_data[:, -1],
bins=20,
color=colours[3],
alpha=0.5,
density=True,
label="Posterior",
)
ax[0].set_xlabel("Period (hrs)")
ax[0].set_ylabel("Normalized Power")
ax[0].set_ylim([0, 1])
if legend:
ax[0].legend()
for i in np.arange(len(best_periods)):
ax[0].vlines(
best_periods[i]*24.,
0,
1,
colors=colours[2],
linestyles="--",
#label="Best fit : %.5f" % (best_periods[i] * 24.0),
)
plot_folded_lightcurve(
time,
flux,
period=best_periods[i] * 24.0,
ax=ax[i+1],
true_lightcurve=true_lightcurve,
use_radians=use_radians,
legend=False,
)
plt.tight_layout()
#---------------------
#
# from scipy.signal import argrelextrema
#
# # get l-s best period estimate
# from lombscargle import make_lsp
# # from astropy.timeseries import LombScargle
#
# if data is not None:
# lower, upper = np.percentile(data[:, :, -1], [5, 95])
# masked_data = data[(data[:, :, -1] > lower) & (data[:, :, -1] < upper)]
#
# lsp_periods = np.array([])
#
# if plot:
# fig, ax = plt.subplots(3, 2, figsize=(10, 10), squeeze=False)
#
# for i in np.arange(3):
# freq, power = make_lsp(time, flux, flux_err, p_max=5.0, nterms=(i + 1))
#
# # determine the indices of local power maxima
# best_idx = argrelextrema(power, np.greater)
#
# # sort these indices based on actual power value
# # reverse list so max is read first
# indices = np.argsort(power[best_idx[0]])[::-1]
#
# # sort our original indices based on the new
# # power-sorted indices
# best_idx = (best_idx[0]).T[indices]
# best_freqs = freq[best_idx].T
#
# new_freq = best_freqs[0]
# new_period = 1.0 / new_freq
#
# if plot:
#
# # plot all the frequencies
# ax[i][0].plot((1.0 / freq) * 24.0, power, color=colours[0], alpha=0.7)
#
# if data is not None:
# ax[i][0].hist(
# masked_data[:, -1],
# bins=20,
# color=colours[3],
# alpha=0.5,
# density=True,
# label="Posterior",
# )
#
# y_max = (ax[i][0].get_ylim())[1]
#
# ax[i][0].vlines(
# new_period * 24.0,
# 0,
# y_max,
# colors=colours[2],
# linestyles="--",
# label="Best fit : %.5f" % (new_period * 24.0),
# )
#
# if true_period:
# ax[i][0].vlines(
# true_period,
# 0,
# y_max,
# colors=colours[1],
# linestyles="--",
# label="True fit : %.5f" % true_period,
# )
#
# ax[i][0].set_xlabel("Period (hrs)")
# ax[i][0].set_ylabel("Normalized Power")
# ax[i][0].set_title("nterms = %s" % (i + 1))
# ax[i][0].set_xlim([0, 24])
# ax[i][0].set_ylim([0, y_max])
#
# if legend:
# ax[i][0].legend()
#
# plot_folded_lightcurve(
# time,
# flux,
# period=new_period * 24.0,
# ax=ax[i][1],
# true_lightcurve=true_lightcurve,
# use_radians=use_radians,
# legend=False,
# )
#
# lsp_periods = np.append(lsp_periods, new_period * 24.0)
#
# if plot:
# plt.tight_layout()
return best_periods
def plot_posterior(data, true_period=None, legend=True, colours=None):
"""
NOTE: Should work for both single-kernel and double-kernel results
Plot a histogram of the posterior distribution, showing the full distribution,
the 5th-95th percentile of the distribution, and a zoomed-in view of
the region with the highest probability (or region around the period if specified).
Parameters
----------
data : numpy.ndarray
Results pulled from hdf5 file. Assumes the shape to be [nwalkers, iterations, parameters].
true_period : float
The period (in hours) around which to calculate the probability.
legend : bool, default True
If True, include a legend in the plot
colours : [str, str, str]
List of (up to) three colours. First colour is used for the data, the second
colour for the true underlying data, the third for the models.
Returns
-------
best_periods : numpy.ndarray
The most likely period(s) in hours, one per zoomed-in panel
probs : numpy.ndarray
The fraction of posterior samples that fall around each of those periods
"""
if colours is None:
colours = ["black", "#0072B2", "#E69F00", "#009E73", "#F0E442"]
fig, ax = plt.subplots(3, 2, figsize=(10, 15))
# plot the full histogram of period results
flat_data = data.reshape(data.shape[0] * data.shape[1], data.shape[2])
ax[0, 0].hist(flat_data[:, -1], bins="scott", density=True, color=colours[0], alpha=0.3)
if true_period:
ylim = ax[0, 0].get_ylim()
ax[0, 0].vlines(
x=true_period,
ymin=ylim[0],
ymax=ylim[-1],
lw=1,
color=colours[1],
linestyle="dashed",
label="true period : %.5f" % true_period,
)
ax[0, 0].set_xlabel("Period in hours")
ax[0, 0].set_ylabel("Probability")
ax[0, 0].set_ylim(ax[0, 0].get_ylim())
ax[0, 0].set_title("Posterior Period Distibution")
# plot the 5th-95th percentile
lower, upper = np.percentile(data[:, :, -1], [5, 95])
masked_data = data[(data[:, :, -1] > lower) & (data[:, :, -1] < upper)]
ax[0, 1].hist(masked_data[:, -1], bins="scott", density=True, color=colours[0], alpha=0.3)
if true_period:
ylim = ax[0, 1].get_ylim()
ax[0, 1].vlines(
x=true_period,
ymin=ylim[0],
ymax=ylim[-1],
lw=1,
color=colours[1],
linestyle="dashed",
label="true period : %.5f" % true_period,
)
ax[0, 1].set_title("5th - 95th Percentile")
ax[0, 1].set_xlabel("Period in hours")
ax[0, 1].set_ylabel("Probability")
ax[0, 1].set_ylim(ax[0, 1].get_ylim())
# zoom in on the part of the graph that has the highest probability
periods = calc_periods(data, 4, true_period)
#
# if not np.any(probs):
# raise Exception(
# "WARNING: Probability around period is 0 and therefore cannot display a valid corner plot."
# )
best_periods = []
probs = []
for i, p in enumerate(periods):
# trim data to +/- 10-20% of what the period is
# dividing the log2 of the period by 35 should give you
# ~10% for 10 hours, ~20% for 200 hours, and ~30% for 1500 hours
# and if the period is less than 10 hour (since the log2(1)=0),
# then just set the trim to 10%
if p > 10:
period_data = flat_data[(flat_data[:,-1]>(p-p*np.log2(p)/35)) & (flat_data[:,-1]<(p+p*np.log2(p)/35)) ]
else:
period_data = flat_data[(flat_data[:,-1]>(p-p*0.1)) & (flat_data[:,-1]<(p+p*0.1)) ]
if len(period_data)==0:
# this can happen if the period is too close to 0
period_data = flat_data[(flat_data[:,-1]>0) & (flat_data[:,-1]<(p+p*0.1)) ]
h, bins = np.histogram(period_data[:,-1], bins=1000, density=True)
top_h = -np.sort(-h)[0:1]
half_h = top_h/2
# half max h values
hmin = h[h>half_h][0]
hmax = h[h>half_h][-1]
# if the half_h is too much (aka hmin is the same as hmax), try 10%?
# this can happen if curve is too steep
if hmax==hmin:
hmin = h[h>(0.1*top_h)][0]
hmax = h[h>(0.1*top_h)][-1]
bin_edges = [bins[np.where(h==hmin)[0][0]], bins[np.where(h==hmax)[0][0]]]
bin_center = bins[np.where(h==top_h)]
# move the bin edges to 3 sigma away
bin_edges[0] = bin_center - 3*(bin_center - bin_edges[0])
bin_edges[1] = bin_center + 3*(bin_edges[1] - bin_center)
# if the 10%h fix didn't work earlier, manually move the bins out one each,
# so that there's at least something to plot
if bin_edges[0] == bin_edges[1]:
bin_edges[0] = bins[np.where(h==hmin)[0][0]-1]
bin_edges[1] = bins[np.where(h==hmin)[0][0]+1]
# now zoom in on the half-width half-max * 3 area
zoom_data = data[(data[:, :, -1] > bin_edges[0]) & (data[:, :, -1] < bin_edges[1])]
#now we can see what the probs are
prob = period_data.shape[0]/(data.shape[0] * data.shape[1])
probs = np.append(probs, prob)
# plot and get bin info
# set density to false so the y-axis gives a relative sense of scale
# for the different periods
n, bins, p = ax[1+int(i/2), i % 2].hist(zoom_data[:, -1], bins="auto", density=False, color=colours[0], alpha=0.3, label="probability: %.3f"%prob)
# find the bin with the max n and add half a bin width
best_period = bins[np.where(n==n.max())]+(bins[1]-bins[0])/2
best_periods = np.append(best_periods, best_period)
ylim = ax[1+int(i/2), i % 2].get_ylim()
xlim = ax[1+int(i/2), i % 2].get_xlim()
if true_period:
ax[1+int(i/2), i % 2].vlines(
true_period,
0,
ylim[-1],
lw=1,
color=colours[1],
linestyle="dashed",
label="true period : %.5f" % true_period,
)
ax[1+int(i/2), i % 2].vlines(
best_period,
0,
ylim[-1],
lw=1,
color=colours[2],
linestyle="dashed",
label="best period : %.5f" % best_period,
)
else:
ax[1+int(i/2), i % 2].vlines(
best_period,
0,
ylim[-1],
lw=1,
color=colours[2],
linestyle="dashed",
label="best period : %.5f" % best_period,
)
#ax[1+int(i/2), i % 2].set_title("Probability %.3f" % probs[i])
ax[1+int(i/2), i % 2].set_xlabel("Period in hours")
ax[1+int(i/2), i % 2].set_ylabel("Probability")
ax[1+int(i/2), i % 2].set_ylim(ylim)
ax[1+int(i/2), i % 2].set_xlim(xlim)
if legend:
if true_period:
ax[0, 0].legend()
ax[0, 1].legend()
ax[1+int(i/2), i % 2].legend()
plt.tight_layout()
return best_periods, probs
def plot_folded_lightcurve(
time,
flux,
period,
flux_err=None,
models=None,
true_lightcurve=None,
ax=None,
use_radians=False,
legend=True,
colours=None,
):
"""
NOTE: Should work for both single-kernel and double-kernel results
Plot a folded periodic light curve, potentially including the true underlying
model that produced the data (in the case of simulations), or model
light curves from MCMC.
Parameters
----------
time : numpy.ndarray
The time stamps of the periodic light curve
flux : numpy.ndarray
Flux measurements corresponding to the time stamps
flux_err : numpy.ndarray
The flux uncertainties corresponding to the data.
period : float
The period on which to fold **in hours**
models : iterable of shape (model_time, numpy.ndarray of shape (nsamples, len(model_time)))
First element here contains the time stamps for the models (which may not be the same
as for the data), the second is an array of shape (nsamples, ndatapoints), where nsamples
is the number of model light curves, and ndatapoints == len(model_time)
true_lightcurve : iterable containing (true_time, true_flux)
In the case of simulated data, this contains the times and flux values from which the
simulated data was created (could be higher-resolution than the "data"), useful for
comparison between models created e.g. from MCMC samples and the true underlying process
ax : matplotlib.Axes object
An Axes object in which to plot the results. If not given, the code will create a
new figure.
use_radians : bool, default False
If True, the phase will be plotted from (0, 2pi) instead of (0,1), which is the default.
legend : bool, default True
If True, include a legend in the plot
colours : [str, str, str]
List of (up to) three colours. First colour is used for the data, the second
colour for the true underlying data, the third for the models.
Returns
-------
ax : matplotlib.Axes object
The object with the plot
"""
if colours is None:
colours = ["#000000", "#0072B2", "#E69F00", "#009E73", "#F0E442"]
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
period_days = period / 24.0
t0 = np.min(time)
if models:
t0 = np.min([t0, np.min(models[0])])
if true_lightcurve:
t0 = np.min([t0, np.min(true_lightcurve[0])])
phase = (time - t0) / period_days - np.floor((time - t0) / period_days)
if use_radians:
phase *= 2.0 * np.pi
if flux_err is None:
ax.scatter(phase, flux, s=5, color=colours[0], label="Observations")
else:
ax.errorbar(
phase,
flux,
yerr=flux_err,
fmt="o",
c=colours[0],
markersize=5,
label="Observations",
)
if true_lightcurve:
true_time = true_lightcurve[0] - t0
true_flux = true_lightcurve[1]
true_phase = true_time / period_days - np.floor(true_time / period_days)
if use_radians:
true_phase *= 2.0 * np.pi
# compute the difference from one phase bin to the next
tdiff = np.diff(true_phase)
# find all differences < 0, which is where the phase wraps around
idx = np.where(tdiff < 0)[0]
# loop through indices where phase goes from 1 (or 2pi) to 0
# plot each phase light curve separately
istart = 0
iend = idx[0] + 1
# first phase cycle also contains the label for the legend
ax.plot(
true_phase[istart:iend],
true_flux[istart:iend],
alpha=0.3,
c=colours[1],
label="True Lightcurve",
)
for i, x in enumerate(idx[:-1]):
ax.plot(
true_phase[istart:iend],
true_flux[istart:iend],
alpha=0.3,
c=colours[1],
label="",
)
istart = x + 1
iend = idx[i + 1] + 1
# last plot
istart = idx[-1] + 1
ax.plot(
true_phase[istart:], true_flux[istart:], alpha=0.3, c=colours[1], label=""
)
# TODO: figure out how to actually use this code
if models:
m_time = models[0] - t0
m_flux = models[1]
m_phase = (m_time / period_days) - np.floor(m_time / period_days)
#print("mphase " + str(m_phase))
if use_radians:
m_phase *= 2.0 * np.pi
# compute the difference from one phase bin to the next
tdiff = np.diff(m_phase)
#print("tdiff " + str(tdiff))
# find all differences < 0, which is where the phase wraps around
idx = np.where(tdiff < 0)[0]
# if idx.size == 0:
# idx = np.array(0)
# loop through the different samples
for i, m in enumerate(m_flux):
# loop through indices where phase goes from 1 (or 2pi) to 0
# plot each phase light curve separately
istart = 0
iend = idx[0] + 1
if i == 0:
# first phase cycle also contains the label for the legend
ax.plot(
m_phase[istart:iend],
m[istart:iend],
alpha=0.1,
c=colours[2],
label="model",
)
else:
ax.plot(
m_phase[istart:iend],
m[istart:iend],
alpha=0.1,
c=colours[2],
label="",
)
for j, x in enumerate(idx[:-1]):
ax.plot(
m_phase[istart:iend],
m[istart:iend],
alpha=0.1,
c=colours[2],
label="",
)
istart = x + 1
iend = idx[j + 1] + 1
# last plot
istart = idx[-1] + 1
ax.plot(m_phase[istart:], m[istart:], alpha=0.1, c=colours[2], label="")
if legend:
ax.legend()
ax.set_xlabel("Rotational Phase")
ax.set_ylabel("Flux")
ax.set_title(r"period $P = %.5f hours$" % period)
if use_radians:
ax.set_xlim(0, 2 * np.pi)
else:
ax.set_xlim(0, 1)
plt.tight_layout()
return ax
def read_data(filename, period_set=None):
"""
Function reading in the resultant HDF5 file produced from run_gp.py.
Parameters
----------
filename : string
Name of the HDF5 file to be read in
period_set: int
If the true period is known and should be included in some of the plots
Returns
----------
data : MCMC chains
time : original time data used for MCMC run
flux : original flux data used for MCMC run
flux_err : original flux error data used for MCMC run
true_period : the true period that was set, or None if it wasn't
"""
with h5py.File(filename, "r") as f:
data = f["chain"][:]
time = f["time"][:]
flux = f["flux"][:]
flux_err = f["flux_err"][:]
iterations = f.attrs["iterations"]
true_period = period_set
if true_period == 0:
true_period = None
return data, time, flux, flux_err, true_period, iterations
def make_summary_plots(
filename, save_fig=False, true_period=None, true_lightcurve=None, lsp=False, models=False, trim_steps=False
):
"""
Plots and saves all the necessary plots you can get from an hdf5 results file.
Parameters
----------
filename : hdf5 file
Name of the file containing the data results.
true_lightcurve : iterable containing (true_time, true_flux)
In the case of simulated data, this contains the times and flux values from which the
simulated data was created (could be higher-resolution than the "data"), useful for
comparison between models created e.g. from MCMC samples and the true underlying process
true_period : float
The true period of the asteroid light curves
"""
data, time, flux, flux_err, true_period, iterations = read_data(filename, true_period)
# convert period from log_days to hours
data[:, :, -1] = np.exp(data[:, :, -1]) * 24.0
if trim_steps:
print(trim_steps)
data = data[:,trim_steps[0]:trim_steps[1],:]
### LOMB-SCARGLE ###
### should be fully functional in both 4 and 6 dim, with period and without
if lsp:
print("\nplotting lomb-scargle periodogram")
run_lsp(
time, flux, flux_err, data, true_period, true_lightcurve, plot=True
)
if save_fig:
print("saving lomb-scargle periodogram")
plt.savefig(filename.replace(".hdf5", "_lsp.pdf"), format="pdf")
### TRACE PLOT ###
### should be fully functional in both 4 and 6 dim, with period and without
# print("\nplotting trace plot")
# plot_trace(data, iterations)
#
# if save_fig:
# print("saving trace plot")
# plt.savefig(filename.replace(".hdf5", "_trace.pdf"), format="pdf")
#
# ### CORNER PLOTS ###
# ### should be fully functional in both 4 and 6 dim, with period and without
# print("\nplotting corner plot")
# plot_corner(data, true_period)
#
# if save_fig:
# print("saving corner plot")
# plt.savefig(filename.replace(".hdf5", "_corner.pdf"), format="pdf")
#
# print("\nplotting trimmed corner plot")
# plot_corner(data, true_period, trim=[5, 95])
#
# if save_fig:
# print("saving trimmed corner plot")
# plt.savefig(filename.replace(".hdf5", "_corner_5_95.pdf"), format="pdf")
#
# # print("\nplotting zoomed-in corner plot")
# # plot_corner(data, true_period, zoom=True)
# #
# # if save_fig:
# # print("saving zoomed-in corner plot")
# # plt.savefig(filename.replace(".hdf5", "_corner_zoom.pdf"), format="pdf")
### POSTERIOR ###
### should be fully functional in both 4 and 6 dim, with period and without
print("\nplotting posterior plot")
best_period, probs = plot_posterior(data, true_period)
if save_fig:
print("saving posterior plot")
plt.savefig(filename.replace(".hdf5", "_posterior.pdf"), format="pdf")
print("\nBEST PERIODS")
print(best_period)
# ### FOLDED LIGHTCURVE ###
### should be fully functional in both 4 and 6 dim, with period and without
print("\nplotting folded lightcurve")
fig, ax = plt.subplots(2, 2, figsize=(10, 10))
if models:
flat_data = data.reshape(data.shape[0] * data.shape[1], data.shape[2])
nsamples = flat_data.shape[0]
nmodels = 3
npred = 1000
t_pred = np.linspace(time[0], time[-1], npred)
m_all = np.zeros((nmodels, t_pred.shape[0]))
idx = np.random.choice(np.arange(0, nsamples, 1, dtype=int), size=nmodels)
asteroid = GPFit(time, flux, flux_err, True)
asteroid.set_params()
asteroid.set_walker_param_matrix(data.shape[0])
asteroid.set_gp_kernel()
gp = asteroid.gp
for i, j in enumerate(idx):
p = flat_data[j]
print("sampled parameter vector:", p)
# convert the period back from hours to log(days) before handing it to the GP
pnew = [p[0], p[1], p[2], p[3], p[4], np.log(p[5] / 24.0)]
gp.set_parameter_vector(pnew)
mean_model = gp.sample_conditional(flux, t_pred)
m_all[i] = mean_model
models = [t_pred, m_all]
else:
models = None
for i, p_best in enumerate(best_period):
plot_folded_lightcurve(
time,
flux,
flux_err=flux_err,
legend=False,
ax=ax[i // 2, i % 2],
period=p_best,
true_lightcurve=true_lightcurve,
models=models,
)
if save_fig:
print("saving folded lightcurve")
plt.savefig(filename.replace(".hdf5", "_folded.pdf"), format="pdf")
def main():
make_summary_plots(filename, save_fig, period_set, lsp=lsp, models=models, trim_steps=trim_steps)
return
if __name__ == "__main__":
### DEFINE PARSER FOR COMMAND LINE ARGUMENTS
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=" ", # Bayesian QPO searches for burst light curves.",
epilog=textwrap.dedent(
"""
Examples
--------
Print this help message:
$> python run_gp.py --help
"""
),
)
### other arguments
parser.add_argument(
"-f",
"--filename",
action="store",
dest="filename",
required=True,
help="HDF5 file with results.",
)
parser.add_argument(
"-s",
"--save_fig",
action="store_true",
dest="save_fig",
required=False,
default=False,
help="Sets to true if you want to save the figures generated.",
)
parser.add_argument(
"-l",
"--lsp",
action="store_true",
dest="lsp",
required=False,
default=False,
help="Creates an LSP plot.",
)
parser.add_argument(
"-m",
"--models",
action="store_true",
dest="models",
required=False,
default=False,
help="Creates model lightcurves for the folded lightcurve plot.",
)
parser.add_argument(
"-p",
"--period",
action="store",
dest="period",
required=False,
type=float,
help="Set to a value (hours) if you want to plot a known period.",
)
parser.add_argument(
"-t",
"--trim",
nargs=2,
action="store",
dest="trim",
required=False,
type=int,
help="Set the boundaries for trimming the walkers.",
)
clargs = parser.parse_args()
filename = clargs.filename
save_fig = clargs.save_fig
lsp = clargs.lsp
models = clargs.models
period_set = clargs.period
trim_steps = clargs.trim
main()
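# Example invocation (the script name below is illustrative only):
# $ python make_summary_plots.py -f asteroid_results.hdf5 -s -l -m -p 4.27 -t 500 2000
# which reads asteroid_results.hdf5, trims the walkers to steps 500-2000, and saves the
# Lomb-Scargle, posterior, and folded-lightcurve figures next to the input file.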
```
#### File: asterogap/tests/test_GP.py
```python
import pytest
import numpy as np
import scipy.stats
import george
from ..GP import GPFit
class Test_GPFit(object):
@classmethod
def setup_class(cls):
# setting up some input parameters
time = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
flux = np.array([4, 5, 6, 7, 8, 9, 8, 7, 6, 5])
flux_err = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
kernel_long = True
cls.asteroid = GPFit(time, flux, flux_err, kernel_long)
def test_init(self):
assert len(self.asteroid.time) == 10
assert len(self.asteroid.time) == len(self.asteroid.flux) == len(self.asteroid.flux_err)
time_true = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
assert np.all(self.asteroid.time == time_true)
# NOTE: comparing against a single number with np.all only works if the input is a np.ndarray
flux_true = np.array([4, 5, 6, 7, 8, 9, 8, 7, 6, 5])
assert np.all(self.asteroid.flux == flux_true)
assert np.all(self.asteroid.flux_err == 1)
#assert self.asteroid.kernel_long is True
def test_init_w_incorrect_inputs(self):
"""
Test that the method shouldn't be able to initialize properly
if the input parameters aren't correct.
"""
# re-initializing a separate GPFit instance here does not interfere with the class-level fixture
# setting up some incorrect input parameters (the time array is missing its last value)
time = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
flux = np.array([5, 5, 5, 5, 5, 5, 5, 5, 5, 6])
flux_err = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
kernel_long = True
asteroid = GPFit(time, flux, flux_err, kernel_long)
# the mismatched input arrays should trigger an AssertionError
with pytest.raises(AssertionError):
assert len(asteroid.time) == 10, "Length of time array does not match expectations."
assert len(asteroid.time) == len(asteroid.flux) == len(asteroid.flux_err), (
"Inputs are not the same size. "
"Make sure your time, flux, and flux errors are all there.")
assert np.all(asteroid.flux == 5), "Flux input does not match expectations."
assert np.all(asteroid.flux_err == 1), "Flux error input does not match expectations."
def test_set_params(self):
"""
Test to see if the set_param method sets up appropriate parameters based
off the values given.
"""
# run the method
self.asteroid.set_params()
assert self.asteroid.params["mean"] == 6.5, "Mean flux parameter was not computed correctly."
assert self.asteroid.params["log_amp_k2"] == np.log(self.asteroid.flux.max() - self.asteroid.flux.min()), "Log amp parameter for kernel 2 was not calculated properly."
assert self.asteroid.params["gamma"] == 10, "Gamma parameter was not assigned correctly."
assert self.asteroid.params["log_period"] == 0, "Log period parameter was not assigned correctly."
if self.asteroid.kernel_long:
assert self.asteroid.params["log_amp_k1"] == np.log(self.asteroid.flux.max() - self.asteroid.flux.min()), "Log amp parameter for kernel 1 was not calculated properly."
assert self.asteroid.params["log_metric"] == np.log(25), "Log metric parameter was not assigned correctly."
else:
# check to see that the attributes don't exist
assert self.asteroid.params.get("log_amp_k1") is None, "Log amp parameter for long_kernel exists when it shouldn't."
assert self.asteroid.params.get("log_metric") is None, "Log metric parameter for long_kernel exists when it shouldn't."
def test_set_walker_param_matrix(self):
"""
Test to see if the set_walker_param_matrix method sets up an appropriate
matrix of values.
"""
seed=0
nwalkers=10
self.asteroid.set_walker_param_matrix(nwalkers=nwalkers, seed=seed)
# recreate the true starting matrix
p_start = np.array(list(self.asteroid.params.values()))
p0_true = scipy.stats.multivariate_normal.rvs(
p_start, size=nwalkers, random_state=seed
)
np.random.seed(seed)
p0_true[:, -1] = np.random.normal(size=nwalkers) * 0.5 + np.log(4 / 24.0)
assert np.all(self.asteroid.walker_params == p0_true), "Walker matrix was not set up correctly."
def test_set_gp_kernel(self):
"""
Test to see if the GP kernel is set up correctly or not.
"""
# set up the expected GP kernel
if self.asteroid.kernel_long:
k1 = np.exp(self.asteroid.params["log_amp_k1"]) * george.kernels.ExpSquaredKernel(
metric=np.exp(self.asteroid.params["log_metric"])
)
k2 = np.exp(self.asteroid.params["log_amp_k2"]) * george.kernels.ExpSine2Kernel(
gamma=(self.asteroid.params["gamma"]), log_period=self.asteroid.params["log_period"]
)
if self.asteroid.kernel_long:
kernel = k1 * k2
else:
kernel = k2
gp_true = george.GP(kernel, fit_mean=True, mean=self.asteroid.params["mean"])
gp_true.compute(self.asteroid.time, self.asteroid.flux_err)
self.asteroid.set_gp_kernel()
# since you can't directly compare kernels, it's easiest to see if they
# calculate the same things with the same values
if self.asteroid.kernel_long:
param_vector = np.array([ 8.26405235, 2.00959512, 4.19761381, 3.85033111, 11.86755799, -0.9097333 ])
else:
param_vector = np.array([ 8.26405235, 3.85033111, 11.86755799, -0.9097333 ])
gp_test = self.asteroid.gp
gp_test.set_parameter_vector(param_vector)
gp_true.set_parameter_vector(param_vector)
lnlike_test = gp_test.lnlikelihood(np.arange(0,10))
lnlike_true = gp_true.lnlikelihood(np.arange(0,10))
assert lnlike_test == lnlike_true, "Kernel was not compiled correctly."
def test_run_emcee(self):
"""
Test to see if the MCMC run will produce the same results.
"""
nwalkers = 10
niter = 100
burn_in = 10
self.asteroid.run_emcee(nwalkers=nwalkers, niter=niter, threads=1, burn_in=burn_in)
# TODO: check whether run_emcee stores the resulting parameters on the class-level fixture, since later tests may need them.
# def test_init():
# """
# Test that the class is initialized correctly.
# """
#
# # setting up some input parameters
# time = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
# flux = np.array([5, 5, 5, 5, 5, 5, 5, 5, 5, 5])
# flux_err = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
# kernel_long = True
#
# asteroid = GPFit(time, flux, flux_err, kernel_long)
#
# assert len(asteroid.time) == 10
# assert len(asteroid.time) == len(asteroid.flux) == len(asteroid.flux_err)
#
# time_true = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# assert np.all(asteroid.time == time_true)
# # NOTE: np.all only works on np.arrays when comparing to one number
# assert np.all(asteroid.flux == 5)
# assert np.all(asteroid.flux_err == 1)
# assert asteroid.kernel_long is True
#
# def test_init_w_incorrect_inputs():
# """
# Test that the function shouldn't be able to initialize properly
# if the input parameters aren't correct.
# """
#
# # setting up some incorrect input parameters (missing the last value)
# time = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
# flux = np.array([5, 5, 5, 5, 5, 5, 5, 5, 5, 6])
# flux_err = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
# kernel_long = True
#
# asteroid = GPFit(time, flux, flux_err, kernel_long)
#
# # AssertionError
# with pytest.raises(AssertionError):
# assert len(asteroid.time) == 10, "Length of time array does not match expectations."
# assert len(asteroid.time) == len(asteroid.flux) == len(asteroid.flux_err), "Inputs are not"
# " the same size. Make sure your time, flux, and flux errors are all there."
# assert np.all(asteroid.flux == 5), "Flux input does not match expectations."
# assert np.all(asteroid.flux_err == 1), "Flux error input does not not match expectations."
# time_true = [0,1,2,3,4,5,6,7,8,9]
# assert np.all(asteroid.time == time_true)
# NOTE: np.all only works on np.arrays when comparing to one number
#
#
# assert kernel_long == True
def test():
"""
"""
```
|
{
"source": "jdswinbank/lsstvaultutils",
"score": 3
}
|
#### File: lsstvaultutils/lsstvaultutils/vaultconfig.py
```python
import hvac # type:ignore
import json
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Dict, Optional
class Verb(Enum):
ADD = 1
REMOVE = 2
@dataclass
class Keyset:
"""Contains "accessor" and "id" tokens for a Vault path."""
accessor: str
id: str
@dataclass
class Enclave:
"""Maps a Vault path to 'read' and 'write' Keysets. The 'Enclave' is
simply a particular vault path with its own Keysets, e.g.
'k8s_operator/nublado.lsst.codes'
"""
name: str
read: Keyset
write: Keyset
class VaultConfig:
"""Contains the vault address (a URL represented as a string), the
secret to be added, which is a dict mapping strings to strings
(but can be None, if you're deleting a secret from a path), the
rendered-to-memory JSON document representing all the vault paths
and tokens, and the list of vault paths to not update.
"""
def __init__(
self,
vault_address: Optional[str],
vault_file: str,
skip_list: Optional[List[str]],
secret_file: Optional[str] = None,
):
self.vault_address: Optional[str] = os.getenv("VAULT_ADDR")
self.secret: Optional[Dict[str, str]] = None
self.enclaves: Dict[str, Enclave] = {}
if vault_address:
self.vault_address = vault_address
if secret_file:
self.load_secret(secret_file)
with open(vault_file, "r") as f:
vault_dict = json.load(f)
for item in vault_dict:
name: str = list(item.keys())[0]
if skip_list and name in skip_list:
continue
read_k = Keyset(**item[name]["read"])
write_k = Keyset(**item[name]["write"])
enclave = Enclave(name=name, read=read_k, write=write_k)
self.enclaves[name] = enclave
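# The vault_file parsed above is expected to be a JSON list of single-key objects,
# one per enclave, for example (illustrative values only):
# [
#   {"k8s_operator/nublado.lsst.codes": {
#       "read":  {"accessor": "<accessor>", "id": "<token-id>"},
#       "write": {"accessor": "<accessor>", "id": "<token-id>"}}}
# ]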
def load_secret(self, secret_file: str) -> None:
with open(secret_file, "r") as f:
self.secret = json.load(f)
def _get_write_key_for_enclave(self, enclave: Enclave) -> str:
"""Given a loaded Vault enclave, return its write key."""
return enclave.write.id
def get_enclave_for_path(self, vault_path: str) -> Optional[Enclave]:
"""Given a Vault path (e.g. 'k8s_operator/nublado.lsst.codes'),
return the associated enclave.
"""
return self.enclaves.get(vault_path)
def add_secrets(self, secret_name: str, dry_run: bool = False) -> None:
self._change_secrets(
verb=Verb.ADD, secret_name=secret_name, dry_run=dry_run
)
def remove_secrets(self, secret_name: str, dry_run: bool = False) -> None:
self._change_secrets(
verb=Verb.REMOVE, secret_name=secret_name, dry_run=dry_run
)
def _change_secrets(
self, verb: Verb, secret_name: str, dry_run: bool = False
) -> None:
for name in self.enclaves:
self._change_secret(
verb=verb,
enclave=self.enclaves[name],
secret_name=secret_name,
dry_run=dry_run,
)
def add_secret(
self, enclave: Enclave, secret_name: str, dry_run: bool = False
) -> None:
self._change_secret(
verb=Verb.ADD,
enclave=enclave,
secret_name=secret_name,
dry_run=dry_run,
)
def remove_secret(
self, enclave: Enclave, secret_name: str, dry_run: bool = False
) -> None:
self._change_secret(
verb=Verb.REMOVE,
enclave=enclave,
secret_name=secret_name,
dry_run=dry_run,
)
def _change_secret(
self,
verb: Verb,
enclave: Enclave,
secret_name: str,
dry_run: bool = False,
) -> None:
client = hvac.Client(url=self.vault_address)
client.token = self._get_write_key_for_enclave(enclave)
assert client.is_authenticated()
secret_path = "{}/{}".format(enclave.name, secret_name)
vstr = "add"
if verb == Verb.REMOVE:
vstr = "remove"
if dry_run:
print(
"Dry run: {} secret at ".format(vstr)
+ "{}/{}".format(self.vault_address, secret_path)
)
else:
if verb == Verb.REMOVE:
client.secrets.kv.v2.delete_metadata_and_all_versions(
path=secret_path
)
else:
client.secrets.kv.v2.create_or_update_secret(
path=secret_path, secret=self.secret
)
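# A minimal usage sketch (file names and the secret name are hypothetical):
# cfg = VaultConfig(
#     vault_address="https://vault.example.com",
#     vault_file="tokens.json",
#     skip_list=None,
#     secret_file="secret.json",
# )
# cfg.add_secrets("pull-secret", dry_run=True)     # show what would be written
# cfg.remove_secrets("pull-secret", dry_run=True)  # show what would be deleted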
```
|
{
"source": "jdswinbank/stingray",
"score": 2
}
|
#### File: stingray/tests/test_lightcurve.py
```python
import copy
import numpy as np
from astropy.tests.helper import pytest
import warnings
import os
import matplotlib.pyplot as plt
from numpy.testing import assert_allclose
import astropy.units as u
from astropy.time import Time
from stingray import Lightcurve
from stingray.exceptions import StingrayError
from stingray.gti import create_gti_mask
np.random.seed(20150907)
_H5PY_INSTALLED = True
_HAS_LIGHTKURVE = True
_HAS_TIMESERIES = True
_HAS_YAML = True
try:
import h5py
except ImportError:
_H5PY_INSTALLED = False
try:
import lightkurve
except ImportError:
_HAS_LIGHTKURVE = False
try:
import astropy.timeseries
from astropy.timeseries import TimeSeries
except ImportError:
_HAS_TIMESERIES = False
try:
import yaml
except ImportError:
_HAS_YAML = False
curdir = os.path.abspath(os.path.dirname(__file__))
datadir = os.path.join(curdir, 'data')
def fvar_fun(lc):
from stingray.utils import excess_variance
return excess_variance(lc, normalization='fvar')
def nvar_fun(lc):
from stingray.utils import excess_variance
return excess_variance(lc, normalization='norm_xs')
def evar_fun(lc):
from stingray.utils import excess_variance
return excess_variance(lc, normalization='none')
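# These three helpers wrap stingray.utils.excess_variance with its different
# normalizations; the chunk tests below lean on the approximate relations
# nvar ~= fvar**2 and evar ~= nvar * mean**2 when cross-checking the results.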
class TestProperties(object):
@classmethod
def setup_class(cls):
dt = 0.1
tstart = 0
tstop = 1
times = np.arange(tstart, tstop, dt)
cls.gti = np.array([[tstart - dt/2, tstop - dt/2]])
# a constant light curve is sufficient for these property tests
counts = np.zeros_like(times) + 100
cls.lc = Lightcurve(times, counts, gti=cls.gti)
cls.lc_lowmem = Lightcurve(times, counts, gti=cls.gti, low_memory=True)
def test_warn_wrong_keywords(self):
lc = copy.deepcopy(self.lc)
with pytest.warns(UserWarning) as record:
_ = Lightcurve(lc.time, lc.counts, gti=lc.gti, bubu='settete')
assert np.any(["Unrecognized keywords:" in r.message.args[0]
for r in record])
def test_time(self):
lc = copy.deepcopy(self.lc)
assert lc._bin_lo is None
# When I call bin_lo, _bin_lo gets set
_ = lc.bin_lo
assert lc._bin_lo is not None
# When I set time, _bin_lo gets deleted.
lc.time = lc.time / 10
assert lc._bin_lo is None
_ = lc.bin_lo
assert lc._bin_lo is not None
def test_lightcurve_from_astropy_time(self):
time = Time([57483, 57484], format='mjd')
counts = np.array([2, 2])
lc = Lightcurve(time, counts)
assert lc.dt == 86400
assert np.all(lc.counts == counts)
def test_time_is_quantity_or_astropy_time(self):
counts = [34, 21.425]
times = np.array([57000, 58000])
times_q = (times - times[0]) * u.d
times_t = Time(times, format='mjd')
lc_q = Lightcurve(time=times_q, counts=counts, mjdref=times[0])
lc_t = Lightcurve(time=times_t, counts=counts)
assert_allclose(lc_q.time, lc_t.time)
def test_gti(self):
lc = copy.deepcopy(self.lc)
assert lc._mask is None
_ = lc.mask
assert lc._mask is not None
lc.gti = [[0, 1]]
assert lc._mask is None
def test_counts_and_countrate(self):
lc = copy.deepcopy(self.lc)
# At initialization, _countrate is None and _counts is not.
assert lc._countrate is None
assert lc._counts is not None
assert lc._meancounts is None
# Now we retrieve meancounts; it gets calculated.
_ = lc.meancounts
assert lc._meancounts is not None
# Now we retrieve countrate, and it gets calculated
_ = lc.countrate
assert lc._countrate is not None
# Now I set counts; countrate gets deleted together with the other
# statistics.
lc.counts = np.zeros_like(lc.counts) + 3
assert lc._countrate is None
assert lc._meancounts is None
assert lc._meanrate is None
# Now I retrieve meanrate. It gets calculated
_ = lc.meanrate
assert lc._meanrate is not None
# Finally, we set count rate and test that the rest has been deleted.
lc.countrate = np.zeros_like(lc.countrate) + 3
lc.countrate_err = np.zeros_like(lc.countrate) + 3
assert lc._counts is None
assert lc._counts_err is None
assert lc._meancounts is None
_ = lc.counts_err
assert lc._counts_err is not None
def test_counts_and_countrate_lowmem(self):
lc = copy.deepcopy(self.lc_lowmem)
# At initialization, _countrate is None and _counts is not.
assert lc._countrate is None
assert lc._counts is not None
assert lc._meancounts is None
# Now we retrieve meancounts; it gets calculated.
_ = lc.meancounts
assert lc._meancounts is not None
# Now we retrieve countrate, and it gets calculated but not saved
# (because low_memory)
_ = lc.countrate
assert lc._countrate is None
_ = lc.countrate_err
assert lc._countrate_err is None
# Now I set counts; countrate gets deleted together with the other
# statistics.
lc.counts = np.zeros_like(lc.counts) + 3
assert lc.input_counts
assert lc._countrate is None
assert lc._meancounts is None
assert lc._meanrate is None
# Now I retrieve meanrate. It gets calculated
_ = lc.meanrate
assert lc._meanrate is not None
# Finally, we set count rate and test that the rest has been deleted,
# AND input_counts is changed to False.
lc.countrate = np.zeros_like(lc.countrate) + 3
assert lc._counts is None
assert lc._meancounts is None
assert not lc.input_counts
_ = lc.counts
# Now we retrieve counts, and it gets calculated but not saved
# (because low_memory, and input_counts is now False)
assert lc._counts is None
_ = lc.counts_err
# Now we retrieve counts, and it gets calculated but not saved
# (because low_memory, and input_counts is now False)
assert lc._counts_err is None
@pytest.mark.parametrize('property', 'time,counts,counts_err,'
'countrate,countrate_err'.split(','))
def test_assign_bad_shape_fails(self, property):
lc = copy.deepcopy(self.lc)
# Same shape passes
setattr(lc, property, np.zeros_like(lc.time))
# Different shape doesn't
with pytest.raises(ValueError):
setattr(lc, property, 3)
with pytest.raises(ValueError):
setattr(lc, property, np.arange(2))
class TestChunks(object):
@classmethod
def setup_class(cls):
dt = 0.1
tstart = 0
tstop = 100
times = np.arange(tstart, tstop, dt)
cls.gti = np.array([[tstart - dt/2, tstop - dt/2]])
# Simulate something *clearly* non-constant
counts = np.random.poisson(
10000 + 2000 * np.sin(2 * np.pi * times))
cls.lc = Lightcurve(times, counts, gti=cls.gti)
def test_analyze_lc_chunks_fvar_fracstep(self):
start, stop, res = self.lc.analyze_lc_chunks(20, fvar_fun,
fraction_step=0.5)
# excess_variance returns fvar and fvar_err
fvar, fvar_err = res
assert np.allclose(start[0], self.gti[0, 0])
assert np.all(fvar > 0)
# This must be a clear measurement of fvar
assert np.all(fvar > fvar_err)
def test_analyze_lc_chunks_nvar_fracstep(self):
start, stop, res = self.lc.analyze_lc_chunks(20, fvar_fun,
fraction_step=0.5)
# excess_variance returns fvar and fvar_err
fvar, fvar_err = res
start, stop, res = self.lc.analyze_lc_chunks(20, nvar_fun,
fraction_step=0.5)
# excess_variance returns fvar and fvar_err
nevar, nevar_err = res
assert np.allclose(nevar, fvar**2, rtol=0.01)
def test_analyze_lc_chunks_nvar_fracstep_mean(self):
start, stop, mean = self.lc.analyze_lc_chunks(20, np.mean,
fraction_step=0.5)
start, stop, res = self.lc.analyze_lc_chunks(20, evar_fun,
fraction_step=0.5)
# excess_variance returns fvar and fvar_err
evar, evar_err = res
start, stop, res = self.lc.analyze_lc_chunks(20, nvar_fun,
fraction_step=0.5)
# excess_variance returns fvar and fvar_err
nevar, nevar_err = res
assert np.allclose(nevar * mean ** 2, evar, rtol=0.01)
assert np.allclose(nevar_err * mean ** 2, evar_err, rtol=0.01)
class TestLightcurve(object):
@classmethod
def setup_class(cls):
cls.times = np.array([1, 2, 3, 4])
cls.counts = np.array([2, 2, 2, 2])
cls.counts_err = np.array([0.2, 0.2, 0.2, 0.2])
cls.dt = 1.0
cls.gti = np.array([[0.5, 4.5]])
def test_create(self):
"""
Demonstrate that we can create a trivial Lightcurve object.
"""
lc = Lightcurve(self.times, self.counts)
def test_irregular_time_warning(self):
"""
Check if inputting an irregularly spaced time iterable throws out
a warning.
"""
times = [1, 2, 3, 5, 6]
counts = [2, 2, 2, 2, 2]
warn_str = ("SIMON says: Bin sizes in input time array aren't equal "
"throughout! This could cause problems with Fourier "
"transforms. Please make the input time evenly sampled.")
with warnings.catch_warnings(record=True) as w:
_ = Lightcurve(times, counts, err_dist="poisson")
assert np.any([str(wi.message) == warn_str for wi in w])
def test_unrecognize_err_dist_warning(self):
"""
Check if a non-poisson error_dist throws the correct warning.
"""
times = [1, 2, 3, 4, 5]
counts = [2, 2, 2, 2, 2]
warn_str = ("SIMON says: Stingray only uses poisson err_dist at "
"the moment")
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
lc = Lightcurve(times, counts, err_dist='gauss')
assert np.any([warn_str in str(wi.message) for wi in w])
def test_dummy_err_dist_fail(self):
"""
Check if inputting an irregularly spaced time iterable throws out
a warning.
"""
times = [1, 2, 3, 4, 5]
counts = [2, 2, 2, 2, 2]
with pytest.raises(StingrayError):
lc = Lightcurve(times, counts, err_dist='joke')
def test_invalid_data(self):
times = [1, 2, 3, 4, 5]
counts = [2, 2, np.nan, 2, 2]
counts_err = [1, 2, 3, np.nan, 2]
with pytest.raises(ValueError):
lc = Lightcurve(times, counts)
with pytest.raises(ValueError):
lc = Lightcurve(times, [2]*5, err=counts_err)
times[2] = np.inf
with pytest.raises(ValueError):
lc = Lightcurve(times, [2]*5)
def test_n(self):
lc = Lightcurve(self.times, self.counts)
assert lc.n == 4
def test_analyze_lc_chunks(self):
lc = Lightcurve(self.times, self.counts, gti=self.gti)
def func(lc):
return lc.time[0]
start, stop, res = lc.analyze_lc_chunks(2, func)
assert start[0] == 0.5
assert np.all(start + lc.dt / 2 == res)
def test_bin_edges(self):
bin_lo = [0.5, 1.5, 2.5, 3.5]
bin_hi = [1.5, 2.5, 3.5, 4.5]
lc = Lightcurve(self.times, self.counts)
assert np.allclose(lc.bin_lo, bin_lo)
assert np.allclose(lc.bin_hi, bin_hi)
def test_lightcurve_from_toa(self):
lc = Lightcurve.make_lightcurve(self.times, self.dt, use_hist=True,
tstart=0.5)
lc2 = Lightcurve.make_lightcurve(self.times, self.dt, use_hist=False,
tstart=0.5)
assert np.allclose(lc.time, lc2.time)
assert np.all(lc.counts == lc2.counts)
def test_lightcurve_from_toa_quantity(self):
lc = Lightcurve.make_lightcurve(self.times * u.s, self.dt,
use_hist=True, tstart=0.5)
lc2 = Lightcurve.make_lightcurve(self.times, self.dt, use_hist=False,
tstart=0.5)
assert np.allclose(lc.time, lc2.time)
assert np.all(lc.counts == lc2.counts)
def test_lightcurve_from_toa_Time(self):
mjdref = 56789
mjds = Time(self.times / 86400 + mjdref, format='mjd')
lc = Lightcurve.make_lightcurve(mjds, self.dt, mjdref=mjdref,
use_hist=True, tstart=0.5)
lc2 = Lightcurve.make_lightcurve(self.times, self.dt, use_hist=False,
tstart=0.5, mjdref=mjdref)
assert np.allclose(lc.time, lc2.time)
assert np.all(lc.counts == lc2.counts)
def test_lightcurve_from_toa_halfbin(self):
lc = Lightcurve.make_lightcurve(self.times + 0.5, self.dt,
use_hist=True,
tstart=0.5)
lc2 = Lightcurve.make_lightcurve(self.times + 0.5, self.dt,
use_hist=False,
tstart=0.5)
assert np.allclose(lc.time, lc2.time)
assert np.all(lc.counts == lc2.counts)
def test_lightcurve_from_toa_random_nums(self):
times = np.random.uniform(0, 10, 1000)
lc = Lightcurve.make_lightcurve(times, self.dt, use_hist=True,
tstart=0.5)
lc2 = Lightcurve.make_lightcurve(times, self.dt, use_hist=False,
tstart=0.5)
assert np.allclose(lc.time, lc2.time)
assert np.all(lc.counts == lc2.counts)
def test_tstart(self):
tstart = 0.0
lc = Lightcurve.make_lightcurve(self.times, self.dt, tstart=0.0)
assert lc.tstart == tstart
assert lc.time[0] == tstart + 0.5*self.dt
def test_tseg(self):
tstart = 0.0
tseg = 5.0
lc = Lightcurve.make_lightcurve(self.times, self.dt,
tseg=tseg, tstart=tstart)
assert lc.tseg == tseg
assert lc.time[-1] - lc.time[0] == tseg - self.dt
def test_nondivisble_tseg(self):
"""
If the light curve length input is not divisible by the time
resolution, the last (fractional) time bin will be dropped.
"""
tstart = 0.0
tseg = 5.5
lc = Lightcurve.make_lightcurve(self.times, self.dt,
tseg=tseg, tstart=tstart)
assert lc.tseg == int(tseg/self.dt)
def test_correct_timeresolution(self):
lc = Lightcurve.make_lightcurve(self.times, self.dt)
assert np.isclose(lc.dt, self.dt)
def test_bin_correctly(self):
ncounts = np.array([2, 1, 0, 3])
tstart = 0.0
tseg = 4.0
toa = np.hstack([np.random.uniform(i, i+1, size=n)
for i, n in enumerate(ncounts)])
dt = 1.0
lc = Lightcurve.make_lightcurve(toa, dt, tseg=tseg, tstart=tstart)
assert np.allclose(lc.counts, ncounts)
def test_countrate(self):
dt = 0.5
mean_counts = 2.0
times = np.arange(0 + dt/2, 5 - dt/2, dt)
counts = np.zeros_like(times) + mean_counts
lc = Lightcurve(times, counts)
assert np.allclose(lc.countrate, np.zeros_like(counts) +
mean_counts/dt)
def test_input_countrate(self):
dt = 0.5
mean_counts = 2.0
times = np.arange(0 + dt/2, 5 - dt/2, dt)
countrate = np.zeros_like(times) + mean_counts
lc = Lightcurve(times, countrate, input_counts=False)
assert np.allclose(lc.counts, np.zeros_like(countrate) +
mean_counts*dt)
def test_meanrate(self):
times = [0.5, 1.0, 1.5, 2.0]
counts = [2, 3, 3, 4]
lc = Lightcurve(times, counts)
assert lc.meanrate == 6
def test_meancounts(self):
counts = [2, 3, 3, 4]
lc = Lightcurve(self.times, counts)
assert lc.meancounts == 3
def test_lc_gtis(self):
t = [0.5, 1.5, 2.5, 3.5, 4.5]
lc = [5, 5, 0, 5, 5]
gtis = [[0, 2], [3, 5]]
lc = Lightcurve(t, lc, gti=gtis)
assert lc.meanrate == 5
assert lc.meancounts == 5
def test_creating_lightcurve_raises_type_error_when_input_is_none(self):
dt = 0.5
mean_counts = 2.0
times = np.arange(0 + dt/2, 5 - dt/2, dt)
counts = np.array([None] * times.shape[0])
with pytest.raises(TypeError):
lc = Lightcurve(times, counts)
def test_creating_lightcurve_raises_type_error_when_input_is_inf(self):
dt = 0.5
mean_counts = 2.0
times = np.arange(0 + dt/2, 5 - dt/2, dt)
counts = np.array([np.inf] * times.shape[0])
with pytest.raises(ValueError):
lc = Lightcurve(times, counts)
def test_creating_lightcurve_raises_type_error_when_input_is_nan(self):
dt = 0.5
mean_counts = 2.0
times = np.arange(0 + dt/2, 5 - dt/2, dt)
counts = np.array([np.nan] * times.shape[0])
with pytest.raises(ValueError):
lc = Lightcurve(times, counts)
def test_init_with_diff_array_lengths(self):
time = [1, 2, 3]
counts = [2, 2, 2, 2]
with pytest.raises(StingrayError):
lc = Lightcurve(time, counts)
def test_add_with_different_time_arrays(self):
_times = [1.1, 2.1, 3.1, 4.1, 5.1]
_counts = [2, 2, 2, 2, 2]
with pytest.raises(ValueError):
lc1 = Lightcurve(self.times, self.counts)
lc2 = Lightcurve(_times, _counts)
lc = lc1 + lc2
def test_add_with_different_err_dist(self):
lc1 = Lightcurve(self.times, self.counts)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
lc2 = Lightcurve(self.times, self.counts, err=self.counts / 2,
err_dist="gauss")
with warnings.catch_warnings(record=True) as w:
lc = lc1 + lc2
assert np.any(["ightcurves have different statistics"
in str(wi.message) for wi in w])
def test_add_with_same_gtis(self):
lc1 = Lightcurve(self.times, self.counts, gti=self.gti)
lc2 = Lightcurve(self.times, self.counts, gti=self.gti)
lc = lc1 + lc2
np.testing.assert_almost_equal(lc.gti, self.gti)
def test_add_with_different_gtis(self):
gti = [[0., 3.5]]
lc1 = Lightcurve(self.times, self.counts, gti=self.gti)
lc2 = Lightcurve(self.times, self.counts, gti=gti)
lc = lc1 + lc2
np.testing.assert_almost_equal(lc.gti, [[0.5, 3.5]])
def test_add_with_unequal_time_arrays(self):
_times = [1, 3, 5, 7]
with pytest.raises(ValueError):
lc1 = Lightcurve(self.times, self.counts)
lc2 = Lightcurve(_times, self.counts)
lc = lc1 + lc2
def test_add_with_equal_time_arrays(self):
_counts = [1, 1, 1, 1]
lc1 = Lightcurve(self.times, self.counts)
lc2 = Lightcurve(self.times, _counts)
lc = lc1 + lc2
assert np.all(lc.counts == lc1.counts + lc2.counts)
assert np.all(lc.countrate == lc1.countrate + lc2.countrate)
assert lc1.mjdref == lc.mjdref
def test_sub_with_diff_time_arrays(self):
_times = [1.1, 2.1, 3.1, 4.1, 5.1]
_counts = [2, 2, 2, 2, 2]
with pytest.raises(ValueError):
lc1 = Lightcurve(self.times, self.counts)
lc2 = Lightcurve(_times, _counts)
lc = lc1 - lc2
def test_sub_with_different_err_dist(self):
lc1 = Lightcurve(self.times, self.counts)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
lc2 = Lightcurve(self.times, self.counts, err=self.counts / 2,
err_dist="gauss")
with warnings.catch_warnings(record=True) as w:
lc = lc1 - lc2
assert np.any(["ightcurves have different statistics"
in str(wi.message) for wi in w])
def test_subtraction(self):
_counts = [3, 4, 5, 6]
lc1 = Lightcurve(self.times, self.counts)
lc2 = Lightcurve(self.times, _counts)
lc = lc2 - lc1
expected_counts = np.array([1, 2, 3, 4])
assert np.all(lc.counts == expected_counts)
assert lc1.mjdref == lc.mjdref
def test_negation(self):
lc = Lightcurve(self.times, self.counts)
_lc = lc + (-lc)
assert not np.all(_lc.counts)
assert _lc.mjdref == lc.mjdref
def test_len_function(self):
lc = Lightcurve(self.times, self.counts)
assert len(lc) == 4
def test_indexing_with_unexpected_type(self):
lc = Lightcurve(self.times, self.counts)
with pytest.raises(IndexError):
count = lc['first']
def test_indexing(self):
lc = Lightcurve(self.times, self.counts)
assert lc[0] == lc[1] == lc[2] == lc[3] == 2
def test_slicing(self):
lc = Lightcurve(self.times, self.counts, gti=self.gti)
assert np.all(lc[1:3].counts == np.array([2, 2]))
assert np.all(lc[:2].counts == np.array([2, 2]))
assert np.all(lc[:2].gti == [[0.5, 2.5]])
assert np.all(lc[2:].counts == np.array([2, 2]))
assert np.all(lc[2:].gti == [[2.5, 4.5]])
assert np.all(lc[:].counts == np.array([2, 2, 2, 2]))
assert np.all(lc[::2].gti == [[0.5, 1.5], [2.5, 3.5]])
assert np.all(lc[:].gti == lc.gti)
assert lc[:].mjdref == lc.mjdref
assert lc[::2].n == 2
def test_slicing_index_error(self):
lc = Lightcurve(self.times, self.counts)
with pytest.raises(StingrayError):
lc_new = lc[1:2]
def test_index(self):
lc = Lightcurve(self.times, self.counts)
index = 1
index_np32, index_np64 = np.int32(index), np.int64(index)
assert lc[index] == lc[index_np32] == lc[index_np64]
def test_join_with_different_dt(self):
_times = [5, 5.5, 6]
_counts = [2, 2, 2]
lc1 = Lightcurve(self.times, self.counts)
lc2 = Lightcurve(_times, _counts)
with warnings.catch_warnings(record=True) as w:
lc1.join(lc2)
assert np.any(["different bin widths"
in str(wi.message) for wi in w])
def test_join_with_different_mjdref(self):
shift = 86400. # day
lc1 = Lightcurve(self.times + shift, self.counts, gti=self.gti + shift,
mjdref=57000)
lc2 = Lightcurve(self.times, self.counts, gti=self.gti, mjdref=57001)
with warnings.catch_warnings(record=True) as w:
newlc = lc1.join(lc2)
# The join operation *averages* the overlapping arrays
assert np.allclose(newlc.counts, lc1.counts)
assert np.any(["MJDref is different in the two light curves"
in str(wi.message) for wi in w])
assert np.any(["The two light curves have overlapping time ranges"
in str(wi.message) for wi in w])
def test_sum_with_different_mjdref(self):
shift = 86400. # day
lc1 = Lightcurve(self.times + shift, self.counts, gti=self.gti + shift,
mjdref=57000)
lc2 = Lightcurve(self.times, self.counts, gti=self.gti, mjdref=57001)
with pytest.warns(UserWarning) as record:
newlc = lc1 + lc2
assert np.any(["MJDref"
in r.message.args[0] for r in record])
assert np.allclose(newlc.counts, lc1.counts * 2)
def test_subtract_with_different_mjdref(self):
shift = 86400. # day
lc1 = Lightcurve(self.times + shift, self.counts, gti=self.gti + shift,
mjdref=57000)
lc2 = Lightcurve(self.times, self.counts, gti=self.gti, mjdref=57001)
with pytest.warns(UserWarning) as record:
newlc = lc1 - lc2
assert np.any(["MJDref"
in r.message.args[0] for r in record])
assert np.allclose(newlc.counts, 0)
def test_join_disjoint_time_arrays(self):
_times = [5, 6, 7, 8]
_counts = [2, 2, 2, 2]
lc1 = Lightcurve(self.times, self.counts)
lc2 = Lightcurve(_times, _counts)
lc = lc1.join(lc2)
assert len(lc.counts) == len(lc.time) == 8
assert np.all(lc.counts == 2)
assert lc.mjdref == lc1.mjdref
def test_join_overlapping_time_arrays(self):
_times = [3, 4, 5, 6]
_counts = [4, 4, 4, 4]
lc1 = Lightcurve(self.times, self.counts)
lc2 = Lightcurve(_times, _counts)
with warnings.catch_warnings(record=True) as w:
lc = lc1.join(lc2)
assert np.any(["overlapping time ranges" in str(wi.message)
for wi in w])
assert len(lc.counts) == len(lc.time) == 6
assert np.all(lc.counts == np.array([2, 2, 3, 3, 4, 4]))
def test_join_different_err_dist_disjoint_times(self):
_times = [5, 6, 7, 8]
_counts = [2, 2, 2, 2]
lc1 = Lightcurve(self.times, self.counts, err_dist="poisson")
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
lc2 = Lightcurve(_times, _counts, err_dist="gauss")
lc3 = lc1.join(lc2)
assert np.all(lc3.counts_err[:len(self.times)] == lc1.counts_err)
assert np.all(lc3.counts_err[len(self.times):] == np.zeros_like(lc2.counts))
def test_join_different_err_dist_overlapping_times(self):
_times = [3, 4, 5, 6]
_counts = [4, 4, 4, 4]
lc1 = Lightcurve(self.times, self.counts, err_dist="poisson")
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
lc2 = Lightcurve(_times, _counts, err_dist="gauss")
with warnings.catch_warnings(record=True) as w:
lc3 = lc1.join(lc2)
assert "We are setting the errors to zero." in str(w[1].message)
assert np.all(lc3.counts_err == np.zeros_like(lc3.time))
def test_truncate_by_index(self):
lc = Lightcurve(self.times, self.counts, gti=self.gti)
lc1 = lc.truncate(start=1)
assert np.all(lc1.time == np.array([2, 3, 4]))
assert np.all(lc1.counts == np.array([2, 2, 2]))
np.testing.assert_almost_equal(lc1.gti[0][0], 1.5)
assert lc1.mjdref == lc.mjdref
lc2 = lc.truncate(stop=2)
assert np.all(lc2.time == np.array([1, 2]))
assert np.all(lc2.counts == np.array([2, 2]))
np.testing.assert_almost_equal(lc2.gti[-1][-1], 2.5)
assert lc2.mjdref == lc.mjdref
def test_truncate_by_time_stop_less_than_start(self):
lc = Lightcurve(self.times, self.counts)
with pytest.raises(ValueError):
lc1 = lc.truncate(start=2, stop=1, method='time')
def test_truncate_fails_with_incorrect_method(self):
lc = Lightcurve(self.times, self.counts)
with pytest.raises(ValueError):
lc1 = lc.truncate(start=1, method="wrong")
def test_truncate_by_time(self):
lc = Lightcurve(self.times, self.counts, gti=self.gti)
lc1 = lc.truncate(start=1, method='time')
assert np.all(lc1.time == np.array([1, 2, 3, 4]))
assert np.all(lc1.counts == np.array([2, 2, 2, 2]))
np.testing.assert_almost_equal(lc1.gti[0][0], 0.5)
assert lc1.mjdref == lc.mjdref
lc2 = lc.truncate(stop=3, method='time')
assert np.all(lc2.time == np.array([1, 2]))
assert np.all(lc2.counts == np.array([2, 2]))
np.testing.assert_almost_equal(lc2.gti[-1][-1], 2.5)
assert lc2.mjdref == lc.mjdref
def test_split_with_two_segments(self):
test_time = np.array([1, 2, 3, 6, 7, 8])
test_counts = np.random.rand(len(test_time))
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
lc_test = Lightcurve(test_time, test_counts)
slc = lc_test.split(1.5)
assert len(slc) == 2
def test_split_has_correct_data_points(self):
test_time = np.array([1, 2, 3, 6, 7, 8])
test_counts = np.random.rand(len(test_time))
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
lc_test = Lightcurve(test_time, test_counts)
slc = lc_test.split(1.5)
assert np.all((slc[0].time == [1, 2, 3]))
assert np.all((slc[1].time == [6, 7, 8]))
assert np.all((slc[0].counts == test_counts[:3]))
assert np.all((slc[1].counts == test_counts[3:]))
def test_split_with_three_segments(self):
test_time = np.array([1, 2, 3, 6, 7, 8, 10, 11, 12])
test_counts = np.random.rand(len(test_time))
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
lc_test = Lightcurve(test_time, test_counts)
slc = lc_test.split(1.5)
assert len(slc) == 3
def test_threeway_split_has_correct_data_points(self):
test_time = np.array([1, 2, 3, 6, 7, 8, 10, 11, 12])
test_counts = np.random.rand(len(test_time))
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
lc_test = Lightcurve(test_time, test_counts)
slc = lc_test.split(1.5)
assert np.all((slc[0].time == [1, 2, 3]))
assert np.all((slc[1].time == [6, 7, 8]))
assert np.all((slc[2].time == [10, 11, 12]))
assert np.all((slc[0].counts == test_counts[:3]))
assert np.all((slc[1].counts == test_counts[3:6]))
assert np.all((slc[2].counts == test_counts[6:]))
def test_split_with_gtis(self):
test_time = np.array([1, 2, 3, 6, 7, 8, 10, 11, 12])
test_counts = np.random.rand(len(test_time))
gti = [[0,4], [9, 13]]
lc_test = Lightcurve(test_time, test_counts, gti=gti)
slc = lc_test.split(1.5)
assert np.all((slc[0].time == [1, 2, 3]))
assert np.all((slc[1].time == [10, 11, 12]))
assert np.all((slc[0].counts == test_counts[:3]))
assert np.all((slc[1].counts == test_counts[6:]))
def test_consecutive_gaps(self):
test_time = np.array([1, 2, 3, 6, 9, 10, 11])
test_counts = np.random.rand(len(test_time))
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
lc_test = Lightcurve(test_time, test_counts)
slc = lc_test.split(1.5)
assert np.all((slc[0].time == [1, 2, 3]))
assert np.all((slc[1].time == [9, 10, 11]))
assert np.all((slc[0].counts == test_counts[:3]))
assert np.all((slc[1].counts == test_counts[4:]))
def test_sort(self):
_times = [2, 1, 3, 4]
_counts = [40, 10, 20, 5]
_counts_err = [4, 1, 2, 0.5]
lc = Lightcurve(_times, _counts, err=_counts_err, mjdref=57000)
mjdref = lc.mjdref
lc_new = lc.sort()
assert np.all(lc_new.counts_err == np.array([1, 4, 2, 0.5]))
assert np.all(lc_new.counts == np.array([10, 40, 20, 5]))
assert np.all(lc_new.time == np.array([1, 2, 3, 4]))
assert lc_new.mjdref == mjdref
lc_new = lc.sort(reverse=True)
assert np.all(lc_new.counts == np.array([5, 20, 40, 10]))
assert np.all(lc_new.time == np.array([4, 3, 2, 1]))
assert lc_new.mjdref == mjdref
def test_sort_counts(self):
_times = [1, 2, 3, 4]
_counts = [40, 10, 20, 5]
lc = Lightcurve(_times, _counts, mjdref=57000)
mjdref = lc.mjdref
lc_new = lc.sort_counts()
assert np.all(lc_new.counts == np.array([5, 10, 20, 40]))
assert np.all(lc_new.time == np.array([4, 2, 3, 1]))
assert lc_new.mjdref == mjdref
lc_new = lc.sort_counts(reverse=True)
assert np.all(lc_new.counts == np.array([40, 20, 10, 5]))
assert np.all(lc_new.time == np.array([1, 3, 2, 4]))
assert lc_new.mjdref == mjdref
def test_sort_reverse(self):
times = np.arange(1000)
counts = np.random.rand(1000)*100
lc = Lightcurve(times, counts)
lc_1 = lc
lc_2 = Lightcurve(np.arange(1000, 2000), np.random.rand(1000)*1000)
lc_long = lc_1.join(lc_2) # Or vice-versa
new_lc_long = lc_long[:] # Copying into a new object
assert new_lc_long.n == lc_long.n
@pytest.mark.skipif('not _HAS_LIGHTKURVE')
def test_to_lightkurve(self):
time, counts, counts_err = range(3), np.ones(3), np.zeros(3)
lc = Lightcurve(time, counts, counts_err)
lk = lc.to_lightkurve()
assert_allclose(lk.time, time)
assert_allclose(lk.flux, counts)
assert_allclose(lk.flux_err, counts_err)
@pytest.mark.skipif(not _HAS_LIGHTKURVE,
reason='Lightkurve not installed')
def test_from_lightkurve(self):
from lightkurve import LightCurve
time, flux, flux_err = range(3), np.ones(3), np.zeros(3)
lk = LightCurve(time, flux, flux_err)
sr = Lightcurve.from_lightkurve(lk)
assert_allclose(sr.time, time)
assert_allclose(sr.counts, flux)
assert_allclose(sr.counts_err, flux_err)
def test_plot_matplotlib_not_installed(self):
try:
import matplotlib.pyplot as plt
except Exception as e:
lc = Lightcurve(self.times, self.counts)
try:
lc.plot()
except Exception as e:
assert type(e) is ImportError
assert str(e) == "Matplotlib required for plot()"
def test_plot_simple(self):
lc = Lightcurve(self.times, self.counts)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
lc.plot()
assert plt.fignum_exists(1)
def test_plot_wrong_label_type(self):
lc = Lightcurve(self.times, self.counts)
with pytest.raises(TypeError):
with warnings.catch_warnings(record=True) as w:
lc.plot(labels=123)
assert np.any(["must be either a list or tuple"
in str(wi.message) for wi in w])
def test_plot_labels_index_error(self):
lc = Lightcurve(self.times, self.counts)
with warnings.catch_warnings(record=True) as w:
lc.plot(labels=('x'))
assert np.any(["must have two labels" in str(wi.message) for wi in w])
def test_plot_default_filename(self):
lc = Lightcurve(self.times, self.counts)
lc.plot(save=True)
assert os.path.isfile('out.png')
os.unlink('out.png')
def test_plot_custom_filename(self):
lc = Lightcurve(self.times, self.counts)
lc.plot(save=True, filename='lc.png')
assert os.path.isfile('lc.png')
os.unlink('lc.png')
def test_plot_axis(self):
lc = Lightcurve(self.times, self.counts)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
lc.plot(axis=[0, 1, 0, 100])
assert plt.fignum_exists(1)
def test_plot_title(self):
lc = Lightcurve(self.times, self.counts)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
lc.plot(title="Test Lightcurve")
assert plt.fignum_exists(1)
def test_read_from_lcurve_1(self):
fname = 'lcurveA.fits'
with pytest.warns(UserWarning):
lc = Lightcurve.read(os.path.join(datadir, fname),
format_='hea', skip_checks=True)
ctrate = 1
assert np.isclose(lc.countrate[0], ctrate)
def test_read_from_lcurve_2(self):
fname = 'lcurve_new.fits'
with pytest.warns(UserWarning):
lc = Lightcurve.read(os.path.join(datadir, fname),
format_='hea', skip_checks=True)
ctrate = 0.91
assert np.isclose(lc.countrate[0], ctrate)
assert np.isclose(lc.mjdref, 55197.00076601852)
@pytest.mark.skipif('not _HAS_YAML')
def test_io_with_ascii(self):
lc = Lightcurve(self.times, self.counts)
lc.write('ascii_lc.ecsv', format_='ascii')
lc = lc.read('ascii_lc.ecsv', format_='ascii')
assert np.all(lc.time == self.times)
assert np.all(lc.counts == self.counts)
os.remove('ascii_lc.ecsv')
def test_io_with_pickle(self):
lc = Lightcurve(self.times, self.counts)
lc.write('lc.pickle', format_='pickle')
lc.read('lc.pickle', format_='pickle')
assert np.all(lc.time == self.times)
assert np.all(lc.counts == self.counts)
assert np.all(lc.gti == self.gti)
os.remove('lc.pickle')
@pytest.mark.skipif('not _H5PY_INSTALLED')
def test_io_with_hdf5(self):
lc = Lightcurve(self.times, self.counts)
lc.write('lc.hdf5', format_='hdf5')
data = lc.read('lc.hdf5', format_='hdf5')
assert np.all(data.time == self.times)
assert np.all(data.counts == self.counts)
assert np.all(data.gti == self.gti)
os.remove('lc.hdf5')
def test_split_lc_by_gtis(self):
times = [1, 2, 3, 4, 5, 6, 7, 8]
counts = [1, 1, 1, 1, 2, 3, 3, 2]
gti = [[0.5, 4.5], [5.5, 7.5]]
lc = Lightcurve(times, counts, gti=gti)
list_of_lcs = lc.split_by_gti()
lc0 = list_of_lcs[0]
lc1 = list_of_lcs[1]
assert np.all(lc0.time == [1, 2, 3, 4])
assert np.all(lc1.time == [6, 7])
assert np.all(lc0.counts == [1, 1, 1, 1])
assert np.all(lc1.counts == [3, 3])
assert np.all(lc0.gti == [[0.5, 4.5]])
assert np.all(lc1.gti == [[5.5, 7.5]])
def test_split_lc_by_gtis_minpoints(self):
times = [1, 2, 3, 4, 5, 6, 7, 8]
counts = [1, 1, 1, 1, 2, 3, 3, 2]
gti = [[0.5, 3.5], [3.5, 5.5], [5.5, 8.5]]
min_points = 3
lc = Lightcurve(times, counts, gti=gti)
list_of_lcs = lc.split_by_gti(min_points=min_points)
lc0 = list_of_lcs[0]
lc1 = list_of_lcs[1]
assert np.all(lc0.time == [1, 2, 3])
assert np.all(lc1.time == [6, 7, 8])
assert np.all(lc0.counts == [1, 1, 1])
assert np.all(lc1.counts == [3, 3, 2])
def test_shift(self):
times = [1, 2, 3, 4, 5, 6, 7, 8]
counts = [1, 1, 1, 1, 2, 3, 3, 2]
lc = Lightcurve(times, counts, input_counts=True)
lc2 = lc.shift(1)
assert np.all(lc2.time - 1 == times)
lc2 = lc.shift(-1)
assert np.all(lc2.time + 1 == times)
assert np.all(lc2.counts == lc.counts)
assert np.all(lc2.countrate == lc.countrate)
lc = Lightcurve(times, counts, input_counts=False)
lc2 = lc.shift(1)
assert np.all(lc2.counts == lc.counts)
assert np.all(lc2.countrate == lc.countrate)
def test_table_roundtrip(self):
"""Test that io methods raise Key Error when
wrong format is provided.
"""
N = len(self.times)
lc = Lightcurve(self.times, self.counts, err=self.counts_err,
mission="BUBU", instr="BABA",
mjdref=53467.)
ts = lc.to_astropy_table()
new_lc = lc.from_astropy_table(ts)
for attr in ['time', 'gti', 'counts']:
assert np.all(getattr(lc, attr) == getattr(new_lc, attr))
for attr in ['mission', 'instr', 'mjdref']:
assert getattr(lc, attr) == getattr(new_lc, attr)
def test_table_roundtrip_ctrate(self):
"""Test that io methods raise Key Error when
wrong format is provided.
"""
N = len(self.times)
dt = 0.5
mean_counts = 2.0
times = np.arange(0 + dt / 2, 5 - dt / 2, dt)
countrate = np.zeros_like(times) + mean_counts
err = np.zeros_like(times) + mean_counts / 2
lc = Lightcurve(times, countrate, err=err,
mission="BUBU", instr="BABA",
mjdref=53467., input_counts=False)
ts = lc.to_astropy_table()
new_lc = Lightcurve.from_astropy_table(ts)
for attr in ['time', 'gti', 'countrate']:
assert np.allclose(getattr(lc, attr), getattr(new_lc, attr))
assert np.allclose(new_lc.counts, lc.countrate * lc.dt)
for attr in ['mission', 'instr', 'mjdref']:
assert getattr(lc, attr) == getattr(new_lc, attr)
@pytest.mark.skipif('not _HAS_TIMESERIES')
def test_timeseries_roundtrip(self):
"""Test that io methods raise Key Error when
wrong format is provided.
"""
N = len(self.times)
lc = Lightcurve(self.times, self.counts, mission="BUBU", instr="BABA",
mjdref=53467.)
ts = lc.to_astropy_timeseries()
new_lc = lc.from_astropy_timeseries(ts)
for attr in ['time', 'gti', 'counts']:
assert np.all(getattr(lc, attr) == getattr(new_lc, attr))
for attr in ['mission', 'instr', 'mjdref']:
assert getattr(lc, attr) == getattr(new_lc, attr)
@pytest.mark.skipif('not _HAS_TIMESERIES')
def test_timeseries_roundtrip_ctrate(self):
"""Test that io methods raise Key Error when
wrong format is provided.
"""
N = len(self.times)
dt = 0.5
mean_counts = 2.0
times = np.arange(0 + dt / 2, 5 - dt / 2, dt)
countrate = np.zeros_like(times) + mean_counts
lc = Lightcurve(times, countrate, mission="BUBU", instr="BABA",
mjdref=53467., input_counts=False)
ts = lc.to_astropy_timeseries()
new_lc = lc.from_astropy_timeseries(ts)
for attr in ['time', 'gti', 'countrate']:
assert np.allclose(getattr(lc, attr), getattr(new_lc, attr))
assert np.allclose(new_lc.counts, lc.countrate * lc.dt)
for attr in ['mission', 'instr', 'mjdref']:
assert getattr(lc, attr) == getattr(new_lc, attr)
@pytest.mark.skipif('not _HAS_TIMESERIES')
def test_from_timeseries_bad(self):
from astropy.time import TimeDelta
times = TimeDelta(np.arange(10) * u.s)
ts = TimeSeries(time=times)
with pytest.raises(ValueError) as excinfo:
Lightcurve.from_astropy_timeseries(ts)
assert "Input timeseries must contain at least" in str(excinfo.value)
class TestLightcurveRebin(object):
@classmethod
def setup_class(cls):
dt = 0.0001220703125
n = 1384132
mean_counts = 2.0
times = np.arange(dt/2, dt/2 + n*dt, dt)
counts = np.zeros_like(times) + mean_counts
cls.lc = Lightcurve(times, counts)
def test_rebin_even(self):
dt_new = 2.0
lc_binned = self.lc.rebin(dt_new)
assert np.isclose(lc_binned.dt, dt_new)
counts_test = np.zeros_like(lc_binned.time) + \
self.lc.counts[0]*dt_new/self.lc.dt
assert np.allclose(lc_binned.counts, counts_test)
def test_rebin_even_factor(self):
f = 200
dt_new = f * self.lc.dt
lc_binned = self.lc.rebin(f=f)
assert np.isclose(dt_new, f * self.lc.dt)
counts_test = np.zeros_like(lc_binned.time) + \
self.lc.counts[0]*dt_new/self.lc.dt
assert np.allclose(lc_binned.counts, counts_test)
def test_rebin_odd(self):
dt_new = 1.5
lc_binned = self.lc.rebin(dt_new)
assert np.isclose(lc_binned.dt, dt_new)
counts_test = np.zeros_like(lc_binned.time) + \
self.lc.counts[0]*dt_new/self.lc.dt
assert np.allclose(lc_binned.counts, counts_test)
def test_rebin_odd_factor(self):
f = 100.5
dt_new = f * self.lc.dt
lc_binned = self.lc.rebin(f=f)
assert np.isclose(dt_new, f * self.lc.dt)
counts_test = np.zeros_like(lc_binned.time) + \
self.lc.counts[0]*dt_new/self.lc.dt
assert np.allclose(lc_binned.counts, counts_test)
def rebin_several(self, dt):
lc_binned = self.lc.rebin(dt)
assert len(lc_binned.time) == len(lc_binned.counts)
def test_rebin_equal_numbers(self):
dt_all = [2, 3, np.pi, 5]
for dt in dt_all:
self.rebin_several(dt)
def test_rebin_with_gtis(self):
times = np.arange(0, 100, 0.1)
counts = np.random.normal(100, 0.1, size=times.shape[0])
gti = [[0, 40], [60, 100]]
good = create_gti_mask(times, gti)
counts[np.logical_not(good)] = 0
lc = Lightcurve(times, counts, gti=gti, skip_checks=True, dt=0.1)
lc.apply_gtis()
lc_rebin = lc.rebin(1.0)
assert (lc_rebin.time[39] - lc_rebin.time[38]) > 1.0
def test_lc_baseline(self):
times = np.arange(0, 100, 0.01)
counts = np.random.normal(100, 0.1, len(times)) + \
0.001 * times
gti = [[-0.005, 50.005], [59.005, 100.005]]
good = create_gti_mask(times, gti)
counts[np.logical_not(good)] = 0
lc = Lightcurve(times, counts, gti=gti)
baseline = lc.baseline(10000, 0.01)
assert np.all(lc.counts - baseline < 1)
def test_lc_baseline_offset(self):
times = np.arange(0, 100, 0.01)
input_stdev = 0.1
counts = np.random.normal(100, input_stdev, len(times)) + \
0.001 * times
gti = [[-0.005, 50.005], [59.005, 100.005]]
good = create_gti_mask(times, gti)
counts[np.logical_not(good)] = 0
lc = Lightcurve(times, counts, gti=gti)
baseline = lc.baseline(10000, 0.01, offset_correction=True)
assert np.isclose(np.std(lc.counts - baseline), input_stdev, rtol=0.1)
def test_lc_baseline_offset_fewbins(self):
times = np.arange(0, 4, 1)
input_stdev = 0.1
counts = np.random.normal(100, input_stdev, len(times)) + \
0.001 * times
gti = [[-0.005, 4.005]]
lc = Lightcurve(times, counts, gti=gti)
with pytest.warns(UserWarning) as record:
lc.baseline(10000, 0.01, offset_correction=True)
assert np.any(["Too few bins to perform baseline offset correction"
in r.message.args[0] for r in record])
def test_change_mjdref(self):
lc_new = self.lc.change_mjdref(57000)
assert lc_new.mjdref == 57000
def test_apply_gtis(self):
time = np.arange(150)
count = np.zeros_like(time) + 3
lc = Lightcurve(time, count, gti=[[-0.5, 150.5]])
lc.gti = [[-0.5, 2.5], [12.5, 14.5]]
lc.apply_gtis()
assert lc.n == 5
assert np.all(lc.time == np.array([0, 1, 2, 13, 14]))
lc.gti = [[-0.5, 10.5]]
lc.apply_gtis()
assert np.all(lc.time == np.array([0, 1, 2]))
def test_eq_operator(self):
time = [1, 2, 3]
count1 = [100, 200, 300]
count2 = [100, 200, 300]
lc1 = Lightcurve(time, count1)
lc2 = Lightcurve(time, count2)
assert lc1 == lc2
def test_eq_bad_lc(self):
time = [1, 2, 3]
count1 = [100, 200, 300]
count2 = [100, 200, 300]
lc1 = Lightcurve(time, count1)
with pytest.raises(ValueError):
lc1 == count2
def test_eq_different_times(self):
time1 = [1, 2, 3]
time2 = [2, 3, 4]
count1 = [100, 200, 300]
count2 = [100, 200, 300]
lc1 = Lightcurve(time1, count1)
lc2 = Lightcurve(time2, count2)
assert not lc1 == lc2
def test_eq_different_counts(self):
time = [1, 2, 3, 4]
count1 = [5, 10, 15, 20]
count2 = [2, 4, 5, 8]
lc1 = Lightcurve(time, count1)
lc2 = Lightcurve(time, count2)
assert not lc1 == lc2
```
|
{
"source": "jdtatz/numba",
"score": 2
}
|
#### File: tests/hsadrv/test_driver.py
```python
from __future__ import print_function, absolute_import
import ctypes
import os
import threading
try:
import queue
except ImportError:
import Queue as queue
import numpy as np
import numba.unittest_support as unittest
from numba.roc.hsadrv.driver import hsa, Queue, Program, Executable,\
BrigModule, Context, dgpu_present
from numba.roc.hsadrv.driver import hsa as roc
import numba.roc.api as hsaapi
from numba import float32, float64, vectorize
from numba.roc.hsadrv import drvapi
from numba.roc.hsadrv import enums
from numba.roc.hsadrv import enums_ext
from numba import config
class TestLowLevelApi(unittest.TestCase):
"""This test checks that all the functions defined in drvapi
bind properly using ctypes."""
def test_functions_available(self):
missing_functions = []
for fname in drvapi.API_PROTOTYPES.keys():
try:
getattr(hsa, fname)
except Exception as e:
missing_functions.append("'{0}': {1}".format(fname, str(e)))
self.assertEqual(len(missing_functions), 0,
msg='\n'.join(missing_functions))
class TestAgents(unittest.TestCase):
def test_agents_init(self):
self.assertGreater(len(roc.agents), 0)
def test_agents_create_queue_single(self):
for agent in roc.agents:
if agent.is_component:
queue = agent.create_queue_single(2 ** 5)
self.assertIsInstance(queue, Queue)
def test_agents_create_queue_multi(self):
for agent in roc.agents:
if agent.is_component:
queue = agent.create_queue_multi(2 ** 5)
self.assertIsInstance(queue, Queue)
class _TestBase(unittest.TestCase):
def setUp(self):
self.gpu = [a for a in roc.agents if a.is_component][0]
self.cpu = [a for a in roc.agents if not a.is_component][0]
self.queue = self.gpu.create_queue_multi(self.gpu.queue_max_size)
def tearDown(self):
del self.queue
del self.gpu
del self.cpu
def get_brig_file():
path = '/opt/rocm/hsa/sample/vector_copy_full.brig'
assert os.path.isfile(path)
return path
def _check_example_file():
try:
get_brig_file()
except BaseException:
return False
return True
has_brig_example = _check_example_file()
@unittest.skipUnless(has_brig_example, "Brig example not found")
class TestBrigModule(unittest.TestCase):
def test_from_file(self):
brig_file = get_brig_file()
brig_module = BrigModule.from_file(brig_file)
self.assertGreater(len(brig_module), 0)
@unittest.skipUnless(has_brig_example, "Brig example not found")
class TestProgram(_TestBase):
def test_create_program(self):
brig_file = get_brig_file()
symbol = '&__vector_copy_kernel'
brig_module = BrigModule.from_file(brig_file)
program = Program()
program.add_module(brig_module)
code = program.finalize(self.gpu.isa)
ex = Executable()
ex.load(self.gpu, code)
ex.freeze()
sym = ex.get_symbol(self.gpu, symbol)
self.assertGreater(sym.kernarg_segment_size, 0)
class TestMemory(_TestBase):
def test_region_list(self):
self.assertGreater(len(self.gpu.regions.globals), 0)
self.assertGreater(len(self.gpu.regions.groups), 0)
# The following may be empty
# print(self.gpu.regions.privates)
# print(self.gpu.regions.readonlys)
def test_register(self):
src = np.random.random(1024).astype(np.float32)
roc.hsa_memory_register(src.ctypes.data, src.nbytes)
roc.hsa_memory_deregister(src.ctypes.data, src.nbytes)
def test_allocate(self):
regions = self.gpu.regions
# More than one region
self.assertGreater(len(regions), 0)
# Find kernel argument regions
kernarg_regions = list()
for r in regions:
if r.supports(enums.HSA_REGION_GLOBAL_FLAG_KERNARG):
kernarg_regions.append(r)
self.assertGreater(len(kernarg_regions), 0)
# Test allocating at the kernel argument region
kernarg_region = kernarg_regions[0]
nelem = 10
ptr = kernarg_region.allocate(ctypes.sizeof(ctypes.c_float) * nelem)
self.assertNotEqual(ctypes.addressof(ptr), 0,
"pointer must not be NULL")
# Test writing to it
src = np.random.random(nelem).astype(np.float32)
ctypes.memmove(ptr, src.ctypes.data, src.nbytes)
ref = (ctypes.c_float * nelem).from_address(ptr.value)
for i in range(src.size):
self.assertEqual(ref[i], src[i])
roc.hsa_memory_free(ptr)
@unittest.skipUnless(dgpu_present, "dGPU only")
def test_coarse_grained_allocate(self):
"""
Tests the coarse grained allocation works on a dGPU.
It performs a data copying round trip via:
memory
|
HSA cpu memory
|
HSA dGPU host accessible memory <---|
| |
HSA dGPU memory --------------------|
"""
gpu_regions = self.gpu.regions
gpu_only_coarse_regions = list()
gpu_host_accessible_coarse_regions = list()
for r in gpu_regions:
if r.supports(enums.HSA_REGION_GLOBAL_FLAG_COARSE_GRAINED):
if r.host_accessible:
gpu_host_accessible_coarse_regions.append(r)
else:
gpu_only_coarse_regions.append(r)
# check we have 1+ coarse gpu region(s) of each type
self.assertGreater(len(gpu_only_coarse_regions), 0)
self.assertGreater(len(gpu_host_accessible_coarse_regions), 0)
cpu_regions = self.cpu.regions
cpu_coarse_regions = list()
for r in cpu_regions:
if r.supports(enums.HSA_REGION_GLOBAL_FLAG_COARSE_GRAINED):
cpu_coarse_regions.append(r)
# check we have 1+ coarse cpu region(s)
self.assertGreater(len(cpu_coarse_regions), 0)
# ten elements of data used
nelem = 10
# allocation
cpu_region = cpu_coarse_regions[0]
cpu_ptr = cpu_region.allocate(ctypes.sizeof(ctypes.c_float) * nelem)
self.assertNotEqual(ctypes.addressof(cpu_ptr), 0,
"pointer must not be NULL")
gpu_only_region = gpu_only_coarse_regions[0]
gpu_only_ptr = gpu_only_region.allocate(ctypes.sizeof(ctypes.c_float) *
nelem)
self.assertNotEqual(ctypes.addressof(gpu_only_ptr), 0,
"pointer must not be NULL")
gpu_host_accessible_region = gpu_host_accessible_coarse_regions[0]
gpu_host_accessible_ptr = gpu_host_accessible_region.allocate(
ctypes.sizeof(ctypes.c_float) * nelem)
self.assertNotEqual(ctypes.addressof(gpu_host_accessible_ptr), 0,
"pointer must not be NULL")
# Test writing to allocated area
src = np.random.random(nelem).astype(np.float32)
roc.hsa_memory_copy(cpu_ptr, src.ctypes.data, src.nbytes)
roc.hsa_memory_copy(gpu_host_accessible_ptr, cpu_ptr, src.nbytes)
roc.hsa_memory_copy(gpu_only_ptr, gpu_host_accessible_ptr, src.nbytes)
# check write is correct
cpu_ref = (ctypes.c_float * nelem).from_address(cpu_ptr.value)
for i in range(src.size):
self.assertEqual(cpu_ref[i], src[i])
gpu_ha_ref = (ctypes.c_float * nelem).\
from_address(gpu_host_accessible_ptr.value)
for i in range(src.size):
self.assertEqual(gpu_ha_ref[i], src[i])
# zero out host accessible GPU memory and CPU memory
z0 = np.zeros(nelem).astype(np.float32)
roc.hsa_memory_copy(cpu_ptr, z0.ctypes.data, z0.nbytes)
roc.hsa_memory_copy(gpu_host_accessible_ptr, cpu_ptr, z0.nbytes)
# check zeroing is correct
for i in range(z0.size):
self.assertEqual(cpu_ref[i], z0[i])
for i in range(z0.size):
self.assertEqual(gpu_ha_ref[i], z0[i])
# copy back the data from the GPU
roc.hsa_memory_copy(gpu_host_accessible_ptr, gpu_only_ptr, src.nbytes)
# check the copy back is ok
for i in range(src.size):
self.assertEqual(gpu_ha_ref[i], src[i])
# free
roc.hsa_memory_free(cpu_ptr)
roc.hsa_memory_free(gpu_only_ptr)
roc.hsa_memory_free(gpu_host_accessible_ptr)
@unittest.skipUnless(has_brig_example, "Brig example not found")
@unittest.skipUnless(dgpu_present, "dGPU only")
@unittest.skip("Permanently skip? HSA spec violation causes corruption")
def test_coarse_grained_kernel_execution(self):
"""
This tests the execution of a kernel on a dGPU using coarse memory
regions for the buffers.
NOTE: the code violates the HSA spec in that it uses a coarse region
for kernargs, this is a performance hack.
"""
from numba.roc.hsadrv.driver import BrigModule, Program, hsa,\
Executable
# get a brig file
brig_file = get_brig_file()
brig_module = BrigModule.from_file(brig_file)
self.assertGreater(len(brig_module), 0)
# use existing GPU regions for computation space
gpu_regions = self.gpu.regions
gpu_only_coarse_regions = list()
gpu_host_accessible_coarse_regions = list()
for r in gpu_regions:
if r.supports(enums.HSA_REGION_GLOBAL_FLAG_COARSE_GRAINED):
if r.host_accessible:
gpu_host_accessible_coarse_regions.append(r)
else:
gpu_only_coarse_regions.append(r)
# check we have 1+ coarse gpu region(s) of each type
self.assertGreater(len(gpu_only_coarse_regions), 0)
self.assertGreater(len(gpu_host_accessible_coarse_regions), 0)
# Compilation phase:
# FIXME: this is dubious, assume launching agent is indexed first
agent = roc.components[0]
prog = Program()
prog.add_module(brig_module)
# get kernel and load
code = prog.finalize(agent.isa)
ex = Executable()
ex.load(agent, code)
ex.freeze()
# extract symbols
sym = ex.get_symbol(agent, "&__vector_copy_kernel")
self.assertNotEqual(sym.kernel_object, 0)
self.assertGreater(sym.kernarg_segment_size, 0)
# attempt kernel execution
import ctypes
import numpy as np
# Do memory allocations
# allocate and initialise memory
nelem = 1024 * 1024
src = np.random.random(nelem).astype(np.float32)
z0 = np.zeros_like(src)
# alloc host accessible memory
nbytes = ctypes.sizeof(ctypes.c_float) * nelem
gpu_host_accessible_region = gpu_host_accessible_coarse_regions[0]
host_in_ptr = gpu_host_accessible_region.allocate(nbytes)
self.assertNotEqual(host_in_ptr.value, None,
"pointer must not be NULL")
host_out_ptr = gpu_host_accessible_region.allocate(nbytes)
self.assertNotEqual(host_out_ptr.value, None,
"pointer must not be NULL")
# init mem with data
roc.hsa_memory_copy(host_in_ptr, src.ctypes.data, src.nbytes)
roc.hsa_memory_copy(host_out_ptr, z0.ctypes.data, z0.nbytes)
# alloc gpu only memory
gpu_only_region = gpu_only_coarse_regions[0]
gpu_in_ptr = gpu_only_region.allocate(nbytes)
self.assertNotEqual(gpu_in_ptr.value, None, "pointer must not be NULL")
gpu_out_ptr = gpu_only_region.allocate(nbytes)
self.assertNotEqual(gpu_out_ptr.value, None,
"pointer must not be NULL")
# copy memory from host accessible location to gpu only
roc.hsa_memory_copy(gpu_in_ptr, host_in_ptr, src.nbytes)
# Do kernargs
# Find a coarse region (for better performance on dGPU) in which
# to place kernargs. NOTE: This violates the HSA spec
kernarg_regions = list()
for r in gpu_host_accessible_coarse_regions:
# NOTE: VIOLATION
if r.supports(enums.HSA_REGION_GLOBAL_FLAG_KERNARG):
kernarg_regions.append(r)
self.assertGreater(len(kernarg_regions), 0)
# use first region for args
kernarg_region = kernarg_regions[0]
kernarg_ptr = kernarg_region.allocate(
2 * ctypes.sizeof(ctypes.c_void_p))
self.assertNotEqual(kernarg_ptr, None, "pointer must not be NULL")
# wire in gpu memory
argref = (2 * ctypes.c_size_t).from_address(kernarg_ptr.value)
argref[0] = gpu_in_ptr.value
argref[1] = gpu_out_ptr.value
# signal
sig = roc.create_signal(1)
# create queue and dispatch job
queue = agent.create_queue_single(32)
queue.dispatch(sym, kernarg_ptr, workgroup_size=(256, 1, 1),
grid_size=(nelem, 1, 1), signal=None)
# copy result back to host accessible memory to check
roc.hsa_memory_copy(host_out_ptr, gpu_out_ptr, src.nbytes)
# check the data is recovered
ref = (nelem * ctypes.c_float).from_address(host_out_ptr.value)
np.testing.assert_equal(ref, src)
# free
roc.hsa_memory_free(host_in_ptr)
roc.hsa_memory_free(host_out_ptr)
roc.hsa_memory_free(gpu_in_ptr)
roc.hsa_memory_free(gpu_out_ptr)
class TestContext(_TestBase):
"""Tests the Context class behaviour is correct."""
def test_memalloc(self):
"""
Tests Context.memalloc() for a given, in the parlance of HSA,\
`component`. Testing includes specialisations for the supported
components of dGPUs and APUs.
"""
n = 10 # things to alloc
nbytes = ctypes.sizeof(ctypes.c_double) * n
# run if a dGPU is present
if dgpu_present:
# find a host accessible region
dGPU_agent = self.gpu
CPU_agent = self.cpu
gpu_ctx = Context(dGPU_agent)
gpu_only_mem = gpu_ctx.memalloc(nbytes, hostAccessible=False)
ha_mem = gpu_ctx.memalloc(nbytes, hostAccessible=True)
# on dGPU systems, all host mem is host accessible
cpu_ctx = Context(CPU_agent)
cpu_mem = cpu_ctx.memalloc(nbytes, hostAccessible=True)
# Test writing to allocated area
src = np.random.random(n).astype(np.float64)
roc.hsa_memory_copy(cpu_mem.device_pointer, src.ctypes.data, src.nbytes)
roc.hsa_memory_copy(ha_mem.device_pointer, cpu_mem.device_pointer, src.nbytes)
roc.hsa_memory_copy(gpu_only_mem.device_pointer, ha_mem.device_pointer, src.nbytes)
# clear
z0 = np.zeros_like(src)
roc.hsa_memory_copy(ha_mem.device_pointer, z0.ctypes.data, z0.nbytes)
ref = (n * ctypes.c_double).from_address(ha_mem.device_pointer.value)
for k in range(n):
self.assertEqual(ref[k], 0)
# copy back from dGPU
roc.hsa_memory_copy(ha_mem.device_pointer, gpu_only_mem.device_pointer, src.nbytes)
for k in range(n):
self.assertEqual(ref[k], src[k])
else: #TODO: write APU variant
pass
def check_mempools(self, agent, has_fine_grain=True):
# get allocation-allowed pools
mp_alloc_list = [mp for mp in agent.mempools if mp.alloc_allowed]
mpdct = {'global': [], 'readonly': [], 'private': [], 'group': []}
for mp in mp_alloc_list:
mpdct[mp.kind].append(mp)
# only globals are allocation-allowed
if has_fine_grain:
self.assertEqual(len(mpdct['global']), 2)
else:
self.assertEqual(len(mpdct['global']), 1)
self.assertEqual(len(mpdct['readonly']), 0)
self.assertEqual(len(mpdct['private']), 0)
self.assertEqual(len(mpdct['group']), 0)
self.assertEqual(len(agent.mempools.globals), len(mpdct['global']))
# the global-pools are coarse-grain and fine-grain pools
glbs = mpdct['global']
coarsegrain = None
finegrain = None
for gmp in glbs:
if gmp.supports(enums_ext.HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_COARSE_GRAINED):
coarsegrain = gmp
if gmp.supports(enums_ext.HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_FINE_GRAINED):
finegrain = gmp
self.assertIsNotNone(coarsegrain)
if has_fine_grain:
self.assertIsNotNone(finegrain)
else:
self.assertIsNone(finegrain)
self.assertIsNot(coarsegrain, finegrain)
def test_cpu_mempool_property(self):
self.check_mempools(self.cpu)
@unittest.skipUnless(dgpu_present, "dGPU only")
def test_gpu_mempool_property(self):
self.check_mempools(self.gpu, has_fine_grain=False)
@unittest.skipUnless(dgpu_present, "dGPU only")
def test_mempool(self):
n = 10 # things to alloc
nbytes = ctypes.sizeof(ctypes.c_double) * n
dGPU_agent = self.gpu
CPU_agent = self.cpu
# allocate a GPU memory pool
gpu_ctx = Context(dGPU_agent)
gpu_only_mem = gpu_ctx.mempoolalloc(nbytes)
# allocate a CPU memory pool, allow the GPU access to it
cpu_ctx = Context(CPU_agent)
cpu_mem = cpu_ctx.mempoolalloc(nbytes, allow_access_to=[gpu_ctx.agent])
## Test writing to allocated area
src = np.random.random(n).astype(np.float64)
roc.hsa_memory_copy(cpu_mem.device_pointer, src.ctypes.data, src.nbytes)
roc.hsa_memory_copy(gpu_only_mem.device_pointer, cpu_mem.device_pointer, src.nbytes)
# clear
z0 = np.zeros_like(src)
roc.hsa_memory_copy(cpu_mem.device_pointer, z0.ctypes.data, z0.nbytes)
ref = (n * ctypes.c_double).from_address(cpu_mem.device_pointer.value)
for k in range(n):
self.assertEqual(ref[k], 0)
# copy back from dGPU
roc.hsa_memory_copy(cpu_mem.device_pointer, gpu_only_mem.device_pointer, src.nbytes)
for k in range(n):
self.assertEqual(ref[k], src[k])
def check_mempool_with_flags(self, finegrain):
dGPU_agent = self.gpu
gpu_ctx = Context(dGPU_agent)
CPU_agent = self.cpu
cpu_ctx = Context(CPU_agent)
# get mempool with specific flags
cpu_ctx.mempoolalloc(1024, allow_access_to=[gpu_ctx._agent])
@unittest.skipUnless(dgpu_present, 'dGPU only')
def test_mempool_finegrained(self):
self.check_mempool_with_flags(finegrain=True)
@unittest.skipUnless(dgpu_present, 'dGPU only')
def test_mempool_coarsegrained(self):
self.check_mempool_with_flags(finegrain=False)
@unittest.skipUnless(dgpu_present, 'dGPU only')
def test_mempool_amd_example(self):
dGPU_agent = self.gpu
gpu_ctx = Context(dGPU_agent)
CPU_agent = self.cpu
cpu_ctx = Context(CPU_agent)
kNumInt = 1024
kSize = kNumInt * ctypes.sizeof(ctypes.c_int)
dependent_signal = roc.create_signal(0)
completion_signal = roc.create_signal(0)
## allocate host src and dst, allow gpu access
flags = dict(allow_access_to=[gpu_ctx.agent], finegrain=False)
host_src = cpu_ctx.mempoolalloc(kSize, **flags)
host_dst = cpu_ctx.mempoolalloc(kSize, **flags)
# there's a loop in `i` here over GPU hardware
i = 0
# get gpu local pool
local_memory = gpu_ctx.mempoolalloc(kSize)
host_src_view = (kNumInt * ctypes.c_int).from_address(host_src.device_pointer.value)
host_dst_view = (kNumInt * ctypes.c_int).from_address(host_dst.device_pointer.value)
host_src_view[:] = i + 2016 + np.arange(0, kNumInt, dtype=np.int32)
host_dst_view[:] = np.zeros(kNumInt, dtype=np.int32)
# print("GPU: %s"%gpu_ctx._agent.name)
# print("CPU: %s"%cpu_ctx._agent.name)
roc.hsa_signal_store_relaxed(completion_signal, 1)
q = queue.Queue()
class validatorThread(threading.Thread):
def run(self):
val = roc.hsa_signal_wait_acquire(
completion_signal,
enums.HSA_SIGNAL_CONDITION_EQ,
0,
ctypes.c_uint64(-1),
enums.HSA_WAIT_STATE_ACTIVE)
q.put(val) # wait_res
# this could be a call on the signal itself dependent_signal.store_relaxed(1)
roc.hsa_signal_store_relaxed(dependent_signal, 1)
h2l_start = threading.Semaphore(value=0)
class l2hThread(threading.Thread):
def run(self):
dep_signal = drvapi.hsa_signal_t(dependent_signal._id)
roc.hsa_amd_memory_async_copy(host_dst.device_pointer.value,
cpu_ctx._agent._id,
local_memory.device_pointer.value,
gpu_ctx._agent._id, kSize, 1,
ctypes.byref(dep_signal),
completion_signal)
h2l_start.release() # signal h2l to start
class h2lThread(threading.Thread):
def run(self):
h2l_start.acquire() # to wait until l2h thread has started
roc.hsa_amd_memory_async_copy(local_memory.device_pointer.value,
gpu_ctx._agent._id,
host_src.device_pointer.value,
cpu_ctx._agent._id, kSize, 0,
None,
dependent_signal)
timeout = 10 # 10 seconds timeout
# # init thread instances
validator = validatorThread()
l2h = l2hThread()
h2l = h2lThread()
# run them
validator.start()
l2h.start()
h2l.start()
# join
l2h.join(timeout)
h2l.join(timeout)
validator.join(timeout)
# verify
wait_res = q.get()
self.assertEqual(wait_res, 0)
np.testing.assert_allclose(host_dst_view, host_src_view)
@unittest.skipUnless(dgpu_present, "dGPU only")
def test_to_device_to_host(self):
"""
Tests .to_device() and .copy_to_host()
"""
n = 10
data = np.zeros(n)
output = np.zeros(n)
@vectorize("float64(float64)", target='roc')
def func(x):
return x + 1
hsaapi.to_device(data)
out_device = hsaapi.to_device(output)
func(data, out=out_device)
host_output = out_device.copy_to_host()
np.testing.assert_equal(np.ones(n), host_output)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jdtatz/pl-nirs-sim-app",
"score": 2
}
|
#### File: pl-nirs-sim-app/nirs_sim_app/nirs_sim.py
```python
import numpy as np
import numba as nb
from pymcx import MCX
def create_props(spec, wavelen):
layers = spec['layers']
lprops = spec['layer_properties']
ext_coeff = {k: np.interp(wavelen, *itr) for k, itr in spec['extinction_coeffs'].items()}
media = np.empty((1+len(layers), 4), np.float32)
media[0] = 0, 0, 1, spec.get('n_external', 1)
for i, l in enumerate(layers):
lp = lprops[l]
g = lp['g']
mua = sum(ext_coeff[k] * lp['components'][k] for k in ext_coeff)
mus = lp['Scatter A'] * wavelen ** -lp['Scatter b'] / (1 - g)
media[1+i] = mua, mus, g, lp['n']
return media, np.stack([lprops[l]['BFi'] for l in layers])
@nb.jit(nopython=True, nogil=True, parallel=False)
def analysis(detp, prop, tof_domain, tau, wavelength, BFi, freq, ndet, ntof, nmedia, pcounts, paths, phiTD, phiFD, g1_top, phiDist):
c = 2.998e+11 # speed of light in mm / s
detBins = detp[0].astype(np.intc) - 1
tofBins = np.minimum(np.digitize(prop[1:, 3] @ detp[2:(2+nmedia)], c * tof_domain), ntof) - 1
distBins = np.minimum(np.digitize(prop[1:, 3] * detp[2:(2+nmedia)].T, c * tof_domain), ntof) - 1
path = -prop[1:, 0] @ detp[2:(2+nmedia)]
phis = np.exp(path)
fds = np.exp((-prop[1:, 0] + 2j * np.pi * freq * prop[1:, 3] / c).astype(np.complex64) @ detp[2:(2+nmedia)].astype(np.complex64))
prep = (-2*(2*np.pi*prop[1:, 3]/(wavelength*1e-6))**2*BFi).astype(np.float32) @ detp[(2+nmedia):(2+2*nmedia)]
for i in range(len(detBins)):
pcounts[detBins[i], tofBins[i]] += 1
paths[detBins[i], tofBins[i]] += detp[2:(2+nmedia), i]
phiTD[detBins[i], tofBins[i]] += phis[i]
phiFD[detBins[i]] += fds[i]
for l in range(nmedia):
phiDist[detBins[i], distBins[i, l], l] += phis[i]
for j in range(len(tau)):
g1_top[detBins[i], j] += np.exp(prep[i] * tau[j] + path[i])
def simulate(spec, wavelength):
cfg = spec['mcx']
cfg.ismomentum = True
cfg.prop, BFi = create_props(spec, wavelength)
run_count = spec.get('run_count', 1)
seeds = np.asarray(spec.get('seeds', np.random.randint(0xFFFF, size=run_count)))
tof_domain = spec.get('tof_domain', np.append(np.arange(cfg.tstart, cfg.tend, cfg.tstep), cfg.tend))
tau = spec.get('tau', np.logspace(-8, -2))
freq = spec.get('frequency', 110e6)
ndet, ntof, nmedia = len(cfg.detpos), len(tof_domain) - 1, len(cfg.prop) - 1
phiTD = np.zeros((ndet, ntof), np.float64)
phiFD = np.zeros(ndet, np.complex128)
paths = np.zeros((ndet, ntof, nmedia), np.float64)
pcounts = np.zeros((ndet, ntof), np.int64)
g1_top = np.zeros((ndet, len(tau)), np.float64)
phiDist = np.zeros((ndet, ntof, nmedia), np.float64)
fslice = 0
for seed in seeds:
cfg.seed = int(seed)
result = cfg.run(2)
detp = result["detphoton"]
if detp.shape[1] >= cfg.maxdetphoton:
raise Exception("Too many photons detected: {}".format(detp.shape[1]))
analysis(detp, cfg.prop, tof_domain, tau, wavelength, BFi, freq, ndet, ntof, nmedia, pcounts, paths, phiTD, phiFD, g1_top, phiDist)
fslice += result["fluence"][spec['slice']]
del detp
del result
fslice /= run_count
paths /= pcounts[:, :, np.newaxis]
g1 = g1_top / np.sum(phiTD, axis=1)[:, np.newaxis]
phiDist /= np.sum(phiTD, axis=1)[:, np.newaxis, np.newaxis]
return {'Photons': pcounts, 'Paths': paths, 'PhiTD': phiTD, 'PhiFD': phiFD, 'PhiDist': phiDist, 'Seeds': seeds, 'Slice': fslice, 'g1': g1}
```
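A minimal sketch of the `spec` dictionary that `simulate()` appears to expect, inferred purely from the key accesses in `create_props()` and `simulate()` above; every name and value below is a placeholder assumption, not taken from the repository.
```python
# Hypothetical spec skeleton (placeholder values) inferred from the reads above.
from pymcx import MCX

spec = {
    'mcx': MCX(),                          # pre-configured MCX run (tstart, tend, tstep, detpos, maxdetphoton, ...)
    'layers': ['scalp', 'skull'],          # ordered tissue layers
    'layer_properties': {
        'scalp': {'g': 0.9, 'n': 1.4, 'Scatter A': 10.0, 'Scatter b': 1.0,
                  'BFi': 1e-6, 'components': {'HbO': 60e-6, 'HbR': 25e-6}},
        'skull': {'g': 0.9, 'n': 1.4, 'Scatter A': 15.0, 'Scatter b': 1.0,
                  'BFi': 1e-7, 'components': {'HbO': 50e-6, 'HbR': 20e-6}},
    },
    'extinction_coeffs': {                 # (wavelength grid, coefficients) pairs fed to np.interp
        'HbO': ([690.0, 750.0, 850.0], [0.5, 0.6, 1.1]),
        'HbR': ([690.0, 750.0, 850.0], [2.1, 1.6, 0.8]),
    },
    'slice': (slice(None), slice(None), 30),  # index applied to result["fluence"]
    'run_count': 2,                        # optional; defaults to 1
    'frequency': 110e6,                    # optional; defaults to 110e6
}
# results = simulate(spec, wavelength=750)
```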
|
{
"source": "jdtech3/PW4py",
"score": 2
}
|
#### File: PW4py/pw4py/__init__.py
```python
import requests
__version__ = '0.1.1'
session = None
# User settable options
API_KEY = None
API_PATH = 'http://politicsandwar.com/api'
def load(**kwargs):
if 'key' in kwargs:
global API_KEY
API_KEY = kwargs['key']
if 'test_server' in kwargs:
global API_PATH
if kwargs['test_server'] == True:
API_PATH = 'http://test.politicsandwar.com/api'
else:
API_PATH = 'http://politicsandwar.com/api'
else:
API_PATH = 'http://politicsandwar.com/api'
# Create a requests session and set API key if applicable
global session
session = requests.Session()
def init(**kwargs):
load(**kwargs)
# Imports
from .nation import Nation
from .alliance import Alliance
```
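A short usage sketch of the module-level configuration above; the key string is a placeholder, and only the configuration side is exercised here.
```python
import pw4py

# Point the package at the live API with a (placeholder) key;
# pass test_server=True to target http://test.politicsandwar.com/api instead.
pw4py.init(key='PLACEHOLDER-API-KEY', test_server=False)

# After init(), pw4py.session is a requests.Session, and pw4py.API_KEY / pw4py.API_PATH
# hold the values that Nation and Alliance presumably use when building requests.
print(pw4py.API_PATH)
```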
|
{
"source": "jdtech3/Station-Master",
"score": 2
}
|
#### File: Station-Master/cogs/fun.py
```python
import discord
from discord.ext import commands
import aiohttp
from datetime import datetime # for embeds
class FunCog:
def __init__(self, bot):
self.bot = bot
# Get URLs and parse them as json
@staticmethod
async def get(url, session):
async with session.get(url) as response:
return await response.json(content_type=None)
# .dog
@commands.command()
async def dog(self, ctx):
async with aiohttp.ClientSession() as session:
resp = await self.get("https://dog.ceo/api/breeds/image/random", session)
img_link = resp['message']
embed = discord.Embed(title="*Aww...*", colour=discord.Colour(0xf8e71c), timestamp=datetime.now())
embed.set_image(url=img_link)
embed.set_footer(text="daVinci | Provided by: https://dog.ceo/dog-api/", icon_url="http://icons.iconarchive.com/icons/google/noto-emoji-animals-nature/1024/22215-dog-icon.png")
await ctx.send(embed=embed)
# .dogfact
@commands.command(name='dogfact')
async def dog_fact(self, ctx):
async with aiohttp.ClientSession() as session:
resp = await self.get("https://some-random-api.ml/dogfact", session)
fact = resp['fact']
embed = discord.Embed(title="Dogfact!", colour=discord.Colour(0xf8e71c), description=fact, timestamp=datetime.now())
embed.set_footer(text="daVinci | Provided by: https://some-random-api.ml/dogfact", icon_url="http://icons.iconarchive.com/icons/google/noto-emoji-animals-nature/1024/22215-dog-icon.png")
await ctx.send(embed=embed)
# .cat
@commands.command()
async def cat(self, ctx):
async with aiohttp.ClientSession() as session:
resp = await self.get("https://api.thecatapi.com/v1/images/search?mime_types=jpg,png", session)
img_link = resp[0]['url']
embed = discord.Embed(title="*Aww...*", colour=discord.Colour(0xf8e71c), timestamp=datetime.now())
embed.set_image(url=img_link)
embed.set_footer(text="daVinci | Provided by: https://thecatapi.com/", icon_url="http://icons.iconarchive.com/icons/sonya/swarm/256/Cat-icon.png")
await ctx.send(embed=embed)
# .catfact
@commands.command(name='catfact')
async def cat_fact(self, ctx):
async with aiohttp.ClientSession() as session:
resp = await self.get("https://catfact.ninja/fact", session)
fact = resp['fact']
embed = discord.Embed(title="Catfact!", colour=discord.Colour(0xf8e71c), description=fact, timestamp=datetime.now())
embed.set_footer(text="daVinci | Provided by: https://catfact.ninja/fact", icon_url="http://icons.iconarchive.com/icons/sonya/swarm/256/Cat-icon.png")
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(FunCog(bot))
```
#### File: Station-Master/cogs/help.py
```python
import discord
from discord.ext import commands
class HelpCog:
def __init__(self, bot):
self.bot = bot
self.bot.remove_command('help')
# .help
@commands.group()
async def help(self, ctx):
if ctx.invoked_subcommand is None:
await ctx.send("""```css
[ Command List ]
- Use .help [command] for details -
.help :: Displays this message.
.ping :: Ping/pong, pretty self-explanatory.
.pong :: Needs no description :P
.listings get :: Returns listings in a given category
.listings add :: Adds listing to a given category
.listings remove :: Removes a listing
.listings categories :: All possible categories
```
""")
def setup(bot):
bot.add_cog(HelpCog(bot))
```
|
{
"source": "jdtech3/YouTube-Archive-Tracker",
"score": 2
}
|
#### File: jdtech3/YouTube-Archive-Tracker/bot.py
```python
import asyncio
import discord
from discord.ext import commands
from config import config
from sheets.read_stats import Stats
__author__ = 'JDTech'
__version__ = '0.1.0'
# Default enabled cogs
initial_cogs = ['cogs.admin', 'cogs.tools', 'cogs.help']
# Bot stuff
TOKEN = config['bot_token']
bot = commands.Bot(command_prefix=config['bot_prefix'])
# Disallow calling the bot in PMs
# TODO: Suppress CheckFailure exceptions
@bot.check
async def no_pm(ctx):
if ctx.message.guild is None:
embed = discord.Embed(title="Not allowed to use this command in PMs.", colour=discord.Colour(0xd0021b))
await ctx.channel.send(embed=embed)
return False
else:
return True
# Reply to mentions
@bot.event
async def on_message(message):
if message.content == '<@!662158675275808768>':
embed = discord.Embed(title="Hi there! Do `/help` to see what I can do for ya :smile:", colour=discord.Colour(0x7ed321))
await message.channel.send(embed=embed)
else:
await bot.process_commands(message)
# .version
@bot.command()
async def version(ctx):
embed = discord.Embed(title=f"*The Tracker* **v{__version__}**", colour=discord.Colour(0xf8e71c))
await ctx.send(embed=embed)
# Playing... animation
async def presence_animation():
WATCHING = discord.ActivityType.watching
STREAMING = discord.ActivityType.streaming
while True:
stats = Stats()
await bot.change_presence(activity=discord.Activity(name=f'{stats.videos} videos', type=WATCHING))
await asyncio.sleep(60)
await bot.change_presence(activity=discord.Activity(name=f'{round(stats.size_tb, 2)} TB', type=WATCHING))
await asyncio.sleep(60)
await bot.change_presence(activity=discord.Activity(name=f'/help ▪ made by {__author__}', type=STREAMING))
await asyncio.sleep(15)
# Print some info
@bot.event
async def on_ready():
print('---------------')
print('Connected. Loading cogs...')
print('---------------')
# Try to load initial cogs
for cog in initial_cogs:
try:
bot.load_extension(cog)
except Exception as e:
print(f'Failed to load extension {cog}. {e}')
else:
print(f'Loaded extension {cog}.')
print('---------------')
print('Logged in as: ')
print(f'Username: {bot.user.name} | ID: {bot.user.id}')
print(f'Discord version: {discord.__version__}')
print(f'Bot version: {__version__}')
print('---------------')
bot.loop.create_task(presence_animation())
if __name__ == "__main__":
# Run the bot!
bot.run(TOKEN)
```
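The script imports a `config` dict and a `sheets.read_stats.Stats` helper that are not shown in this entry; a minimal stand-in consistent with the attributes read above might look like the following (the token, prefix, and stats values are placeholder assumptions):
```python
# config/config.py -- hypothetical shape of the imported `config` dict
config = {
    'bot_token': 'PLACEHOLDER-DISCORD-TOKEN',
    'bot_prefix': '/',          # the presence animation advertises "/help"
}

# sheets/read_stats.py -- stand-in exposing the attributes used by presence_animation()
class Stats:
    def __init__(self):
        self.videos = 0         # number of archived videos
        self.size_tb = 0.0      # archive size in terabytes
```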
|
{
"source": "jdtheiss/pebl",
"score": 3
}
|
#### File: pebl/functions/utils.py
```python
import os
import numpy as np
import sys
import cStringIO
import re
import scipy.io as sio
import copy
def cell2strtable(celltable, delim='\t'):
''' convert a cell table into a string table that can be printed nicely
Parameters:
celltable - array-like, ndarray with rows and columns in desired order
delim - str, delimter to combine columns of celltable
[default = '\\t' (strictly 4 spaces)]
Returns:
strtable - str, string version of celltable that prints as table
Example:
celltable = np.array([['Column 1 Title','Column 2 Title',''],
['Row 2 Column 1 is longer...','Row 2 Column 2','Extra Column!']])
delim='\t'
strtable = cell2strtable(celltable, delim)
print(strtable)
Column 1 Title Column 2 Title
Row 2 Column 1 is longer... Row 2 Column 2 Extra Column!
'''
# change \t to 4 spaces
if delim == '\t':
delim = ' '
# check that celltable is ndarray and object
if type(celltable) != np.ndarray:
celltable = np.array([celltable], dtype=np.object)
elif celltable.dtype != np.object: # copy as np.object
celltable = copy.deepcopy(celltable).astype(np.object)
else: # copy as is
celltable = copy.deepcopy(celltable)
# if len(shape) == 1, reshape
if len(celltable.shape)==1:
celltable = np.reshape(celltable, (1,celltable.shape[0]))
# convert all to string
for i,x in enumerate(celltable.ravel()):
celltable.ravel()[i] = np.str(x)
# get max length in each column
max_len = []
for x in celltable.transpose():
max_len.append(np.max([len(y) for y in x]))
# pad each column with spaces
for i,r in enumerate(celltable):
for ii,c in enumerate(r):
if len(c) < max_len[ii]:
spaces = ''.join([' ' for n in range(max_len[ii]-len(c))])
celltable[i][ii] = c + spaces
# join strings with delim
strtable = []
if len(celltable.shape) > 1:
for r in range(celltable.shape[0]):
strtable.append(delim.join(celltable[r]))
strtable = '\n'.join(strtable)
else:
strtable = delim.join(celltable)
return strtable
def py2mat(A, filename, variable):
''' load from or save to matlab format
Parameters:
A - object, object to save (set to None if loading from filename)
filename - str, file to load from or save to
variable - str, variable name to load or save
Returns:
A - object, object converted from file or converted to matlab format
Example:
A = {0: {'spm': {'temporal': {'st': {'nslices': {0: 28},
'prefix': {0: u'a'},
'refslice': {0: 1},
'scans': {0: {0: u'<UNDEFINED>'}},
'so': {0: 1, 1: 3, 2: 5, 3: 7, 4: 9, 5: 11, 6: 13, 7: 15, 8: 17,
9: 19, 10: 21, 11: 23, 12: 25, 13: 27, 14: 2, 15: 4, 16: 6,
17: 8, 18: 10, 19: 12, 20: 14, 21: 16, 22: 18, 23: 20, 24: 22,
25: 24, 26: 26, 27: 28},
'ta': {0: 1.9285714285714286},
'tr': {0: 2}}}}}}
'''
# load from filename
if A==None:
# init out
out = np.array([], np.object)
# load filename as matlab dtype
A = sio.loadmat(filename, mat_dtype=True)
A = A[variable]
# get substructs of A
S0 = struct2sub(A)
# for each level, get dtype
S1 = np.empty(len(S0), dtype=np.object).tolist()
cell = np.zeros(len(S0), dtype=np.bool).tolist()
for i,S_ in enumerate(S0):
S1[i] = []
cell[i] = []
for n in range(1, len(S_)):
A_ = subsref(A, S_[:n])
# cell index
if A_.dtype == np.object:
# set single index
if A_.ndim == 1:
S1[i].append(S_[n])
cell[i].append(copy.deepcopy(S1[i]))
# set cell array
elif A_.shape[0] > 1:
S1[i].append(S_[n])
cell[i].append(copy.deepcopy(S1[i]))
# field name
elif A_.dtype.names != None:
# set fieldname
if A_.ndim == 0:
S1[i].append(A_.dtype.names[S_[n]])
# set noncell array
elif A_.shape[0] > 1:
S1[i].append(S_[n])
elif A_.ndim > 0 and A_.shape[0] > 1:
S1[i].append(S_[n])
# set values
for S0_, S1_ in zip(S0, S1):
item = subsref(A, S0_)
out = subsasgn(out, S1_, item, list)
# set cells as numpy arrays
for C_ in cell:
# first cell is implied
for c in C_[1:]:
out = subsasgn(out, c, np.array([subsref(out, c)], np.object))
else: # copy A
A = copy.deepcopy(A)
# get substructs for A at each level
S0 = struct2sub(A, dict_out=True)
# set additional dimension for matlab
for k in S0.keys():
for S_ in S0[k]:
A_ = subsref(A, S_)
# if list without following or preceding list, set extra dim
if type(A_)==list and type(subsref(A, S_[:-1]))!=list and \
type(A_[0])!=list:
A = subsasgn(A, S_, [A_])
S0 = struct2sub(A, dict_out=True)
# set dicts as arrays with dtype
l = S0.keys()
l.reverse()
for k in l:
for S_ in S0[k]:
A_ = subsref(A, S_)
# set dict to array with keys as dtype
if type(A_) == dict:
A = subsasgn(A, S_, np.array([tuple(A_.values())],
np.dtype([(k, np.object) for k in A_.keys()])))
S0 = struct2sub(A, dict_out=True)
# set out to dict using variable
out = {variable: A}
# save mat
sio.savemat(filename, out)
return out
def subsref(A, S):
''' return value from A using references in S
Parameters:
A - object, object to return value from
S - list, indices/fields to reference to obtain value from A (see Example)
Returns:
value - any, value to index from A using S
Example:
A = {0: {'test': [9,8,7]}}
S = [0, 'test', 1]
value = subsref(A, S)
value =
8
'''
# copy S
S = list(S)
# copy A
value = copy.deepcopy(A)
# for each substruct, get value
for S_ in S:
if type(S_) == str and re.match('.*:.*', S_) != None:
value = eval('value[{S_}]'.format(S_=S_))
else:
value = value[S_]
return value
def subsasgn(A, S, C, append_type=None):
''' set value in A using reference in S
Parameters:
A - object, object to set value
S - list, indices/fields to reference when setting value
C - any, value to set in A at reference S
append_type - type, type of iterable to append if needed (e.g., list)
[default is None, sets to type(A)]
Returns:
A - object, updated object with value set at reference S
Example:
A = {0: {'spm': {'util': {'disp': {'data': '<UNDEFINED>'}}}}}
S = [0, 'spm', 'util', 'disp', 'data']
C = './mri/anatomical.nii'
subsasgn(A, S, C)
A =
{0: {'spm': {'util': {'disp': {'data': './mri/anatomical.nii'}}}}}
Note: Only tested for dict, list, and ndarray. If S == [], A is set to C
'''
# copy A
A = copy.deepcopy(A)
value = A
# set default for setting new index
if append_type == None:
def_val = type(A)([])
else:
def_val = append_type([])
# ensure def_val has ndim > 0
if type(def_val).__module__ == np.__name__ and def_val.ndim == 0:
def_val = np.array([None], dtype=A.dtype)
# for each level in S, index value
for i,S_ in enumerate(S):
# add new key to dict
if type(value) == dict and S_ not in value.keys():
value[S_] = copy.deepcopy(def_val)
# set value to dict and add key with new value type(A)
elif type(value) != dict and type(S_) == str:
value = {}
value[S_] = copy.deepcopy(def_val)
# append list
elif type(value) == list and S_ >= len(value):
for _ in range(S_ - len(value) + 1):
value.append(copy.deepcopy(def_val))
# append ndarray with None
elif type(value).__module__ == np.__name__:
if value.ndim == 0:
value = np.array([value])
if S_ >= len(value):
for _ in range(S_ - len(value) + 1):
value = np.append(value, None)
# if None, set as list
elif value == None:
value = []
for _ in range(S_ - len(value) + 1):
value.append([])
# set value to A at current substruct
if i > 0 and len(S[:i]) > 0:
exec('A' + sub2str(S[:i]) + '= value')
else:
A = value
# evaluate : string
if type(S_) == str and re.match('.*:.*', S_) != None:
value = eval('value[{S_}]'.format(S_=S_))
else: # index value using S_
value = value[S_]
# set complete reference to C
if len(S) > 0:
exec('A' + sub2str(S) + '= C')
else: # simple set
A = C
return A
def sub2str(S):
''' convert a "substruct" to a "string representation" or vice versa
Parameters:
S - list or str, substruct/string representation to convert
Returns:
S - list or str, converted substruct/string representation
Example 1:
S = [0, 'field1', 0, 'field2', 1]
str_rep = sub2str(S)
str_rep =
'[0]["field1"][0]["field2"][1]'
Example 2:
str_rep = '["field1"]["field2"][4]'
S = sub2str(str_rep)
S =
['field1', 'field2', 4]
'''
# copy S
if type(S) != str:
S = list(S)
# init output
out = []
# if str, output array
if type(S) == str:
S = re.split('[\[\]]', S)
S = [S for S in S if S != '']
for S_ in S:
if S_.isdigit():
out.append(int(S_))
else:
out.append(re.sub('"', '', S_))
else: # if array, output str
if not np.iterable(S):
S = [S,]
for S_ in S:
if type(S_) == str:
out.append('"' + S_ + '"')
else:
out.append(str(S_))
out = '[' + ']['.join(out) + ']'
return out
def struct2sub(A, r=np.inf, dict_out=False):
''' return all "substructs" from A through levels r
Parameters:
A - object, object to return substructs from
r - number, number of levels to search when obtaining substructs. Returns
substruct lists with maximum length of r + 1 (0 is first level)
[default is np.inf, i.e. all levels of A]
dict_out - bool, return each level list of substruct as dict with keys
corresponding to levels
[default is False]
Returns:
S - list, list of substructs for each value in A through levels r
Example:
A = {'test': {0: 12, 1: '2'}, 'test2': 3}
r = 1
S =
[['test', 0], ['test', 1], ['test2']]
'''
# copy A
A = copy.deepcopy(A)
# get substruct based on type
S = {0: []}
if type(A) == dict:
S[0] = [[S_] for S_ in A.keys()]
elif type(A) == list or type(A) == tuple:
S[0] = [[S_] for S_ in range(len(A))]
elif type(A).__module__ == np.__name__:
if A.ndim > 0 or type(A) == np.void:
A = list(A)
S[0] = [[S_] for S_ in range(len(A))]
# ensure list is not empty
if len(S[0]) == 0:
S[0] = [[],]
# # if r is zero, return
if r == 0:
return S[0]
# for each level, get struct2sub and append to previous
r_ = 0
while r_ < r:
S[r_+1] = []
for S0 in S[r_]:
for S1 in struct2sub(subsref(A, S0), 0):
S[r_+1].append(S0 + S1)
if len(struct2sub(subsref(A, S0), 0)) == 0:
S[r_+1].append(S[r_])
if S[r_] == S[r_+1]:
S.pop(r_+1, None)
break
else:
r_ += 1
if dict_out: # return dict
return S
else: # return S at level r_
return S[r_]
def pebl_getfield(A, S=None, R=None, expr=None, fun=None, r=np.inf):
''' get values from object, A, using substructs or string representations
Parameters:
A - object, object to return values from
Options:
S - list, substruct to get value from A
[defualt is None]
R - list or str, string representation to get value from A
[default is None]
expr - str, expression to search string representations to get value
from A
[default is None]
fun - dict, dict containing function to search for values within A. keys
within the dict should contain 'fun', and integers corresponding to
argument index (see Example 2). Each C will be input as the argument
not contained in the dict keys (i.e. at index 0 for Example 2).
[default is None]
r - int, number of level to search within A (each level is field or
index reference)
[default is np.inf]
Returns:
C - list, values returned from A
(i.e. C[0] == subsref(A, S[0]) or eval('A' + R[0]))
S - list, substructs used to return values from A
R - list, string representations used to return values from A
Example 1:
A = {0: {'spm': {'util': {'disp': {'data': '<UNDEFINED>'}}}}}
expr = '.*\["disp"]'
C, S, R = pebl_getfield(A, expr=expr)
C =
array([{'data': '<UNDEFINED>'}], dtype=object)
S =
[[0, 'spm', 'util', 'disp']]
R =
['[0]["spm"]["util"]["disp"]']
Example 2:
A = {'test1': {0: 3}, 'test2': [2,3,4,5], 'test3': []}
fun = {'fun': np.equal, 1: 3}
C, S, R = pebl_getfield(A, fun=fun)
C =
array([3, 3], dtype=object)
S =
[['test1', 0], ['test2', 1]]
R =
['["test1"][0]', '["test2"][1]']
'''
# if S exists, get copy
if S != None:
if type(S)!=list or type(S[0])!=list:
S = [S,]
else:
S = list(S)
else: # get substructs of A
S = []
if not np.iterable(r):
r = [r,]
for rr in r:
S = S + struct2sub(A, rr)
# if R exists, update S
if R != None:
if not np.iterable(R):
R = [R,]
else:
R = list(R)
S = []
for R_ in R:
S.append(sub2str(R_))
else: # if R doesnt exist, set from S
R = []
for S_ in S:
R.append(sub2str(S_))
# find R using regex
if expr != None:
tmp = list(R)
R = []
# copy expr
if type(expr) == str:
expr = [expr,]
else:
expr = list(expr)
for e in expr:
m = [re.findall(e, R_) for R_ in tmp]
m = np.unique([m[0] for m in m if len(m) > 0])
R = np.append(R, m)
R = np.unique(R).tolist()
# update S
S = []
for R_ in R:
S.append(sub2str(R_))
# use subsref to get values
C = []
for S_ in S:
C.append(subsref(A, S_))
# search using function
if fun != None:
# copy fun
if type(fun) != dict:
fun = {'fun': fun}
else:
fun = dict(fun)
# set fnd array of false
fnd = np.zeros(len(C), dtype=np.bool)
# get key positions for function call
key_ns = [k for k in fun.keys() if type(k) == int]
key_rng = range(np.max(key_ns)+1)
c_idx = [k for k in key_rng if k not in key_ns]
if len(c_idx) == 0:
c_idx = np.max(key_ns)+1
else:
c_idx = c_idx[0]
# for each C_, evaluate the function
for i, C_ in enumerate(C):
# set c_idx to C_
fun[c_idx] = C_
# set args for input
args = [fun[k] for k in key_rng]
# evaluate function
tmp = fun['fun'](*args)
if tmp == NotImplemented:
fnd[i] = False
else:
fnd[i] = tmp
# set to true indices
C = np.array(C, dtype=np.object)[fnd].tolist()
S = np.array(S, dtype=np.object)[fnd].tolist()
R = np.array(R, dtype=np.object)[fnd].tolist()
# return C, S, R
return C, S, R
def pebl_setfield(A, C, S=None, R=None, expr=None, fun=None, r=np.inf):
''' set values in object, A, using substructs or string representations
Parameters:
A - object, object to set values
C - list, list of values to set in A
S - list, substructs referencing location to set values in A
[default is None]
R - list, string representations referencing location to set values in A
[default is None]
expr - str, expression to search R in order to set values in A
[defualt is None]
fun - dict, dict to find locations in A to set values
[defualt is None]
r - int, number of levels in A to search if S or R are not set directly
[default is np.inf]
Returns:
A - object, updated object with values set
Note:
See pebl_getfield for further description on Parameters.
Example:
A = {0: {'test1': [1,2,3]}}
S = [0, 'test1', -1]
C = []
'''
# init C
if type(C) != list: # make iterable
C = [C,]
else: # copy
C = list(C)
# if no S and no R, set from A using pebl_getfield
if S==None and R==None:
_, S, R = pebl_getfield(A, expr=expr, fun=fun, r=r)
# check for S and R separately
if R==None:
R = []
elif type(R) == str: # set as iterable
R = [R,]
else: # copy
R = list(R)
if S==None:
S = []
for R_ in R:
S.append(sub2str(R_))
elif type(S)!=list or type(S[0])!=list: # make iterable
S = [S,]
else: # copy
S = list(S)
# set C based on S
if type(C) != list or len(C)==1 or len(S) == 1:
C = np.repeat([C], len(S)).tolist()
elif len(C) != len(S):
C = C[:np.min([len(C),len(S)])]
S = S[:np.min([len(C),len(S)])]
# init R for output
R = []
for C_, S_ in zip(C, S):
# update R
R.append(sub2str(S_))
# set A
A = subsasgn(A, S_, C_)
return A
def pebl_search(folder, expr, ftype, n_levels=np.inf, verbose=False):
''' search a folder, subfolders, and files for expr
Parameters:
folder - str, folder to begin search
expr - str, expression to search within folders/files
ftype - str, file type to narrow search (or 'dir' to search folders)
n_levels - int, number of directory levels to search
[default is np.inf]
verbose - bool, print folder/file currently being searched
[default is False]
Returns:
files - list, fullpath files that contained expression in name or text
Example 1:
folder = os.curdir
expr = 'utils.*'
ftype = 'dir'
files = pebl_search(folder, expr, ftype)
files =
['/pebl/pebl/functions/utils.py',
'/pebl/pebl/functions/utils.pyc']
Example 2:
folder = os.curdir
expr = 'def pebl_search'
ftype = '.*\.py$'
files = pebl_search(folder, expr, ftype)
files =
['/pebl/pebl/functions/utils.py']
'''
# init files output
files = np.array([])
# set folder to fullpath
folder = os.path.abspath(folder)
# get names of files/folders in folder
names = os.listdir(folder)
# get indices of directories
dir_tf = np.array([os.path.isdir(os.path.join(folder,n)) for n in names],
dtype=np.bool)
# regex for ftype matches
matches = [re.match(ftype, n) for n in names]
ftype_tf = np.array([m != None for m in matches], dtype=np.bool)
# find files to search
file_tf = np.invert(dir_tf) * ftype_tf
# if dir, search files for expr
if ftype == 'dir':
fnd = np.array([re.match(expr, n) != None for n in names], dtype=np.bool)
for i in np.where(fnd)[0]:
files = np.append(files, os.path.join(folder,names[i]))
else: # for each file, search text for expr
for i in np.where(file_tf)[0]:
if verbose:
print('Searching {name}'.format(name=names[i]))
with open(os.path.join(folder,names[i]), 'r') as f:
txt = f.read()
if len(re.findall(expr, txt)) > 0:
files = np.append(files, os.path.join(folder,names[i]))
# search additional levels
if n_levels > 0 and np.any(dir_tf):
for i in np.where(dir_tf)[0]:
if verbose:
print('Searching {dir}'.format(dir=names[i]))
files = np.append(files, pebl_search(os.path.join(folder,names[i]),
expr, ftype, n_levels=n_levels-1, verbose=verbose))
# return files as list
return files.tolist()
```
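The helpers above compose; the following round-trip sketch (illustrative names and a made-up file path, under the same Python 2 conventions as the module) finds an `'<UNDEFINED>'` field with `pebl_getfield` and fills it with `pebl_setfield`:
```python
# Round-trip sketch: locate an '<UNDEFINED>' field and assign a value to it.
batch = {0: {'spm': {'util': {'disp': {'data': '<UNDEFINED>'}}}}}

# find every string representation ending in ["data"]
C, S, R = pebl_getfield(batch, expr='.*\["data"]')
print(C)    # ['<UNDEFINED>']
print(R)    # ['[0]["spm"]["util"]["disp"]["data"]']

# assign a new (made-up) value at the matched location
batch = pebl_setfield(batch, './mri/anatomical.nii', S=S)
print(subsref(batch, S[0]))    # './mri/anatomical.nii'
```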
|
{
"source": "JDTheRipperPC/foo-scripts",
"score": 2
}
|
#### File: python/shodan/shodan_simple_search.py
```python
import shodan
from shodan.cli.helpers import get_api_key
def simple_search():
try:
ShodanAPI = shodan.Shodan(get_api_key())
results = ShodanAPI.search('apache')
for result in results.get('matches'):
print('IP: {ip}\nData: {data}\n'.format(
ip=result.get('ip_str'),
data=result.get('data')
))
except shodan.APIError as err:
print('Error: {}'.format(err))
if __name__ == '__main__':
simple_search()
```
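`get_api_key()` reads the key previously stored by the Shodan CLI (`shodan init <key>`); if no key has been initialised, an equivalent sketch is to pass the key to the client directly (the key below is a placeholder):
```python
import shodan

# Construct the client with an explicit key instead of the CLI-stored one.
api = shodan.Shodan('PLACEHOLDER-API-KEY')

try:
    results = api.search('apache')
    print('Results found: {}'.format(results['total']))
except shodan.APIError as err:
    print('Error: {}'.format(err))
```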
|
{
"source": "JDTheRipperPC/toscrape-examples",
"score": 2
}
|
#### File: books_toscrape_com/books_toscrape_com/exporters.py
```python
import csv
from scrapy.utils.project import get_project_settings
from scrapy.exporters import CsvItemExporter
class CsvExtendedItemExporter(CsvItemExporter):
def __init__(self, *args, **kwargs):
settings = get_project_settings()
dialect = settings.get('CSV_DIALECT')
kwargs['dialect'] = csv.get_dialect(dialect) if dialect in csv.list_dialects() else csv.get_dialect('excel')
kwargs['delimiter'] = settings.get('CSV_DELIMITER', kwargs['dialect'].delimiter)
kwargs['doublequote'] = settings.get('CSV_DOUBLEQUOTE', kwargs['dialect'].doublequote)
kwargs['escapechar'] = settings.get('CSV_ESCAPECHAR', kwargs['dialect'].escapechar)
kwargs['lineterminator'] = settings.get('CSV_LINETERMINATOR', kwargs['dialect'].lineterminator)
kwargs['quotechar'] = settings.get('CSV_QUOTECHAR', kwargs['dialect'].quotechar)
kwargs['quoting'] = settings.get('CSV_QUOTING', kwargs['dialect'].quoting)
super(CsvExtendedItemExporter, self).__init__(*args, **kwargs)
```
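For Scrapy to use this exporter it has to be registered in the project settings; a plausible configuration (setting names taken from the `settings.get` calls above, values are examples) could be:
```python
# settings.py (example values; the CSV_* names are the ones read by CsvExtendedItemExporter)
FEED_EXPORTERS = {
    'csv': 'books_toscrape_com.exporters.CsvExtendedItemExporter',
}
CSV_DIALECT = 'excel'          # falls back to 'excel' when the name is not a registered dialect
CSV_DELIMITER = ';'
CSV_QUOTECHAR = '"'
CSV_LINETERMINATOR = '\r\n'
```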
#### File: books_toscrape_com/books_toscrape_com/pipelines.py
```python
import os
class JsonPipeline(object):
def __init__(self, filename):
self.filename = filename
self.f = open(self.filename, 'w')
@classmethod
def from_crawler(cls, crawler):
nameformat = crawler.settings.get('JSON_PIPELINE_FORMAT')
name = nameformat % {
"name": crawler.spider.name,
"json_filename": getattr(crawler.spider, 'json_filename', crawler.spider.name)
}
# import pdb; pdb.set_trace()
return cls(
filename='{directory}{name}'.format(
directory=crawler.settings.get('JSON_PIPELINE_OUTPUT'),
name=name
)
)
def open_spider(self, spider):
self.f.write('[\n')
def close_spider(self, spider):
self.f.write(']\n')
def process_item(self, item, spider):
self.f.write('{item}\n'.format(item=item))
class BooksToscrapeComPipeline(object):
def process_item(self, item, spider):
return item
```
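Enabling the pipeline needs the usual `ITEM_PIPELINES` entry plus the two custom settings read in `from_crawler`; an example (paths and values are assumptions) could be:
```python
# settings.py (example values)
ITEM_PIPELINES = {
    'books_toscrape_com.pipelines.JsonPipeline': 300,
}
JSON_PIPELINE_OUTPUT = 'output/'                    # directory prefix prepended to the file name
JSON_PIPELINE_FORMAT = '%(json_filename)s.json'     # expanded with the spider's name / json_filename
```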
|
{
"source": "jdthorpe/MCPH",
"score": 2
}
|
#### File: jdthorpe/MCPH/event.py
```python
from math import isinf
from operator import attrgetter
from types import NoneType
import pdb
inf = float('inf')
class Event(object):
"""
Most of what follows is no longer accurate. Here is what's current:
==================================================
* an event is an object with 3 special (public) properties:
'reference': another event that serves as the
reference time for this event. References
can be followed from child to parent
until a global (person) object is reached.
The global object canonically does not have
a reference event.
'reftime': the time between this event and
the reference event (negative if this event occurred
first).
'time': a calculated property := time between
this event and the global event (birth),
*iif* a global event is at the top of the
reference chain.
and a couple of special methods:
'getEvent()': returns a named event.
'getEvents()': returns a (possibly empty) list
of events with a set of common characteristics
Events *may* have a reference event, in which case the
event is a child of its reference.
There are three ways to set a reference event on an event:
(1) in the init method [ child = Event(reference=parent) ]
(2) via the child's reference attribute [ child.reference = parent ]
(3) via attribute assignment [ parent.foo = child ]
Note that the first two methods do not assign a name
to the event.
The link between parent and child can always be removed
via 'del child.reference', and in the case that the third
assignment option was used, 'del parent.foo' will also
remove the link between child and parent.
The final aspect of an event is that attribute assignment
can be used to set the reference (parent / child)
relationship. (e.g. parent.foo = child) sets the
parent / child relation and names the event 'foo'.
==================================================
"""
# default values for time and reftime
def __init__(self,
# the time between the reference event and this event
reftime=None,
# the reference object
reference=None,
# a string, tuple of strings, or list of strings to aid
# in searching for events.
type=()):
# store the reference event
if reference is not None:
self.reference = reference # implicitly uses the reference setter property
# the time of the event relative to the reference frame
if reftime is not None:
self.__dict__['reftime'] = reftime
# store the 'type' tuple
if isinstance(type,str):
type = (type,)
elif isinstance(type,list):
type = tuple(type)
self.type = type # a tuple that names the event type
# initialize the childen and prevented by lists
self._children = []
self._preventedBy = []
# --------------------------------------------------
# prevented properties
# --------------------------------------------------
def unpreventALL(self):
self._preventedBy = []
def unprevent(self,by):
for i in range(len(self._preventedBy)-1, -1, -1):
if self._preventedBy[i] is by:
del self._preventedBy[i]
def prevent(self,by):
if isinstance(by, origin):
raise RuntimeError('An event cannot be prevented by an origin')
if self is by:
raise RuntimeError('An event cannot be prevented by itself')
if by not in self._preventedBy :
self._preventedBy.append(by)
def _getTimePrevented(self):
if len(self._preventedBy):
return min([x.time for x in self._preventedBy])
else:
return float('inf')
TimePrevented = property(_getTimePrevented)
def _prevented(self):
""" An event is prevented if any of the prevention events
occur prior to the event in the absence of prevention
events.
"""
if not self._preventedBy:
return False
return self.time > min(x.time for x in self._preventedBy)
prevented = property(_prevented)
# --------------------------------------------------
# time property
# --------------------------------------------------
def _getTime(self):
if 'reference' not in self.__dict__:
raise RuntimeError("Attempt to GET the time of an event before setting the event's reference attribute, OR no global reference found.")
refTime = self.reference.time
if self.reftime is None or refTime is None:
return None
else:
return float(self.reftime) + refTime
time = property(_getTime)
# --------------------------------------------------
# redraw method
# --------------------------------------------------
def redraw(self):
"""call the redraw method on self.reference.time or self.reference.reftime"""
try:
self.reference.time.redraw()
except AttributeError:
pass
try:
self.reference.reftime.redraw()
except AttributeError:
pass
# --------------------------------------------------
# Attribute Setter
# --------------------------------------------------
def __setattr__(self, name, value):
""" The Set Attr method, which is reponsible for setting
the double link between events for statements like: `e1.e2 = e2`
"""
if name in ('reference',):
# python calls setter methods in this order, so we have to bypass __setattr__
# in order to get the property getter and setter methods defined below to handle
# the assignment. See this page for details:
#
# http://stackoverflow.com/questions/15750522/class-properties-and-setattr
object.__setattr__(self, name, value)
return
if isinstance(value,Event):
if ('reference' in value.__dict__
and value.reference is not self):
raise AttributeError('Attempt to add two references to a single event')
# PREVENT CIRCULAR PARENT/CHILD REFERENCES
tmp = value
while 'reference' in tmp.__dict__:
if isinstance(tmp, origin):
break
if tmp is self:
raise ValueError("Circular Reference Error: attempt to add an Event as a child of an ancestor.")
tmp = tmp.reference
# ADD SELF AS THE EVENT'S NEW 'REFERENCE' ATTRIBUTE
value.reference = self
self.__dict__[name] = value
# --------------------------------------------------
# Attribute Deleter
# --------------------------------------------------
def __delattr__(self, name):
if name not in self.__dict__:
raise AttributeError(name)
if name == 'reference':
# python calls deleter methods in this order, so we have to bypass __delattr__
# in order to get the property deleter method defined below to handle
# the deletion. See this page for details:
#
# http://stackoverflow.com/questions/15750522/class-properties-and-setattr
object.__delattr__(self, name)
# this propagates the delete on to the '__delReferenceEvent()' method below
return
if isinstance(self.__dict__[name],Event):
# NO CIRCULAR REFERENCES PLEASE, hence the following line
# is NOT !!: self.__dict__[name].reference
del self.__dict__[name].__dict__['reference']
del self.__dict__[name]
# --------------------------------------------------
#
# --------------------------------------------------
def _origin(self):
""" Retruns the origin event which is an ancestor to self. """
this = self
while True:
if 'reference' not in self.__dict__:
return None
reference = self.__dict__['reference']
if isinstance(reference,origin):
return reference
else: this = reference
origin = property(_origin)
# --------------------------------------------------
# stubs for pre and post processing
# --------------------------------------------------
def preprocess(self):
""" preprocess() is called when the event is initialized. it is
responsible for initializing any values required for processing
and/or eligibility testing. before the person event is tested for
enrollment eligibility, and before the personEventProcessors in the
decisionRule are called.
"""
pass # to be over-written by sub-classes
def process(self):
""" process() is called in order to handle the conditional events.
For example It's not possible for a tubal ligation (TL) to occure
after the tubes are removed (BS), so the TL should set it's time to
None in it's "process()" method when a BSO occures before the TL.
The Process() method should be used to modifiy the event that it
is called on, and not other events. The cascasde of
event.process() calles proceeds cronologically from the minimum
of [a] the time of the event before calling event.process(), [b]
the time of the event after calling event.process() and [c] the
return value from process (optional).
"""
pass # to be over-written by sub-classes
def postprocess(self):
""" postprocess() is called after all the event generators have been
called, and after the person event is qualified for enrollment
eligibility. It is also called each time that the time of the event
is reset.
postprocess() is therefore good for things like assigning marker
levels, which are expensive to generate and are not needed to
determine eligibility, or when the tumor does not have a time at
diagnosis.
The timing of this or any other event that existed during
processing should *NOT* be modified here.
Postprocess() is called *after* eligibility testing, so it may
not be called on events from ineligible individuals.
"""
pass # optionally, to be over-written by sub-classes
# --------------------------------------------------
# Reference event property
# --------------------------------------------------
def __setReferenceEvent(self,reference):
# PREVENT CIRCULAR PARENT/CHILD REFERENCES
if reference is self:
raise ValueError("Attempt to add a Event as it's own reference point. Circular references are forbidden")
ancestor = reference
while True:
if ancestor is self:
raise ValueError("Attempt to add a Event as a child of an ancestor. Circular references are forbidden")
if not 'reference' in ancestor.__dict__:
break
ancestor = ancestor.reference
if 'reference' in self.__dict__:
print 'deleting child.reference'
del self.reference
self.__dict__['reference'] = reference
# *may* need to complete the loop
if not self in reference.getEvents():
reference.__dict__['_children'].append(self)
def __getReferenceEvent(self):
return self.__dict__['reference']
def __delReferenceEvent(self):
# since we don't know if this event is is even named, we have
# to delete it from the reference based on it's value. Specifically
# this event (self) can be on of:
# [a] this.__data__.[namedChild]
# [b] this.__data__._children[this.__data__._children.index(child)]
if self in self.__dict__['_children']:
del self.__dict__['_children'][self.__dict__['_children'].index(self)]
tmpKeys = []
for key,value in self.__dict__.iteritems():
if value is self:
tmpKeys.append(key)
for key in tmpKeys:
del self.__dict__[key]
# now delete the reference from __dict__
del self.__dict__['reference']
reference = property(__getReferenceEvent,
__setReferenceEvent,
__delReferenceEvent)
# --------------------------------------------------
# event query methods
# --------------------------------------------------
def getEvent(self,name):
out = self.__dict__[name]
if not isinstance(out,Event):
raise KeyError("Event instance has no event named '"+name+"'.")
return out
def getEvents(self,
type=None,
deepQuery=False,
includePrevented=False,
includeNoneTimes=False,
ordered=False,
first=False):
""" returns a list of events with type 'type' """
try:
out = list(self.__dict__['_children'])
out += [e for e in self.__dict__.values() if isinstance(e, Event) and e not in out]
except: pdb.set_trace()
if not includeNoneTimes:
out = [e for e in out if e.time is not None]
if not includePrevented:
out = [e for e in out if not e.prevented]
if deepQuery:
# NOTE THAT this looping trick depends on not having circular references in the events
for e in out:
out.extend(e.getEvents(type=type, deepQuery=deepQuery, ordered=ordered, first=first))
if type:
if hasattr(type,'__call__'):
out = [e for e in out if type(e)]
else:
out = [e for e in out if type in e.type ]
if ordered or first:
out = sorted(out, key=lambda x: x.time if x.time is not None else inf)
if first:
if len(out):
return out[0]
else:
return None
else:
return out
# module test code
if __name__ == '__main__':
import sys
b = Event()
# ----------------------------------------
msg = 'bad time assignment (no reference event)'
sys.stdout.write(msg+"\r" )
try:
b.time = 5
except RuntimeError:
sys.stdout.write(msg + "...Passed\n" )
else:
sys.stdout.write(msg + "...Failed\n" )
# ----------------------------------------
msg = 'bad time query (no reference event)'
try:
_ = b.time
except RuntimeError:
sys.stdout.write(msg + "...Passed\n" )
else:
sys.stdout.write(msg + "...Failed\n" )
# ----------------------------------------
msg = 'self reference assignment'
sys.stdout.write(msg+"\r" )
try:
b.reference = b
except ValueError as e:
sys.stdout.write(msg + "...Passed\n" )
else:
sys.stdout.write(msg + "...Failed\n" )
# ----------------------------------------
msg = 'valid reference assignment'
sys.stdout.write(msg+"\r" )
a = Event()
try:
b.reference = a
except ValueError as e:
sys.stdout.write(msg + "...Failed\n" )
else:
sys.stdout.write(msg + "...Passed\n" )
# ----------------------------------------
msg = 'circular reference assignment'
sys.stdout.write(msg+"\r" )
try:
a.reference = b
except ValueError as e:
sys.stdout.write(msg + "...Passed\n" )
else:
sys.stdout.write(msg + "...Failed\n" )
# ----------------------------------------
msg = 'no origin'
sys.stdout.write(msg+"\r" )
try:
b.time = 5
except RuntimeError:
sys.stdout.write(msg + "...Passed\n" )
else:
sys.stdout.write(msg + "...Failed\n" )
# ----------------------------------------
msg = 'no origin'
try:
_ = b.time
except AttributeError:
sys.stdout.write(msg + "...Passed\n" )
else:
sys.stdout.write(msg + "...Failed\n" )
# ----------------------------------------
a.isGlobalReference = True
msg = 'good time assignment '
sys.stdout.write(msg+"\r" )
try:
b.time = 5
except AttributeError:
sys.stdout.write(msg+"...Failed\n" )
else:
sys.stdout.write(msg+"...Passed\n" )
c = Event(reference=b,time = 11)
assert c.reftime == 6
# ----------------------------------------
a.isGlobalReference = True
msg = 'deleting global event '
sys.stdout.write(msg+"\r" )
try:
del b.reference
except AttributeError:
sys.stdout.write(msg+"...Failed\n" )
else:
sys.stdout.write(msg+"...Passed\n" )
# ----------------------------------------
msg = 'getting time of secondary event after deleting the global event '
sys.stdout.write(msg+"\r" )
try:
b.time
except RuntimeError:
sys.stdout.write(msg+"...Passed\n" )
else:
sys.stdout.write(msg+"...Failed\n" )
# ----------------------------------------
msg = 'getting time of tertiary event after deleting the global event '
sys.stdout.write(msg+"\r" )
try:
c.time
except RuntimeError:
sys.stdout.write(msg+"...Passed\n" )
else:
sys.stdout.write(msg+"...Failed\n" )
# ----------------------------------------
msg = 'adding the global by named assignment'
sys.stdout.write(msg+"\r" )
try:
a.tumor = b
except :
sys.stdout.write(msg+"...Failed\n" )
else:
sys.stdout.write(msg+"...Passed\n" )
# ----------------------------------------
msg = 'getting time of secondary event after attribute assignment the global event '
sys.stdout.write(msg+"\r" )
try:
b.time
except :
sys.stdout.write(msg+"...Failed\n" )
else:
sys.stdout.write(msg+"...Passed\n" )
# ----------------------------------------
#a.isGlobalReference = True
msg = 'circular reference (as a named attribute)'
sys.stdout.write(msg+"\r" )
try:
c.person = a
except RuntimeError:
sys.stdout.write(msg+"...Passed\n" )
else:
sys.stdout.write(msg+"...Failed\n" )
# ----------------------------------------
#a.isGlobalReference = True
msg = 'deleting global event (as a named attribute)'
sys.stdout.write(msg+"\r" )
try:
del b.reference
except AttributeError:
sys.stdout.write(msg+"...Failed\n" )
else:
sys.stdout.write(msg+"...Passed\n" )
# ----------------------------------------
msg = 'getting time of secondary event after deleting the global event '
sys.stdout.write(msg+"\r" )
try:
b.time
except RuntimeError:
sys.stdout.write(msg+"...Passed\n" )
else:
sys.stdout.write(msg+"...Failed\n" )
# ----------------------------------------
msg = 'getting time of tertiary event after deleting the global event '
sys.stdout.write(msg+"\r" )
try:
c.time
except RuntimeError:
sys.stdout.write(msg+"...Passed\n" )
else:
sys.stdout.write(msg+"...Failed\n" )
# ValueError
```
#### File: jdthorpe/MCPH/origin.py
```python
maxEventProcessCalls = 1000
class origin(Event):
# prevention
def prevent(self,by):
raise RuntimeError('cannot prevent an origin')
    TimePrevented = property(lambda self: float('inf'))
    prevented = property(lambda self: False)
    # un-prevention
    def unpreventALL(self): pass
    def unprevent(self, by): pass
    # the origin is time = 0 by definition
    time = property(lambda self: 0)
    # reference property (the origin has no reference event)
    def _noReference(self):
        raise RuntimeError('the origin has no reference event')
    reference = property(_noReference)
    # return the origin (A.K.A. self)
    origin = property(lambda self: self)
def __init__(self,*args,**kwargs):
"""initialzation for an event origin."""
super(origin, self).__init__(*args,**kwargs)
self.eventState = ()
def processEvents(self):
""" this function calls all the events in order of time and allows them
to be processed, which allows events to be responsive to one
another.
Specifically, some events may be dependent on the occurance
of another event. For example, Tubal ligation is not possible
after tubes are removed, and women may be at lower risk after
a tubal ligation.
Event processing happens in order by time, and the event should
only respond by deleting it's reference (in the case that the event
does not happen because of an earlier event), or (if it is a
distributed event), by accelerating (decelerating) it's distribution
at the time of the event that event that affect's it's risk of
occuring by calling event.accelerate() [or better still, by calling
event.setChangePoints() with a list of event ages and RR's]
"""
if len(self.eventState):
raise RuntimeError('nested calls to origin.process() are not allowed')
events = self.getEvents(type=lambda x:True,
includePrevented=True,
includeNoneTimes=True,
deepQuery=True,
ordered=False)
eventState = tuple( (e.time,e) for e in events)
self.eventState = eventState
processedEvents = 0
while True:
for e in events:
                # --------------------------------------------------
                # It's important to avoid processing an event twice
                # with the same set of prior events, in order to prevent
                # infinite update loops.
                # So we'll record the list of events that occurred before this one
                # at the outset, and only re-update the event in case the set
                # of prior events has changed between loops.
                # --------------------------------------------------
                # get a list of events that occurred prior to the current event
if e.time is None:
                    # any event with a numeric time occurred before an event
                    # with a None time (None == Inf).
priorEvents = self.getEvents(
type=lambda x: True ,
deepQuery=True,
includeNoneTimes=False)
else:
priorEvents = self.getEvents(
type=lambda x: x.time is not None and x.time < e.time)
# the tuple will change if the event Times change...
priorEventsTuple = tuple( (e.time,e) for e in priorEvents)
if hasattr(e,'_processedFor'):
if e._processedFor == priorEventsTuple:
                            # don't process the same event twice (otherwise a
                            # race condition may occur)
continue
else:
                            # the event history has changed, so we need to
                            # reset the event to its initial state so as to
                            # keep the event processing idempotent.
# FIXME: this needs to be more generic than a call
# to a method specific to the DistributedEvent
# class. Something like event.Reset, perhaps
if hasattr(e,'_resetCDF'):
e._resetCDF()
e._processedFor = priorEventsTuple
eventTimeBeforeProcessing = e.time
e.process()
if eventTimeBeforeProcessing != e.time:
processedEvents += 1
if processedEvents > maxEventProcessCalls:
                        raise RuntimeError('Number of event.process() calls exceeds maxEventProcessCalls. Possible race condition.')
events = self.getEvents(type=lambda x:True,
includePrevented=True,
includeNoneTimes=True,
deepQuery=True,
ordered=False)
eventState = tuple( (e.time,e) for e in events)
if eventState == self.eventState:
break
self.eventState = eventState
#RESET THE eventsState...
self.eventState = ()
```
|
{
"source": "jdthorpe/svg2mod",
"score": 3
}
|
#### File: svg2mod/svg2mod/svg2mod.py
```python
import argparse
import datetime
import os
from pprint import pformat, pprint
import re
import svg2mod.svg as svg
import sys
from abc import ABC
# ----------------------------------------------------------------------------
DEFAULT_DPI = 96 # 96 as of Inkscape 0.92
# ----------------------------------------------------------------------------
def svg2mod(
format,
units,
input_file_name,
output_file_name,
module_name,
module_value,
center,
scale_factor,
precision,
dpi,
):
pretty = format == "pretty"
use_mm = units == "mm"
if pretty and not use_mm:
raise ValueError("Error: decimil units only allowed with legacy output type")
# Import the SVG:
imported = Svg2ModImport(input_file_name, module_name, module_value)
# Pick an output file name if none was provided:
if output_file_name is None:
output_file_name = os.path.splitext(os.path.basename(input_file_name))[0]
# Append the correct file name extension if needed:
if pretty:
extension = ".kicad_mod"
else:
extension = ".mod"
if not output_file_name.endswith(extension):
output_file_name += extension
if pretty:
return Svg2ModExportPretty(
imported,
output_file_name,
center,
scale_factor,
precision,
dpi,
)
# If the module file exists, try to read it:
if os.path.isfile(output_file_name):
module = Svg2ModExportLegacyUpdater(
imported,
output_file_name,
center,
scale_factor,
precision,
dpi,
)
if module:
return module
return Svg2ModExportLegacy(
imported,
output_file_name,
center,
scale_factor,
precision,
use_mm=use_mm,
dpi=dpi,
)
# ------------------------------------------------------------------------
class LineSegment:
# ------------------------------------------------------------------------
@staticmethod
def _on_segment(p, q, r):
"""Given three colinear points p, q, and r, check if
point q lies on line segment pr."""
if (
q.x <= max(p.x, r.x)
and q.x >= min(p.x, r.x)
and q.y <= max(p.y, r.y)
and q.y >= min(p.y, r.y)
):
return True
return False
# ------------------------------------------------------------------------
@staticmethod
def _orientation(p, q, r):
"""Find orientation of ordered triplet (p, q, r).
Returns following values
0 --> p, q and r are colinear
1 --> Clockwise
2 --> Counterclockwise
"""
val = (q.y - p.y) * (r.x - q.x) - (q.x - p.x) * (r.y - q.y)
if val == 0:
return 0
if val > 0:
return 1
return 2
# ------------------------------------------------------------------------
def __init__(self, p=None, q=None):
self.p = p
self.q = q
# ------------------------------------------------------------------------
def connects(self, segment):
if self.q.x == segment.p.x and self.q.y == segment.p.y:
return True
if self.q.x == segment.q.x and self.q.y == segment.q.y:
return True
if self.p.x == segment.p.x and self.p.y == segment.p.y:
return True
if self.p.x == segment.q.x and self.p.y == segment.q.y:
return True
return False
# ------------------------------------------------------------------------
def intersects(self, segment):
"""Return true if line segments 'p1q1' and 'p2q2' intersect.
Adapted from:
http://www.geeksforgeeks.org/check-if-two-given-line-segments-intersect/
"""
# Find the four orientations needed for general and special cases:
o1 = self._orientation(self.p, self.q, segment.p)
o2 = self._orientation(self.p, self.q, segment.q)
o3 = self._orientation(segment.p, segment.q, self.p)
o4 = self._orientation(segment.p, segment.q, self.q)
return (
# General case:
(o1 != o2 and o3 != o4)
or
# p1, q1 and p2 are colinear and p2 lies on segment p1q1:
(o1 == 0 and self._on_segment(self.p, segment.p, self.q))
or
# p1, q1 and p2 are colinear and q2 lies on segment p1q1:
(o2 == 0 and self._on_segment(self.p, segment.q, self.q))
or
# p2, q2 and p1 are colinear and p1 lies on segment p2q2:
(o3 == 0 and self._on_segment(segment.p, self.p, segment.q))
or
# p2, q2 and q1 are colinear and q1 lies on segment p2q2:
(o4 == 0 and self._on_segment(segment.p, self.q, segment.q))
)
# ------------------------------------------------------------------------
def q_next(self, q):
self.p = self.q
self.q = q
# ------------------------------------------------------------------------
# ----------------------------------------------------------------------------
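# Illustrative sketch of the segment predicates above (the coordinates are
# hypothetical, not taken from any real SVG):
#   a = LineSegment(svg.Point(0, 0), svg.Point(4, 4))
#   b = LineSegment(svg.Point(0, 4), svg.Point(4, 0))
#   a.intersects(b)  # -> True  (the two diagonals of a square cross)
#   a.connects(b)    # -> False (no shared endpoint)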
class PolygonSegment:
# ------------------------------------------------------------------------
def __init__(self, points):
self.points = points
if len(points) < 3:
print(
"Warning:"
" Path segment has only {} points (not a polygon?)".format(len(points))
)
# ------------------------------------------------------------------------
# KiCad will not "pick up the pen" when moving between a polygon outline
# and holes within it, so we search for a pair of points connecting the
# outline (self) to the hole such that the connecting segment will not
# cross the visible inner space within any hole.
def _find_insertion_point(self, hole, holes):
# print( " Finding insertion point. {} holes".format( len( holes ) ) )
# Try the next point on the container:
for cp in range(len(self.points)):
container_point = self.points[cp]
# print( " Trying container point {}".format( cp ) )
# Try the next point on the hole:
for hp in range(len(hole.points) - 1):
hole_point = hole.points[hp]
# print( " Trying hole point {}".format( cp ) )
bridge = LineSegment(container_point, hole_point)
# Check for intersection with each other hole:
for other_hole in holes:
# print( " Trying other hole. Check = {}".format( hole == other_hole ) )
# If the other hole intersects, don't bother checking
# remaining holes:
if other_hole.intersects(
bridge,
check_connects=(other_hole == hole or other_hole == self),
):
break
# print( " Hole does not intersect." )
else:
print(" Found insertion point: {}, {}".format(cp, hp))
# No other holes intersected, so this insertion point
# is acceptable:
return (cp, hole.points_starting_on_index(hp))
print("Could not insert segment without overlapping other segments")
# ------------------------------------------------------------------------
# Return the list of ordered points starting on the given index, ensuring
# that the first and last points are the same.
def points_starting_on_index(self, index):
points = self.points
if index > 0:
# Strip off end point, which is a duplicate of the start point:
points = points[:-1]
points = points[index:] + points[:index]
points.append(svg.Point(points[0].x, points[0].y))
return points
# ------------------------------------------------------------------------
# Return a list of points with the given polygon segments (paths) inlined.
def inline(self, segments):
if len(segments) < 1:
return self.points
print(" Inlining {} segments...".format(len(segments)))
all_segments = segments[:] + [self]
insertions = []
# Find the insertion point for each hole:
for hole in segments:
insertion = self._find_insertion_point(hole, all_segments)
if insertion is not None:
insertions.append(insertion)
insertions.sort(key=lambda i: i[0])
inlined = [self.points[0]]
ip = 1
points = self.points
for insertion in insertions:
while ip <= insertion[0]:
inlined.append(points[ip])
ip += 1
if (
inlined[-1].x == insertion[1][0].x
and inlined[-1].y == insertion[1][0].y
):
inlined += insertion[1][1:-1]
else:
inlined += insertion[1]
inlined.append(
svg.Point(
points[ip - 1].x,
points[ip - 1].y,
)
)
while ip < len(points):
inlined.append(points[ip])
ip += 1
return inlined
# ------------------------------------------------------------------------
def intersects(self, line_segment, check_connects):
hole_segment = LineSegment()
# Check each segment of other hole for intersection:
for point in self.points:
hole_segment.q_next(point)
if hole_segment.p is not None:
if check_connects and line_segment.connects(hole_segment):
continue
if line_segment.intersects(hole_segment):
return True
return False
# ------------------------------------------------------------------------
# Apply all transformations and rounding, then remove duplicate
# consecutive points along the path.
def process(self, transformer, flip, fill):
points = []
for point in self.points:
point = transformer.transform_point(point, flip)
if len(points) < 1 or point.x != points[-1].x or point.y != points[-1].y:
points.append(point)
if points[0].x != points[-1].x or points[0].y != points[-1].y:
# print( "Warning: Closing polygon. start=({}, {}) end=({}, {})".format(
# points[ 0 ].x, points[ 0 ].y,
# points[ -1 ].x, points[ -1 ].y,
# ) )
if fill:
points.append(
svg.Point(
points[0].x,
points[0].y,
)
)
# else:
# print( "Polygon closed: start=({}, {}) end=({}, {})".format(
# points[ 0 ].x, points[ 0 ].y,
# points[ -1 ].x, points[ -1 ].y,
# ) )
self.points = points
# ------------------------------------------------------------------------
# ----------------------------------------------------------------------------
class Svg2ModImport:
# ------------------------------------------------------------------------
def __init__(self, file_name, module_name, module_value):
self.file_name = file_name
self.module_name = module_name
self.module_value = module_value
print("Parsing SVG...")
self.svg = svg.parse(file_name)
# ------------------------------------------------------------------------
# ----------------------------------------------------------------------------
class Svg2ModExport(ABC):
layer_map = {}
# ------------------------------------------------------------------------
@staticmethod
def _convert_decimil_to_mm(decimil):
return float(decimil) * 0.00254
# ------------------------------------------------------------------------
@staticmethod
def _convert_mm_to_decimil(mm):
return int(round(mm * 393.700787))
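    # Note: 393.700787 is approximately 10000 / 25.4, i.e. decimil (1/10,000 inch)
    # per millimetre; it is the inverse of the 0.00254 factor used above.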
# ------------------------------------------------------------------------
def _get_fill_stroke(self, item):
fill = True
stroke = True
stroke_width = 0.0
if item.style is not None and item.style != "":
for property in filter(None, item.style.split(";")):
nv = property.split(":")
name = nv[0].strip()
value = nv[1].strip()
if name == "fill" and value == "none":
fill = False
elif name == "stroke" and value == "none":
stroke = False
elif name == "stroke-width":
if value.endswith("px"):
value = value.replace("px", "")
stroke_width = float(value) * 25.4 / float(self.dpi)
else:
stroke_width = float(value)
if not stroke:
stroke_width = 0.0
elif stroke_width is None:
# Give a default stroke width?
stroke_width = self._convert_decimil_to_mm(1)
return fill, stroke, stroke_width
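    # Illustrative sketch (the style string is hypothetical): for an item styled
    # "fill:none;stroke:#000000;stroke-width:2px" at dpi=96, this would return
    # fill=False, stroke=True, stroke_width = 2 * 25.4 / 96 (about 0.53 mm).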
# ------------------------------------------------------------------------
def __init__(
self,
svg2mod_import,
file_name,
center,
scale_factor=1.0,
precision=20.0,
use_mm=True,
dpi=DEFAULT_DPI,
):
if use_mm:
# 25.4 mm/in;
scale_factor *= 25.4 / float(dpi)
use_mm = True
else:
# PCBNew uses "decimil" (10K DPI);
scale_factor *= 10000.0 / float(dpi)
self.imported = svg2mod_import
self.file_name = file_name
self.center = center
self.scale_factor = scale_factor
self.precision = precision
self.use_mm = use_mm
self.dpi = dpi
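    # Scale-factor sketch (numbers shown for illustration only): with the default
    # 96 DPI, use_mm=True maps one SVG user unit to 25.4 / 96 (about 0.265 mm),
    # while the legacy decimil mode maps it to 10000 / 96 (about 104.2 decimil).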
# ------------------------------------------------------------------------
def _calculate_translation(self):
min_point, max_point = self.imported.svg.bbox()
if self.center:
# Center the drawing:
adjust_x = min_point.x + (max_point.x - min_point.x) / 2.0
adjust_y = min_point.y + (max_point.y - min_point.y) / 2.0
self.translation = svg.Point(
0.0 - adjust_x,
0.0 - adjust_y,
)
else:
self.translation = svg.Point(
0.0,
0.0,
)
# ------------------------------------------------------------------------
# Find and keep only the layers of interest.
def _prune(self, items=None):
if items is None:
self.layers = {}
for name in self.layer_map.keys():
self.layers[name] = None
items = self.imported.svg.items
self.imported.svg.items = []
for item in items:
if not isinstance(item, svg.Group):
continue
for name in self.layers.keys():
# if re.search( name, item.name, re.I ):
if name == item.name:
print("Found SVG layer: {}".format(item.name))
self.imported.svg.items.append(item)
self.layers[name] = item
break
else:
self._prune(item.items)
# ------------------------------------------------------------------------
def _write_items(self, items, layer, flip=False):
for item in items:
if isinstance(item, svg.Group):
self._write_items(item.items, layer, flip)
continue
elif isinstance(item, svg.Path):
segments = [
PolygonSegment(segment)
for segment in item.segments(precision=self.precision)
]
fill, stroke, stroke_width = self._get_fill_stroke(item)
for segment in segments:
segment.process(self, flip, fill)
                if len(segments) > 1:
                    points = segments[0].inline(segments[1:])
                elif len(segments) > 0:
                    points = segments[0].points
                else:
                    # an empty path yields no points; skip it rather than writing
                    # an undefined polygon
                    continue
if not self.use_mm:
stroke_width = self._convert_mm_to_decimil(stroke_width)
print(" Writing polygon with {} points".format(len(points)))
self._write_polygon(points, layer, fill, stroke, stroke_width)
else:
print("Unsupported SVG element: {}".format(item.__class__.__name__))
# ------------------------------------------------------------------------
# abstract classes to be included in subclasses
def _write_polygon(self, points, layer, fill, stroke, stroke_width):
raise NotImplementedError
def _write_module_header(
self,
label_size,
label_pen,
reference_y,
value_y,
front,
):
raise NotImplementedError
def _get_layer_name(self, name, front):
raise NotImplementedError
def _write_module_footer(self, front):
raise NotImplementedError
def _write_polygon_header(self, points, layer):
raise NotImplementedError
def _write_polygon_point(self, point):
raise NotImplementedError
def _write_polygon_footer(self, layer, stroke_width):
raise NotImplementedError
def _write_polygon_segment(self, prior_point, point, layer, stroke_width):
raise NotImplementedError
def _write_library_intro(self):
raise NotImplementedError
def _write_modules(self):
raise NotImplementedError
# ------------------------------------------------------------------------
def _write_module(self, front):
min_point, max_point = self.imported.svg.bbox()
min_point = self.transform_point(min_point, flip=False)
max_point = self.transform_point(max_point, flip=False)
label_offset = 1200
label_size = 600
label_pen = 120
if self.use_mm:
label_size = self._convert_decimil_to_mm(label_size)
label_pen = self._convert_decimil_to_mm(label_pen)
reference_y = min_point.y - self._convert_decimil_to_mm(label_offset)
value_y = max_point.y + self._convert_decimil_to_mm(label_offset)
else:
reference_y = min_point.y - label_offset
value_y = max_point.y + label_offset
self._write_module_header(
label_size,
label_pen,
reference_y,
value_y,
front,
)
for name, group in self.layers.items():
if group is None:
continue
layer = self._get_layer_name(name, front)
# print( " Writing layer: {}".format( name ) )
self._write_items(group.items, layer, not front)
self._write_module_footer(front)
# ------------------------------------------------------------------------
def _write_polygon_filled(self, points, layer, stroke_width=0.0):
self._write_polygon_header(points, layer)
for point in points:
self._write_polygon_point(point)
self._write_polygon_footer(layer, stroke_width)
# ------------------------------------------------------------------------
def _write_polygon_outline(self, points, layer, stroke_width):
prior_point = None
for point in points:
if prior_point is not None:
self._write_polygon_segment(prior_point, point, layer, stroke_width)
prior_point = point
# ------------------------------------------------------------------------
def transform_point(self, point, flip=False):
transformed_point = svg.Point(
(point.x + self.translation.x) * self.scale_factor,
(point.y + self.translation.y) * self.scale_factor,
)
if flip:
transformed_point.x *= -1
if self.use_mm:
transformed_point.x = round(transformed_point.x, 12)
transformed_point.y = round(transformed_point.y, 12)
else:
transformed_point.x = int(round(transformed_point.x))
transformed_point.y = int(round(transformed_point.y))
return transformed_point
# ------------------------------------------------------------------------
def write(self, file):
self._prune()
# Must come after pruning:
self._calculate_translation()
try:
self.output_file = file
self._write_library_intro()
self._write_modules()
finally:
self.output_file = None
# ------------------------------------------------------------------------
# ----------------------------------------------------------------------------
class Svg2ModExportLegacy(Svg2ModExport):
layer_map = {
#'inkscape-name' : [ kicad-front, kicad-back ],
"F.Cu": [15, 15],
"B.Cu": [0, 0],
"F.Adhes": [17, 17],
"B.Adhes": [16, 16],
"F.Paste": [19, 19],
"B.Paste": [18, 18],
"F.SilkS": [21, 21],
"B.SilkS": [20, 20],
"F.Mask": [23, 23],
"B.Mask": [22, 22],
"Dwgs.User": [24, 24],
"Cmts.User": [25, 25],
"Eco1.User": [26, 26],
"Eco2.User": [27, 27],
"Edge.Cuts": [28, 28],
}
# ------------------------------------------------------------------------
def __init__(
self,
svg2mod_import,
file_name,
center,
scale_factor=1.0,
precision=20.0,
use_mm=True,
dpi=DEFAULT_DPI,
):
super(Svg2ModExportLegacy, self).__init__(
svg2mod_import,
file_name,
center,
scale_factor,
precision,
use_mm,
dpi,
)
self.include_reverse = True
# ------------------------------------------------------------------------
def _get_layer_name(self, name, front):
layer_info = self.layer_map[name]
layer = layer_info[0]
if not front and layer_info[1] is not None:
layer = layer_info[1]
return layer
# ------------------------------------------------------------------------
def _get_module_name(self, front=None):
if self.include_reverse and not front:
return self.imported.module_name + "-rev"
return self.imported.module_name
# ------------------------------------------------------------------------
def _write_library_intro(self):
modules_list = self._get_module_name(front=True)
if self.include_reverse:
modules_list += "\n" + self._get_module_name(front=False)
units = ""
if self.use_mm:
units = "\nUnits mm"
self.output_file.write(
"""PCBNEW-LibModule-V1 {0}{1}
$INDEX
{2}
$EndINDEX
#
# {3}
#
""".format(
datetime.datetime.now().strftime("%a %d %b %Y %I:%M:%S %p %Z"),
units,
modules_list,
self.imported.file_name,
)
)
# ------------------------------------------------------------------------
def _write_module_header(
self,
label_size,
label_pen,
reference_y,
value_y,
front,
):
self.output_file.write(
"""$MODULE {0}
Po 0 0 0 {6} 00000000 00000000 ~~
Li {0}
T0 0 {1} {2} {2} 0 {3} N I 21 "{0}"
T1 0 {5} {2} {2} 0 {3} N I 21 "{4}"
""".format(
self._get_module_name(front),
reference_y,
label_size,
label_pen,
self.imported.module_value,
value_y,
15, # Seems necessary
)
)
# ------------------------------------------------------------------------
def _write_module_footer(self, front):
self.output_file.write("$EndMODULE {0}\n".format(self._get_module_name(front)))
# ------------------------------------------------------------------------
def _write_modules(self):
self._write_module(front=True)
if self.include_reverse:
self._write_module(front=False)
self.output_file.write("$EndLIBRARY")
# ------------------------------------------------------------------------
def _write_polygon(self, points, layer, fill, stroke, stroke_width):
if fill:
self._write_polygon_filled(points, layer)
if stroke:
self._write_polygon_outline(points, layer, stroke_width)
# ------------------------------------------------------------------------
def _write_polygon_footer(self, layer, stroke_width):
pass
# ------------------------------------------------------------------------
def _write_polygon_header(self, points, layer):
pen = 1
if self.use_mm:
pen = self._convert_decimil_to_mm(pen)
self.output_file.write("DP 0 0 0 0 {} {} {}\n".format(len(points), pen, layer))
# ------------------------------------------------------------------------
def _write_polygon_point(self, point):
self.output_file.write("Dl {} {}\n".format(point.x, point.y))
# ------------------------------------------------------------------------
def _write_polygon_segment(self, p, q, layer, stroke_width):
self.output_file.write(
"DS {} {} {} {} {} {}\n".format(p.x, p.y, q.x, q.y, stroke_width, layer)
)
# ------------------------------------------------------------------------
# ----------------------------------------------------------------------------
class Svg2ModExportLegacyUpdater(Svg2ModExportLegacy):
# ------------------------------------------------------------------------
def __init__(
self,
svg2mod_import,
file_name,
center,
scale_factor=1.0,
precision=20.0,
dpi=DEFAULT_DPI,
include_reverse=True,
):
self.file_name = file_name
use_mm = self._parse_output_file()
super(Svg2ModExportLegacyUpdater, self).__init__(
svg2mod_import,
file_name,
center,
scale_factor,
precision,
use_mm,
dpi,
)
# ------------------------------------------------------------------------
def _parse_output_file(self):
print("Parsing module file: {}".format(self.file_name))
module_file = open(self.file_name, "r")
lines = module_file.readlines()
module_file.close()
self.loaded_modules = {}
self.post_index = []
self.pre_index = []
use_mm = False
index = 0
# Find the start of the index:
while index < len(lines):
line = lines[index]
index += 1
self.pre_index.append(line)
if line[:6] == "$INDEX":
break
m = re.match("Units[\s]+mm[\s]*", line)
if m is not None:
print(" Use mm detected")
use_mm = True
# Read the index:
while index < len(lines):
line = lines[index]
if line[:9] == "$EndINDEX":
break
index += 1
self.loaded_modules[line.strip()] = []
# Read up until the first module:
while index < len(lines):
line = lines[index]
if line[:7] == "$MODULE":
break
index += 1
self.post_index.append(line)
# Read modules:
while index < len(lines):
line = lines[index]
if line[:7] == "$MODULE":
module_name, module_lines, index = self._read_module(lines, index)
if module_name is not None:
self.loaded_modules[module_name] = module_lines
elif line[:11] == "$EndLIBRARY":
break
else:
raise Exception("Expected $EndLIBRARY: [{}]".format(line))
# print( "Pre-index:" )
# pprint( self.pre_index )
# print( "Post-index:" )
# pprint( self.post_index )
# print( "Loaded modules:" )
# pprint( self.loaded_modules )
return use_mm
# ------------------------------------------------------------------------
def _read_module(self, lines, index):
# Read module name:
m = re.match(r"\$MODULE[\s]+([^\s]+)[\s]*", lines[index])
module_name = m.group(1)
print(" Reading module {}".format(module_name))
index += 1
module_lines = []
while index < len(lines):
line = lines[index]
index += 1
m = re.match(r"\$EndMODULE[\s]+" + module_name + r"[\s]*", line)
if m is not None:
return module_name, module_lines, index
module_lines.append(line)
raise Exception("Could not find end of module '{}'".format(module_name))
# ------------------------------------------------------------------------
def _write_library_intro(self):
# Write pre-index:
self.output_file.writelines(self.pre_index)
self.loaded_modules[self._get_module_name(front=True)] = None
if self.include_reverse:
self.loaded_modules[self._get_module_name(front=False)] = None
# Write index:
for module_name in sorted(self.loaded_modules.keys(), key=str.lower):
self.output_file.write(module_name + "\n")
# Write post-index:
self.output_file.writelines(self.post_index)
# ------------------------------------------------------------------------
def _write_preserved_modules(self, up_to=None):
if up_to is not None:
up_to = up_to.lower()
for module_name in sorted(self.loaded_modules.keys(), key=str.lower):
if up_to is not None and module_name.lower() >= up_to:
continue
module_lines = self.loaded_modules[module_name]
if module_lines is not None:
self.output_file.write("$MODULE {}\n".format(module_name))
self.output_file.writelines(module_lines)
self.output_file.write("$EndMODULE {}\n".format(module_name))
self.loaded_modules[module_name] = None
# ------------------------------------------------------------------------
def _write_module_footer(self, front):
super(Svg2ModExportLegacyUpdater, self)._write_module_footer(
front,
)
# Write remaining modules:
if not front:
self._write_preserved_modules()
# ------------------------------------------------------------------------
def _write_module_header(
self,
label_size,
label_pen,
reference_y,
value_y,
front,
):
self._write_preserved_modules(up_to=self._get_module_name(front))
super(Svg2ModExportLegacyUpdater, self)._write_module_header(
label_size,
label_pen,
reference_y,
value_y,
front,
)
# ------------------------------------------------------------------------
# ----------------------------------------------------------------------------
class Svg2ModExportPretty(Svg2ModExport):
layer_map = {
#'inkscape-name' : kicad-name,
"F.Cu": "F.Cu",
"B.Cu": "B.Cu",
"F.Adhes": "F.Adhes",
"B.Adhes": "B.Adhes",
"F.Paste": "F.Paste",
"B.Paste": "B.Paste",
"F.SilkS": "F.SilkS",
"B.SilkS": "B.SilkS",
"F.Mask": "F.Mask",
"B.Mask": "B.Mask",
"Dwgs.User": "Dwgs.User",
"Cmts.User": "Cmts.User",
"Eco1.User": "Eco1.User",
"Eco2.User": "Eco2.User",
"Edge.Cuts": "Edge.Cuts",
"F.CrtYd": "F.CrtYd",
"B.CrtYd": "B.CrtYd",
"F.Fab": "F.Fab",
"B.Fab": "B.Fab",
}
# ------------------------------------------------------------------------
def _get_layer_name(self, name, front):
return self.layer_map[name]
# ------------------------------------------------------------------------
def _get_module_name(self, front=None):
return self.imported.module_name
# ------------------------------------------------------------------------
def _write_library_intro(self):
self.output_file.write(
"""(module {0} (layer F.Cu) (tedit {1:8X})
(attr virtual)
(descr "{2}")
(tags {3})
""".format(
self.imported.module_name, # 0
int(round(os.path.getctime(self.imported.file_name))), # 1
"Imported from {}".format(self.imported.file_name), # 2
"svg2mod", # 3
)
)
# ------------------------------------------------------------------------
def _write_module_footer(self, front):
self.output_file.write("\n)")
# ------------------------------------------------------------------------
def _write_module_header(
self,
label_size,
label_pen,
reference_y,
value_y,
front,
):
if front:
side = "F"
else:
side = "B"
self.output_file.write(
""" (fp_text reference {0} (at 0 {1}) (layer {2}.SilkS) hide
(effects (font (size {3} {3}) (thickness {4})))
)
(fp_text value {5} (at 0 {6}) (layer {2}.SilkS) hide
(effects (font (size {3} {3}) (thickness {4})))
)""".format(
self._get_module_name(), # 0
reference_y, # 1
side, # 2
label_size, # 3
label_pen, # 4
self.imported.module_value, # 5
value_y, # 6
)
)
# ------------------------------------------------------------------------
def _write_modules(self):
self._write_module(front=True)
# ------------------------------------------------------------------------
def _write_polygon(self, points, layer, fill, stroke, stroke_width):
if fill:
self._write_polygon_filled(points, layer, stroke_width)
# Polygons with a fill and stroke are drawn with the filled polygon
# above:
if stroke and not fill:
self._write_polygon_outline(points, layer, stroke_width)
# ------------------------------------------------------------------------
def _write_polygon_footer(self, layer, stroke_width):
self.output_file.write(
" )\n (layer {})\n (width {})\n )".format(layer, stroke_width)
)
# ------------------------------------------------------------------------
def _write_polygon_header(self, points, layer):
self.output_file.write("\n (fp_poly\n (pts \n")
# ------------------------------------------------------------------------
def _write_polygon_point(self, point):
self.output_file.write(" (xy {} {})\n".format(point.x, point.y))
# ------------------------------------------------------------------------
def _write_polygon_segment(self, p, q, layer, stroke_width):
self.output_file.write(
"""\n (fp_line
(start {} {})
(end {} {})
(layer {})
(width {})
)""".format(
p.x,
p.y,
q.x,
q.y,
layer,
stroke_width,
)
)
# ----------------------------------------------------------------------------
def get_arguments():
parser = argparse.ArgumentParser(
description=("Convert Inkscape SVG drawings to KiCad footprint modules.")
)
# ------------------------------------------------------------------------
parser.add_argument(
"-i",
"--input-file",
type=str,
dest="input_file_name",
metavar="FILENAME",
help="name of the SVG file",
required=True,
)
parser.add_argument(
"-o",
"--output-file",
type=str,
dest="output_file_name",
metavar="FILENAME",
help="name of the module file",
)
parser.add_argument(
"--name",
"--module-name",
type=str,
dest="module_name",
metavar="NAME",
help="base name of the module",
default="svg2mod",
)
parser.add_argument(
"--value",
"--module-value",
type=str,
dest="module_value",
metavar="VALUE",
help="value of the module",
default="G***",
)
parser.add_argument(
"-f",
"--factor",
type=float,
dest="scale_factor",
metavar="FACTOR",
help="scale paths by this factor",
default=1.0,
)
parser.add_argument(
"-p",
"--precision",
type=float,
dest="precision",
metavar="PRECISION",
help="smoothness for approximating curves with line segments (float)",
default=10.0,
)
parser.add_argument(
"--format",
type=str,
dest="format",
metavar="FORMAT",
choices=["legacy", "pretty"],
help="output module file format (legacy|pretty)",
default="pretty",
)
parser.add_argument(
"--units",
type=str,
dest="units",
metavar="UNITS",
choices=["decimil", "mm"],
help="output units, if output format is legacy (decimil|mm)",
default="mm",
)
parser.add_argument(
"-d",
"--dpi",
type=int,
dest="dpi",
metavar="DPI",
help="DPI of the SVG file (int)",
default=DEFAULT_DPI,
)
parser.add_argument(
"--center",
dest="center",
action="store_const",
const=True,
help="Center the module to the center of the bounding box",
default=False,
)
return parser.parse_args(), parser
# ------------------------------------------------------------------------
def main():
args = get_arguments()[0]
module: Svg2ModExport = svg2mod(
format=args.format,
units=args.units,
input_file_name=args.input_file_name,
output_file_name=args.output_file_name,
module_name=args.module_name,
module_value=args.module_value,
center=args.center,
scale_factor=args.scale_factor,
precision=args.precision,
dpi=args.dpi,
)
    # use the output file name resolved by svg2mod() (it may differ from the CLI
    # value, e.g. when no -o option was given)
    with open(module.file_name, "w") as fh:
        module.write(fh)
# ----------------------------------------------------------------------------
if __name__ == "__main__":
main()
```
|
{
"source": "jdtibochab/bacillusme",
"score": 3
}
|
#### File: bacillusme/util/helper_functions.py
```python
import cobrame
from cobrame.core.model import MEModel
from tqdm import tqdm
import pandas as pd
import re
import matplotlib.pyplot as plt
import numpy as np
def get_base_complex_data(model, complex_id):
"""If a complex is modified in a metabolic reaction it will not
have a formation reaction associated with it. This function returns
the complex data of the "base" complex, which will have the subunit
stoichiometry of that complex"""
# First try unmodified complex id
try_1 = complex_id.split('_')[0]
if try_1 in model.process_data:
return model.process_data.get_by_id(try_1)
try_2 = complex_id.split('_')[0] + '_'
count = 0
for i in model.process_data.query(try_2):
if isinstance(i, cobrame.ComplexData):
count += 1
data = i
if count == 0:
raise UserWarning('No base complex found for %s' % complex_id)
if count > 1:
raise UserWarning('More than one possible base complex found for %s' %
complex_id)
return data
def get_identical_reactions(ref,rxn):
candidate_rxns = []
metabolites = rxn.metabolites
met_ids = []
# Get reaction metabolite IDs
for metabolite in metabolites:
met_ids.append(metabolite.id)
# Look for identical reactions in reference model
for ref_rxn in ref.reactions:
ref_metabolites = ref_rxn.metabolites
ref_met_ids = []
if len(metabolites) == len(ref_metabolites):
for ref_metabolite in ref_metabolites:
ref_met_ids.append(ref_metabolite.id)
if len(list(set(ref_met_ids) & set(met_ids))) == len(metabolites):
candidate_rxns.append(ref_rxn)
return candidate_rxns
def get_gene_info(gb_file,info,ID,element_types):
output = None
for feature in gb_file.features:
# Skip if not a gene used in ME construction
if feature.type not in element_types or 'pseudo' in feature.qualifiers:
continue
if feature.qualifiers["locus_tag"][0] == ID:
output = feature.qualifiers[info]
return output
############################### NEW ###############################
def test_metabolite_production(me,metabolites,muf = 0.):
from qminospy.me2 import ME_NLP
gap_mets = []
if not muf and me.global_info['k_deg'] != 0:
print ('Updating model with kdeg = 0 for mu = 0')
me.global_info['k_deg'] = 0.
me.update()
for met_id in metabolites:
r_id = 'DM_' + met_id
r = cobrame.MEReaction(r_id)
try:
me.add_reaction(r)
r.reaction = met_id + '->'
except:
print(me.reactions.get_by_id(r_id).id,' already in model')
#print_reactions_of_met(me,met_id)
me.objective = r_id
me_nlp = ME_NLP(me, growth_key='mu')
x,status,hs = me_nlp.solvelp(muf)
f = me.solution.x_dict[r_id]
if not status == 'optimal' or f < 0.01:
gap_mets.append(met_id)
print(met_id, status, f)
return gap_mets
def identify_precursors(me,metabolite_id,only_direct_precursors = False,ignore_classes = None, force_classes = None):
### NOT WORKING YET
import copy
precursors = []
formation_reactions = []
metabolite = me.metabolites.get_by_id(metabolite_id)
for rxn in me.reactions:
if metabolite in rxn.products:
formation_reactions.append(rxn)
for rxn in formation_reactions:
for reactant in rxn.reactants:
if reactant.id not in precursors:
precursors.append(reactant.id)
direct_precursors = copy.copy(precursors)
#print(precursors)
if not only_direct_precursors:
for precursor in precursors:
for rxn in me.metabolites.get_by_id(precursor).reactions:
products_of_rxn = [product.id for product in rxn.products]
if precursor in products_of_rxn:
reactants = rxn.reactants
reactant_ids = [met.id for met in reactants]
if metabolite_id not in reactant_ids:
for reactant in reactants:
if reactant.id not in precursors:
precursors.append(reactant.id)
#print(reactant.id)
test_precursors = copy.copy(precursors)
if ignore_classes:
for precursor_id in test_precursors:
precursor = me.metabolites.get_by_id(precursor_id)
for ignore_class in ignore_classes:
if isinstance(precursor,ignore_class):
precursors.remove(precursor_id)
break
print(len(precursors))
test_precursors = copy.copy(precursors)
if force_classes:
for precursor_id in test_precursors:
precursor = me.metabolites.get_by_id(precursor_id)
e = 1
for force_class in force_classes:
if isinstance(precursor,force_class):
e = 0
if e:
precursors.remove(precursor_id)
print(len(precursors))
return precursors,direct_precursors
def get_reactions_of_met(me,met,s = 0, ignore_types = (),only_types = (), verbose = True):
import copy
met_stoich = 0
if only_types:
only_reaction_types = tuple([getattr(cobrame,i) for i in only_types])
elif ignore_types:
ignore_reaction_types = tuple([getattr(cobrame,i) for i in ignore_types])
reactions = []
if not hasattr(me.metabolites,met):
return reactions
for rxn in me.metabolites.get_by_id(met).reactions:
if only_types and not isinstance(rxn, only_reaction_types):
continue
elif ignore_types and isinstance(rxn, ignore_reaction_types):
continue
reactants = [met.id for met in rxn.reactants]
products = [met.id for met in rxn.products]
try:
pos = 1 if met in products else -1
rev = 1 if rxn.lower_bound < 0 else 0
fwd = 1 if rxn.upper_bound > 0 else 0
except:
if verbose:
print(rxn.id, 'symbolic bounds')
else:
pass
try:
if not s:
reactions.append(rxn)
if verbose:
print('(',rxn.id,rxn.lower_bound,rxn.upper_bound,')', '\t',rxn.reaction)
elif s == pos*fwd or s == pos*rev:
reactions.append(rxn)
if verbose:
print('(',rxn.id,rxn.lower_bound,rxn.upper_bound,')', '\t',rxn.reaction)
except:
if verbose:
print(rxn.id, 'no reaction')
else:
pass
return reactions
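# Illustrative usage (a minimal sketch; 'atp_c' is just an example metabolite id):
#   producers = get_reactions_of_met(me, 'atp_c', s=1, verbose=False)   # reactions that can produce it
#   consumers = get_reactions_of_met(me, 'atp_c', s=-1, verbose=False)  # reactions that can consume it
#   everything = get_reactions_of_met(me, 'atp_c', s=0, verbose=False)  # all reactions involving it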
def add_exchange_reactions(me,metabolites):
for met in metabolites:
rxn_id = "EX_" + met
try:
r = cobrame.MEReaction(rxn_id)
me.add_reaction(r)
r.reaction = met + " <=> "
except:
r = me.reactions.get_by_id(rxn_id)
r.lower_bound = -1000
#print(r.id,r.lower_bound,r.upper_bound,r.reaction)
return me
def brute_force_check(me,metabolites_to_add,objective_function = 'biomass_dilution',muf = 0.01, min_f = 0.01):
me.objective = objective_function
from qminospy.me2 import ME_NLP
print('Added exchange reactions ')
me = add_exchange_reactions(me,metabolites_to_add)
print('Objective: ', objective_function, me.reactions.get_by_id(objective_function).reaction)
me_nlp = ME_NLP(me, growth_key='mu')
x,status,hs = me_nlp.solvelp(muf)
initial_f = me.solution.x_dict[objective_function]
print('Initial objective function value of ', initial_f, status)
if not status =='optimal':
return
if initial_f < min_f:
print('No production capacity of objective')
print(me.solution.x_dict['formation_ribosome'])
eliminate_mets = []
for met_id in metabolites_to_add:
ex_rxn_id = "EX_" + met_id
ex_rxn_flux = me.solution.x_dict[ex_rxn_id]
ex_rxn = me.reactions.get_by_id(ex_rxn_id)
if ex_rxn_flux > 0:
me.reactions.get_by_id(ex_rxn_id).lower_bound = 0
me.reactions.get_by_id(ex_rxn_id).upper_bound = 1000
print(ex_rxn_id, ex_rxn_flux, ex_rxn.reaction)
elif ex_rxn_flux < 0:
me.reactions.get_by_id(ex_rxn_id).lower_bound = -1000
me.reactions.get_by_id(ex_rxn_id).upper_bound = 0
print(ex_rxn_id, ex_rxn_flux, ex_rxn.reaction)
elif ex_rxn_flux == 0:
me.reactions.get_by_id(ex_rxn_id).lower_bound = 0
me.reactions.get_by_id(ex_rxn_id).upper_bound = 0
print(ex_rxn_id, ' carrying no flux ... eliminated')
eliminate_mets.append(met_id)
for el_met_id in eliminate_mets:
el_rxn_id = 'EX_' + el_met_id
metabolites_to_add.remove(el_met_id)
print('Processing ', len(metabolites_to_add), ' metabolites')
gap_mets = []
for met_id in metabolites_to_add:
ex_rxn_id = "EX_" + met_id
lb = me.reactions.get_by_id(ex_rxn_id).lower_bound
        ub = me.reactions.get_by_id(ex_rxn_id).upper_bound
me.reactions.get_by_id(ex_rxn_id).lower_bound = 0
me.reactions.get_by_id(ex_rxn_id).upper_bound = 0
me_nlp = ME_NLP(me, growth_key='mu')
x,status,hs = me_nlp.solvelp(muf)
f = me.solution.x_dict[objective_function]
el_bool = ''
if not status == 'optimal' or f < min_f:
me.reactions.get_by_id(ex_rxn_id).lower_bound = lb
            me.reactions.get_by_id(ex_rxn_id).upper_bound = ub
gap_mets.append(met_id)
el_bool = ' gap'
print(met_id, status, f, el_bool, '... Gaps: ', len(gap_mets))
return gap_mets
def solve_me_model(me, max_mu=1., precision=1e-6, min_mu=0, using_soplex=True,
compiled_expressions=None, verbosity = 2, mu_fix = False,
growth_key='mu'):
from qminospy.me1 import ME_NLP1
## If fixed growth rate, solve as LP
if mu_fix:
me_nlp = ME_NLP1(me)
me_nlp.solvelp(mu_fix)
else:
##
if using_soplex:
from cobrame.solve.algorithms import binary_search
binary_search(me, min_mu=min_mu, max_mu=max_mu, debug=True, mu_accuracy=precision,
compiled_expressions=compiled_expressions)
else:
# The object containing solveME methods--composite that uses a ME model object
me_nlp = ME_NLP1(me, growth_key=growth_key)
# Use bisection for now (until the NLP formulation is worked out)
muopt, hs, xopt, cache = me_nlp.bisectmu(precision=precision, mumax=max_mu, verbosity=verbosity)
try:
me.solution.f = me.solution.x_dict['biomass_dilution']
except:
pass
def show_escher_map(me, solution=None):
import escher
view = escher.Builder("iJO1366.Central metabolism")
view.reaction_data = me.get_metabolic_flux(solution=solution)
return view
def open_all_exchange(me):
for rxn in me.reactions:
rxn_id = rxn.id
if 'EX_' in rxn_id:
rxn.upper_bound = 1000
rxn.lower_bound = -1000
return me
def is_same_reaction(rxn,ref_rxn,approximate=False):
a = 2 if approximate else 0
reactants = [met.id[0:len(met.id)-a] for met in rxn.reactants]
ref_reactants = [met.id[0:len(met.id)-a] for met in ref_rxn.reactants]
products = [met.id[0:len(met.id)-a] for met in rxn.products]
ref_products = [met.id[0:len(met.id)-a] for met in ref_rxn.products]
if set(reactants)==set(ref_reactants) and set(products)==set(ref_products):
return 1
else:
return 0
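# With approximate=True the comparison drops the last two characters of each
# metabolite id (typically a compartment suffix such as '_c' or '_e'), so
# otherwise identical reactions can be matched across compartment naming schemes.
# The suffix convention is assumed from the callers below; the ids are examples.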
def homogenize_metabolites(model,ref_model,approximate=False):
# Warning: This function assumes metabolite IDs are conserved.
met_dict = {}
for met in model.metabolites:
if approximate:
met_dict[met.id] = [ref_met.id for ref_met in ref_model.metabolites\
if ref_met.id[0:len(ref_met.id)-2] == met.id[0:len(met.id)-2]]
elif met.id in ref_model.metabolites:
met_dict[met.id] = [met.id]
else:
met_dict[met.id] = []
return met_dict
def homogenize_reactions(model,ref_model,approximate=False):
print('Homogenizing metabolites {} against {}'.format(model.id,ref_model.id))
met_dict = homogenize_metabolites(model,ref_model,approximate=approximate)
all_ref_rxns = [rxn.id for rxn in ref_model.reactions]
rxn_dict = dict()
rxn_id_dict = {}
print('Homogenizing reactions {} against {}'.format(model.id,ref_model.id))
for rxn in tqdm(model.reactions):
if rxn not in rxn_dict:
rxn_dict[rxn] = []
rxn_id_dict[rxn.id] = []
base_met = rxn.reactants[0]
for m in met_dict[base_met.id]:
ref_met = ref_model.metabolites.get_by_id(m)
for ref_rxn in ref_met.reactions:
if is_same_reaction(rxn,ref_rxn,approximate=approximate):
rxn_dict[rxn].append(ref_rxn)
rxn_id_dict[rxn.id].append(ref_rxn.id)
return rxn_dict,rxn_id_dict
def exchange_single_model(me, flux_dict = 0, solution=0):
import pandas as pd
complete_dict = {'id':[],'name':[],'reaction':[],'lower_bound':[],'upper_bound':[],'flux':[]}
if solution:
flux_dict = solution.x_dict
elif not flux_dict:
flux_dict = me.solution.x_dict
for rxn in me.reactions:
if 'EX_' in rxn.id:
flux = flux_dict[rxn.id]
if not flux:
continue
rxn_name = rxn.name
reaction = rxn.reaction
lb = rxn.lower_bound
ub = rxn.upper_bound
complete_dict['id'].append(rxn.id)
complete_dict['name'].append(rxn_name)
complete_dict['reaction'].append(reaction)
complete_dict['lower_bound'].append(lb)
complete_dict['upper_bound'].append(ub)
complete_dict['flux'].append(flux)
df = pd.DataFrame(complete_dict).set_index('id')
return df
def get_metabolites_from_pattern(model,pattern):
met_list = []
for met in model.metabolites:
if pattern in met.id:
met_list.append(met.id)
return met_list
def flux_based_reactions(model,met_id,only_types=(),ignore_types = (),threshold = 0.,flux_dict=0,growth_symbol='mu'):
if not flux_dict:
flux_dict = model.solution.x_dict
reactions = get_reactions_of_met(model,met_id,only_types=only_types,ignore_types=ignore_types,verbose=False)
if len(reactions) == 0:
print('No reactions found for {}'.format(met_id))
return
result_dict = {}
for rxn in reactions:
result_dict[rxn.id] = {}
for rxn_met,stoich in rxn.metabolites.items():
if rxn_met.id == met_id:
if hasattr(stoich, 'subs'):
coeff = float(stoich.subs(growth_symbol,flux_dict['biomass_dilution']))
else:
coeff = stoich
result_dict[rxn.id]['lb'] = rxn.lower_bound
result_dict[rxn.id]['ub'] = rxn.upper_bound
result_dict[rxn.id]['rxn_flux'] = flux_dict[rxn.id]
result_dict[rxn.id]['met_flux'] = flux_dict[rxn.id]*coeff
try: result_dict[rxn.id]['reaction'] = rxn.reaction
except: result_dict[rxn.id]['reaction'] = 'no_reaction'
break
df = pd.DataFrame.from_dict(result_dict).T
return df.loc[df['met_flux'].abs().sort_values(ascending=False).index]
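# Illustrative usage (a minimal sketch; the metabolite id is an example):
#   df = flux_based_reactions(me, 'atp_c', flux_dict=me.solution.x_dict)
#   df.head()   # reactions ranked by the absolute flux they carry through atp_c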
def generate_gene_field(me):
import cobra
current_gene_ids = [gene.id for gene in me.genes]
for met in me.metabolites:
met_id = met.id
if isinstance(met, cobrame.TranslatedGene):
gene_id = met_id.split('_')[1]
if gene_id and gene_id not in current_gene_ids:
try:
gene = cobra.Gene(gene_id)
me.genes.append(gene)
print(gene_id)
except:
pass
def solution_summary(me):
reactions = [rxn.id for rxn in me.reactions]
summary_df = pd.DataFrame(columns=['lb','ub','flux','formula'],index=reactions)
for rxn_id in tqdm(reactions):
rxn = me.reactions.get_by_id(rxn_id)
        summary_df.loc[rxn_id, 'lb'] = rxn.lower_bound
        summary_df.loc[rxn_id, 'ub'] = rxn.upper_bound
        summary_df.loc[rxn_id, 'flux'] = me.solution.x_dict[rxn_id]
        summary_df.loc[rxn_id, 'formula'] = rxn.reaction
return summary_df
def get_flux_for_escher(model,type='m'):
if type == 'm':
flux_dict = model.solution.x_dict
elif type == 'me':
        flux_dict = model.get_metabolic_flux(solution=model.solution)
return pd.DataFrame.from_dict({'flux':flux_dict})
def get_compartments_of_reaction(r):
return r.get_compartments()
def get_all_transport_of_model(model):
transport_reactions = []
for r in tqdm(model.reactions):
comps = get_compartments_of_reaction(r)
if len(comps) > 1:
transport_reactions.append(r.id)
return list(set(transport_reactions))
def get_transport_reactions(model,met_id,comps=['e','c']):
from_met = re.sub('_[a-z]$','_'+comps[0],met_id)
to_met = re.sub('_[a-z]$','_'+comps[1],met_id)
if isinstance(model,MEModel):
reaction_type = ['MetabolicReaction']
else:
reaction_type = 0
prod_rxns = [rxn.id for rxn in get_reactions_of_met(model,to_met,s=1,verbose=0,only_types=reaction_type)]
cons_rxns = [rxn.id for rxn in get_reactions_of_met(model,from_met,s=-1,verbose=0,only_types=reaction_type)]
transport_rxn_ids = list(set(prod_rxns)&set(cons_rxns))
transport_rxns = [model.reactions.get_by_id(rxn_id) for rxn_id in transport_rxn_ids]
return transport_rxns
def global_mass_balance(model):
exchange_df = exchange_single_model(model)
balance = 0
mass_dict = {}
for r_id in exchange_df.index:
r = model.reactions.get_by_id(r_id)
flux = model.solution.x_dict[r_id] # mmol/gDW h
mass = 0
for m in r.metabolites:
coeff = r.metabolites[m]/1000
weight = m.formula_weight # g/mmol
mass += coeff*flux*weight
balance += mass
mass_dict[r_id] = mass
return balance, pd.DataFrame.from_dict({'mass':mass_dict})
def get_gene_annotation(me,model):
gene_annotation = {}
for m in tqdm(me.metabolites):
if isinstance(m,cobrame.TranslatedGene):
gene_id = m.id.split('_')[-1]
if hasattr(model.genes,gene_id):
gene = model.genes.get_by_id(gene_id)
for r in gene.reactions:
gene_annotation[gene_id] = r.subsystem
else:
rxns = get_reactions_of_met(me,m.id,verbose=False)
for r in rxns:
if 'formation' in r.id:
active_complex = [i.id for i in r.products][0]
final_rxns = get_reactions_of_met(me,active_complex,verbose=False)
subsystem = list(set([i.id.split('_')[0] for i in final_rxns if active_complex not in i.id]))
if subsystem:
gene_annotation[gene_id] = subsystem[0]
break
return gene_annotation
def get_final_reactions_of_gene(me,gene_id):
rxns = get_reactions_of_met(me,'protein_'+gene_id,verbose=False)
final_rxns = []
if hasattr(me.reactions,'translocation_'+gene_id):
translocated_complex = [i.id for i in me.reactions.get_by_id('translocation_'+gene_id).products if 'Membrane' in i.id][0]
formation_rxn = get_reactions_of_met(me,translocated_complex,verbose=False,s=-1)[0]
else:
for r in rxns:
if 'formation' in r.id:
formation_rxn = r
break
active_complex = [i.id for i in formation_rxn.products if 'biomass' not in i.id][0]
final_rxns = get_reactions_of_met(me,active_complex,verbose=False)
return list(set(final_rxns) - set([formation_rxn]))
def get_met_production(model,met_list,flux_responses,x_var,only_types = [],plot=True):
if plot:
fig,ax = plt.subplots(int(np.ceil(np.sqrt(len(met_list)))),int(np.floor(np.sqrt(len(met_list)))),figsize=(13,3*int(np.ceil(np.sqrt(len(met_list))))))
if len(met_list) > 1:
ax = ax.flatten()
if not isinstance(flux_responses,list):
flux_responses = [flux_responses]
for idx,met_id in enumerate(tqdm(met_list,position=0,leave=True)):
met = model.metabolites.get_by_id(met_id)
if isinstance(met,cobrame.Metabolite) and not only_types:
only_types = ['MetabolicReaction','MEReaction']
for flux_df in flux_responses:
met_rate = []
uptake_rate = []
for case in flux_df.columns:
df = flux_based_reactions(model,met_id,flux_dict=flux_df[case].to_dict(),only_types=only_types)
met_rate.append(df[df.met_flux>0]['met_flux'].sum())
uptake_rate.append(flux_df.abs()[case][x_var])
yield met_id,met_rate,uptake_rate
if plot:
ax_i = ax[idx] if len(met_list) > 1 else ax
ax_i.plot(uptake_rate,met_rate,'-o')
ax_i.set_xlabel(x_var)
ax_i.set_ylabel('mmol {}/gDW/h'.format(met_id))
ax_i.set_title(met_id)
if plot: fig.tight_layout()
def get_compartment_transport(model,comps,MEmodel=False):
reactions = []
comps = set(comps)
for r in model.reactions:
if MEmodel and not isinstance(r,cobrame.MetabolicReaction):
continue
r_comps = set()
for m in r.metabolites:
r_comps.add(m.id[-1])
if len(r_comps & comps)>0 and len(r_comps) > 1:
reactions.append(r)
return reactions
def get_biomass_fractions(model,bof):
fractions = {}
fractions['protein'] = 0
fractions['rna'] = 0
fractions['dna'] = 0
fractions['lipid'] = 0
fractions['other'] = 0
aminoacid_exp = '^[a-z]{3}__L_c$'
dna_exp = '^d[a,t,c,g]tp_c$'
rna_exp = '^[a,t,c,g,u][t,m]p_c$'
lipid_exp = '^.*_BS_c$'
for m,coeff in bof.metabolites.items():
if coeff > 0 or 'h2o' in m.id: continue
if re.search(aminoacid_exp,m.id):
fractions['protein'] -= coeff*m.formula_weight
elif re.search(dna_exp,m.id):
fractions['dna'] -= coeff*m.formula_weight
elif re.search(rna_exp,m.id):
if m.id == 'atp_c':
coeff = coeff + bof.metabolites[model.metabolites.adp_c]
fractions['rna'] -= coeff*m.formula_weight
elif re.search(lipid_exp,m.id):
fractions['lipid'] -= coeff*m.formula_weight
else:
fractions['other'] -= coeff*m.formula_weight
return pd.DataFrame.from_dict({'fraction':fractions})
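# The regular expressions above classify biomass precursors by id, e.g. 'ala__L_c'
# as protein, 'datp_c' as DNA, 'ctp_c' as RNA and '*_BS_c' lipid species as lipid
# (example ids). The ATP coefficient is adjusted by the ADP coefficient, presumably
# so that only the ATP incorporated into RNA (not the energetic demand) is counted.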
```
#### File: lib/bacillusme/flat_files.py
```python
from __future__ import print_function, absolute_import, division
from collections import defaultdict
import json
from os.path import dirname, join, abspath
from warnings import warn
import cobra
import pandas
from six import iteritems
import cobrame
from bacillusme import corrections
ecoli_files_dir = join(dirname(abspath(__file__)), 'building_data/')
del dirname, abspath
def fixpath(filename):
return join(ecoli_files_dir, filename)
def fix_id(id_str):
return id_str.replace("_DASH_", "__")
def get_tu_dataframe(filename):
tu_df = pandas.read_csv(join(ecoli_files_dir, filename), delimiter="\t",
index_col=0)
tu_df = corrections.correct_tu_dataframe(tu_df)
return tu_df
def get_complex_subunit_stoichiometry(complex_stoichiometry_file,
rna_components=set()):
"""Returns dictionary of complex: {stoichiometry: {bnumber: stoichiometry}}
some entries in the file need to be renamed.
Colton 7/8/15 made changes directly to flat file
renames = {"MnmE_": "b3706", "MnmG_": "b3741", "YheM_": "b3344",
"YheL_": "b3343", "YheN_": "b3345"}
"""
complex_stoichiometry = \
pandas.read_table(fixpath(complex_stoichiometry_file),
names=['Complex', 'Name', 'Stoichiometry',
'Source']).set_index('Complex')
complex_stoichiometry_dict = {}
for key, row in complex_stoichiometry.iterrows():
if key.startswith('#'):
continue
if key in complex_stoichiometry_dict.keys():
warn('Complex (%s) in complex_stoichiometry_file twice' % key)
else:
complex_stoichiometry_dict[key] = {}
for bnums in row['Stoichiometry'].split(' AND '):
bnum, num = bnums.rstrip(')').split('(')
## Sometimes it returned num = '1)'
# num = num[0]
stoichiometry = float(num) if not num == '' else 1.
prefix = 'protein_' if bnum not in rna_components else 'RNA_'
complex_stoichiometry_dict[key][prefix + bnum] = stoichiometry
complex_stoichiometry_dict = \
corrections.correct_complex_stoichiometry(complex_stoichiometry_dict)
return complex_stoichiometry_dict
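# Worked example of the 'Stoichiometry' parsing above; the identifiers are
# hypothetical placeholders, not entries from the flat file.
def _demo_parse_stoichiometry(stoich_cell, rna_components=set()):
    parsed = {}
    for bnums in stoich_cell.split(' AND '):
        bnum, num = bnums.rstrip(')').split('(')
        stoichiometry = float(num) if not num == '' else 1.
        prefix = 'protein_' if bnum not in rna_components else 'RNA_'
        parsed[prefix + bnum] = stoichiometry
    return parsed
# _demo_parse_stoichiometry('BSU01050(2) AND BSU01060()')
# -> {'protein_BSU01050': 2.0, 'protein_BSU01060': 1.0}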
def get_complex_modifications(complex_modification_file, protein_complex_file):
"""
Reads from protein_complexes.txt and protein_modification.txt
"""
complex_mods = pandas.read_table(fixpath(complex_modification_file))
complex_mods = complex_mods.set_index('Modified_enzyme')
complex_set = \
set(get_complex_subunit_stoichiometry(protein_complex_file).keys())
# ignore complexes which are produced in the reaction matrix
rxn_dict = get_reaction_matrix_dict('reaction_matrix.txt',
complex_set=complex_set)
ignored_complexes = set()
for met_stoich in rxn_dict.values():
for met, value in iteritems(met_stoich):
if 'mod_c' not in met:
ignored_complexes.add(met.replace('_c', ''))
else:
ignored_complexes.add(met)
# don't ignore these. They are included in the reaction matrix but still
# must be formed via a complex formation reaction
# TODO look into this list closer
# ignored_complexes.remove('CPLX0-782_mod_2:4fe4s')
new_mod_dict = {}
for key, value in iteritems(complex_mods.T.to_dict()):
if key.startswith('#') or key in ignored_complexes:
continue
key = key.replace('_DASH_', '__')
new_mod_dict[key] = {}
new_mod_dict[key]['core_enzyme'] = value['Core_enzyme']
new_mod_dict[key]['modifications'] = {}
for mods in value['Modifications'].split(' AND '):
mod, num_mods = mods.rstrip(')').split('(')
if num_mods == '':
num_mods = 1.
else:
num_mods = float(num_mods)
mod = mod.replace('_DASH_', '__')
new_mod_dict[key]['modifications'][mod + '_c'] = -num_mods
new_mod_dict = corrections.correct_complex_modification_dict(new_mod_dict)
return new_mod_dict
def get_reaction_to_complex(m_model, modifications=True):
"""anything not in this dict is assumed to be an orphan"""
rxn_to_complex_dict = defaultdict(set)
# Load enzyme reaction association dataframe
df = pandas.read_csv(fixpath('enzyme_reaction_association.txt'),
delimiter='\t', names=['Reaction', 'Complexes'])
# Fix legacy naming
df = df.applymap(lambda x: x.replace('DASH', ''))
df = df.set_index('Reaction')
df = corrections.correct_enzyme_reaction_association_frame(df)
for reaction, complexes in df.itertuples():
for cplx in complexes.split(' OR '):
if modifications:
rxn_to_complex_dict[reaction].add(cplx)
else:
rxn_to_complex_dict[reaction].add(cplx.split('_mod_')[0])
#for reaction in m_model.reactions:
# if "s0001" in reaction.gene_reaction_rule:
# rxn_to_complex_dict[reaction.id].add(None)
return rxn_to_complex_dict
def get_reaction_matrix_dict(reaction_matrix_file, complex_set=set()):
"""Return dictionary representation of the metabolic reaction matrix.
Updates metabolite id with compartment if not contained in complex_list
"""
matrix_df = pandas.read_csv(fixpath(reaction_matrix_file), delimiter='\t',
names=['Reaction', 'Metabolites',
'Compartment', 'Stoichiometry'])
matrix_df.replace({'No_Compartment': 'Cytosol'}, inplace=True)
compartments = {'Cytosol': 'c', 'Periplasm': 'p', 'Extra-organism': 'e'}
metabolic_reaction_dict = defaultdict(dict)
for i, row in matrix_df.iterrows():
reaction = fix_id(row['Reaction'])
metabolite = fix_id(row['Metabolites'])
# erpA is annotated incorrectly
# metabolite = metabolite.replace('CPLX-7524_mod_mn2', 'CPLX0-7617')
stoichiometry = row['Stoichiometry']
compartment_id = '_%s' % compartments.get(row['Compartment'])
# use compartment to append appropriate suffix
if metabolite.split('_mod_')[0] not in complex_set:
metabolite += compartment_id
metabolic_reaction_dict[reaction][metabolite] = float(stoichiometry)
metabolic_reaction_dict = \
corrections.correct_reaction_matrix(metabolic_reaction_dict)
return metabolic_reaction_dict
def get_reaction_info_frame(reaction_info_file):
df = pandas.read_csv(fixpath(reaction_info_file), delimiter="\t",
index_col=0)
df = corrections.correct_reaction_info_frame(df)
return df
def remove_compartment(id_str):
return id_str.replace('_c', '').replace('_p', '').replace('_e', '')
def process_m_model(m_model, metabolites_file, m_to_me_map_file,
reaction_info_file, reaction_matrix_file,
protein_complex_file, defer_to_rxn_matrix=set()):
m_model = m_model.copy()
met_info = pandas.read_csv(fixpath(metabolites_file), delimiter="\t",
header=None, index_col=0,
names=["id", "name", "formula", "compartment",
"data_source"])
met_info.rename(lambda x: x.replace('_DASH_', '__'), inplace=True)
complex_set = \
set(get_complex_subunit_stoichiometry(protein_complex_file).keys())
rxn_info = get_reaction_info_frame(reaction_info_file)
reaction_matrix_dict = get_reaction_matrix_dict(reaction_matrix_file,
complex_set=complex_set)
m_to_me_df = pandas.read_csv(fixpath(m_to_me_map_file), index_col=0,
names=['m_name', 'me_name'])
for rxn in list(m_model.reactions):
if rxn.id.startswith('EX_') or rxn.id.startswith('DM_'):
continue
if rxn.id not in reaction_matrix_dict.keys() \
or rxn.id in defer_to_rxn_matrix:
rxn.remove_from_model(remove_orphans=True)
for rxn_id in reaction_matrix_dict:
if rxn_id in m_model.reactions:
continue
rxn_stoichiometry = reaction_matrix_dict[rxn_id]
for met in rxn_stoichiometry:
try:
met_obj = m_model.metabolites.get_by_id(met)
except KeyError:
met_obj = cobrame.Metabolite(str(met))
m_model.add_metabolites([met_obj])
met_id = remove_compartment(met_obj.id)
if met_id in met_info.index and not met_obj.formula:
met_obj.formula = met_info.loc[met_id, 'formula']
met_obj.name = met_info.loc[met_id, 'name']
rxn = cobrame.MEReaction(rxn_id)
m_model.add_reactions([rxn])
rxn.add_metabolites(rxn_stoichiometry)
reversible = rxn_info.loc[rxn_id, 'is_reversible']
rxn.lower_bound = -1000 if reversible else 0
m_to_me_dict = m_to_me_df.to_dict()
for met in list(m_model.metabolites):
met_id = met.id
if met_id in m_to_me_df.index:
new_met_id = m_to_me_df.loc[met.id, 'me_name']
if not new_met_id == 'eliminate':
print(new_met_id)
met.id = new_met_id
else:
for reaction in met.reactions:
reaction.remove_from_model()
met.remove_from_model()
# Add formulas not included in metabolites.txt
corrections.update_metabolite_formulas(m_model)
m_model.repair()
return m_model
def get_m_model():
m = cobra.Model("e_coli_ME_M_portion")
m.compartments = {"p": "Periplasm", "e": "Extra-organism", "c": "Cytosol"}
compartment_lookup = {v: k for k, v in iteritems(m.compartments)}
met_info = pandas.read_csv(join(ecoli_files_dir, "metabolites.txt"),
delimiter="\t", header=None, index_col=0,
names=["id", "name", "formula", "compartment",
"data_source"])
complex_set = \
set(get_complex_subunit_stoichiometry("protein_complexes.txt").keys())
for met_id in met_info.index:
fixed_id = fix_id(met_id)
for compartment in met_info.compartment[met_id].split("AND"):
compartment = compartment.strip()
if compartment == "No_Compartment":
print("Assigned %s to c" % met_id)
compartment = m.compartments["c"]
new_met = cobra.Metabolite(
fixed_id + "_" + compartment_lookup[compartment])
new_met.name = met_info.name[met_id]
new_met.formula = met_info.formula[met_id]
m.add_metabolites(new_met)
rxn_info = get_reaction_info_frame('reactions.txt')
rxn_dict = get_reaction_matrix_dict('reaction_matrix.txt',
complex_set=complex_set)
for rxn_id in rxn_info.index:
reaction = cobrame.MEReaction(rxn_id)
reaction.name = rxn_info.description[rxn_id]
for met_id, amount in iteritems(rxn_dict[rxn_id]):
try:
metabolite = m.metabolites.get_by_id(met_id)
except KeyError:
metabolite = cobra.Metabolite(met_id)
reaction.add_metabolites({metabolite: amount})
reaction.lower_bound = \
-1000. if rxn_info.is_reversible[rxn_id] else 0.
reaction.upper_bound = 1000.
if rxn_info.is_spontaneous[rxn_id]:
reaction.gene_reaction_rule = "s0001"
m.add_reaction(reaction)
sources_sinks = pandas.read_csv(
fixpath("reaction_matrix_sources_and_sinks.txt"),
delimiter="\t", header=None, names=["rxn_id", "met_id", "compartment",
"stoic"], index_col=1)
source_amounts = pandas.read_csv(join(ecoli_files_dir,
"exchange_bounds.txt"),
delimiter="\t", index_col=0,
names=["met_id", "amount"])
sources_sinks.index = [fix_id(i) for i in sources_sinks.index]
source_amounts.index = [fix_id(i) for i in source_amounts.index]
for met in sources_sinks.index:
met_id = met + "_" + compartment_lookup[sources_sinks.compartment[met]]
# EX_ or DM_ + met_id
reaction_id = sources_sinks.rxn_id[met][:3] + met_id
reaction = cobrame.MEReaction(reaction_id)
m.add_reaction(reaction)
reaction.add_metabolites({m.metabolites.get_by_id(met_id): -1})
# set bounds on exchanges
if reaction.id.startswith("EX_") and met in source_amounts.index:
reaction.lower_bound = -source_amounts.amount[met]
# Add formulas not included in metabolites.txt
corrections.update_metabolite_formulas(m)
return m
def get_trna_modification_targets():
trna_mod_dict = defaultdict(dict)
filename = fixpath('post_transcriptional_modification_of_tRNA.txt')
trna_mod = pandas.read_csv(filename, delimiter='\t')
for mod in trna_mod.iterrows():
mod = mod[1]
mod_loc = '%s_at_%s' % (mod['modification'], mod['position'])
trna_mod_dict[mod['bnum']][mod_loc] = 1
return trna_mod_dict
def get_reaction_keffs(me, verbose=True):
def log(*args, **kwargs):
if verbose:
print(*args, **kwargs)
with open(fixpath('keffs.json'), 'r') as infile:
keffs = json.load(infile)
new_keffs = {}
for r in me.reactions:
# skip spontaneous reactions
if getattr(r, "complex_data", None) is None:
continue
if isinstance(r, cobrame.MetabolicReaction) and \
r.complex_data.id != "CPLX_dummy":
met_rxn = r
key = met_rxn.id.replace("-", "_DASH_").replace(
"__", "_DASH_").replace(":", "_COLON_")
# specific patches for PGK, TPI ids
key = key.replace('TPI_DASH_CPLX', 'TPI')
key = key.replace('PGK_DASH_CPLX', 'PGK')
# key = met_rxn.id
key = "keff_" + key.replace("_FWD_", "_").replace("_REV_", "_")
matches = [i for i in keffs if key in i]
# get the direction
if met_rxn.reverse:
matches = [i for i in matches
if i.endswith("_reverse_priming_keff")]
else:
matches = [i for i in matches
if i.endswith("_forward_priming_keff")]
if len(matches) == 1:
new_keffs[met_rxn.id] = keffs[matches[0]]
elif len(matches) > 0:
                if len(matches) == len([i for i in matches if key + "_mod_" in i]):
new_keffs[met_rxn.id] = keffs[matches[0]]
else:
log(key, len(matches))
else: # len(matches) == 0
log("no keff found for " + key)
return new_keffs
def get_m_to_me_metabolite_mapping():
"""returns a mapping from m metabolites to me metabolites"""
f = pandas.read_csv(fixpath("m_to_me_mets.csv"), index_col=0)["me_name"]
return f.dropna().to_dict()
def get_replace_function(source):
def fix_columns(x):
return x.replace(source, '').replace('C', '')
return fix_columns
def get_dill_keq_df():
"""returns the dill length-based approximation of protein folding keqs"""
df = pandas.read_csv(fixpath('Dill_dG_matrix.csv'))
dill = df.rename(columns=get_replace_function('Dill_Keq_'))
return dill.set_index('genome_region')
def get_oobatake_keq_df():
"""returns the Oobatake prediction protein folding keqs"""
df = pandas.read_csv(fixpath('Oobatake_Keq_matrix.csv'))
oobatake = df.rename(columns=get_replace_function('Oobatake_Keq_'))
return oobatake.set_index('genome_region')
def get_folding_rates_df():
"""returns the Oobatake prediction protein folding keqs"""
df = pandas.read_csv(fixpath('Folding_Rates_matrix_slope22000.csv'))
folding_rates = df.rename(columns=get_replace_function('k_f_'))
return folding_rates.set_index('genome_region')
def get_aggregation_popensity_df():
"""returns the Oobatake prediction protein folding keqs"""
df = pandas.read_csv(fixpath('DnaK_reactions_parameters_5.csv'))
return df.set_index('gene')
```
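A short sketch of how the loaders above fit together when building the metabolic (M) portion of the model; it only assumes the flat files shipped in `building_data/` are in place.
```python
from bacillusme import flat_files

# Build the M-model directly from the flat files
m_model = flat_files.get_m_model()
print(len(m_model.reactions), 'reactions,', len(m_model.metabolites), 'metabolites')

# Complex composition and enzyme-reaction associations used later in the ME build
complex_stoich = flat_files.get_complex_subunit_stoichiometry('protein_complexes.txt')
rxn_to_cplx = flat_files.get_reaction_to_complex(m_model)
print(len(complex_stoich), 'complexes;', len(rxn_to_cplx), 'reactions with an associated enzyme')
```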
#### File: lib/ecolime/compartments.py
```python
import cobrame
from ecolime.util.helper_functions import get_base_complex_data
from collections import defaultdict
def _return_compartments_of_complexes(model, cplx):
try:
data = model.process_data.get_by_id(cplx.id)
except KeyError:
data = get_base_complex_data(model, cplx.id)
mem_dict = defaultdict(int)
for s in data.stoichiometry:
if '_Inner_Membrane' in s:
mem_dict['im'] += 1
elif '_Outer_Membrane' in s:
mem_dict['om'] += 1
elif '_Periplasm' in s:
mem_dict['p'] += 1
# if no membrane associated with membrane subunit, assume complex is
# cytosolic
if len(mem_dict) == 0:
return 'c'
# if only one membrane is represented in protein subunits, use this
# membrane for the compartment
elif len(mem_dict) == 1:
return mem_dict.popitem()[0]
    # if multiple membrane compartments are represented, use generic "m" for
# "membrane" for now
else:
return 'm'
def add_compartments_to_model(model):
"""Firsts adds compartments based on suffix of metabolite ID. If metabolite
is a complex, the protein subunit stoichiometry is used to infer
compartment. All remaining metabolites without a compartment suffix (RNAs,
generic metabolites, nonmembrane proteins, etc.) are assumed to be
cytosolic"""
for m in model.metabolites:
if m.compartment:
continue
if isinstance(m, cobrame.Constraint):
m.compartment = 'mc'
elif '_Inner_Membrane' in m.id:
m.compartment = 'im'
elif '_Outer_Membrane' in m.id:
m.compartment = 'om'
elif '_Periplasm' in m.id or m.id.endswith('_p'):
m.compartment = 'p'
elif m.id.endswith('_e'):
m.compartment = 'e'
elif m.id.endswith('_c'):
m.compartment = 'c'
elif isinstance(m, cobrame.Complex):
m.compartment = _return_compartments_of_complexes(model, m)
else:
m.compartment = 'c'
```
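A minimal sketch of applying the compartment assignment and tallying the result. The pickle path is a placeholder; any previously built cobrame ME-model object works.
```python
import pickle
from collections import Counter
from ecolime.compartments import add_compartments_to_model

with open('me_model.pickle', 'rb') as f:  # placeholder path to a built ME-model
    me_model = pickle.load(f)

add_compartments_to_model(me_model)

# Count metabolites per assigned compartment ('c', 'p', 'e', 'im', 'om', 'm', 'mc')
print(Counter(m.compartment for m in me_model.metabolites))
```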
#### File: lib/ecolime/formulas.py
```python
from __future__ import print_function, absolute_import, division
from collections import Counter
import cobrame
from cobrame.util import massbalance
def get_remaining_complex_elements(model, complex, modification_formulas):
tmp_met = cobrame.Metabolite('tmp_met')
mets = model.metabolites
components = complex.id.split('_mod_')
base_complex = components[0]
elements = Counter()
    # If the completely unmodified base complex is present in the model and
    # has a formula, initialize the elements dictionary with it
if base_complex in mets and mets.get_by_id(base_complex).formula:
elements.update(mets.get_by_id(base_complex).elements)
for component in components[1:]:
new_elements = elements.copy()
new_complex = '_mod_'.join([base_complex, component])
if new_complex in mets and mets.get_by_id(new_complex).formula:
# default to new_complex elements if both new and old exist
if base_complex in mets and mets.get_by_id(base_complex).formula:
new_elements = Counter()
formula = mets.get_by_id(new_complex).formula
tmp_met.formula = formula
new_elements.update(tmp_met.elements)
# Net effect of an SH modification is adding a Sulfur to elements
elif ':SH' in component:
new_elements['S'] += 1
# modifies O- to SH
elif component == 'cosh':
new_elements['O'] -= 1
new_elements['S'] += 1
new_elements['H'] += 1
elif component in modification_formulas:
formula = modification_formulas[component]
tmp_met.formula = formula
new_elements.update(tmp_met.elements)
elif ':' in component:
value, component = component.split(':')
if component in modification_formulas:
formula = modification_formulas[component]['formula']
elif component + '_c' in mets:
formula = mets.get_by_id(component + '_c').formula
else:
raise UserWarning('No formula found for modification (%s)'
% component)
tmp_met.formula = formula
for e, v in tmp_met.elements.items():
new_elements[e] += v * float(value)
elif 'Oxidized' in component and 'FLAVODOXIN' not in base_complex:
new_elements.update({'H': -2})
if elements == new_elements and 'FLAVODOXIN' not in base_complex:
print(complex.id, base_complex, component)
base_complex = '_mod_'.join([base_complex, component])
elements = new_elements.copy()
return elements
def add_remaining_complex_formulas(model, modification_formulas):
"""
Add formula to complexes that are not formed from a complex formation
reaction (ie. complexes involved in metabolic reactions)
"""
element_dict = {}
# Reset all formulas first
complex_list = []
for c in model.metabolites:
# If not complex or formed by complex formation reaction, do not reset
if not isinstance(c, cobrame.Complex) or c.id in model.process_data:
continue
for r in c.reactions:
if hasattr(r, 'update'):
r.update()
c.formula = ''
c.elements = {}
complex_list.append(c)
# Get formulas only for complexes without complex formation reaction
for c in complex_list:
element_dict[c] = get_remaining_complex_elements(model, c,
modification_formulas)
# Adding elements for complexes dynamically can change function output
# Update all of them after
for c, elements in element_dict.items():
massbalance.elements_to_formula(c, elements)
```
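A sketch of calling `add_remaining_complex_formulas` on a finished model. Note that the lookup above expects two different value formats depending on the branch: a plain formula string for bare components, and a `{'formula': ...}` dict for components written as `count:component`. The entries and the pickle path below are illustrative placeholders.
```python
import pickle
import cobrame
from ecolime import formulas

with open('me_model.pickle', 'rb') as f:  # placeholder path to a built ME-model
    me_model = pickle.load(f)

modification_formulas = {
    'lipo': 'C8H13NOS2',             # plain-string form (placeholder values)
    'acetyl': {'formula': 'C2H3O'},  # dict form, used for 'count:component' entries
}
formulas.add_remaining_complex_formulas(me_model, modification_formulas)

# Afterwards every complex used only in metabolic reactions should carry a formula
missing = [m.id for m in me_model.metabolites
           if isinstance(m, cobrame.Complex) and not m.formula]
print(len(missing), 'complexes still without a formula')
```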
#### File: lib/ecolime/trna_charging.py
```python
from __future__ import print_function, absolute_import, division
from six import iteritems
import cobrame
from ecolime.corrections import correct_trna_modifications
amino_acid_trna_synthetase = {
"cys__L_c": "CysS_mono_mod_1:zn2",
"leu__L_c": "LeuS_mono",
"lys__L_c": "generic_LYSINEaaRS",
"asp__L_c": "Asp_RS_dim",
"phe__L_c": "Phe_RS_tetra_mod_mg2",
"his__L_c": "His_RS_dim_mod_4:mg2",
"asn__L_c": "Asn_RS_dim",
"pro__L_c": "Pro_RS_dim",
"ala__L_c": "Ala_RS_tetra_mod_4:zn2",
"ile__L_c": "IleS_mono_mod_2:zn2",
"ser__L_c": "Ser_RS_dim_mod_mg2",
"arg__L_c": "ArgS_mono_mod_mg2",
"met__L_c": "Met_RS_dim_mod_2:zn2",
"tyr__L_c": "Tyr_RS_dim",
"glu__L_c": "GltX_mono_mod_mg2_mod_1:zn2",
"thr__L_c": "Thr_RS_dim_mod_zn2",
"val__L_c": "ValS_mono_mod_mg2",
"gly_c": "Gly_RS_tetra",
"trp__L_c": "Trp_RS_dim_mod_mg2",
"gln__L_c": "GlnS_mono"
}
trna_modification = {'D_at_20A': {'machines': ['generic_Dus'],
'metabolites': {'nadph_c': -1,
'h_c': -1,
'nadp_c': 1}},
'D_at_20': {'machines': ['generic_Dus'],
'metabolites': {'nadph_c': -1,
'h_c': -1,
'nadp_c': 1}},
't6A_at_37': {'machines': ['YrdC_mono'],
'metabolites': {'hco3_c': -1,
'thr__L_c': -1,
'atp_c': -1,
'amp_c': 1,
'h_c': 1,
'h2o_c': 1,
'ppi_c': 1}},
'm7G_at_46': {'machines': ['YggH_mono'],
'metabolites': {'amet_c': -1,
'ahcys_c': 1,
'h_c': 1}},
'acp3U_at_47': {'machines': [],
'metabolites': {'amet_c': -1,
'5mta_c': 1,
'h_c': 1}},
'm5U_at_54': {'machines': ['TrmA_mono'],
'metabolites': {'amet_c': -1,
'ahcys_c': 1,
'h_c': 1}},
'Y_at_55': {'machines': ['TruB_mono'],
'metabolites': {}},
'Y_at_65': {'machines': ['YqcB_mono'],
'metabolites': {}},
'D_at_17': {'machines': ['generic_Dus'],
'metabolites': {'nadph_c': -1,
'h_c': -1,
'nadp_c': 1}},
'cmo5U_at_34': {'machines': ['YecO_mono', 'YecP_mono'],
'metabolites': {'amet_c': -1,
'chor_c': -2,
'ahcys_c': 1,
'h_c': 1,
'C10H8O5_c': 1,
'C9H9O4_c': 1}},
'D_at_16': {'machines': ['generic_Dus'],
'metabolites': {'nadph_c': -1,
'h_c': -1,
'nadp_c': 1}},
'Q_at_34': {
'machines': ['Tgt_hexa_mod_6:zn2', 'QueA_mono',
'QueG_mono_mod_adocbl'],
'metabolites': {'preq1_c': -1,
'amet_c': -1,
'gua_c': 1,
'ade_c': 1,
'met__L_c': 1,
'h_c': 2}},
'm2A_at_37': {'machines': [],
'metabolites': {'amet_c': -1,
'ahcys_c': 1,
'h_c': 1}},
's4U_at_8': {'machines': ['ThiI_mono'],
'carriers': {'trdrd_c': -1,
'trdox_c': 1,
'IscS_mod_2:pydx5p_mod_1:SH':
-1,
'IscS_mod_2:pydx5p': 1},
'metabolites': {'atp_c': -1,
'amp_c': 1,
'ppi_c': 1,
'h_c': 1}},
'm6t6A_at_37': {'machines': ['YrdC_mono'],
'metabolites': {'amet_c': -1,
'atp_c': -1,
'hco3_c': -1,
'thr__L_c': -1,
'ahcys_c': 1,
'amp_c': 1,
'h_c': 2,
'h2o_c': 1,
'ppi_c': 1}},
's2C_at_32': {'machines': ['YdaO_mono'],
'carriers': {'trdrd_c': -1,
'trdox_c': 1,
'IscS_mod_2:pydx5p_mod_1:SH':
-1,
'IscS_mod_2:pydx5p': 1},
'metabolites': {'atp_c': -1,
'amp_c': 1,
'ppi_c': 1,
'h_c': 1}},
'mnm5U_at_34': {'machines': ['MnmEG_cplx_mod_fad_mod_2:k',
'MnmC_mono_mod_fad'],
'metabolites': {'gtp_c': -1,
'h2o_c': -1,
'5fthf_c': -1,
'gly_c': -1,
'amet_c': -1,
'ahcys_c': 1,
'h_c': 3,
'gdp_c': 1,
'glx_c': 1,
'pi_c': 1,
'thf_c': 1}},
'Y_at_40': {'machines': ['TruA_dim'],
'metabolites': {}},
'Gm_at_18': {'machines': ['TrmH_dim'],
'metabolites': {'amet_c': -1,
'ahcys_c': 1,
'h_c': 1}},
'Um_at_32': {'machines': [],
'metabolites': {'amet_c': -1,
'ahcys_c': 1,
'h_c': 1}},
'Y_at_38': {'machines': ['TruA_dim'],
'metabolites': {}},
'ac4C_at_34': {'machines': ['TmcA_mono'],
'metabolites': {'accoa_c': -1,
'coa_c': 1}},
'Y_at_39': {'machines': ['TruA_dim'],
'metabolites': {}},
# YhhP, YheLMN, YccK involved in sulfur transferase
# activity. TrmU catalyzes the addition of sulfur to
# uridine
'mnm5s2U_at_34': {'machines': [
'TrmU_mono', 'YhhP_mono',
'YheLMN_cplx', 'YccK_mono',
'MnmEG_cplx_mod_fad_mod_2:k',
'MnmC_mono_mod_fad'],
'carriers': {
'IscS_mod_2:pydx5p_mod_1:SH': -1,
'trdrd_c': -1,
'IscS_mod_2:pydx5p': 1,
'trdox_c': 1},
'metabolites': {'atp_c': -1,
'gtp_c': -1,
'h2o_c': -1,
'5fthf_c': -1,
'gly_c': -1,
'amet_c': -1,
'gdp_c': 1,
'pi_c': 1,
'h_c': 4,
'thf_c': 1,
'glx_c': 1,
'ahcys_c': 1,
'amp_c': 1,
'ppi_c': 1}},
'm6A_at_37': {'machines': ['YfiC_mono'],
'metabolites': {'amet_c': -1,
'ahcys_c': 1,
'h_c': 1}},
'Cm_at_32': {'machines': ['TrmJ_dim'],
'metabolites': {'amet_c': -1,
'ahcys_c': 1,
'h_c': 1}},
'ms2i6A_at_37': {'machines': ['MiaA_dim_mod_2:mg2',
'MiaB_mono_mod_1:4fe4s'],
'carriers': {
'IscS_mod_2:pydx5p_mod_1:SH': -1,
'IscS_mod_2:pydx5p': 1,
'fldrd_c': -1,
'fldox_c': 1, },
'metabolites': {'dmpp_c': -1,
'amet_c': -2,
'ppi_c': 1,
'ahcys_c': 1,
'h_c': 2,
'met__L_c': 1,
'dad__5_c': 1,
}},
'Y_at_32': {'machines': ['RluA_mono'],
'metabolites': {}},
'D_at_21': {'machines': ['generic_Dus'],
'metabolites': {'nadph_c': -1,
'h_c': -1,
'nadp_c': 1}},
'm1G_at_37': {'machines': ['TrmD_dim'],
'metabolites': {'amet_c': -1,
'ahcys_c': 1,
'h_c': 1}},
'Y_at_13': {'machines': ['TruD_mono'],
'metabolites': {}},
'k2C_at_34': {'machines': ['TilS_mono'],
'metabolites': {'atp_c': -1,
'lys__L_c': -1,
'ppi_c': 1,
'amp_c': 1,
'h_c': 2}},
'I_at_34': {'machines': ['TadA_dim_mod_2:zn2'],
'metabolites': {'h2o_c': -1, 'h_c': -1,
'nh4_c': 1}},
'i6A_at_37': {'machines': ['MiaA_dim_mod_2:mg2'],
'metabolites': {'dmpp_c': -1,
'ppi_c': 1}},
'D_at_20_in_met_tRNA': {'machines': ['DusA_mono'],
'metabolites': {'nadph_c': -1,
'h_c': -1,
'nadp_c': 1}},
'D_at_16_in_met_tRNA': {'machines': ['DusA_mono'],
'metabolites': {'nadph_c': -1,
'h_c': -1,
'nadp_c': 1}},
'D_at_17_in_met_tRNA': {'machines': ['DusA_mono'],
'metabolites': {'nadph_c': -1,
'h_c': -1,
'nadp_c': 1}},
'D_at_20A_in_met_tRNA': {'machines': ['DusA_mono'],
'metabolites': {'nadph_c': -1,
'h_c': -1,
'nadp_c': 1}}
}
modification_info = {'D': {'elements': {'H': 2}, 'charge': 0},
'i6A': {'elements': {'C': 5, 'H': 8}, 'charge': 0},
'I': {'elements': {'N': -1, 'H': -1, 'O': 1},
'charge': 0},
'k2C': {'elements': {'O': 1, 'N': 2, 'H': 12, 'C': 6},
'charge': 0},
'Y': {'elements': {}, 'charge': 0},
'm1G': {'elements': {'H': 2, 'C': 1}, 'charge': 0},
'ms2i6A': {'elements': {'C': 6, 'H': 10, 'S': 1},
'charge': 0},
'Cm': {'elements': {'H': 2, 'C': 1}, 'charge': 0},
'Um': {'elements': {'H': 2, 'C': 1}, 'charge': 0},
'm6A': {'elements': {'H': 2, 'C': 1}, 'charge': 0},
'mnm5s2U': {'elements': {'C': 2, 'H': 5, 'N': 1, 'O': -1,
'S': 1},
'charge': 0},
'ac4C': {'elements': {'H': 2, 'C': 2, 'O': 1},
'charge': 0},
'Gm': {'elements': {'H': 2, 'C': 1}, 'charge': 0},
'mnm5U': {'elements': {'C': 2, 'H': 5, 'N': 1},
'charge': 0},
's2C': {'elements': {'O': -1, 'S': 1}, 'charge': 0},
'm6t6A': {'elements': {'C': 6, 'O': 4, 'N': 1, 'H': 9},
'charge': 0},
's4U': {'elements': {'O': -1, 'S': 1}, 'charge': 0},
'm2A': {'elements': {'H': 2, 'C': 1}, 'charge': 0},
'Q': {'elements': {'C': 7, 'O': 2, 'H': 11}, 'charge': 1},
'cmo5U': {'elements': {'C': 2, 'O': 3, 'H': 2},
'charge': 0},
'm5U': {'elements': {'C': 1, 'H': 2}, 'charge': 0},
'acp3U': {'elements': {'C': 4, 'H': 7, 'N': 1, 'O': 2},
'charge': 0},
'm7G': {'elements': {'C': 1, 'H': 2}, 'charge': 0},
't6A': {'elements': {'C': 5, 'N': 1, 'O': 4, 'H': 6},
'charge': 0}
}
def add_trna_modification_procedures(model):
modifications = trna_modification.copy()
modifications = correct_trna_modifications(modifications)
for mod, components in iteritems(modifications):
trna_mod = cobrame.SubreactionData(mod, model)
trna_mod.enzyme = components['machines']
trna_mod.stoichiometry = components['metabolites']
trna_mod.keff = 65. # iOL uses 65 for all tRNA mods
if 'carriers' in components.keys():
for carrier, stoich in components['carriers'].items():
if stoich < 0:
trna_mod.enzyme += [carrier]
trna_mod.stoichiometry[carrier] = stoich
# Add element contribution from modification to tRNA
trna_mod._element_contribution = \
modification_info[mod.split('_')[0]]['elements']
return modifications
```
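A small sketch of wiring the tRNA modification data into a model and inspecting one of the resulting SubreactionData objects; the pickle path is a placeholder for any built cobrame ME-model.
```python
import pickle
from ecolime import trna_charging

with open('me_model.pickle', 'rb') as f:  # placeholder path
    me_model = pickle.load(f)

# One SubreactionData object is created per entry in trna_modification above
trna_charging.add_trna_modification_procedures(me_model)

# Inspect the pseudouridine modification at position 55
data = me_model.process_data.get_by_id('Y_at_55')
print(data.enzyme)         # e.g. ['TruB_mono'] per the table above
print(data.stoichiometry)  # {} -- an isomerization, no net metabolites
print(data.keff)           # 65.0
```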
|
{
"source": "jdtibochab/cobra_utils",
"score": 3
}
|
#### File: cobra_utils/query/rxn_info.py
```python
from __future__ import absolute_import
import pandas as pd
import cobra
from cobra_utils.query.met_info import classify_metabolites_by_type
def rxn_info_from_metabolites(model, metabolites, verbose=True):
'''
    This function looks for all the reactions in which the metabolites in the list participate. It also retrieves the genes
    associated with those reactions.
Parameters
----------
model : cobra.core.Model.Model
A cobra model.
metabolites : array-like
An iterable object containing a list of metabolite ids present in the model.
verbose : boolean, True by default.
A variable to enable or disable the printings of this function.
Returns
-------
rxn_gene_association : pandas.DataFrame
A pandas dataframe containing the information retrieved. The columns are :
'MetName', 'MetID', 'RxnID', 'RxnName', 'GeneID', 'Subsystem', 'RxnFormula'
'''
if verbose:
print('Using list of metabolites to get reactions where they participate. Also, getting genes of those reactions.')
rxn_gene_association = []
for metabolite in metabolites:
met = model.metabolites.get_by_id(metabolite)
for rxn in met.reactions:
if len(rxn.genes) != 0:
for gene in rxn.genes:
rxn_gene_association.append(
(rxn.id, rxn.name, str(gene.id), rxn.subsystem, rxn.reaction, met.id, met.name))
else:
rxn_gene_association.append(
(rxn.id, rxn.name, '', rxn.subsystem, rxn.reaction, met.id, met.name))
labels = ['RxnID', 'RxnName', 'GeneID', 'Subsystem', 'RxnFormula', 'MetID', 'MetName']
rxn_gene_association = pd.DataFrame.from_records(rxn_gene_association, columns=labels)
if verbose:
print('Information correctly obtained.')
return rxn_gene_association
def rxn_info_from_reactions(model, reactions, verbose=True):
'''
    This function retrieves the information and associated genes for a list of reaction ids.
Parameters
----------
model : cobra.core.Model.Model
A cobra model.
reactions : array-like
An iterable object containing a list of reaction ids present in the model.
verbose : boolean, True by default.
A variable to enable or disable the printings of this function.
Returns
-------
rxn_gene_association : pandas.DataFrame
A pandas dataframe containing the information retrieved. The columns are :
'GeneID', 'RxnID', 'RxnName', 'SubSystem', 'RxnFormula'
'''
if verbose:
print('Using list of reactions to get their information and genes associated.')
rxn_gene_association = []
for reaction in reactions:
rxn = model.reactions.get_by_id(reaction)
if len(rxn.genes) != 0:
for gene in rxn.genes:
rxn_gene_association.append((str(gene.id), rxn.id, rxn.name, rxn.subsystem, rxn.reaction))
else:
rxn_gene_association.append(('', rxn.id, rxn.name, rxn.subsystem, rxn.reaction))
labels = ['GeneID', 'RxnID', 'RxnName', 'SubSystem', 'RxnFormula']
rxn_gene_association = pd.DataFrame.from_records(rxn_gene_association, columns=labels)
if verbose:
print('Information correctly obtained.')
return rxn_gene_association
def rxn_info_from_genes(model, genes, verbose=True):
'''
    This function retrieves the reactions associated with a list of gene ids, along with their information.
Parameters
----------
model : cobra.core.Model.Model
A cobra model.
genes : array-like
An iterable object containing a list of gene ids present in the model.
verbose : boolean, True by default.
A variable to enable or disable the printings of this function.
Returns
-------
rxn_gene_association : pandas.DataFrame
A pandas dataframe containing the information retrieved. The columns are :
'GeneID', 'RxnID', 'RxnName', 'SubSystem', 'RxnFormula'
'''
if verbose:
print('Using list of genes to get the reactions associated and their information.')
rxn_gene_association = []
for gene in genes:
g = model.genes.get_by_id(gene)
for rxn in g.reactions:
rxn_gene_association.append((str(g.id), rxn.id, rxn.name, rxn.subsystem, rxn.reaction))
labels = ['GeneID', 'RxnID', 'RxnName', 'SubSystem', 'RxnFormula']
rxn_gene_association = pd.DataFrame.from_records(rxn_gene_association, columns=labels)
if verbose:
print('Information correctly obtained.')
return rxn_gene_association
def rxn_info_from_model(model, verbose=True):
'''
This function looks for all the reactions in the model and returns their respective information.
Parameters
----------
model : cobra.core.Model.Model
A cobra model.
verbose : boolean, True by default.
A variable to enable or disable the printings of this function.
Returns
-------
rxn_gene_association : pandas.DataFrame
A pandas dataframe containing the information retrieved. The columns are :
'GeneID', 'RxnID', 'RxnName', 'SubSystem', 'RxnFormula'
'''
if verbose:
print('Getting information for all reactions in the model.')
rxn_gene_association = []
for rxn in model.reactions:
if len(rxn.genes) != 0:
for gene in rxn.genes:
rxn_gene_association.append((str(gene.id), rxn.id, rxn.name, rxn.subsystem, rxn.reaction))
else:
rxn_gene_association.append(('', rxn.id, rxn.name, rxn.subsystem, rxn.reaction))
labels = ['GeneID', 'RxnID', 'RxnName', 'SubSystem', 'RxnFormula']
rxn_gene_association = pd.DataFrame.from_records(rxn_gene_association, columns=labels)
if verbose:
print('Information correctly obtained.')
return rxn_gene_association
def get_objective_function(model):
'''
    This function returns the reaction that is set as the objective function for FBA.
Parameters
----------
model : cobra.core.Model.Model
A cobra model.
Returns
-------
reaction : cobra.core.reaction.Reaction
A cobra reaction.
'''
    obj_rxn = None
    for rxn in model.reactions:
        if rxn.objective_coefficient:
            obj_rxn = rxn
            print(rxn.id)
    return obj_rxn
def get_reaction_stoichiometry(reaction):
'''
This function returns the stoichiometry of a reaction
Parameters
----------
reaction : cobra.core.reaction.Reaction
A cobra reaction.
Returns
-------
reaction_stoichiometry : dict
A dictionary containing the metabolite stoichiometry of the reaction.
'''
reaction_stoichiometry = dict()
for met in reaction.metabolites:
stoich = reaction.get_coefficient(met.id)
reaction_stoichiometry[met.id] = stoich
return reaction_stoichiometry
def biomass_breakdown(model,input_info, input_mode = 'reaction'):
if input_mode == 'reaction':
biomass_rxn = input_info
stoich = get_reaction_stoichiometry(biomass_rxn)
class_dict = classify_metabolites_by_type(biomass_rxn.metabolites)
elif input_mode == 'dict':
stoich = input_info
metabolites = []
for met_id in stoich.keys():
met = model.metabolites.get_by_id(met_id)
metabolites.append(met)
class_dict = classify_metabolites_by_type(metabolites)
exclude = ['adp','h2o','h','pi','ppi']
contributions = dict()
for met_type in class_dict.keys():
contributions[met_type] = 0
for met_id in class_dict[met_type]:
if met_id.split('_')[0] not in exclude:
met = model.metabolites.get_by_id(met_id)
formula = cobra.core.formula.Formula(met.formula)
met_weight = formula.weight
if met_type == 'aminoacids':
met_weight -= 18 ## Taking out water after polymerization
if met_id == 'atp_c':
met_stoich = stoich['atp_c'] + stoich['adp_c'] # Non-energy-related atp
else:
met_stoich = stoich[met_id]
met_mass = met_weight * met_stoich
contributions[met_type] += abs(met_mass)/1000
return contributions
```
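A brief usage sketch for the query helpers above; the SBML path is a generic placeholder.
```python
import cobra
from cobra_utils.query.rxn_info import (rxn_info_from_metabolites,
                                        get_objective_function,
                                        biomass_breakdown)

model = cobra.io.read_sbml_model('e_coli_core.xml')  # placeholder model file

# Reactions (and their genes) in which cytosolic ATP participates
df = rxn_info_from_metabolites(model, ['atp_c'], verbose=False)
print(df.head())

# Mass contribution of each metabolite class to the biomass objective
bof = get_objective_function(model)
contributions = biomass_breakdown(model, bof, input_mode='reaction')
print(contributions)  # dict keyed by metabolite class, e.g. 'aminoacids'
```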
|
{
"source": "jdtibochab/ME-Models",
"score": 2
}
|
#### File: ME-Models/pputidame/translocation.py
```python
from __future__ import print_function, absolute_import, division
from collections import defaultdict
from cobrame.core.processdata import PostTranslationData
from cobrame.core.reaction import PostTranslationReaction
pathway = {'sec': {'enzymes': {'G1G01-5397-MONOMER': {'length_dependent': True,
'fixed_keff': False},
'G1G01-1433-MONOMER': {'length_dependent': True,
'fixed_keff': False},
'Sec-CPLX': {'length_dependent': True,
'fixed_keff': False}},
'keff': 4.,
'length_dependent_energy': True,
'stoichiometry': {'atp_c': -1./25., 'h2o_c': -1./25.,
'adp_c': 1./25., 'pi_c': 1./25.,
'h_c': 1./25.}},
'tat': {'enzymes': {'TatBC_octa': {'length_dependent': False,
'fixed_keff': False},
'CPLX1G01-1114': {'length_dependent': False,
'fixed_keff': False}},
'keff': 0.0125,
'length_dependent_energy': False,
'stoichiometry': {}},
'bam': {'enzymes': {'G1G01-440-MONOMER': {'length_dependent': False,
'fixed_keff': False},
'Bam-CPLX': {'length_dependent': False,
'fixed_keff': False}},
'keff': 0.027,
'length_dependent_energy': False,
'stoichiometry': {}},
'lol': {'enzymes': {'LolCDE-CPLX': {'length_dependent': False,
'fixed_keff': False},
'G1G01-4270-MONOMER': {'length_dependent': False,
'fixed_keff': False},
'G1G01-799-MONOMER': {'length_dependent': False,
'fixed_keff': False}},
'keff': 0.9,
'length_dependent_energy': False,
'stoichiometry': {'atp_c': -1., 'h2o_c': -1.,
'adp_c': 1., 'pi_c': 1., 'h_c': 1.}},
'yidC': {'enzymes': {'SRP-CPLX': {'length_dependent': True,
'fixed_keff': False},
'G1G01-6-MONOMER': {'length_dependent': True,
'fixed_keff': False}},
'keff': 20.,
'length_dependent_energy': False,
'stoichiometry': {'gtp_c': -1., 'h2o_c': -1., 'gdp_c': 1.,
'pi_c': 1., 'h_c': 1.}},
'secA': {'enzymes': {'G1G01-1433-MONOMER': {'length_dependent': True,
'fixed_keff': False}},
'keff': 4.,
'length_dependent_energy': True,
'stoichiometry': {'atp_c': -1./3./25., 'h2o_c': -1./3./25.,
'adp_c': 1./3./25., 'pi_c': 1./3./25.,
'h_c': 1./3./25.}},
'srp_yidC': {'enzymes': {'SRP-CPLX': {'length_dependent': True,
'fixed_keff': False},
'G1G01-6-MONOMER': {'length_dependent': True,
'fixed_keff': False},
'Sec-CPLX': {'length_dependent': True,
'fixed_keff': False}},
'keff': 20.,
'length_dependent_energy': False,
'stoichiometry': {'gtp_c': -1., 'h2o_c': -1.,
'gdp_c': 1., 'pi_c': 1., 'h_c': 1.}},
'srp': {'enzymes': {'SRP-CPLX': {'length_dependent': True,
'fixed_keff': False},
'G1G01-5455-MONOMER': {'length_dependent': False,
'fixed_keff': True},
'Sec-CPLX': {'length_dependent': True,
'fixed_keff': False}},
'keff': 20.,
'length_dependent_energy': False,
'stoichiometry': {'gtp_c': -2., 'h2o_c': -2., 'gdp_c': 2.,
'pi_c': 2., 'h_c': 2.}}
}
abbreviation_to_pathway = {'s': 'sec_translocation',
't': 'tat_translocation',
'b': 'bam_translocation',
'l': 'lol_translocation',
'y': 'yidC_translocation',
'a': 'secA_translocation',
'p': 'srp_yidC_translocation',
'r': 'srp_translocation'}
# Some proteins require different numbers of a complex in order to be
# translocated by a pathway
multipliers = {
'TatA_MONOMER': {'PP_0489': 15.5,
'PP_0490': 15.5,
'PP_2205': 23.0,
'PP_4897': 22.0},
'TatE_MONOMER': {'PP_0489': 15.5,
'PP_0490': 15.5,
'PP_2205': 23.0,
'PP_4897': 22.0},
'YidC_MONOMER': {'PP_5412': 2.0}}
multipliers_protein_keys = defaultdict(dict)
for enzyme, value in multipliers.items():
for bnum in value.keys():
multipliers_protein_keys['protein_' + bnum][enzyme] = value[bnum]
mmol = 6.022e20 # number of molecules per mmol
nm2_per_m2 = 1e18 # used to convert nm^2 to m^2
def add_translocation_pathways(model, pathways_df, membrane_constraints=False):
def add_translocation_data_and_reaction(model, pathways, preprocessed_id,
processed_id, compartment,
peptide_data, alt=False):
suffix = '_alt' if alt else ''
data = PostTranslationData('translocation_' + preprocessed_id + suffix,
model, processed_id, preprocessed_id)
data.translocation = pathways
data.translocation_multipliers = \
multipliers_protein_keys.get(preprocessed_id, {})
# Add protein surface area constraint
if membrane_constraints and compartment != 'Periplasm':
protein = peptide_data.protein
protein_met = model.metabolites.get_by_id('protein_' + protein)
mass = protein_met.formula_weight / 1000. # in kDa
membrane_thickness = model.global_info['membrane_thickness']
thickness = membrane_thickness[compartment]
            # Relationship uses protein molecular weight in kDa
# Adds surface area constraint in units of m^2/mmol
data.surface_area['SA_protein_' + compartment] = \
(1.21 / thickness * 2.) * mass * mmol / nm2_per_m2
rxn = PostTranslationReaction('translocation_' + peptide_data.id +
suffix)
rxn.posttranslation_data = data
model.add_reaction(rxn)
rxn.update()
# loop through all translation data and add translocation rxns/surface area
# constraints if they are membrane proteins
for peptide_data in model.translation_data:
# extract translocation info if peptide contained in complex
# stoichiometry
translocation_info = pathways_df[
pathways_df.Protein.str.match(peptide_data.id)]
# iterate if protein is not in a membrane complex
if len(translocation_info) == 0:
continue
# Assign preprocessed and processed (translocated) peptide ids
compartment = translocation_info.Protein_compartment.values[0]
processed_id = 'protein_' + peptide_data.id + '_' + compartment
preprocessed_id = 'protein_' + peptide_data.id
# compile translocation pathways for each membrane protein
pathways = set()
pathways_alt = set()
for abbrev in translocation_info.translocase_pathway.values[0]:
pathway_name = abbreviation_to_pathway[abbrev]
# The tat translocation pathway can use an alternate enzyme
if type(pathway_name) == list:
pathways.add(pathway_name[0])
pathways_alt.add(pathway_name[1])
else:
pathways.add(pathway_name)
pathways_alt.add(pathway_name)
add_translocation_data_and_reaction(model, pathways, preprocessed_id,
processed_id, compartment,
peptide_data)
# if there's an alternative pathway (tat) add this reaction as well
if pathways != pathways_alt:
add_translocation_data_and_reaction(model, pathways_alt,
preprocessed_id, processed_id,
compartment, peptide_data,
alt=True)
lipoprotein_precursors = {'AcrA': 'PP_1386',
'BamB': 'PP_0856',
'BamC': 'PP_1238',
'BamD': 'PP_0622',
'BamE': 'PP_4731',
'CusC': 'PP_1384',
'LolB': 'PP_0724',
'MetQ': 'PP_0112',
'MltA': 'PP_4971',
'MltB': 'PP_4805'}
lipid_modifications = ['pg120_p', 'pg141_p', 'pg140_p', 'pg181_p', 'pg161_p',
'pg160_p', 'pg180_p']
def add_lipoprotein_formation(model, compartment_dict,
membrane_constraints=False, update=True):
# loop through all proteins which need lipid modifications (lipoproteins)
for protein in lipoprotein_precursors.values():
compartment = compartment_dict.get(protein)
protein_met = model.metabolites.get_by_id('protein_' + protein)
mass = protein_met.formula_weight / 1000. # in kDa
processed_id = 'protein_' + protein + '_lipoprotein_' + compartment
preprocessed_id = 'protein_' + protein + '_' + compartment
def add_lipoprotein_data_and_reaction(first_lipid, second_lipid):
# Add PostTranslation Data, modifications and surface area
data = PostTranslationData(reaction_prefix + '_' + second_lipid,
model, processed_id, preprocessed_id)
data.subreactions['mod_' + first_lipid] = 1
data.subreactions['mod2_' + second_lipid + '_p'] = 1
data.biomass_type = 'lipid_biomass'
if membrane_constraints:
thickness_dict = model.global_info['membrane_thickness']
thickness = thickness_dict['Outer_Membrane']
# From Liu et al. x2 for each to account for each leaflet
protein_SA = 1.21 / thickness * 2 * mass * mmol / nm2_per_m2
data.surface_area = {'SA_protein_' + compartment: -protein_SA,
'SA_lipoprotein': 1. * mmol / nm2_per_m2}
# Add Reaction to model and associated it with its data
rxn = PostTranslationReaction(reaction_prefix + '_' + second_lipid)
model.add_reaction(rxn)
rxn.posttranslation_data = data
if update:
rxn.update()
for mod in lipid_modifications:
reaction_prefix = protein + '_lipid_modification_' + mod
add_lipoprotein_data_and_reaction(mod, 'pg160')
add_lipoprotein_data_and_reaction(mod, 'pe160')
```
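A worked example of the surface-area coefficient used twice above (`1.21 / thickness * 2 * mass * mmol / nm2_per_m2`); the protein mass and membrane thickness are made-up round numbers for illustration only.
```python
mmol = 6.022e20    # molecules per mmol
nm2_per_m2 = 1e18  # nm^2 per m^2

mass = 30.0       # hypothetical protein mass in kDa
thickness = 7.0   # hypothetical membrane thickness in nm

# Surface area occupied per mmol of translocated protein, in m^2/mmol,
# with the factor of 2 counting both membrane leaflets
surface_area_coeff = 1.21 / thickness * 2 * mass * mmol / nm2_per_m2
print(surface_area_coeff)  # ~6.2e3 m^2 per mmol of this protein
```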
|
{
"source": "jdtibochab/network_bisb",
"score": 3
}
|
#### File: lib/network_evaluation_tools/data_import_tools.py
```python
import pandas as pd
import networkx as nx
import time
import os
# Filter extended sif file where all edges are weighted by a specific quantile
# Return the filtered network edge list and save it to a file if desired (for import by load_network_file)
def filter_weighted_network_sif(network_file_path, nodeA_col=0, nodeB_col=1, score_col=2, q=0.9, delimiter='\t', verbose=False, save_path=None):
    data = pd.read_csv(network_file_path, sep=delimiter, header=None, low_memory=False)
# Filter edges by score quantile
q_score = data[score_col].quantile(q)
if verbose:
        print(str(round(q*100,2))+'%', 'score:', q_score)
data_filt = data[data[score_col]>q_score][data.columns[[nodeA_col, nodeB_col, score_col]]]
data_filt.columns = ['nodeA', 'nodeB', 'edgeScore']
if verbose:
        print(data_filt.shape[0], '/', data.shape[0], 'edges retained')
if save_path is not None:
data_filt.to_csv(save_path, sep='\t', header=False, index=False)
return data_filt
# Load network from file as unweighted network
# Can set delimiter, but default delimiter is tab
# Only will read edges as first two columns, all other columns will be ignored
def load_network_file(network_file_path, delimiter='\t', verbose=False):
network = nx.read_edgelist(network_file_path, delimiter=delimiter, data=False)
if verbose:
        print('Network File Loaded:', network_file_path)
return network
# Get full paths to all networks in directory with a given file name structure:
# e.g. If filename = 'BIND_Symbol.sif', then network_name='BIND', suffix='_Symbol', ext='.sif'
def get_networks(wd, suffix=None, file_ext='.sif'):
network_files = {}
for fn in os.listdir(wd):
if suffix==None:
if fn.endswith(file_ext):
network_files[fn.split(file_ext)[0]]=wd+fn
else:
if fn.endswith(file_ext) and fn.split(file_ext)[0].endswith(suffix):
network_files[fn.split(suffix)[0]]=wd+fn
return network_files
# Companion function with get_networks(), loads all of the network files found in a directory
# Uses the load_network_file() function to load each network, also only imports first two columns, no edge data
# Constructs a dictionary of useful network items for each network in the directory:
# - Actual networkx object representation of network
# - List of nodes by name for each network
# - List of edges by node name for each network
def load_networks(network_file_map, delimiter='\t', verbose=False):
# Initialize dictionaries
networks, network_edges, network_nodes = {}, {}, {}
# Loading network and network properties
for network_name in network_file_map:
loadtime = time.time()
# Load network
network = load_network_file(network_file_map[network_name], verbose=verbose)
networks[network_name]=network
# Construct network node list
network_nodes[network_name] = network.nodes()
# Construct network edge list
network_edges[network_name] = network.edges()
if verbose:
        print('All given network files loaded')
# Return data structure
return networks, network_edges, network_nodes
# Convert and save MAF from Broad Firehose
# Can produce 2 types of filetypes: 'matrix' or 'list', matrix is a full samples-by-genes binary csv, 'list' is a sparse representation of 'matrix'
# This is a conversion tool, so the result must be saved (most tools will require a path to a processed MAF file and load it separately)
# Gene naming can be 'Symbol' or 'Entrez'
def process_TCGA_MAF(maf_file, save_path, filetype='matrix', gene_naming='Symbol', verbose=False):
loadtime = time.time()
# Load MAF File
TCGA_MAF = pd.read_csv(maf_file,sep='\t',low_memory=False)
# Get all patient somatic mutation (sm) pairs from MAF file
if gene_naming=='Entrez':
TCGA_sm = TCGA_MAF.groupby(['Tumor_Sample_Barcode', 'Entrez_Gene_Id']).size()
else:
TCGA_sm = TCGA_MAF.groupby(['Tumor_Sample_Barcode', 'Hugo_Symbol']).size()
# Turn somatic mutation data into binary matrix
TCGA_sm_mat = TCGA_sm.unstack().fillna(0)
TCGA_sm_mat = (TCGA_sm_mat>0).astype(int)
# Trim TCGA barcodes
TCGA_sm_mat.index = [pat[:12] for pat in TCGA_sm_mat.index]
# Filter samples with duplicate IDs
non_dup_IDs = list(TCGA_sm_mat.index.value_counts().index[TCGA_sm_mat.index.value_counts()==1])
dup_IDs = list(TCGA_sm_mat.index.value_counts().index[TCGA_sm_mat.index.value_counts()>1])
# Save file as binary matrix or sparse list
if filetype=='list':
# Now try to construct two-column/sparse representation of binary sm data
# Get list of all patient somatic mutations
index_list = list(TCGA_sm.index)
# Filter list of patient somatic mutations of duplicate patient barcodes
index_list_filt = [i for i in index_list if not any([True if barcode in i[0] else False for barcode in dup_IDs])]
# Save patient somatic mutations list to file
f = open(save_path, 'w')
for sm in index_list_filt:
f.write(sm[0][:12]+'\t'+sm[1]+'\n')
f.close()
if verbose:
            print('Binary somatic mutations list saved')
else:
# Save non-duplicate patients' binary TCGA somatic mutation matrix to csv
TCGA_sm_mat_filt = TCGA_sm_mat.ix[non_dup_IDs]
# Remove all genes that have no more mutations after patient filtering
nonempty_cols = [col for col in TCGA_sm_mat_filt.columns if not all(TCGA_sm_mat_filt[col]==0)]
TCGA_sm_mat_filt2 = TCGA_sm_mat_filt[nonempty_cols]
# Remove columns with bad names like '0'
        named_cols = [col for col in TCGA_sm_mat_filt2.columns if col != '0']
        TCGA_sm_mat_filt3 = TCGA_sm_mat_filt2[named_cols]
TCGA_sm_mat_filt3.to_csv(save_path)
if verbose:
            print('Binary somatic mutation matrix saved')
if verbose:
        print('MAF file processed:', maf_file, round(time.time()-loadtime, 2), 'seconds.')
return
# Load binary mutation data with 2 file types (filetype= 'matrix' or 'list')
# filetype=='matrix' is a csv or tsv style matrix with row and column headers, rows are samples/patients, columns are genes
# filetype=='list' is a 2 columns text file separated by the delimiter where 1st column is sample/patient, 2nd column is one gene mutated in that patient
# Line example in 'list' file: 'Patient ID','Gene Mutated'
def load_binary_mutation_data(filename, filetype='matrix', delimiter=',', verbose=False):
if filetype=='list':
f = open(filename)
binary_mat_lines = f.read().splitlines()
binary_mat_data = [(line.split('\t')[0], line.split('\t')[1]) for line in binary_mat_lines]
binary_mat_index = pd.MultiIndex.from_tuples(binary_mat_data, names=['Tumor_Sample_Barcode', 'Hugo_Symbol'])
binary_mat_2col = pd.DataFrame(1, index=binary_mat_index, columns=[0])[0]
binary_mat = binary_mat_2col.unstack().fillna(0)
else:
binary_mat = pd.read_csv(filename, delimiter=delimiter, index_col=0).astype(int)
if verbose:
        print('Binary Mutation Matrix Loaded:', filename)
return binary_mat
# Concatenate multiple mutation matrices together
# All file type structures and delimiters must be the same (see load_binary_mutation_data()) across all files
def concat_binary_mutation_matrices(filename_list, filetype='matrix', delimiter=',', verbose=False, save_path=None):
binary_mat_list = [load_binary_mutation_data(fn, filetype=filetype, delimiter=delimiter, verbose=verbose) for fn in filename_list]
binary_mat_concat = pd.concat(binary_mat_list).fillna(0)
if verbose:
        print('All binary mutation matrices loaded and concatenated')
if save_path==None:
return binary_mat_concat
else:
binary_mat_concat.to_csv(save_path)
return binary_mat_concat
# Construct dictionary of node sets from input text file to perform AUPRC analysis on for network of interest
# File format: Each line is a delimited list where the first item is the name of the node set
# All other nodes in the list follow the node set name
def load_node_sets(node_set_file, delimiter='\t', verbose=False):
f = open(node_set_file)
node_set_lines = f.read().splitlines()
node_set_lines_split = [line.split(delimiter) for line in node_set_lines]
f.close()
node_sets = {node_set[0]:set(node_set[1:]) for node_set in node_set_lines_split}
if verbose:
        print('Node cohorts loaded:', node_set_file)
return node_sets
```
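A short sketch of the typical loading flow with the helpers above; the directory and file names are placeholders.
```python
from network_evaluation_tools import data_import_tools as dit

# Collect all '*_Symbol.sif' networks in a directory and load them
network_files = dit.get_networks('/path/to/networks/', suffix='_Symbol', file_ext='.sif')
networks, network_edges, network_nodes = dit.load_networks(network_files, verbose=True)

# Load gene sets to recover (one tab-delimited set per line, set name first)
node_sets = dit.load_node_sets('/path/to/gene_sets.txt', verbose=True)
print(len(networks), 'networks;', len(node_sets), 'gene sets')
```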
#### File: network_bisb/network_evaluation_tools/network_evaluation_functions.py
```python
from multiprocessing import Pool
from network_evaluation_tools import data_import_tools as dit
from network_evaluation_tools import network_propagation as prop
import networkx as nx
import numpy as np
import os
import random
import scipy.stats as stats
import sklearn.metrics as metrics
import pandas as pd
import time
# Shuffle network in degree-preserving manner
# Input: network - networkx formatted network
# For large networks this can be slow: may need to be sped up to prevent bottlenecking
def shuffle_network(network, max_tries_n=10, verbose=False):
# Shuffle Network
shuff_time = time.time()
edge_len=len(network.edges())
shuff_net=network.copy()
try:
nx.double_edge_swap(shuff_net, nswap=edge_len, max_tries=edge_len*max_tries_n)
except:
if verbose:
print('Note: Maximum number of swap attempts ('+repr(edge_len*max_tries_n)+') exceeded before desired swaps achieved ('+repr(edge_len)+').')
if verbose:
# Evaluate Network Similarity
shared_edges = len(set(network.edges()).intersection(set(shuff_net.edges())))
print('Network shuffled:', time.time()-shuff_time, 'seconds. Edge similarity:', shared_edges/float(edge_len))
return shuff_net
# Calculate optimal sub-sampling proportion for test/train
# Input: NetworkX object and dictionary of {geneset name:list of genes}
def calculate_p(network, nodesets, m=-0.18887257, b=0.64897403):
network_nodes = [str(gene) for gene in network.nodes()]
nodesets_p = {}
for nodeset in nodesets:
nodesets_coverage = len([node for node in nodesets[nodeset] if node in network_nodes])
nodesets_p[nodeset] = round(m*np.log10(nodesets_coverage)+b, 4)
return nodesets_p
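# Worked example of the sub-sample fit above (coverage value is hypothetical):
# a gene set with 100 of its genes covered by the network gets
#   p = -0.18887257 * log10(100) + 0.64897403 ~= 0.271
# i.e. roughly 27% of the covered genes are used as the propagation seed sample
# in the AUPRC functions below; the remainder are the recovery targets.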
# Construct influence matrix of each network node propagated across network to use as kernel in AUPRC analysis
# Input: NetworkX object. No propagation constant or alpha model required, can be calculated
def construct_prop_kernel(network, alpha=None, m=-0.02935302, b=0.74842057, verbose=False, save_path=None):
network_Fo = pd.DataFrame(data=np.identity(len(network.nodes())), index=network.nodes(), columns=network.nodes())
if alpha is None:
alpha_val = prop.calculate_alpha(network, m=m, b=b)
else:
alpha_val = alpha
network_Fn = prop.closed_form_network_propagation(network, network_Fo, alpha_val, verbose=verbose)
network_Fn = network_Fn.ix[network_Fn.columns]
if verbose:
print('Propagated network kernel constructed')
if save_path is not None:
if save_path.endswith('.hdf'):
network_Fn.to_hdf(save_path, key='Kernel', mode='w')
else:
network_Fn.to_csv(save_path)
return network_Fn
# Global variable initialization function for small network AUPRC calculations
def global_var_initializer(global_net_kernel):
global kernel
kernel = global_net_kernel
# Calculate AUPRC of a single node set's recovery for small networks (<250k edges)
# This method is faster for smaller networks, but still has a relatively large memory footprint
# The parallel setup for this situation requires passing the network kernel to each individual thread
def calculate_small_network_AUPRC(params):
node_set_name, node_set, p, n, bg, verbose = params[0], params[1], params[2], params[3], params[4], params[5]
runtime = time.time()
intersect = [nodes for nodes in node_set if nodes in kernel.index]
AUPRCs = []
sample_size = int(round(p*len(intersect)))
for i in range(n): # Number of times to run the sampling
sample = random.sample(intersect, sample_size) # get node set sample
intersect_non_sample = [node for node in intersect if node not in sample] # nodes in intersect not in sample
bg_non_sample = [node for node in bg if node not in sample] # nodes in background gene list not in sample
bg_sample_sum = kernel.ix[sample][bg_non_sample].sum().sort_values(ascending=False) # summed prop value for all nodes in background
y_actual = pd.Series(0, index=bg_sample_sum.index, dtype=int) # nodes sorted by mean prop value
y_actual.ix[intersect_non_sample]+=1 # which nodes in sorted list are in intersect_non_sample
intersect_non_sample_sorted = y_actual[y_actual==1].index # intersect_non_sample sorted
        TP, FN = 0, len(intersect_non_sample_sorted) # initialize true positives and false negatives
        precision, recall = [1], [0] # initialize precision and recall curves
for node in intersect_non_sample_sorted: # Slide down sorted nodes by summed prop value by nodes that are in intersect_non_sample
TP += 1.0 # Calculate true positives found at this point in list
FN -= 1.0 # Calculate false negatives found at this point in list
precision.append(TP/float(y_actual.ix[:node].shape[0])) # Calculate precision ( TP / TP+FP ) and add point to curve
recall.append(TP/float(TP+FN)) # Calculate recall ( TP / TP+FN ) and add point to curve
AUPRCs.append(metrics.auc(recall, precision)) # Calculate Area Under Precision-Recall Curve (AUPRC)
if verbose:
print('AUPRC Analysis for given node set', '('+repr(len(intersect))+' nodes in network) complete:', round(time.time()-runtime, 2), 'seconds.')
return [node_set_name, np.mean(AUPRCs)]
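# Tiny worked example of the precision/recall stepping above (ranking is hypothetical):
# with two held-out genes ranked 1st and 4th among the background nodes,
#   after gene 1: TP=1, precision = 1/1 = 1.00, recall = 1/2 = 0.5
#   after gene 2: TP=2, precision = 2/4 = 0.50, recall = 2/2 = 1.0
# and metrics.auc(recall, precision) integrates the resulting curve.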
# Calculate AUPRC of a single node set's recovery for large networks (>=250k edges)
# For large networks the small-network approach becomes slow and its memory footprint too large
# The parallel setup for this situation therefore passes only the pre-sorted recovery list and precision denominators to each worker
def calculate_large_network_AUPRC(params):
geneset, intersect_non_sample_sorted, P_totals, verbose = params[0], params[1], params[2], params[3]
runtime = time.time()
TP, FN = 0, len(intersect_non_sample_sorted) # initialize true positives and false negatives
precision, recall = [1], [0] # initialize precision and recall curves
for node in intersect_non_sample_sorted: # Step down sorted nodes by summed prop value by nodes that are in intersect_non_sample
TP += 1.0 # Calculate true positives found at this point in list
FN -= 1.0 # Calculate false negatives found at this point in list
precision.append(TP/float(P_totals[node])) # Calculate precision ( TP / TP+FP ) and add point to curve
recall.append(TP/float(TP+FN)) # Calculate recall ( TP / TP+FN ) and add point to curve
AUPRC = metrics.auc(recall, precision) # Calculate Area Under Precision-Recall Curve (AUPRC)
if verbose:
print('AUPRC Analysis for given node set:', geneset, 'complete:', round(time.time()-runtime, 2), 'seconds.')
return [geneset, AUPRC]
# Wrapper to calculate AUPRC of multiple node sets' recovery for small networks (<250k edges)
def small_network_AUPRC_wrapper(net_kernel, genesets, genesets_p, n=30, cores=1, bg=None, verbose=True):
# Construct params list
if bg is None:
bg_intersect = list(net_kernel.index)
else:
bg_intersect = list(set(bg).intersection(set(net_kernel.index)))
AUPRC_Analysis_params = [[geneset, genesets[geneset], genesets_p[geneset], n, bg_intersect, verbose] for geneset in genesets]
# Determine parallel calculation status
if cores == 1:
# Set network kernel
global_var_initializer(net_kernel)
# Calculate AUPRC values for all gene sets
AUPRC_results = []
for params_list in AUPRC_Analysis_params:
AUPRC_results.append(calculate_small_network_AUPRC(params_list))
else:
# Initialize worker pool
pool = Pool(cores, global_var_initializer, [net_kernel])
# Run the AUPRC analysis for each geneset
AUPRC_results = pool.map(calculate_small_network_AUPRC, AUPRC_Analysis_params)
# Close worker pool
pool.close()
# Construct AUPRC results
geneset_AUPRCs = {result[0]:result[1] for result in AUPRC_results}
AUPRCs_table = pd.Series(geneset_AUPRCs, name='AUPRC')
return AUPRCs_table
# Wrapper to calculate AUPRC of multiple node sets' recovery for large networks (>=250k edges)
def large_network_AUPRC_wrapper(net_kernel, genesets, genesets_p, n=30, cores=1, bg=None, verbose=True):
starttime = time.time()
# Construct binary gene set sub-sample matrix
geneset_list = list(genesets.keys())
m, c = len(geneset_list), net_kernel.shape[0]
subsample_mat = np.zeros((n*m, c))
y_actual_mat = np.zeros((n*m, c))
# Each block of length n rows is a sub-sampled binary vector of the corresponding gene set
for i in range(m):
geneset = geneset_list[i]
# Get indices of gene set genes in kernel
intersect = [gene for gene in genesets[geneset] if gene in net_kernel.index]
index_dict = dict((gene, idx) for idx, gene in enumerate(net_kernel.index))
intersect_idx = [index_dict[gene] for gene in intersect]
# Generate n sub-samples
for j in range(n):
# Sub-sample gene set indices
sample_size = int(round(genesets_p[geneset]*len(intersect)))
sample_idx = random.sample(intersect_idx, sample_size)
non_sample_idx = [idx for idx in intersect_idx if idx not in sample_idx]
# Set sub-sampled list to 1
row = (i*n)+j
subsample_mat[row, sample_idx] = 1
y_actual_mat[row, non_sample_idx] = 1
if verbose:
print('Binary gene set sub-sample matrix constructed')
# Propagate sub-samples
prop_subsamples = np.dot(subsample_mat, net_kernel)
if verbose:
print('Binary gene set sub-sample matrix propagated')
# Construct parameter list to be passed
AUPRC_Analysis_params = []
for i in range(len(geneset_list)):
for j in range(n):
row = (i*n)+j
prop_result_full = pd.DataFrame(np.array((subsample_mat[row], y_actual_mat[row], prop_subsamples[row])),
index=['Sub-Sample', 'Non-Sample', 'Prop Score'], columns=net_kernel.columns).T
# Set background gene sets from a predefined gene set or all network genes
if bg is None:
prop_result = prop_result_full.sort_values(by=['Sub-Sample', 'Prop Score', 'Non-Sample'],
ascending=[False, False, False]).ix[int(sum(subsample_mat[row])):]['Non-Sample']
else:
                prop_result = prop_result_full.reindex(bg).dropna().sort_values(by=['Sub-Sample', 'Prop Score', 'Non-Sample'],
                                                                                ascending=[False, False, False]).iloc[int(sum(subsample_mat[row])):]['Non-Sample']
intersect_non_sample_sorted = prop_result[prop_result==1].index
            P_totals = {node:float(prop_result.loc[:node].shape[0]) for node in intersect_non_sample_sorted}
AUPRC_Analysis_params.append([geneset_list[i], intersect_non_sample_sorted, P_totals, verbose])
# Determine parallel calculation status
if cores == 1:
# Calculate AUPRC values for all gene sets
AUPRC_results = []
for params_list in AUPRC_Analysis_params:
AUPRC_results.append(calculate_large_network_AUPRC(params_list))
else:
# Initialize worker pool
pool = Pool(cores)
# Run the AUPRC analysis for each geneset
AUPRC_results = pool.map(calculate_large_network_AUPRC, AUPRC_Analysis_params)
# Close worker pool
pool.close()
# Construct AUPRC results
geneset_AUPRCs = pd.DataFrame(AUPRC_results, columns=['Gene Set', 'AUPRCs']).set_index('Gene Set', drop=True)
    geneset_AUPRCs_merged = {geneset:geneset_AUPRCs.loc[geneset]['AUPRCs'].mean() for geneset in geneset_list}
AUPRCs_table = pd.Series(geneset_AUPRCs_merged, name='AUPRC')
return AUPRCs_table
# Wrapper to calculate AUPRCs of multiple node sets given network and node set files
def AUPRC_Analysis_single(network_file, genesets_file, shuffle=False, kernel_file=None, prop_constant=None,
subsample_iter=30, cores=1, geneset_background=False, save_path=None, verbose=True):
starttime = time.time()
# Load network
network = dit.load_network_file(network_file, verbose=verbose)
# Shuffle network?
if shuffle:
network = shuffle_network(network, verbose=verbose)
# Get network size
net_nodes = network.nodes()
net_size = len(net_nodes)
if verbose:
print('Network size:', net_size, 'Nodes')
# Calculate or load network propagation kernel
if kernel_file is None:
# Determine propagation constant
if prop_constant is None:
alpha = prop.calculate_alpha(network)
else:
alpha = prop_constant
# Calculate network propagation kernel
net_kernel = construct_prop_kernel(network, alpha=alpha, verbose=verbose)
else:
# Load network propagation kernel
if kernel_file.endswith('.hdf'):
net_kernel = pd.read_hdf(kernel_file)
else:
net_kernel = pd.read_csv(kernel_file)
# Load node sets to recover
genesets = dit.load_node_sets(genesets_file, verbose=verbose)
# Calculate sub-sample rate for each node set given network
genesets_p = calculate_p(network, genesets)
# Set background of genes to recover as all network nodes or union of all gene sets' genes
if geneset_background:
background_gene_set = set()
for geneset in genesets:
background_gene_set = background_gene_set.union(genesets[geneset])
background_genes = list(background_gene_set.intersection(set(net_nodes)))
else:
background_genes = list(net_nodes)
# if network is small:
if net_size < 10000:
AUPRC_table = small_network_AUPRC_wrapper(net_kernel, genesets, genesets_p, n=subsample_iter, cores=cores, bg=background_genes, verbose=verbose)
# if network is large:
elif (net_size >= 10000) & (net_size < 15000):
AUPRC_table = large_network_AUPRC_wrapper(net_kernel, genesets, genesets_p, n=subsample_iter, cores=cores, bg=background_genes, verbose=verbose)
# if network is large:
else:
AUPRC_table = large_network_AUPRC_wrapper(net_kernel, genesets, genesets_p, n=subsample_iter, cores=1, bg=background_genes, verbose=verbose)
if verbose:
print('AUPRC values calculated', time.time()-starttime, 'seconds')
# Save table
if save_path is not None:
AUPRC_table.to_csv(save_path)
if verbose:
print('AUPRC table saved:', save_path)
return AUPRC_table
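# Illustrative call (a sketch only; the file names and formats below are hypothetical and depend on
# what dit.load_network_file / dit.load_node_sets expect in this codebase):
#   AUPRC_table = AUPRC_Analysis_single('network_edgelist.txt', 'genesets.gmt',
#                                       subsample_iter=30, cores=4, save_path='AUPRCs.csv')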
# The function will take all files containing the filename marker given to shuff_net_AUPRCs_fn and construct a single null AUPRCs table from them (in wd)
# shuff_net_AUPRCs_fn is a generic filename marker (assumes all shuff_net_AUPRCs files have the same file name structure)
def get_null_AUPRCs_table(wd, shuff_net_AUPRCs_fn, geneset_list=None):
    shuff_net_AUPRCs = [pd.read_csv(wd+fn, index_col=0, header=None) for fn in os.listdir(wd) if shuff_net_AUPRCs_fn in fn]
shuff_net_AUPRCs = pd.concat(shuff_net_AUPRCs, axis=1)
if geneset_list is None:
return shuff_net_AUPRCs
else:
        return shuff_net_AUPRCs.reindex(geneset_list).dropna(axis=1)
# Calculate robust z-score metric for a network on given node sets given results directory of AUPRC calculations
# Requires the AUPRCs calculated for the actual network in a pandas Series
# Also requires the AUPRCs calculated for the same gene sets on the shuffled networks in a pandas DataFrame
def calculate_network_performance_score(actual_net_AUPRCs, shuff_net_AUPRCs, verbose=True, save_path=None):
# Align data (only calculate for gene sets with full data on both actual networks and all shuffled networks)
genesets = sorted(list(set(actual_net_AUPRCs.index).intersection(set(shuff_net_AUPRCs.index))), key=lambda s: s.lower())
    actual_net_AUPRCs = actual_net_AUPRCs.loc[genesets]
    shuff_net_AUPRCs = shuff_net_AUPRCs.loc[genesets]
# Compute robust z-score for composite network performances
    k = 1/stats.norm.ppf(0.75) # Median absolute deviation scaling factor to make the median absolute deviation behave similarly to the standard deviation of a normal distribution
AUPRC_null_median = shuff_net_AUPRCs.median(axis=1)
AUPRC_null_MAD = abs(shuff_net_AUPRCs.subtract(AUPRC_null_median, axis=0)).median(axis=1)
AUPRC_null_MAD_scaled = k*AUPRC_null_MAD
AUPRC_ZNorm = (actual_net_AUPRCs - AUPRC_null_median).divide(AUPRC_null_MAD_scaled)
if save_path is not None:
AUPRC_ZNorm.to_csv(save_path)
if verbose:
print('AUPRC values z-normalized')
return AUPRC_ZNorm
# Calculate relative gain of actual network AUPRC over median random network AUPRC performance for each gene set
# Requires the AUPRCs calculated for the actual network in a pandas Series
# Also requires the AUPRCs calculated for the same gene sets on the shuffled networks in a pandas DataFrame
def calculate_network_performance_gain(actual_net_AUPRCs, shuff_net_AUPRCs, verbose=True, save_path=None):
# Align data (only calculate for gene sets with full data on both actual networks and all shuffled networks)
genesets = sorted(list(set(actual_net_AUPRCs.index).intersection(set(shuff_net_AUPRCs.index))), key=lambda s: s.lower())
    actual_net_AUPRCs = actual_net_AUPRCs.loc[genesets]
    shuff_net_AUPRCs = shuff_net_AUPRCs.loc[genesets]
# Compute relative gain
AUPRC_null_median = shuff_net_AUPRCs.median(axis=1)
AUPRC_gain = (actual_net_AUPRCs - AUPRC_null_median).divide(AUPRC_null_median)
if save_path is not None:
AUPRC_gain.to_csv(save_path)
if verbose:
print('AUPRC relative performance gain calculated')
return AUPRC_gain
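# Putting the two metrics together (sketch only; the directory './results/' and the filename marker
# 'shuffNet_AUPRCs' are hypothetical and assume shuffled-network AUPRC tables were saved beforehand):
#   actual_AUPRCs = AUPRC_Analysis_single('network_edgelist.txt', 'genesets.gmt', shuffle=False)
#   null_AUPRCs = get_null_AUPRCs_table('./results/', 'shuffNet_AUPRCs')
#   z_scores = calculate_network_performance_score(actual_AUPRCs, null_AUPRCs)
#   rel_gain = calculate_network_performance_gain(actual_AUPRCs, null_AUPRCs)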
```
|
{
"source": "JDTimlin/QSO_Clustering",
"score": 2
}
|
#### File: clustering/Limbers/Limber_MCint_highz.py
```python
import os
import sys
import numpy as np
from astropy.io import fits as pf
from sklearn.neighbors import KernelDensity as kde
from scipy import integrate
import camb
from camb import model
from scipy.special import j0
from scipy import interpolate
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D as ax3d
from skmonaco import mcquad
from skmonaco import mcmiser
import time
#Import Adam's data as a low-z test
#data = '/Users/johntimlin/Clustering/Myers2006/Myers2006_dr1_test.fits'
#obs = pf.open(data)[1].data
#gdx = ((obs.ZSPEC >= 0.4)&(obs.ZSPEC <= 3))
#gdx = ((obs.zphot <= 2.1) & (obs.zphot >= 0.4) & (obs.ZSPEC > 0))
#Compute the redshift percentiles for the Freedman-Diaconis rule for bin width
#q75, q25 = np.percentile(obs.ZSPEC[gdx], [75 ,25])
#iqr = q75 - q25
#FD = 2*iqr /(len(obs.ZSPEC[gdx]))**(1/3.0)
#Set up the bin range using the Freedman-Diaconis bin width
#bins = np.arange(min(obs.ZSPEC[gdx]),max(obs.ZSPEC[gdx]),FD)
#num,bins = np.histogram(obs.ZSPEC[gdx],bins,normed=True)
#Import SpIES / SHELA data
data = '../Data_Sets/QSO_Candidates_allcuts_with_errors_visualinsp.fits'
obs = pf.open(data)[1].data
Z = obs.zphotNW
gdx = ((Z >= 3.4)&(Z <= 5.2) & (obs.Good_obj == 0)) & (obs.dec>=-1.2) & (obs.dec<=1.2)
#gdx = Z>0
#Set up a KDE for dNdz
tmpz = Z[gdx][:, np.newaxis] #change the array from row shape (1) to column shape (1,)
print(np.shape(tmpz))
sample_range = np.linspace(min(tmpz[:, 0]), max(tmpz[:, 0]), len(tmpz[:, 0]))[:, np.newaxis]
est = kde(bandwidth=0.1,kernel='epanechnikov') #Set up the Kernel
histkde = est.fit(tmpz).score_samples(sample_range) #fit the kernel to the data and find the density of the grid
#Interpolate (you get the same function back) to plug in any z in the range (as opposed to set z values)
dNdz = interpolate.interp1d(sample_range.flatten(),np.exp(histkde))
print(sample_range.flatten())
print('done')
ZE = np.linspace(min(Z),max(Z),100)
xo=integrate.quad(dNdz,min(sample_range),max(sample_range)) #quad(f(x),xlower,xupper, args)
print(xo)
'''
#Plot the KDE dndz
plt.plot(sample_range[:,0],np.exp(histkde))
plt.xlabel('z')
#plt.plot(sample_range[:,0],dNdz(sample_range[:,0]))
#plt.plot(bins[:-1],num,linestyle = 'steps-mid')
ZE = np.linspace(min(Z),max(Z),100)
xo=integrate.quad(dNdz,min(sample_range),max(sample_range)) #quad(f(x),xlower,xupper, args)
print xo
plt.savefig('dndz.png')
plt.show()
'''
# Compute the matter power spectrum from CAMB and Generate the P(z,k) function to output the power at any given redshift
#and wavenumber
#First define Planck 2015 cosmological parameters
H = 70 #H0.
oc = 0.229 #physical density of CDM
ob = 0.046 #physical density of baryons
#Conversion to density param: Omega_Matter = (oc+ob)/(H0/100.)**2
#Set up parameters in CAMB
pars = camb.CAMBparams()
#H0 is hubble parameter at z=0, ombh2 is the baryon density (physical), omch2 is the matter density (physical)
#mnu is sum of neutrino masses, omk is curvature parameter (set to 0 for flat), meffsterile is effective mass of sterile neutrinos
#pars.set_cosmology(H0=H,ombh2=ob, omch2=oc,omk=0)#,mnu=0,meffsterile=0)
#Hard code the cosmology params
pars.H0=H #hubble param (No h!!)
pars.omegab=ob #Baryon density parameter
pars.omegac=oc #CDM density parameter
pars.omegav=0.725 #Vacuum density parameter
pars.set_dark_energy()
#Set parameters using standard power law parameterization.If nt=None, uses inflation consistency relation.
#ns is the scalar spectral index
pars.InitPower.set_params(ns=0.960)
camb.set_halofit_version(version='original') #uses the Smith 2003 halo model
ze=np.linspace(0,20,150)
ka=np.logspace(-4,2,len(ze))#np.linspace(0,10,100)
#Get the matter power spectrum interpolation object (based on RectBivariateSpline).
#pars: input parameters, zs: redshift range, nonlinear: generate nonlinear power spectrum, hubble_units=True: output as Mpc/h^3
#instead of Mpc^3
PK = camb.get_matter_power_interpolator(pars,zs = ze,zmax = ze[-1], nonlinear=True, hubble_units=False, k_hunit=True, kmax = ka[-1])
#Generate the power using the interpolator and the z and k arrays
#Power = PK.P(z,k)
def dimpower(Pk,z,k):
delta = Pk.P(z,k) * k**3/(2*np.pi**2)
return delta
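#Note (added for readability): domega below evaluates the integrand of the Limber approximation for
#the angular correlation function w(theta). With evalint=True it expects kz = [log10(k), z], converts
#theta from arcminutes to radians, and weights the dimensionless power spectrum by dN/dz squared,
#a Bessel J0(k*theta*chi(z)) term and the factor sqrt(OmegaM*(1+z)**3 + OmegaL); the ln(10) term
#accounts for integrating over log10(k) rather than k.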
def domega(kz,theta,cambpars,H0,dndz,Power,OmegaM,OmegaL,evalint=False):
if evalint == True:
#Use this if integrating ln(10)k dlog(k)
#start = time.time()
k=kz[0]
z=kz[1]
bkg = camb.get_background(cambpars)
x = 10**k * (theta/60./180.*np.pi) * bkg.comoving_radial_distance(z)
om = (H0/3.0e5) * 10**(-k) * dimpower(Power,z,10**k) * dndz(z)**2 * j0(x) * (OmegaM*(1+z)**3+OmegaL)**0.5*np.log(10)
#end = time.time()
#print end-start
## USe this if integrating dk
#x = k * theta * bkg.comoving_radial_distance(z)
#om = (H0/3.0e5) * k**-2 * dimpower(Power,z,k) * dndz(z)**2 * j0(x) * (OmegaM*(1+z)**3+OmegaL)**0.5
if evalint == False:
#project the z array onto new axis to output a matrix when evaluating in k and z. This allows
#me to plot a wireframe 3d plot
#k=kz[0]
#z=kz[1]
z = np.array(z)
z = z[:,np.newaxis]
bkg = camb.get_background(cambpars)
x = k * theta * bkg.comoving_radial_distance(z)
om = (H0/3.0e5) * k**-2 * dimpower(Power,z,k) * dndz(z)**2 * j0(x) * (OmegaM*(1+z)**3+OmegaL)**0.5
return om
'''
#parameters if integrate == False
theta = 1./60./180.*np.pi # radians = arcmin/60/180*pi
z = np.linspace(2.91,5.1,100)
k = np.logspace(-3,2,100)
omegaM = (oc+ob)/(H/100.)**2
omegaL= 1.0-omegaM
#Generate the surface under which to integrate
surf = domega(k,z,theta,pars,H,dNdz,PK,omegaM,omegaL)
#Set up meshgrid such that z interates over the columns and k iterates over the row
K,Z = np.meshgrid(k,z)
plt.figure(4)
plt.plot(K[0],surf[0])
plt.xscale('log')
plt.xlabel('k')
plt.ylabel(r'$\delta^2$w')
plt.figure(5)
plt.plot(Z[:,0],surf[:,0])
plt.xscale('linear')
plt.xlabel('z')
plt.ylabel(r'$\delta^2$w')
fig = plt.figure(6)
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(Z,np.log10(K),surf)
ax.set_xlabel('z')
ax.set_ylabel('log10(k)')
ax.set_zlabel(r'$\delta^2$w')
plt.show()
'''
#Integrate using mcmiser
omegaM = (oc+ob)#/(H/100.)**2
omegaL= 1.0-omegaM
print(H, omegaM, omegaL, omegaM+omegaL)
print('begin integration')
s= time.time()
#mcquad(fn,integrand xl=[0.,0.],xu=[1.,1.], lower and upper limits of integration npoints=100000 number of points,args)
newtheta = np.logspace(-1.3,2.5,20)
mclimber = []
for i in range(len(newtheta)):
thetas = newtheta[i]
test = mcmiser(domega, xl=[-4.0,3.41], xu=[1.0,5.1], npoints=1e3, args=(thetas,pars,H,dNdz,PK,omegaM,omegaL,True))
mclimber.append(test[0])
print(mclimber)
e=time.time()
print(e-s)
'''
latest run:
mcmiser(domega, xl=[-3.0,3.45], xu=[2.0,5.0], npoints=1e6, args=(thetas,pars,H,dNdz,PK,omegaM,omegaL,True))
[0.0018763493756045195, 0.0015591052537067829, 0.0013261541719343291, 0.0011664782432483816, 0.0010404309744665909, 0.00091741906231659518, 0.00078667114128277277, 0.00064789973106323866, 0.0005049509301372051, 0.00036797906601997838, 0.00024422862731641093, 0.00014404571216926446, 7.2933582496721974e-05, 2.9223826003039019e-05, 7.8230852216102688e-06, 2.9890491694937377e-06, -2.307437559147607e-06, -9.1226385750823894e-07, -3.9755941765663542e-07, 1.3928717601483434e-08]
141181.353475 s
new candidates, 10**3points
[0.0019430243038571534, 0.0016349397131697643, 0.0015559643190088466, 0.0011592312843893796, 0.001045982603488736, 0.00095526409517522886, 0.00093113611560497887, 0.0005889401612489372, 0.00053144714843557936, 0.00038853567370124737, 0.00025666765171879647, 0.00016544957819145055, 9.8265639739552113e-05, 3.3731282373988794e-05, 8.4752026179249433e-06, 2.2529810568760694e-06, 9.1571876941527249e-06, -7.5021177212707544e-07, 4.2410939833994758e-06, 3.9566810872630768e-06]
shen candidates: newtheta = np.logspace(-1.3,2.5,20)
mcmiser(domega, xl=[-3.0,2.91], xu=[2.0,5.17], npoints=1e6, args=(thetas,pars,H,dNdz,PK,omegaM,omegaL,True))
[0.0018358807532616219, 0.0015034895403743954, 0.0012276746859320596, 0.0010369278499846939, 0.00090159800775010729, 0.00078828444848061288, 0.00067568885621950841, 0.00055784990591065565, 0.00043864978763109299, 0.00032197840731266829, 0.00021621673957789532, 0.0001293993054038773, 6.6678330899456382e-05, 2.7563877682033188e-05, 7.9067731028462201e-06, 2.9283435400988902e-06, -2.2004904973685094e-06, -8.6505180997999433e-07, -3.2480646807619417e-07, 7.9393559384844712e-08]
10^3 points changing to z = 5.1 on the upper limit of the integral
[0.0031072154820804372, 0.0024773982340020656, 0.0022069831939996406, 0.0018249231279954346, 0.0016802887745281424, 0.0014562986726930265, 0.0012651874250202608, 0.0010400616665105426, 0.00080494654363068101, 0.0005982830063258948, 0.00038513590577395919, 0.00026714928016567424, 0.00014338341872873156, 4.9450665812679637e-05, 1.782600514223763e-05, 5.0932795884699636e-06, 1.4925594883012705e-05, -4.9547953675508698e-06, 4.5836346273833925e-06, 3.8992113097562235e-06]
[0.0022529517590044938, 0.0021280436475443168, 0.0018731349354724374, 0.0015033947078234348, 0.0013070363461209996, 0.0011766368407685472, 0.0010140398851336263, 0.00083560744525899085, 0.00065508092975343803, 0.00047837963299693522, 0.0003187878517245088, 0.0001885268241591017, 9.5947162744095565e-05, 3.876918723162803e-05, 1.0394048795964464e-05, 3.7806009976488573e-06, -3.1705205023784285e-06, -1.3079909197198175e-06, -1.1571078800356848e-06, 2.4679274288594045e-07]
'''
```
|
{
"source": "jdtimmerman/tuxedo-backlight-control",
"score": 2
}
|
#### File: share/tuxedo-backlight-control/backlight_control.py
```python
import os
import re
from colors import colors
if os.path.isfile('/etc/tuxedo-backlight-control/colors.conf'):
colors_conf = open('/etc/tuxedo-backlight-control/colors.conf')
for line in colors_conf:
        match = re.search(r'([\da-z_]+)=([\da-f]{6})', line)
if match and len(match.groups()) == 2:
colors[match.groups()[0]] = match.groups()[1]
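# For reference (illustrative example only): a colors.conf line such as "my_red=ff0000"
# (a lowercase name, an '=', then six hex digits) adds or overrides an entry in the colors dict.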
class BacklightControl():
"""
Abstraction on top of tuxedo_keyboard C driver interface for keyboard backlight
"""
DEVICE_PATH = '/sys/devices/platform/tuxedo_keyboard/'
MODULE_PATH = '/sys/module/tuxedo_keyboard'
VERSION = '0.7'
modes = (
'color',
'breathe',
'cycle',
'dance',
'flash',
'random',
'tempo',
'wave'
)
colors = colors
regions = ('left', 'center', 'right', 'extra')
params = ('state', 'mode', 'color_left', 'color_center', 'color_right', 'color_extra')
@staticmethod
def get_device_param(prop):
""" read driver param value directly from '/sys/devices/platform/tuxedo_keyboard/' """
if os.path.isfile(BacklightControl.DEVICE_PATH + prop):
return open(BacklightControl.DEVICE_PATH + prop).read()
return None
@staticmethod
def get_device_color(region):
""" read driver color value directly from '/sys/devices/platform/tuxedo_keyboard/' """
color = BacklightControl.get_device_param('color_' + region)
if color:
try:
index = list(BacklightControl.colors.values()).index(color.strip().upper())
return list(BacklightControl.colors.keys())[index]
except Exception:
return 'Select...'
return None
@staticmethod
def set_device_param(prop, value):
fh = open(BacklightControl.DEVICE_PATH + prop, mode='r+')
fh.write(str(value))
fh.close()
@staticmethod
def set_device_color(region, color):
        if color in BacklightControl.colors.keys():
            mapped_color = BacklightControl.find_color_by_key(color)
            BacklightControl.set_device_param('color_' + region, mapped_color)
@staticmethod
def find_color_by_key(color):
index = list(BacklightControl.colors.keys()).index(color)
return '0x' + list(BacklightControl.colors.values())[index]
@staticmethod
def is_single_color():
""" checks whether all keyboard regions have the same color assigned to them """
is_single = True
last_color = None
for region in BacklightControl.regions:
color = BacklightControl.get_device_color(region)
if last_color not in (None, color):
is_single = False
break
else:
last_color = color
return is_single
@staticmethod
def set_single_color(color):
""" assigns a single color by name to all keyboard regions """
for region in BacklightControl.regions:
mapped_color = BacklightControl.find_color_by_key(color)
BacklightControl.set_device_param('color_' + region, mapped_color)
@staticmethod
def capitalize(label):
""" capitalizes a string """
return label.capitalize()
@property
def state(self):
param = self.get_device_param('state')
if param:
return int(param)
return 1
@state.setter
def state(self, value):
self.set_device_param('state', value)
@property
def mode(self):
param = self.get_device_param('mode')
if param and len(self.modes) > int(param):
return self.modes[int(param)]
return None
@mode.setter
def mode(self, value):
index = self.modes.index(value)
self.set_device_param('mode', index)
@property
def color_left(self):
""" get hex code for color_left """
return self.get_device_color('left')
@color_left.setter
def color_left(self, value):
""" set hex code for color_left, with color name present in colors dict """
self.set_device_param('color_left', self.find_color_by_key(value))
@property
def color_center(self):
""" get hex code for color_center """
return self.get_device_color('center')
@color_center.setter
def color_center(self, value):
""" set hex code for color_center, with color name present in colors dict """
self.set_device_param('color_center', self.find_color_by_key(value))
@property
def color_right(self):
""" get hex code for color_right """
return self.get_device_color('right')
@color_right.setter
def color_right(self, value):
""" set hex code for color_right, with color name present in colors dict """
self.set_device_param('color_right', self.find_color_by_key(value))
@property
def color_extra(self):
""" get hex code for color_extra """
return self.get_device_color('extra')
@color_extra.setter
def color_extra(self, value):
""" set hex code for color_extra, with color name present in colors dict """
self.set_device_param('color_extra', self.find_color_by_key(value))
@staticmethod
def display_modes():
""" return a capitalized item-list of all backlight modes """
return map(BacklightControl.capitalize, BacklightControl.modes)
@staticmethod
def display_colors():
""" return a capitalized item-list of all backlight colors """
return map(BacklightControl.capitalize, BacklightControl.colors.keys())
backlight = BacklightControl()
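# Minimal usage sketch (assumes the tuxedo_keyboard sysfs files above exist and are writable,
# and that 'white' is a key in the colors dict; both are assumptions, not guarantees):
# backlight.state = 1
# backlight.mode = 'breathe'
# backlight.set_single_color('white')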
```
|
{
"source": "JDTN/koalixcrm",
"score": 2
}
|
#### File: koalixcrm/accounting/admin.py
```python
from django import forms
from django.contrib import admin
from django.utils.translation import ugettext as _
from koalixcrm.accounting.views import *
class AccountingPeriodBooking(admin.TabularInline):
model = Booking
extra = 1
show_change_link = True
can_delete = True
classes = ['collapse']
fields = ('fromAccount', 'toAccount', 'description', 'amount', 'bookingDateOnly', 'staff', 'bookingReference',)
readonly_fields = (
'fromAccount', 'toAccount', 'description', 'amount', 'bookingDateOnly', 'staff', 'bookingReference',)
allow_add = True
class OptionBooking(admin.ModelAdmin):
list_display = ('fromAccount', 'toAccount', 'amount', 'bookingDateOnly', 'staff')
fieldsets = ((_('Basic'), {'fields': (
'fromAccount', 'toAccount', 'amount', 'bookingDate', 'staff', 'description', 'bookingReference',
'accountingPeriod')}),)
save_as = True
def save_model(self, request, obj, form, change):
if (change == True):
obj.lastmodifiedby = request.user
else:
obj.lastmodifiedby = request.user
obj.staff = request.user
obj.save()
class AccountForm(forms.ModelForm):
"""AccountForm is used to overwrite the clean method of the
original form and to add an additional checks to the model"""
class Meta:
model = Account
fields = '__all__'
def clean(self):
super(AccountForm, self).clean()
errors = []
if (self.cleaned_data['isopenreliabilitiesaccount']):
openliabilitiesaccount = Account.objects.filter(isopenreliabilitiesaccount=True)
if (self.cleaned_data['accountType'] != "L"):
                errors.append(_('The open liabilities account must be a liabilities account'))
elif openliabilitiesaccount:
                errors.append(_('There may only be one open liabilities account in the system'))
if (self.cleaned_data['isopeninterestaccount']):
openinterestaccounts = Account.objects.filter(isopeninterestaccount=True)
if (self.cleaned_data['accountType'] != "A"):
                errors.append(_('The open interests account must be an asset account'))
elif openinterestaccounts:
                errors.append(_('There may only be one open interests account in the system'))
if (self.cleaned_data['isACustomerPaymentAccount']):
if (self.cleaned_data['accountType'] != "A"):
errors.append(_('A customer payment account must be an asset account'))
if (self.cleaned_data['isProductInventoryActiva']):
if (self.cleaned_data['accountType'] != "A"):
errors.append(_('A product inventory account must be an asset account'))
if len(errors) > 0:
raise forms.ValidationError(errors)
return self.cleaned_data
class OptionAccount(admin.ModelAdmin):
list_display = ('accountNumber', 'accountType', 'title', 'sumOfAllBookings')
list_display_links = ('accountNumber', 'accountType', 'title', 'sumOfAllBookings')
fieldsets = ((_('Basic'), {'fields': (
'accountNumber', 'accountType', 'title', 'description', 'isopenreliabilitiesaccount', 'isopeninterestaccount',
'isProductInventoryActiva', 'isACustomerPaymentAccount')}),)
save_as = True
form = AccountForm
class AccountingPeriodForm(forms.ModelForm):
"""AccountingPeriodForm is used to overwrite the clean method of the
original form and to add an additional check to the model"""
class Meta:
model = AccountingPeriod
fields = '__all__'
def clean(self):
super(AccountingPeriodForm, self).clean()
errors = []
try:
if self.cleaned_data['begin'] > self.cleaned_data['end']:
errors.append(_('The begin date cannot be later than the end date.'))
except KeyError:
errors.append(_('The begin and the end date may not be empty'))
if errors:
raise forms.ValidationError(errors)
return self.cleaned_data
class OptionAccountingPeriod(admin.ModelAdmin):
list_display = ('title', 'begin', 'end')
list_display_links = ('title', 'begin', 'end')
fieldsets = (
(_('Basics'), {
'fields': ('title', 'begin', 'end')
}),
)
inlines = [AccountingPeriodBooking, ]
save_as = True
form = AccountingPeriodForm
def save_formset(self, request, form, formset, change):
instances = formset.save(commit=False)
for instance in instances:
if (change == True):
instance.lastmodifiedby = request.user
else:
instance.lastmodifiedby = request.user
instance.staff = request.user
instance.save()
def createBalanceSheet(self, request, queryset):
for obj in queryset:
response = exportPDF(self, request, obj, "balanceSheet", "/admin/accounting/accountingperiod/")
return response
createBalanceSheet.short_description = _("Create PDF of Balance Sheet")
def createProfitLossStatement(self, request, queryset):
for obj in queryset:
response = exportPDF(self, request, obj, "profitLossStatement", "/admin/accounting/accountingperiod/")
return response
createProfitLossStatement.short_description = _("Create PDF of Profit Loss Statement Sheet")
def exportAllAccounts(self, request, queryset):
for obj in queryset:
response = exportXML(self, request, obj, "allAccount", "/admin/accounting/accountingperiod/")
return response
exportAllAccounts.short_description = _("Create XML of all Accounts")
actions = ['createBalanceSheet', 'createProfitLossStatement', 'exportAllAccounts', ]
class OptionProductCategorie(admin.ModelAdmin):
list_display = ('title', 'profitAccount', 'lossAccount')
list_display_links = ('title', 'profitAccount', 'lossAccount')
fieldsets = (
(_('Basics'), {
'fields': ('title', 'profitAccount', 'lossAccount')
}),
)
save_as = True
admin.site.register(Account, OptionAccount)
admin.site.register(Booking, OptionBooking)
admin.site.register(ProductCategorie, OptionProductCategorie)
admin.site.register(AccountingPeriod, OptionAccountingPeriod)
```
#### File: koalixcrm/accounting/models.py
```python
import os
from datetime import *
from subprocess import check_output
from subprocess import STDOUT
from xml.dom.minidom import Document
from django.conf import settings
from django.core import serializers
from django.db import models
from django.utils.translation import ugettext as _
from koalixcrm import djangoUserExtension
from koalixcrm.crm.exceptions import UserExtensionMissing
from koalixcrm.accounting.const.accountTypeChoices import *
from koalixcrm.accounting.exceptions import NoObjectsToBeSerialzed
from koalixcrm.accounting.exceptions import ProgrammingError
class AccountingPeriod(models.Model):
"""Accounting period repesents the equivalent of the business logic element of a fiscal year
the accounting period is refered in the booking and is used as a supporting object to generate
balance sheets and profit/loss statements"""
title = models.CharField(max_length=200, verbose_name=_("Title")) # For example "Year 2009", "1st Quarter 2009"
begin = models.DateField(verbose_name=_("Begin"))
end = models.DateField(verbose_name=_("End"))
@staticmethod
def getCurrentValidAccountingPeriod():
"""Returns the accounting period that is currently valid. Valid is an accountingPeriod when the current date
lies between begin and end of the accountingPeriod
Args:
no arguments
Returns:
          accountingPeriod (AccountingPeriod)
        Raises:
          NoFeasableAccountingPeriodFound when there is no valid accounting period"""
currentValidAccountingPeriod = None
for accountingPeriod in AccountingPeriod.objects.all():
if accountingPeriod.begin < date.today() and accountingPeriod.end > date.today():
return accountingPeriod
if currentValidAccountingPeriod == None:
raise NoFeasableAccountingPeriodFound()
@staticmethod
def getAllPriorAccountingPeriods(targetAccountingPeriod):
"""Returns the accounting period that is currently valid. Valid is an accountingPeriod when the current date
lies between begin and end of the accountingPeriod
Args:
no arguments
Returns:
accoutingPeriods (List of AccoutingPeriod)
Raises:
NoPriorAccountingPeriodFound when there is no valid accounting Period"""
currentValidAccountingPeriod = None
accountingPeriods = []
for accountingPeriod in AccountingPeriod.objects.all():
if accountingPeriod.end < targetAccountingPeriod.begin:
accountingPeriods.append(accountingPeriod)
if accountingPeriods == []:
raise NoPriorAccountingPeriodFound()
return accountingPeriods
@staticmethod
def createXML(whatToCreate):
"""This method serialize requestd objects into a XML file which is located in the PDF_OUTPUT_ROOT folder.
Args:
whatToCreate (str): Which objects that have to be serialized
Returns:
path_full to the location of the file
Raises:
ProgrammingError will be raised when incorrect objects to be serialized was selected
NoObjectToBeSerialized will be raised when no object can be serialized"""
XMLSerializer = serializers.get_serializer("xml")
xml_serializer = XMLSerializer()
if whatToCreate == "allAccount":
path_fullToOutputFile = os.path.join(settings.PDF_OUTPUT_ROOT, "accounts.xml")
objectsToSerialize = Account.objects.all()
else:
raise ProgrammingError(
_("During XML Export it was not correctly specified which data that has to be exported"))
out = open(os.path.join(settings.PDF_OUTPUT_ROOT, "accounts.xml"), "w")
if objectsToSerialize == '':
            raise NoObjectsToBeSerialzed(_("During XML Export it was not correctly specified which data has to be exported"))
else:
xml_serializer.serialize(objectsToSerialize, stream=out, indent=3)
out.close()
return path_fullToOutputFile
# TODO def importAllAccountsXML(self):
def createPDF(self, raisedbyuser, whatToCreate):
userExtension = djangoUserExtension.models.UserExtension.objects.filter(user=raisedbyuser.id)
if (len(userExtension) == 0):
raise UserExtensionMissing(_("During BalanceSheet PDF Export"))
doc = Document()
if whatToCreate == "balanceSheet":
main = doc.createElement("koalixaccountingbalacesheet")
out = open(os.path.join(settings.PDF_OUTPUT_ROOT, "balancesheet_" + str(self.id) + ".xml"), "wb")
else:
main = doc.createElement("koalixaccountingprofitlossstatement")
out = open(os.path.join(settings.PDF_OUTPUT_ROOT, "profitlossstatement_" + str(self.id) + ".xml"), "wb")
accountingPeriodName = doc.createElement("accountingPeriodName")
accountingPeriodName.appendChild(doc.createTextNode(self.__str__()))
main.appendChild(accountingPeriodName)
organisiationname = doc.createElement("organisiationname")
organisiationname.appendChild(doc.createTextNode(userExtension[0].defaultTemplateSet.organisationname))
main.appendChild(organisiationname)
accountingPeriodTo = doc.createElement("accountingPeriodTo")
accountingPeriodTo.appendChild(doc.createTextNode(self.end.year.__str__()))
main.appendChild(accountingPeriodTo)
accountingPeriodFrom = doc.createElement("accountingPeriodFrom")
accountingPeriodFrom.appendChild(doc.createTextNode(self.begin.year.__str__()))
main.appendChild(accountingPeriodFrom)
headerPicture = doc.createElement("headerpicture")
headerPicture.appendChild(doc.createTextNode(userExtension[0].defaultTemplateSet.logo.path_full))
main.appendChild(headerPicture)
accounts = Account.objects.all()
overallValueBalance = 0
overallValueProfitLoss = 0
for account in list(accounts):
withinAccountingPeriod = account.sumOfAllBookingsWithinAccountingPeriod(self)
beforeAccountingPeriod = account.sumOfAllBookingsBeforeAccountingPeriod(self)
currentValue = withinAccountingPeriod + beforeAccountingPeriod
if (currentValue != 0):
currentAccountElement = doc.createElement("Account")
accountNumber = doc.createElement("AccountNumber")
accountNumber.appendChild(doc.createTextNode(account.accountNumber.__str__()))
beforeAccountingPeriodAccountElement = doc.createElement("beforeAccountingPeriod")
beforeAccountingPeriodAccountElement.appendChild(doc.createTextNode(beforeAccountingPeriod.__str__()))
currentValueElement = doc.createElement("currentValue")
currentValueElement.appendChild(doc.createTextNode(currentValue.__str__()))
accountNameElement = doc.createElement("accountName")
accountNameElement.appendChild(doc.createTextNode(account.title))
currentAccountElement.setAttribute("accountType", account.accountType.__str__())
currentAccountElement.appendChild(accountNumber)
currentAccountElement.appendChild(accountNameElement)
currentAccountElement.appendChild(currentValueElement)
currentAccountElement.appendChild(beforeAccountingPeriodAccountElement)
main.appendChild(currentAccountElement)
if account.accountType == "A":
overallValueBalance = overallValueBalance + currentValue;
if account.accountType == "L":
overallValueBalance = overallValueBalance - currentValue;
if account.accountType == "E":
overallValueProfitLoss = overallValueProfitLoss + currentValue;
if account.accountType == "S":
overallValueProfitLoss = overallValueProfitLoss - currentValue;
totalProfitLoss = doc.createElement("TotalProfitLoss")
totalProfitLoss.appendChild(doc.createTextNode(overallValueProfitLoss.__str__()))
main.appendChild(totalProfitLoss)
totalBalance = doc.createElement("TotalBalance")
totalBalance.appendChild(doc.createTextNode(overallValueBalance.__str__()))
main.appendChild(totalBalance)
doc.appendChild(main)
out.write(doc.toprettyxml(indent=" ", newl="\n", encoding="utf-8"))
out.close()
if whatToCreate == "balanceSheet":
check_output(
[settings.FOP_EXECUTABLE, '-c', userExtension[0].defaultTemplateSet.fopConfigurationFile.path_full, '-xml',
os.path.join(settings.PDF_OUTPUT_ROOT, 'balancesheet_' + str(self.id) + '.xml'), '-xsl',
userExtension[0].defaultTemplateSet.balancesheetXSLFile.xslfile.path_full, '-pdf',
os.path.join(settings.PDF_OUTPUT_ROOT, 'balancesheet_' + str(self.id) + '.pdf')], stderr=STDOUT)
return os.path.join(settings.PDF_OUTPUT_ROOT, "balancesheet_" + str(self.id) + ".pdf")
else:
check_output(
[settings.FOP_EXECUTABLE, '-c', userExtension[0].defaultTemplateSet.fopConfigurationFile.path_full, '-xml',
os.path.join(settings.PDF_OUTPUT_ROOT, 'profitlossstatement_' + str(self.id) + '.xml'), '-xsl',
userExtension[0].defaultTemplateSet.profitLossStatementXSLFile.xslfile.path_full, '-pdf',
os.path.join(settings.PDF_OUTPUT_ROOT, 'profitlossstatement_' + str(self.id) + '.pdf')], stderr=STDOUT)
return os.path.join(settings.PDF_OUTPUT_ROOT, "profitlossstatement_" + str(self.id) + ".pdf")
def __str__(self):
return self.title
    # TODO: def createNewAccountingPeriod() create a new fiscal year
class Meta:
app_label = "accounting"
verbose_name = _('Accounting Period')
verbose_name_plural = _('Accounting Periods')
class Account(models.Model):
accountNumber = models.IntegerField(verbose_name=_("Account Number"))
title = models.CharField(verbose_name=_("Account Title"), max_length=50)
accountType = models.CharField(verbose_name=_("Account Type"), max_length=1, choices=ACCOUNTTYPECHOICES)
description = models.TextField(verbose_name=_("Description"), null=True, blank=True)
isopenreliabilitiesaccount = models.BooleanField(verbose_name=_("Is The Open Liabilities Account"))
isopeninterestaccount = models.BooleanField(verbose_name=_("Is The Open Interests Account"))
isProductInventoryActiva = models.BooleanField(verbose_name=_("Is a Product Inventory Account"))
isACustomerPaymentAccount = models.BooleanField(verbose_name=_("Is a Customer Payment Account"))
def sumOfAllBookings(self):
calculated_sum = self.allBookings(fromAccount=False) - self.allBookings(fromAccount=True)
if self.accountType == 'S' or self.accountType == 'L':
calculated_sum = 0 - calculated_sum
return calculated_sum
sumOfAllBookings.short_description = _("Value");
def sumOfAllBookingsWithinAccountingPeriod(self, accountingPeriod):
calculated_sum = self.allBookingsInAccountingPeriod(fromAccount=False,
accountingPeriod=accountingPeriod) - self.allBookingsInAccountingPeriod(
fromAccount=True, accountingPeriod=accountingPeriod)
if self.accountType == 'S' or self.accountType == 'L':
calculated_sum = 0 - calculated_sum
return calculated_sum
def sumOfAllBookingsBeforeAccountingPeriod(self, currentAccountingPeriod):
accountingPeriods = AccountingPeriod.getAllPriorAccountingPeriods(currentAccountingPeriod)
sum = 0
for accountingPeriod in accountingPeriods:
sum = sum + self.allBookingsInAccountingPeriod(fromAccount=False,
accountingPeriod=accountingPeriod) - self.allBookingsInAccountingPeriod(
fromAccount=True, accountingPeriod=accountingPeriod)
if self.accountType == 'S' or self.accountType == 'L':
sum = 0 - sum
return sum
def allBookings(self, fromAccount):
sum = 0
if fromAccount == True:
bookings = Booking.objects.filter(fromAccount=self.id)
else:
bookings = Booking.objects.filter(toAccount=self.id)
for booking in list(bookings):
sum = sum + booking.amount
return sum
def allBookingsInAccountingPeriod(self, fromAccount, accountingPeriod):
sum = 0
if (fromAccount == True):
bookings = Booking.objects.filter(fromAccount=self.id, accountingPeriod=accountingPeriod.id)
else:
bookings = Booking.objects.filter(toAccount=self.id, accountingPeriod=accountingPeriod.id)
for booking in list(bookings):
sum = sum + booking.amount
return sum
def __str__(self):
return self.accountNumber.__str__() + " " + self.title
class Meta:
app_label = "accounting"
verbose_name = _('Account')
verbose_name_plural = _('Account')
ordering = ['accountNumber']
class ProductCategorie(models.Model):
title = models.CharField(verbose_name=_("Product Categorie Title"), max_length=50)
profitAccount = models.ForeignKey(Account, verbose_name=_("Profit Account"), limit_choices_to={"accountType": "E"},
related_name="db_profit_account")
lossAccount = models.ForeignKey(Account, verbose_name=_("Loss Account"), limit_choices_to={"accountType": "S"},
related_name="db_loss_account")
class Meta:
app_label = "accounting"
verbose_name = _('Product Categorie')
verbose_name_plural = _('Product Categories')
def __str__(self):
return self.title
class Booking(models.Model):
fromAccount = models.ForeignKey(Account, verbose_name=_("From Account"), related_name="db_booking_fromaccount")
toAccount = models.ForeignKey(Account, verbose_name=_("To Account"), related_name="db_booking_toaccount")
amount = models.DecimalField(max_digits=20, decimal_places=2, verbose_name=_("Amount"))
description = models.CharField(verbose_name=_("Description"), max_length=120, null=True, blank=True)
bookingReference = models.ForeignKey('crm.Invoice', verbose_name=_("Booking Reference"), null=True, blank=True)
bookingDate = models.DateTimeField(verbose_name=_("Booking at"))
accountingPeriod = models.ForeignKey(AccountingPeriod, verbose_name=_("AccountingPeriod"))
staff = models.ForeignKey('auth.User', limit_choices_to={'is_staff': True}, blank=True,
verbose_name=_("Reference Staff"), related_name="db_booking_refstaff")
dateofcreation = models.DateTimeField(verbose_name=_("Created at"), auto_now=True)
lastmodification = models.DateTimeField(verbose_name=_("Last modified"), auto_now_add=True)
lastmodifiedby = models.ForeignKey('auth.User', limit_choices_to={'is_staff': True}, blank=True,
verbose_name=_("Last modified by"), related_name="db_booking_lstmodified")
def bookingDateOnly(self):
return self.bookingDate.date()
bookingDateOnly.short_description = _("Date");
def __str__(self):
return self.fromAccount.__str__() + " " + self.toAccount.__str__() + " " + self.amount.__str__()
class Meta:
app_label = "accounting"
verbose_name = _('Booking')
verbose_name_plural = _('Bookings')
```
#### File: koalixcrm/crm/views.py
```python
from os import path
from wsgiref.util import FileWrapper
from subprocess import CalledProcessError
from django.http import Http404
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext as _
from koalixcrm.crm.models import *
def exportPDF(callingModelAdmin, request, whereToCreateFrom, whatToCreate, redirectTo):
"""This method exports PDFs provided by different Models in the crm application
Args:
callingModelAdmin (ModelAdmin): The calling ModelAdmin must be provided for error message response.
request: The request User is to know where to save the error message
whereToCreateFrom (Model): The model from which a PDF should be exported
whatToCreate (str): What document Type that has to be
redirectTo (str): String that describes to where the method sould redirect in case of an error
Returns:
HTTpResponse with a PDF when successful
HTTpResponseRedirect when not successful
Raises:
raises Http404 exception if anything goes wrong"""
try:
pdf = whereToCreateFrom.createPDF(whatToCreate)
response = HttpResponse(FileWrapper(open(pdf, 'rb')), content_type='application/pdf')
response['Content-Length'] = path.getsize(pdf)
except (TemplateSetMissing, UserExtensionMissing, CalledProcessError, UserExtensionEmailAddressMissing, UserExtensionPhoneAddressMissing) as e:
if isinstance(e, UserExtensionMissing):
response = HttpResponseRedirect(redirectTo)
callingModelAdmin.message_user(request, _("User Extension Missing"))
elif isinstance(e, UserExtensionEmailAddressMissing):
response = HttpResponseRedirect(redirectTo)
callingModelAdmin.message_user(request, _("User Extension Email Missing"))
elif isinstance(e, UserExtensionPhoneAddressMissing):
response = HttpResponseRedirect(redirectTo)
callingModelAdmin.message_user(request, _("User Extension Phone Missing"))
elif isinstance(e, TemplateSetMissing):
response = HttpResponseRedirect(redirectTo)
callingModelAdmin.message_user(request, _("Templateset Missing"))
elif type(e) == CalledProcessError:
response = HttpResponseRedirect(redirectTo)
callingModelAdmin.message_user(request, e.output)
else:
raise Http404
return response
def selectaddress(invoiceid):
invoice = Invoice.objects.get(id=invoiceid)
address = invoice.contract
```
|
{
"source": "jdtogni/trader",
"score": 2
}
|
#### File: trader/tests/roc_quarterly.py
```python
from zipline.api import order, order_target_percent, record, symbol, symbols, set_symbol_lookup_date, history, \
get_datetime, schedule_function, date_rules, time_rules, get_open_orders
from numpy import diff, isnan, arange, insert, sort, array
from pandas import rolling_mean, Timestamp, to_datetime
import collections
from datetime import timedelta, date
import pprint
from dttrader import DTPortfolio, DTEODChangeTrader, DTEODChangeTrader2
year = 0
month = 1
my_stock = ''
trade_start = 0
start = 0
end = 0
quiet = True
def initialize(context):
set_symbol_lookup_date('2015-02-08')
context.stocks = symbols(my_stock)
context.prev_cash = 0
schedule_function(handle_end_of_day,
date_rules.every_day(),
time_rules.market_close(minutes=30))
def handle_data(context, data):
today = get_datetime().date()
return
def handle_end_of_day(context, data):
# yesterday + today close price
now = get_datetime()
# price_history = history(2, '1d', 'price')
global trade_start, port, trader
trader.pre_cache()
port.pre_cache(context, data)
for stock in context.stocks:
record(stock.symbol, data[stock].price)
if now.date() < trade_start:
return
port.handle(context, data)
for stock in context.stocks:
# to build stats later
trader.handle(context, data, stock, quiet=quiet)
# Note: this function can be removed if running
# this algorithm on quantopian.com
def analyze(context=None, results=None):
import matplotlib.pyplot as plt
f, (ax1, ax2) = plt.subplots(nrows=2)
# ax1.set_ylabel('Portfolio value (USD)')
pv = results.portfolio_value
pv = (pv / pv[0])
# pv.plot(ax=ax1, color='b')
p = results.get('pct')
p = p.dropna()
p.plot(ax=ax1, color='gray')
bh = results.get(my_stock)
bh = (bh/bh.loc[str(trade_start):str(trade_start+timedelta(days=1)):][0])
bh.plot(ax=ax1, color='g')
v = results.get('value')
v = v.dropna()
v = (v/v[0])
v.plot(ax=ax1, color='r')
trader.plot(results, my_stock, ax2)
port.plot_signals(ax2)
ax2.set_xlim(trade_start, end)
ax1.set_xlim(trade_start, end)
plt.gcf().set_size_inches(18, 8)
print("show")
plt.show()
print("after show")
# print(results)
# Note: this if-block should be removed if running
# this algorithm on quantopian.com
if __name__ == '__main__':
from datetime import datetime
import pytz
from zipline.algorithm import TradingAlgorithm
from zipline.utils.factory import load_from_yahoo
import sys
sys.argv = sys.argv + ("SOXL 2015-3 10000 0.15 0.35 1 0.2 show".split(" "))
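    # Positional arguments appended above (noted for readability): stock symbol, year-quarter,
    # starting cash, buy threshold, sell threshold, buy percentage, sell percentage, optional "show" flag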
my_stock = sys.argv[1]
year = int(sys.argv[2][:4])
quarter = int(sys.argv[2][5:])
month = (quarter - 1) * 3 + 1
    show = True if len(sys.argv) > 8 and sys.argv[8] == "show" else False
port = DTPortfolio(cash=int(sys.argv[3]))
window = 180
# quiet = False
trader = DTEODChangeTrader(buy_threshold=float(sys.argv[4]),
sell_threshold=float(sys.argv[5]),
buy_pct=float(sys.argv[6]),
sell_pct=float(sys.argv[7]),
roc_window=window)
trader.portfolio = port
# Set the simulation start and end dates
# create more data to prime metrics
trade_start = date(year, month, 1)
start = trade_start + timedelta(days=-300)
end = trade_start + timedelta(days=92)
# Load price data from yahoo.
data = load_from_yahoo(stocks=[my_stock], indexes={}, start=start, end=end)
# Create and run the algorithm.
algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data,
identifiers=[my_stock], capital_base=10000)
results = algo.run(data)
port.performance_csv(prefix="%s,%s,%s,%s,%s,%s,%s" % (sys.argv[1], sys.argv[2], sys.argv[3],
sys.argv[4], sys.argv[5], sys.argv[6],
sys.argv[7]))
if show:
analyze(results=results)
```
|
{
"source": "JDTrujillo18/sympy",
"score": 2
}
|
#### File: parsing/tests/test_sympy_parser.py
```python
from sympy.core import Symbol, Function, Float, Rational, Integer, I, Mul, Pow, Eq
from sympy.functions import exp, factorial, sin
from sympy.logic import And
from sympy.series import Limit
from sympy.utilities.pytest import raises
from sympy.parsing.sympy_parser import (
parse_expr, standard_transformations, rationalize, TokenError,
split_symbols, implicit_multiplication, convert_equals_signs,
)
def test_sympy_parser():
x = Symbol('x')
inputs = {
'2*x': 2 * x,
'3.00': Float(3),
'22/7': Rational(22, 7),
'2+3j': 2 + 3*I,
'exp(x)': exp(x),
'x!': factorial(x),
'3.[3]': Rational(10, 3),
'10!': 3628800,
'-(2)': -Integer(2),
'[-1, -2, 3]': [Integer(-1), Integer(-2), Integer(3)],
'Symbol("x").free_symbols': x.free_symbols,
"S('S(3).n(n=3)')": 3.00,
'factorint(12, visual=True)': Mul(
Pow(2, 2, evaluate=False),
Pow(3, 1, evaluate=False),
evaluate=False),
'Limit(sin(x), x, 0, dir="-")': Limit(sin(x), x, 0, dir='-'),
}
for text, result in inputs.items():
assert parse_expr(text) == result
def test_rationalize():
inputs = {
'0.123': Rational(123, 1000)
}
transformations = standard_transformations + (rationalize,)
for text, result in inputs.items():
assert parse_expr(text, transformations=transformations) == result
def test_factorial_fail():
inputs = ['x!!!', 'x!!!!', '(!)']
for text in inputs:
try:
parse_expr(text)
assert False
except TokenError:
assert True
def test_local_dict():
local_dict = {
'my_function': lambda x: x + 2
}
inputs = {
'my_function(2)': Integer(4)
}
for text, result in inputs.items():
assert parse_expr(text, local_dict=local_dict) == result
def test_global_dict():
global_dict = {
'Symbol': Symbol
}
inputs = {
'Q & S': And(Symbol('Q'), Symbol('S'))
}
for text, result in inputs.items():
assert parse_expr(text, global_dict=global_dict) == result
def test_issue_2515():
raises(TokenError, lambda: parse_expr('(()'))
raises(TokenError, lambda: parse_expr('"""'))
def test_issue_7663():
x = Symbol('x')
e = '2*(x+1)'
assert parse_expr(e, evaluate=0) == parse_expr(e, evaluate=False)
def test_issue_10560():
inputs = {
'4*-3' : '(-3)*4',
'-4*3' : '(-4)*3',
}
for text, result in inputs.items():
assert parse_expr(text, evaluate=False) == parse_expr(result, evaluate=False)
def test_issue_10773():
inputs = {
'-10/5': '(-10)/5',
'-10/-5' : '(-10)/(-5)',
}
for text, result in inputs.items():
assert parse_expr(text, evaluate=False) == parse_expr(result, evaluate=False)
def test_split_symbols():
transformations = standard_transformations + \
(split_symbols, implicit_multiplication,)
x = Symbol('x')
y = Symbol('y')
xy = Symbol('xy')
assert parse_expr("xy") == xy
assert parse_expr("xy", transformations=transformations) == x*y
def test_split_symbols_function():
transformations = standard_transformations + \
(split_symbols, implicit_multiplication,)
x = Symbol('x')
y = Symbol('y')
a = Symbol('a')
f = Function('f')
assert parse_expr("ay(x+1)", transformations=transformations) == a*y*(x+1)
assert parse_expr("af(x+1)", transformations=transformations,
local_dict={'f':f}) == a*f(x+1)
def test_match_parentheses_implicit_multiplication():
transformations = standard_transformations + \
(implicit_multiplication,)
raises(TokenError, lambda: parse_expr('(1,2),(3,4]',transformations=transformations))
def test_convert_equals_signs():
transformations = standard_transformations + \
(convert_equals_signs, )
x = Symbol('x')
y = Symbol('y')
assert parse_expr("1*2=x", transformations=transformations) == Eq(2, x)
assert parse_expr("y = x", transformations=transformations) == Eq(y, x)
assert parse_expr("(2*y = x) = False",
transformations=transformations) == Eq(Eq(2*y, x), False)
```
#### File: sympy/polys/subresultants_qq_zz.py
```python
from __future__ import print_function, division
from sympy import (Abs, degree, expand, eye, floor, LC, Matrix, nan, Poly, pprint)
from sympy import (QQ, quo, rem, S, sign, simplify, summation, var, zeros)
def sylvester(f, g, x, method = 1):
'''
The input polynomials f, g are in Z[x] or in Q[x].
Let mx = max( degree(f, x) , degree(g, x) ).
a. If method = 1 (default), computes sylvester1, Sylvester's matrix of 1840
of dimension (m + n) x (m + n). The determinants of properly chosen
submatrices of this matrix (a.k.a. subresultants) can be
used to compute the coefficients of the Euclidean PRS of f, g.
b. If method = 2, computes sylvester2, Sylvester's matrix of 1853
of dimension (2*mx) x (2*mx). The determinants of properly chosen
submatrices of this matrix (a.k.a. ``modified'' subresultants) can be
used to compute the coefficients of the Sturmian PRS of f, g.
Applications of these Matrices can be found in the references below.
Especially, for applications of sylvester2, see the first reference!!
References:
===========
1. <NAME>., <NAME> and <NAME>: ``On a Theorem
by Van Vleck Regarding Sturm Sequences. Serdica Journal of Computing,
Vol. 7, No 4, 101–134, 2013.
2. <NAME>., <NAME> and <NAME>: ``Sturm Sequences
and Modified Subresultant Polynomial Remainder Sequences.''
Serdica Journal of Computing, Vol. 8, No 1, 29–46, 2014.
'''
# obtain degrees of polys
m, n = degree( Poly(f, x), x), degree( Poly(g, x), x)
# Special cases:
# A:: case m = n < 0 (i.e. both polys are 0)
if m == n and n < 0:
return Matrix([])
# B:: case m = n = 0 (i.e. both polys are constants)
if m == n and n == 0:
return Matrix([])
# C:: m == 0 and n < 0 or m < 0 and n == 0
# (i.e. one poly is constant and the other is 0)
if m == 0 and n < 0:
return Matrix([])
elif m < 0 and n == 0:
return Matrix([])
# D:: m >= 1 and n < 0 or m < 0 and n >=1
# (i.e. one poly is of degree >=1 and the other is 0)
if m >= 1 and n < 0:
return Matrix([0])
elif m < 0 and n >= 1:
return Matrix([0])
fp = Poly(f, x).all_coeffs()
gp = Poly(g, x).all_coeffs()
# Sylvester's matrix of 1840 (default; a.k.a. sylvester1)
if method <= 1:
M = zeros(m + n)
k = 0
for i in range(n):
j = k
for coeff in fp:
M[i, j] = coeff
j = j + 1
k = k + 1
k = 0
for i in range(n, m + n):
j = k
for coeff in gp:
M[i, j] = coeff
j = j + 1
k = k + 1
return M
# Sylvester's matrix of 1853 (a.k.a sylvester2)
if method >= 2:
if len(fp) < len(gp):
h = []
for i in range(len(gp) - len(fp)):
h.append(0)
fp[ : 0] = h
else:
h = []
for i in range(len(fp) - len(gp)):
h.append(0)
gp[ : 0] = h
mx = max(m, n)
dim = 2*mx
M = zeros( dim )
k = 0
for i in range( mx ):
j = k
for coeff in fp:
M[2*i, j] = coeff
j = j + 1
j = k
for coeff in gp:
M[2*i + 1, j] = coeff
j = j + 1
k = k + 1
return M
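# A small worked example (added for illustration): with f = x**2 + 1 and g = x + 1,
# sylvester(f, g, x) returns Matrix([[1, 0, 1], [1, 1, 0], [0, 1, 1]]), whose determinant 2
# equals the resultant of f and g.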
def sign_seq(poly_seq, x):
"""
Given a sequence of polynomials poly_seq, it returns
the sequence of signs of the leading coefficients of
the polynomials in poly_seq.
"""
return [sign(LC(poly_seq[i], x)) for i in range(len(poly_seq))]
def bezout(p, q, x, method='bz'):
"""
The input polynomials p, q are in Z[x] or in Q[x]. Let
mx = max( degree(p, x) , degree(q, x) ).
The default option bezout(p, q, x, method='bz') returns Bezout's
symmetric matrix of p and q, of dimensions (mx) x (mx). The
determinant of this matrix is equal to the determinant of sylvester2,
Sylvester's matrix of 1853, whose dimensions are (2*mx) x (2*mx);
however the subresultants of these two matrices may differ.
The other option, bezout(p, q, x, 'prs'), is of interest to us
in this module because it returns a matrix equivalent to sylvester2.
In this case all subresultants of the two matrices are identical.
Both the subresultant polynomial remainder sequence (prs) and
the modified subresultant prs of p and q can be computed by
evaluating determinants of appropriately selected submatrices of
bezout(p, q, x, 'prs') --- one determinant per coefficient of the
remainder polynomials.
The matrices bezout(p, q, x, 'bz') and bezout(p, q, x, 'prs')
are related by the formula
bezout(p, q, x, 'prs') =
backward_eye(deg(p)) * bezout(p, q, x, 'bz') * backward_eye(deg(p)),
where backward_eye() is the backward identity function.
References:
===========
1. G.M.Diaz-Toca,L.Gonzalez-Vega: Various New Expressions for Subresultants
and Their Applications. Appl. Algebra in Engin., Communic. and Comp.,
Vol. 15, 233–266, 2004.
"""
# obtain degrees of polys
m, n = degree( Poly(p, x), x), degree( Poly(q, x), x)
# Special cases:
# A:: case m = n < 0 (i.e. both polys are 0)
if m == n and n < 0:
return Matrix([])
# B:: case m = n = 0 (i.e. both polys are constants)
if m == n and n == 0:
return Matrix([])
# C:: m == 0 and n < 0 or m < 0 and n == 0
# (i.e. one poly is constant and the other is 0)
if m == 0 and n < 0:
return Matrix([])
elif m < 0 and n == 0:
return Matrix([])
# D:: m >= 1 and n < 0 or m < 0 and n >=1
# (i.e. one poly is of degree >=1 and the other is 0)
if m >= 1 and n < 0:
return Matrix([0])
elif m < 0 and n >= 1:
return Matrix([0])
y = var('y')
# expr is 0 when x = y
expr = p * q.subs({x:y}) - p.subs({x:y}) * q
# hence expr is exactly divisible by x - y
poly = Poly( quo(expr, x-y), x, y)
# form Bezout matrix and store them in B as indicated to get
# the LC coefficient of each poly either in the first position
# of each row (method='prs') or in the last (method='bz').
mx = max(m, n)
B = zeros(mx)
for i in range(mx):
for j in range(mx):
if method == 'prs':
B[mx - 1 - i, mx - 1 - j] = poly.nth(i, j)
else:
B[i, j] = poly.nth(i, j)
return B
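# A small worked example (added for illustration): for p = x**2 + 1 and q = x + 1 the quotient
# quo(p*q.subs({x:y}) - p.subs({x:y})*q, x - y) is x*y + x + y - 1, so
# bezout(p, q, x) == Matrix([[-1, 1], [1, 1]]) and bezout(p, q, x, 'prs') == Matrix([[1, 1], [1, -1]]).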
def backward_eye(n):
'''
Returns the backward identity matrix of dimensions n x n.
Needed to "turn" the Bezout matrices
so that the leading coefficients are first.
See docstring of the function bezout(p, q, x, method='bz').
'''
M = eye(n) # identity matrix of order n
for i in range(int(M.rows / 2)):
M.row_swap(0 + i, M.rows - 1 - i)
return M
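# Illustrative example: backward_eye(3) == Matrix([[0, 0, 1], [0, 1, 0], [1, 0, 0]]).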
def process_bezout_output(poly_seq, x):
"""
poly_seq is a polynomial remainder sequence computed either by
subresultants_bezout or by modified_subresultants_bezout.
This function removes from poly_seq all zero polynomials as well
as all those whose degree is equal to the degree of a previous
polynomial in poly_seq, as we scan it from left to right.
"""
L = poly_seq[:] # get a copy of the input sequence
d = degree(L[1], x)
i = 2
while i < len(L):
d_i = degree(L[i], x)
if d_i < 0: # zero poly
L.remove(L[i])
i = i - 1
if d == d_i: # poly degree equals degree of previous poly
L.remove(L[i])
i = i - 1
if d_i >= 0:
d = d_i
i = i + 1
return L
def subresultants_bezout(p, q, x):
"""
The input polynomials p, q are in Z[x] or in Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the subresultant polynomial remainder sequence
of p, q by evaluating determinants of appropriately selected
submatrices of bezout(p, q, x, 'prs'). The dimensions of the
latter are deg(p) x deg(p).
Each coefficient is computed by evaluating the determinant of the
corresponding submatrix of bezout(p, q, x, 'prs').
bezout(p, q, x, 'prs) is used instead of sylvester(p, q, x, 1),
Sylvester's matrix of 1840, because the dimensions of the latter
are (deg(p) + deg(q)) x (deg(p) + deg(q)).
If the subresultant prs is complete, then the output coincides
with the Euclidean sequence of the polynomials p, q.
References:
===========
1. G.M.Diaz-Toca,L.Gonzalez-Vega: Various New Expressions for Subresultants
and Their Applications. Appl. Algebra in Engin., Communic. and Comp.,
Vol. 15, 233–266, 2004.
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
f, g = p, q
n = degF = degree(f, x)
m = degG = degree(g, x)
# make sure proper degrees
if n == 0 and m == 0:
return [f, g]
if n < m:
n, m, degF, degG, f, g = m, n, degG, degF, g, f
if n > 0 and m == 0:
return [f, g]
SR_L = [f, g] # subresultant list
F = LC(f, x)**(degF - degG)
# form the bezout matrix
B = bezout(f, g, x, 'prs')
# pick appropriate submatrices of B
# and form subresultant polys
if degF > degG:
j = 2
if degF == degG:
j = 1
while j <= degF:
M = B[0:j, :]
k, coeff_L = j - 1, []
while k <= degF - 1:
coeff_L.append(M[: ,0 : j].det())
if k < degF - 1:
M.col_swap(j - 1, k + 1)
k = k + 1
# apply Theorem 2.1 in the paper by Toca & Vega 2004
# to get correct signs
SR_L.append((int((-1)**(j*(j-1)/2)) * Poly(coeff_L, x) / F).as_expr())
j = j + 1
return process_bezout_output(SR_L, x)
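# A minimal usage sketch with arbitrary example polynomials (degree(p, x) >=
# degree(q, x), as assumed above); the prs here is complete, so per the
# docstring the output agrees with the Euclidean sequence of p, q.
#
#   >>> x = var('x')
#   >>> subresultants_bezout(x**3 - 7*x + 7, 3*x**2 - 7, x)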
def modified_subresultants_bezout(p, q, x):
"""
The input polynomials p, q are in Z[x] or in Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the modified subresultant polynomial remainder sequence
of p, q by evaluating determinants of appropriately selected
submatrices of bezout(p, q, x, 'prs'). The dimensions of the
latter are deg(p) x deg(p).
Each coefficient is computed by evaluating the determinant of the
corresponding submatrix of bezout(p, q, x, 'prs').
bezout(p, q, x, 'prs') is used instead of sylvester(p, q, x, 2),
Sylvester's matrix of 1853, because the dimensions of the latter
are 2*deg(p) x 2*deg(p).
If the modified subresultant prs is complete, and LC( p ) > 0, the output
coincides with the (generalized) Sturm's sequence of the polynomials p, q.
References:
===========
1. <NAME>., <NAME> and <NAME>: ``Sturm Sequences
and Modified Subresultant Polynomial Remainder Sequences.''
Serdica Journal of Computing, Vol. 8, No 1, 29–46, 2014.
2. G. M. Diaz-Toca, L. Gonzalez-Vega: Various New Expressions for Subresultants
and Their Applications. Appl. Algebra in Engin., Communic. and Comp.,
Vol. 15, 233–266, 2004.
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
f, g = p, q
n = degF = degree(f, x)
m = degG = degree(g, x)
# make sure proper degrees
if n == 0 and m == 0:
return [f, g]
if n < m:
n, m, degF, degG, f, g = m, n, degG, degF, g, f
if n > 0 and m == 0:
return [f, g]
SR_L = [f, g] # subresultant list
# form the bezout matrix
B = bezout(f, g, x, 'prs')
# pick appropriate submatrices of B
# and form subresultant polys
if degF > degG:
j = 2
if degF == degG:
j = 1
while j <= degF:
M = B[0:j, :]
k, coeff_L = j - 1, []
while k <= degF - 1:
coeff_L.append(M[: ,0 : j].det())
if k < degF - 1:
M.col_swap(j - 1, k + 1)
k = k + 1
## Theorem 2.1 in the paper by Toca & Vega 2004 is _not needed_
## in this case since
## the bezout matrix is equivalent to sylvester2
SR_L.append(( Poly(coeff_L, x)).as_expr())
j = j + 1
return process_bezout_output(SR_L, x)
def sturm_pg(p, q, x, method=0):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the (generalized) Sturm sequence of p and q in Z[x] or Q[x].
If q = diff(p, x, 1) it is the usual Sturm sequence.
A. If method == 0, default, the remainder coefficients of the sequence
are (in absolute value) ``modified'' subresultants, which for non-monic
polynomials are greater than the coefficients of the corresponding
subresultants by the factor Abs(LC(p)**( deg(p)- deg(q))).
B. If method == 1, the remainder coefficients of the sequence are (in
absolute value) subresultants, which for non-monic polynomials are
smaller than the coefficients of the corresponding ``modified''
subresultants by the factor Abs(LC(p)**( deg(p)- deg(q))).
If the Sturm sequence is complete, method=0 and LC( p ) > 0, the coefficients
of the polynomials in the sequence are ``modified'' subresultants.
That is, they are determinants of appropriately selected submatrices of
sylvester2, Sylvester's matrix of 1853. In this case the Sturm sequence
coincides with the ``modified'' subresultant prs, of the polynomials
p, q.
If the Sturm sequence is incomplete and method=0 then the signs of the
coefficients of the polynomials in the sequence may differ from the signs
of the coefficients of the corresponding polynomials in the ``modified''
subresultant prs; however, the absolute values are the same.
To compute the coefficients, no determinant evaluation takes place. Instead,
polynomial divisions in Q[x] are performed, using the function rem(p, q, x);
the coefficients of the remainders computed this way become (``modified'')
subresultants with the help of the Pell-Gordon Theorem of 1917.
See also the function euclid_pg(p, q, x).
References:
===========
1. <NAME>., <NAME>. The Modified Remainders Obtained in Finding
the Highest Common Factor of Two Polynomials. Annals of Mathematics,
Second Series, 18 (1917), No. 4, 188–193.
2. <NAME>., <NAME> and <NAME>: ``Sturm Sequences
and Modified Subresultant Polynomial Remainder Sequences.''
Serdica Journal of Computing, Vol. 8, No 1, 29–46, 2014.
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
# make sure proper degrees
d0 = degree(p, x)
d1 = degree(q, x)
if d0 == 0 and d1 == 0:
return [p, q]
if d1 > d0:
d0, d1 = d1, d0
p, q = q, p
if d0 > 0 and d1 == 0:
return [p,q]
# make sure LC(p) > 0
flag = 0
if LC(p,x) < 0:
flag = 1
p = -p
q = -q
# initialize
lcf = LC(p, x)**(d0 - d1) # lcf * subr = modified subr
a0, a1 = p, q # the input polys
sturm_seq = [a0, a1] # the output list
del0 = d0 - d1 # degree difference
rho1 = LC(a1, x) # leading coeff of a1
exp_deg = d1 - 1 # expected degree of a2
a2 = - rem(a0, a1, domain=QQ) # first remainder
rho2 = LC(a2,x) # leading coeff of a2
d2 = degree(a2, x) # actual degree of a2
deg_diff_new = exp_deg - d2 # expected - actual degree
del1 = d1 - d2 # degree difference
# mul_fac is the factor by which a2 is multiplied to
# get integer coefficients
mul_fac_old = rho1**(del0 + del1 - deg_diff_new)
# append accordingly
if method == 0:
sturm_seq.append( simplify(lcf * a2 * Abs(mul_fac_old)))
else:
sturm_seq.append( simplify( a2 * Abs(mul_fac_old)))
# main loop
deg_diff_old = deg_diff_new
while d2 > 0:
a0, a1, d0, d1 = a1, a2, d1, d2 # update polys and degrees
del0 = del1 # update degree difference
exp_deg = d1 - 1 # new expected degree
a2 = - rem(a0, a1, domain=QQ) # new remainder
rho3 = LC(a2, x) # leading coeff of a2
d2 = degree(a2, x) # actual degree of a2
deg_diff_new = exp_deg - d2 # expected - actual degree
del1 = d1 - d2 # degree difference
# take into consideration the power
# rho1**deg_diff_old that was "left out"
expo_old = deg_diff_old # rho1 raised to this power
expo_new = del0 + del1 - deg_diff_new # rho2 raised to this power
# update variables and append
mul_fac_new = rho2**(expo_new) * rho1**(expo_old) * mul_fac_old
deg_diff_old, mul_fac_old = deg_diff_new, mul_fac_new
rho1, rho2 = rho2, rho3
if method == 0:
sturm_seq.append( simplify(lcf * a2 * Abs(mul_fac_old)))
else:
sturm_seq.append( simplify( a2 * Abs(mul_fac_old)))
if flag: # change the sign of the sequence
sturm_seq = [-i for i in sturm_seq]
# gcd is of degree > 0 ?
m = len(sturm_seq)
if sturm_seq[m - 1] == nan or sturm_seq[m - 1] == 0:
sturm_seq.pop(m - 1)
return sturm_seq
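# A minimal usage sketch with an arbitrary example polynomial: taking q equal
# to the derivative of p gives the usual Sturm sequence; method=0 scales the
# remainders to ``modified'' subresultants, method=1 leaves them as
# subresultants (see the docstring above).
#
#   >>> x = var('x')
#   >>> p = x**3 - 7*x + 7
#   >>> sturm_pg(p, 3*x**2 - 7, x)       # 3*x**2 - 7 is diff(p, x)
#   >>> sturm_pg(p, 3*x**2 - 7, x, 1)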
def sturm_q(p, q, x):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the (generalized) Sturm sequence of p and q in Q[x].
Polynomial divisions in Q[x] are performed, using the function rem(p, q, x).
The coefficients of the polynomials in the Sturm sequence can be uniquely
determined from the corresponding coefficients of the polynomials found
either in:
(a) the ``modified'' subresultant prs, (references 1, 2)
or in
(b) the subresultant prs (reference 3).
References:
===========
1. <NAME>., <NAME>. The Modified Remainders Obtained in Finding
the Highest Common Factor of Two Polynomials. Annals of Mathematics,
Second Series, 18 (1917), No. 4, 188–193.
2. <NAME>., <NAME> and <NAME>: ``Sturm Sequences
and Modified Subresultant Polynomial Remainder Sequences.''
Serdica Journal of Computing, Vol. 8, No 1, 29–46, 2014.
3. <NAME>., <NAME> and <NAME>: ``A Basic Result
on the Theory of Subresultants.'' Submitted for publication.
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
# make sure proper degrees
d0 = degree(p, x)
d1 = degree(q, x)
if d0 == 0 and d1 == 0:
return [p, q]
if d1 > d0:
d0, d1 = d1, d0
p, q = q, p
if d0 > 0 and d1 == 0:
return [p,q]
# make sure LC(p) > 0
flag = 0
if LC(p,x) < 0:
flag = 1
p = -p
q = -q
# initialize
a0, a1 = p, q # the input polys
sturm_seq = [a0, a1] # the output list
a2 = -rem(a0, a1, domain=QQ) # first remainder
d2 = degree(a2, x) # degree of a2
sturm_seq.append( a2 )
# main loop
while d2 > 0:
a0, a1, d0, d1 = a1, a2, d1, d2 # update polys and degrees
a2 = -rem(a0, a1, domain=QQ) # new remainder
d2 = degree(a2, x) # actual degree of a2
sturm_seq.append( a2 )
if flag: # change the sign of the sequence
sturm_seq = [-i for i in sturm_seq]
# gcd is of degree > 0 ?
m = len(sturm_seq)
if sturm_seq[m - 1] == nan or sturm_seq[m - 1] == 0:
sturm_seq.pop(m - 1)
return sturm_seq
def sturm_amv(p, q, x, method=0):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the (generalized) Sturm sequence of p and q in Z[x] or Q[x].
If q = diff(p, x, 1) it is the usual Sturm sequence.
A. If method == 0, default, the remainder coefficients of the
sequence are (in absolute value) ``modified'' subresultants, which
for non-monic polynomials are greater than the coefficients of the
corresponding subresultants by the factor Abs(LC(p)**( deg(p)- deg(q))).
B. If method == 1, the remainder coefficients of the sequence are (in
absolute value) subresultants, which for non-monic polynomials are
smaller than the coefficients of the corresponding ``modified''
subresultants by the factor Abs( LC(p)**( deg(p)- deg(q)) ).
If the Sturm sequence is complete, method=0 and LC( p ) > 0, then the
coefficients of the polynomials in the sequence are ``modified'' subresultants.
That is, they are determinants of appropriately selected submatrices of
sylvester2, Sylvester's matrix of 1853. In this case the Sturm sequence
coincides with the ``modified'' subresultant prs, of the polynomials
p, q.
If the Sturm sequence is incomplete and method=0 then the signs of the
coefficients of the polynomials in the sequence may differ from the signs
of the coefficients of the corresponding polynomials in the ``modified''
subresultant prs; however, the absolute values are the same.
To compute the coefficients, no determinant evaluation takes place.
Instead, we first compute the euclidean sequence of p and q using
euclid_amv(p, q, x) and then: (a) change the signs of the remainders in the
Euclidean sequence according to the pattern "-, -, +, +, -, -, +, +,..."
(see Lemma 1 in the 1st reference or Theorem 3 in the 2nd reference)
and (b) if method=0, assuming deg(p) > deg(q), we multiply the remainder
coefficients of the Euclidean sequence times the factor
Abs( LC(p)**( deg(p)- deg(q)) ) to make them modified subresultants.
See also the function sturm_pg(p, q, x).
References:
===========
1. <NAME>., <NAME> and <NAME>: ``A Basic Result
on the Theory of Subresultants.'' Submitted for publication.
2. <NAME>., <NAME> and <NAME>: ``On the Remainders
Obtained in Finding the Greatest Common Divisor of Two Polynomials.'' Serdica
Journal of Computing, to appear.
3. <NAME>., <NAME> and <NAME>: ``Subresultant Polynomial
Remainder Sequences Obtained by Polynomial Divisions in Q[x] or in Z[x].''
Submitted for publication.
"""
# compute the euclidean sequence
prs = euclid_amv(p, q, x)
# defensive
if prs == [] or len(prs) == 2:
return prs
# the coefficients in prs are subresultants and hence are smaller
# than the corresponding modified subresultants by the factor
# Abs( LC(prs[0])**( deg(prs[0]) - deg(prs[1])) ); Theorem 2, 2nd reference.
lcf = Abs( LC(prs[0])**( degree(prs[0], x) - degree(prs[1], x) ) )
# the signs of the first two polys in the sequence stay the same
sturm_seq = [prs[0], prs[1]]
# change the signs according to "-, -, +, +, -, -, +, +,..."
# and multiply times lcf if needed
flag = 0
m = len(prs)
i = 2
while i <= m-1:
if flag == 0:
sturm_seq.append( - prs[i] )
i = i + 1
if i == m:
break
sturm_seq.append( - prs[i] )
i = i + 1
flag = 1
elif flag == 1:
sturm_seq.append( prs[i] )
i = i + 1
if i == m:
break
sturm_seq.append( prs[i] )
i = i + 1
flag = 0
# subresultants or modified subresultants?
if method == 0 and lcf > 1:
aux_seq = [sturm_seq[0], sturm_seq[1]]
for i in range(2, m):
aux_seq.append(simplify(sturm_seq[i] * lcf ))
sturm_seq = aux_seq
return sturm_seq
def euclid_pg(p, q, x):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the Euclidean sequence of p and q in Z[x] or Q[x].
If the Euclidean sequence is complete the coefficients of the polynomials
in the sequence are subresultants. That is, they are determinants of
appropriately selected submatrices of sylvester1, Sylvester's matrix of 1840.
In this case the Euclidean sequence coincides with the subresultant prs
of the polynomials p, q.
If the Euclidean sequence is incomplete the signs of the coefficients of the
polynomials in the sequence may differ from the signs of the coefficients of
the corresponding polynomials in the subresultant prs; however, the absolute
values are the same.
To compute the Euclidean sequence, no determinant evaluation takes place.
We first compute the (generalized) Sturm sequence of p and q using
sturm_pg(p, q, x, 1), in which case the coefficients are (in absolute value)
equal to subresultants. Then we change the signs of the remainders in the
Sturm sequence according to the pattern "-, -, +, +, -, -, +, +,..." ;
see Lemma 1 in the 1st reference or Theorem 3 in the 2nd reference as well as
the function sturm_pg(p, q, x).
References:
===========
1. <NAME>., <NAME> and <NAME>: ``A Basic Result
on the Theory of Subresultants.'' Submitted for publication.
2. <NAME>., <NAME> and <NAME>: ``On the Remainders
Obtained in Finding the Greatest Common Divisor of Two Polynomials.'' Serdica
Journal of Computing, to appear.
3. <NAME>., <NAME> and <NAME>: ``Subresultant Polynomial
Remainder Sequences Obtained by Polynomial Divisions in Q[x] or in Z[x].''
Submitted for publication.
"""
# compute the sturmian sequence using the Pell-Gordon (or AMV) theorem
# with the coefficients in the prs being (in absolute value) subresultants
prs = sturm_pg(p, q, x, 1) ## any other method would do
# defensive
if prs == [] or len(prs) == 2:
return prs
# the signs of the first two polys in the sequence stay the same
euclid_seq = [prs[0], prs[1]]
# change the signs according to "-, -, +, +, -, -, +, +,..."
flag = 0
m = len(prs)
i = 2
while i <= m-1:
if flag == 0:
euclid_seq.append(- prs[i] )
i = i + 1
if i == m:
break
euclid_seq.append(- prs[i] )
i = i + 1
flag = 1
elif flag == 1:
euclid_seq.append(prs[i] )
i = i + 1
if i == m:
break
euclid_seq.append(prs[i] )
i = i + 1
flag = 0
return euclid_seq
def euclid_q(p, q, x):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the Euclidean sequence of p and q in Q[x].
Polynomial divisions in Q[x] are performed, using the function rem(p, q, x).
The coefficients of the polynomials in the Euclidean sequence can be uniquely
determined from the corresponding coefficients of the polynomials found
either in:
(a) the ``modified'' subresultant polynomial remainder sequence,
(references 1, 2)
or in
(b) the subresultant polynomial remainder sequence (reference 3).
References:
===========
1. <NAME>., <NAME>. The Modified Remainders Obtained in Finding
the Highest Common Factor of Two Polynomials. Annals of Mathematics,
Second Series, 18 (1917), No. 4, 188–193.
2. <NAME>., <NAME> and <NAME>: ``Sturm Sequences
and Modified Subresultant Polynomial Remainder Sequences.''
Serdica Journal of Computing, Vol. 8, No 1, 29–46, 2014.
3. <NAME>, <NAME>., <NAME> and <NAME>: ``A Basic Result
on the Theory of Subresultants.'' Submitted for publication.
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
# make sure proper degrees
d0 = degree(p, x)
d1 = degree(q, x)
if d0 == 0 and d1 == 0:
return [p, q]
if d1 > d0:
d0, d1 = d1, d0
p, q = q, p
if d0 > 0 and d1 == 0:
return [p,q]
# make sure LC(p) > 0
flag = 0
if LC(p,x) < 0:
flag = 1
p = -p
q = -q
# initialize
a0, a1 = p, q # the input polys
euclid_seq = [a0, a1] # the output list
a2 = rem(a0, a1, domain=QQ) # first remainder
d2 = degree(a2, x) # degree of a2
euclid_seq.append( a2 )
# main loop
while d2 > 0:
a0, a1, d0, d1 = a1, a2, d1, d2 # update polys and degrees
a2 = rem(a0, a1, domain=QQ) # new remainder
d2 = degree(a2, x) # actual degree of a2
euclid_seq.append( a2 )
if flag: # change the sign of the sequence
euclid_seq = [-i for i in euclid_seq]
# gcd is of degree > 0 ?
m = len(euclid_seq)
if euclid_seq[m - 1] == nan or euclid_seq[m - 1] == 0:
euclid_seq.pop(m - 1)
return euclid_seq
def euclid_amv(f, g, x):
"""
f, g are polynomials in Z[x] or Q[x]. It is assumed
that degree(f, x) >= degree(g, x).
Computes the Euclidean sequence of f and g in Z[x] or Q[x].
If the Euclidean sequence is complete the coefficients of the polynomials
in the sequence are subresultants. That is, they are determinants of
appropriately selected submatrices of sylvester1, Sylvester's matrix of 1840.
In this case the Euclidean sequence coincides with the subresultant prs
of the polynomials f, g.
If the Euclidean sequence is incomplete the signs of the coefficients of the
polynomials in the sequence may differ from the signs of the coefficients of
the corresponding polynomials in the subresultant prs; however, the absolute
values are the same.
To compute the coefficients, no determinant evaluation takes place.
Instead, polynomial divisions in Z[x] or Q[x] are performed, using
the function rem_z(f, g, x); the coefficients of the remainders
computed this way become subresultants with the help of the
Collins-Brown-Traub formula for coefficient reduction.
References:
===========
1. <NAME>., <NAME> and <NAME>: ``A Basic Result
on the Theory of Subresultants.'' Submitted for publication.
2. <NAME>., <NAME> and <NAME>: ``Subresultant Polynomial
Remainder Sequences Obtained by Polynomial Divisions in Q[x] or in Z[x].''
Submitted for publication.
"""
# make sure neither f nor g is 0
if f == 0 or g == 0:
return [f, g]
# make sure proper degrees
d0 = degree(f, x)
d1 = degree(g, x)
if d0 == 0 and d1 == 0:
return [f, g]
if d1 > d0:
d0, d1 = d1, d0
f, g = g, f
if d0 > 0 and d1 == 0:
return [f, g]
# initialize
a0 = f
a1 = g
euclid_seq = [a0, a1]
deg_dif_p1, c = degree(a0, x) - degree(a1, x) + 1, -1
# compute the first polynomial of the prs
i = 1
a2 = rem_z(a0, a1, x) / Abs( (-1)**deg_dif_p1 ) # first remainder
euclid_seq.append( a2 )
d2 = degree(a2, x) # actual degree of a2
# main loop
while d2 >= 1:
a0, a1, d0, d1 = a1, a2, d1, d2 # update polys and degrees
i += 1
sigma0 = -LC(a0)
c = (sigma0**(deg_dif_p1 - 1)) / (c**(deg_dif_p1 - 2))
deg_dif_p1 = degree(a0, x) - d2 + 1
a2 = rem_z(a0, a1, x) / Abs( ((c**(deg_dif_p1 - 1)) * sigma0) )
euclid_seq.append( a2 )
d2 = degree(a2, x) # actual degree of a2
# gcd is of degree > 0 ?
m = len(euclid_seq)
if euclid_seq[m - 1] == nan or euclid_seq[m - 1] == 0:
euclid_seq.pop(m - 1)
return euclid_seq
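# A minimal usage sketch with arbitrary integer polynomials: since divisions
# go through rem_z() and the Collins-Brown-Traub reduction, the remainders
# stay in Z[x].
#
#   >>> x = var('x')
#   >>> euclid_amv(x**3 - 7*x + 7, 3*x**2 - 7, x)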
def modified_subresultants_pg(p, q, x):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the ``modified'' subresultant prs of p and q in Z[x] or Q[x];
the coefficients of the polynomials in the sequence are
``modified'' subresultants. That is, they are determinants of appropriately
selected submatrices of sylvester2, Sylvester's matrix of 1853.
To compute the coefficients, no determinant evaluation takes place. Instead,
polynomial divisions in Q[x] are performed, using the function rem(p, q, x);
the coefficients of the remainders computed this way become ``modified''
subresultants with the help of the Pell-Gordon Theorem of 1917.
If the ``modified'' subresultant prs is complete, and LC( p ) > 0, it coincides
with the (generalized) Sturm sequence of the polynomials p, q.
References:
===========
1. <NAME>., <NAME>. The Modified Remainders Obtained in Finding
the Highest Common Factor of Two Polynomials. Annals of Mathematics,
Second Series, 18 (1917), No. 4, 188–193.
2. <NAME>., <NAME> and <NAME>: ``Sturm Sequences
and Modified Subresultant Polynomial Remainder Sequences.''
Serdica Journal of Computing, Vol. 8, No 1, 29–46, 2014.
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
# make sure proper degrees
d0 = degree(p,x)
d1 = degree(q,x)
if d0 == 0 and d1 == 0:
return [p, q]
if d1 > d0:
d0, d1 = d1, d0
p, q = q, p
if d0 > 0 and d1 == 0:
return [p,q]
# initialize
k = var('k') # index in summation formula
u_list = [] # of elements (-1)**u_i
subres_l = [p, q] # mod. subr. prs output list
a0, a1 = p, q # the input polys
del0 = d0 - d1 # degree difference
degdif = del0 # save it
rho_1 = LC(a0) # lead. coeff (a0)
# Initialize Pell-Gordon variables
rho_list_minus_1 = sign( LC(a0, x)) # sign of LC(a0)
rho1 = LC(a1, x) # leading coeff of a1
rho_list = [ sign(rho1)] # of signs
p_list = [del0] # of degree differences
u = summation(k, (k, 1, p_list[0])) # value of u
u_list.append(u) # of u values
v = sum(p_list) # v value
# first remainder
exp_deg = d1 - 1 # expected degree of a2
a2 = - rem(a0, a1, domain=QQ) # first remainder
rho2 = LC(a2, x) # leading coeff of a2
d2 = degree(a2, x) # actual degree of a2
deg_diff_new = exp_deg - d2 # expected - actual degree
del1 = d1 - d2 # degree difference
# mul_fac is the factor by which a2 is multiplied to
# get integer coefficients
mul_fac_old = rho1**(del0 + del1 - deg_diff_new)
# update Pell-Gordon variables
p_list.append(1 + deg_diff_new) # deg_diff_new is 0 for complete seq
# apply Pell-Gordon formula (7) in second reference
num = 1 # numerator of fraction
for k in range(len(u_list)):
num *= (-1)**u_list[k]
num = num * (-1)**v
# denominator depends on complete / incomplete seq
if deg_diff_new == 0: # complete seq
den = 1
for k in range(len(rho_list)):
den *= rho_list[k]**(p_list[k] + p_list[k + 1])
den = den * rho_list_minus_1
else: # incomplete seq
den = 1
for k in range(len(rho_list)-1):
den *= rho_list[k]**(p_list[k] + p_list[k + 1])
den = den * rho_list_minus_1
expo = (p_list[len(rho_list) - 1] + p_list[len(rho_list)] - deg_diff_new)
den = den * rho_list[len(rho_list) - 1]**expo
# the sign of the determinant depends on sign(num / den)
if sign(num / den) > 0:
subres_l.append( simplify(rho_1**degdif*a2* Abs(mul_fac_old) ) )
else:
subres_l.append(- simplify(rho_1**degdif*a2* Abs(mul_fac_old) ) )
# update Pell-Gordon variables
k = var('k')
rho_list.append( sign(rho2))
u = summation(k, (k, 1, p_list[len(p_list) - 1]))
u_list.append(u)
v = sum(p_list)
deg_diff_old=deg_diff_new
# main loop
while d2 > 0:
a0, a1, d0, d1 = a1, a2, d1, d2 # update polys and degrees
del0 = del1 # update degree difference
exp_deg = d1 - 1 # new expected degree
a2 = - rem(a0, a1, domain=QQ) # new remainder
rho3 = LC(a2, x) # leading coeff of a2
d2 = degree(a2, x) # actual degree of a2
deg_diff_new = exp_deg - d2 # expected - actual degree
del1 = d1 - d2 # degree difference
# take into consideration the power
# rho1**deg_diff_old that was "left out"
expo_old = deg_diff_old # rho1 raised to this power
expo_new = del0 + del1 - deg_diff_new # rho2 raised to this power
mul_fac_new = rho2**(expo_new) * rho1**(expo_old) * mul_fac_old
# update variables
deg_diff_old, mul_fac_old = deg_diff_new, mul_fac_new
rho1, rho2 = rho2, rho3
# update Pell-Gordon variables
p_list.append(1 + deg_diff_new) # deg_diff_new is 0 for complete seq
# apply Pell-Gordon formula (7) in second reference
num = 1 # numerator
for k in range(len(u_list)):
num *= (-1)**u_list[k]
num = num * (-1)**v
# denominator depends on complete / incomplete seq
if deg_diff_new == 0: # complete seq
den = 1
for k in range(len(rho_list)):
den *= rho_list[k]**(p_list[k] + p_list[k + 1])
den = den * rho_list_minus_1
else: # incomplete seq
den = 1
for k in range(len(rho_list)-1):
den *= rho_list[k]**(p_list[k] + p_list[k + 1])
den = den * rho_list_minus_1
expo = (p_list[len(rho_list) - 1] + p_list[len(rho_list)] - deg_diff_new)
den = den * rho_list[len(rho_list) - 1]**expo
# the sign of the determinant depends on sign(num / den)
if sign(num / den) > 0:
subres_l.append( simplify(rho_1**degdif*a2* Abs(mul_fac_old) ) )
else:
subres_l.append(- simplify(rho_1**degdif*a2* Abs(mul_fac_old) ) )
# update Pell-Gordon variables
k = var('k')
rho_list.append( sign(rho2))
u = summation(k, (k, 1, p_list[len(p_list) - 1]))
u_list.append(u)
v = sum(p_list)
# gcd is of degree > 0 ?
m = len(subres_l)
if subres_l[m - 1] == nan or subres_l[m - 1] == 0:
subres_l.pop(m - 1)
# LC( p ) < 0
m = len(subres_l) # list may be shorter now due to deg(gcd ) > 0
if LC( p ) < 0:
aux_seq = [subres_l[0], subres_l[1]]
for i in range(2, m):
aux_seq.append(simplify(subres_l[i] * (-1) ))
subres_l = aux_seq
return subres_l
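# A minimal usage sketch with arbitrary example polynomials; this prs is
# complete and LC(p) > 0, so per the docstring the result should agree with
# the (generalized) Sturm sequence sturm_pg(p, q, x).
#
#   >>> x = var('x')
#   >>> p, q = x**3 - 7*x + 7, 3*x**2 - 7
#   >>> modified_subresultants_pg(p, q, x)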
def subresultants_pg(p, q, x):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the subresultant prs of p and q in Z[x] or Q[x], from
the modified subresultant prs of p and q.
The coefficients of the polynomials in these two sequences differ only
in sign and the factor LC(p)**( deg(p)- deg(q)) as stated in
Theorem 2 of the reference.
The coefficients of the polynomials in the output sequence are
subresultants. That is, they are determinants of appropriately
selected submatrices of sylvester1, Sylvester's matrix of 1840.
If the subresultant prs is complete, then it coincides with the
Euclidean sequence of the polynomials p, q.
References:
===========
1. Akritas, <NAME>., <NAME> and <NAME>: ‘‘On the Remainders
Obtained in Finding the Greatest Common Divisor of Two Polynomials.''
Serdica Journal of Computing, to appear.
"""
# compute the modified subresultant prs
lst = modified_subresultants_pg(p,q,x) ## any other method would do
# defensive
if lst == [] or len(lst) == 2:
return lst
# the coefficients in lst are modified subresultants and, hence, are
# greater than those of the corresponding subresultants by the factor
# LC(lst[0])**( deg(lst[0]) - deg(lst[1])); see Theorem 2 in reference.
lcf = LC(lst[0])**( degree(lst[0], x) - degree(lst[1], x) )
# Initialize the subresultant prs list
subr_seq = [lst[0], lst[1]]
# compute the degree sequences m_i and j_i of Theorem 2 in reference.
deg_seq = [degree(Poly(poly, x), x) for poly in lst]
deg = deg_seq[0]
deg_seq_s = deg_seq[1:-1]
m_seq = [m-1 for m in deg_seq_s]
j_seq = [deg - m for m in m_seq]
# compute the AMV factors of Theorem 2 in reference.
fact = [(-1)**( j*(j-1)/S(2) ) for j in j_seq]
# shortened list without the first two polys
lst_s = lst[2:]
# poly lst_s[k] is multiplied times fact[k], divided by lcf
# and appended to the subresultant prs list
m = len(fact)
for k in range(m):
if sign(fact[k]) == -1:
subr_seq.append(-lst_s[k] / lcf)
else:
subr_seq.append(lst_s[k] / lcf)
return subr_seq
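# A minimal usage sketch with an arbitrary non-monic example, where the
# conversion factor LC(p)**(deg(p) - deg(q)) stated above is not 1.
#
#   >>> x = var('x')
#   >>> subresultants_pg(2*x**3 - 7*x + 7, 9*x**2 - 7, x)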
def subresultants_amv_q(p, q, x):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the subresultant prs of p and q in Q[x];
the coefficients of the polynomials in the sequence are
subresultants. That is, they are determinants of appropriately
selected submatrices of sylvester1, Sylvester's matrix of 1840.
To compute the coefficients, no determinant evaluation takes place.
Instead, polynomial divisions in Q[x] are performed, using the
function rem(p, q, x); the coefficients of the remainders
computed this way become subresultants with the help of the
Akritas-Malaschonok-Vigklas Theorem of 2015.
If the subresultant prs is complete, then it coincides with the
Euclidean sequence of the polynomials p, q.
References:
===========
1. <NAME>., <NAME> and <NAME>: ``A Basic Result
on the Theory of Subresultants.'' Submitted for publication.
2. <NAME>., <NAME> and <NAME>: ``Subresultant Polynomial
Remainder Sequences Obtained by Polynomial Divisions in Q[x] or in Z[x].''
Submitted for publication.
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
# make sure proper degrees
d0 = degree(p, x)
d1 = degree(q, x)
if d0 == 0 and d1 == 0:
return [p, q]
if d1 > d0:
d0, d1 = d1, d0
p, q = q, p
if d0 > 0 and d1 == 0:
return [p, q]
# initialize
i, s = 0, 0 # counters for remainders & odd elements
p_odd_index_sum = 0 # contains the sum of p_1, p_3, etc
subres_l = [p, q] # subresultant prs output list
a0, a1 = p, q # the input polys
sigma1 = LC(a1, x) # leading coeff of a1
p0 = d0 - d1 # degree difference
if p0 % 2 == 1:
s += 1
phi = floor( (s + 1) / 2 )
mul_fac = 1
d2 = d1
# main loop
while d2 > 0:
i += 1
a2 = rem(a0, a1, domain= QQ) # new remainder
if i == 1:
sigma2 = LC(a2, x)
else:
sigma3 = LC(a2, x)
sigma1, sigma2 = sigma2, sigma3
d2 = degree(a2, x)
p1 = d1 - d2
psi = i + phi + p_odd_index_sum
# new mul_fac
mul_fac = sigma1**(p0 + 1) * mul_fac
## compute the sign of the first fraction in formula (9) of the paper
# numerator
num = (-1)**psi
# denominator
den = sign(mul_fac)
# the sign of the determinant depends on sign( num / den ) != 0
if sign(num / den) > 0:
subres_l.append( simplify(expand(a2* Abs(mul_fac))))
else:
subres_l.append(- simplify(expand(a2* Abs(mul_fac))))
## bring into mul_fac the missing power of sigma if there was a degree gap
if p1 - 1 > 0:
mul_fac = mul_fac * sigma1**(p1 - 1)
# update AMV variables
a0, a1, d0, d1 = a1, a2, d1, d2
p0 = p1
if p0 % 2 ==1:
s += 1
phi = floor( (s + 1) / 2 )
if i%2 == 1:
p_odd_index_sum += p0 # p_i has odd index
# gcd is of degree > 0 ?
m = len(subres_l)
if subres_l[m - 1] == nan or subres_l[m - 1] == 0:
subres_l.pop(m - 1)
return subres_l
def compute_sign(base, expo):
'''
base != 0 and expo >= 0 are integers;
returns the sign of base**expo without
evaluating the power itself!
'''
sb = sign(base)
if sb == 1:
return 1
pe = expo % 2
if pe == 0:
return -sb
else:
return sb
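# A minimal sketch of compute_sign(): the sign of base**expo is read off the
# parity of expo for a negative base, without forming the power.
#
#   >>> compute_sign(-2, 3)   # (-2)**3 is negative
#   -1
#   >>> compute_sign(-2, 4)   # (-2)**4 is positive
#   1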
def rem_z(p, q, x):
'''
Intended mainly for p, q polynomials in Z[x] so that,
on dividing p by q, the remainder will also be in Z[x]. (However,
it also works fine for polynomials in Q[x].) It is assumed
that degree(p, x) >= degree(q, x).
It premultiplies p by the _absolute_ value of the leading coefficient
of q, raised to the power deg(p) - deg(q) + 1 and then performs
polynomial division in Q[x], using the function rem(p, q, x).
By contrast the function prem(p, q, x) does _not_ use the absolute
value of the leading coefficient of q.
This results not only in ``messing up the signs'' of the Euclidean and
Sturmian prs's as mentioned in the second reference,
but also in violation of the main results of the first and third
references --- Theorem 4 and Theorem 1 respectively. Theorems 4 and 1
establish a one-to-one correspondence between the Euclidean and the
Sturmian prs of p, q, on one hand, and the subresultant prs of p, q,
on the other.
References:
===========
1. <NAME>., <NAME> and <NAME>: ``On the Remainders
Obtained in Finding the Greatest Common Divisor of Two Polynomials.''
Serdica Journal of Computing, to appear.
2. http://planetMath.org/sturmstheorem
3. <NAME>., <NAME> and <NAME>: ``A Basic Result on
the Theory of Subresultants.'' Submitted for publication.
'''
delta = (degree(p, x) - degree(q, x) + 1)
return rem(Abs(LC(q, x))**delta * p, q, x)
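# A minimal sketch with arbitrary example polynomials: the premultiplication
# by Abs(LC(q))**(deg(p) - deg(q) + 1) keeps the remainder in Z[x], whereas
# rem(p, q, x, domain=QQ) would give -14*x/3 + 7 for the same inputs.
#
#   >>> x = var('x')
#   >>> rem_z(x**3 - 7*x + 7, 3*x**2 - 7, x)
#   -42*x + 63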
def quo_z(p, q, x):
"""
Intended mainly for p, q polynomials in Z[x] so that,
on dividing p by q, the quotient will also be in Z[x]. (However,
it also works fine for polynomials in Q[x].) It is assumed
that degree(p, x) >= degree(q, x).
It premultiplies p by the _absolute_ value of the leading coefficient
of q, raised to the power deg(p) - deg(q) + 1 and then performs
polynomial division in Q[x], using the function quo(p, q, x).
By contrast the function pquo(p, q, x) does _not_ use the absolute
value of the leading coefficient of q.
See also function rem_z(p, q, x) for additional comments and references.
"""
delta = (degree(p, x) - degree(q, x) + 1)
return quo(Abs(LC(q, x))**delta * p, q, x)
def subresultants_amv(f, g, x):
"""
f, g are polynomials in Z[x] or Q[x]. It is assumed
that degree(f, x) >= degree(g, x).
Computes the subresultant prs of f and g in Z[x] or Q[x];
the coefficients of the polynomials in the sequence are
subresultants. That is, they are determinants of appropriately
selected submatrices of sylvester1, Sylvester's matrix of 1840.
To compute the coefficients, no determinant evaluation takes place.
Instead, polynomial divisions in Z[x] or Q[x] are performed, using
the function rem_z(f, g, x); the coefficients of the remainders
computed this way become subresultants with the help of the
Akritas-Malaschonok-Vigklas Theorem of 2015 and the Collins-Brown-
Traub formula for coefficient reduction.
If the subresultant prs is complete, then it coincides with the
Euclidean sequence of the polynomials f, g.
References:
===========
1. <NAME>., <NAME> and <NAME>: ``A Basic Result
on the Theory of Subresultants.'' Submitted for publication.
2. <NAME>., <NAME> and <NAME>: ``Subresultant Polynomial
Remainder Sequences Obtained by Polynomial Divisions in Q[x] or in Z[x].''
Submitted for publication.
"""
# make sure neither f nor g is 0
if f == 0 or g == 0:
return [f, g]
# make sure proper degrees
d0 = degree(f, x)
d1 = degree(g, x)
if d0 == 0 and d1 == 0:
return [f, g]
if d1 > d0:
d0, d1 = d1, d0
f, g = g, f
if d0 > 0 and d1 == 0:
return [f, g]
# initialize
a0 = f
a1 = g
subres_l = [a0, a1]
deg_dif_p1, c = degree(a0, x) - degree(a1, x) + 1, -1
# initialize AMV variables
sigma1 = LC(a1, x) # leading coeff of a1
i, s = 0, 0 # counters for remainders & odd elements
p_odd_index_sum = 0 # contains the sum of p_1, p_3, etc
p0 = deg_dif_p1 - 1
if p0 % 2 == 1:
s += 1
phi = floor( (s + 1) / 2 )
# compute the first polynomial of the prs
i += 1
a2 = rem_z(a0, a1, x) / Abs( (-1)**deg_dif_p1 ) # first remainder
sigma2 = LC(a2, x) # leading coeff of a2
d2 = degree(a2, x) # actual degree of a2
p1 = d1 - d2 # degree difference
# sgn_den is the sign of the denominator of the 1st fraction of (9);
# together with num below it fixes the sign given to a2
sgn_den = compute_sign( sigma1, p0 + 1 )
## compute sign of the 1st fraction in formula (9) of the paper
# numerator
psi = i + phi + p_odd_index_sum
num = (-1)**psi
# denominator
den = sgn_den
# the sign of the determinant depends on sign(num / den) != 0
if sign(num / den) > 0:
subres_l.append( a2 )
else:
subres_l.append( -a2 )
# update AMV variable
if p1 % 2 == 1:
s += 1
# bring in the missing power of sigma if there was gap
if p1 - 1 > 0:
sgn_den = sgn_den * compute_sign( sigma1, p1 - 1 )
# main loop
while d2 >= 1:
phi = floor( (s + 1) / 2 )
if i%2 == 1:
p_odd_index_sum += p1 # p_i has odd index
a0, a1, d0, d1 = a1, a2, d1, d2 # update polys and degrees
p0 = p1 # update degree difference
i += 1
sigma0 = -LC(a0)
c = (sigma0**(deg_dif_p1 - 1)) / (c**(deg_dif_p1 - 2))
deg_dif_p1 = degree(a0, x) - d2 + 1
a2 = rem_z(a0, a1, x) / Abs( ((c**(deg_dif_p1 - 1)) * sigma0) )
sigma3 = LC(a2, x) # leading coeff of a2
d2 = degree(a2, x) # actual degree of a2
p1 = d1 - d2 # degree difference
psi = i + phi + p_odd_index_sum
# update variables
sigma1, sigma2 = sigma2, sigma3
# new sgn_den
sgn_den = compute_sign( sigma1, p0 + 1 ) * sgn_den
# compute the sign of the first fraction in formula (9) of the paper
# numerator
num = (-1)**psi
# denominator
den = sgn_den
# the sign of the determinant depends on sign( num / den ) != 0
if sign(num / den) > 0:
subres_l.append( a2 )
else:
subres_l.append( -a2 )
# update AMV variable
if p1 % 2 ==1:
s += 1
# bring in the missing power of sigma if there was gap
if p1 - 1 > 0:
sgn_den = sgn_den * compute_sign( sigma1, p1 - 1 )
# gcd is of degree > 0 ?
m = len(subres_l)
if subres_l[m - 1] == nan or subres_l[m - 1] == 0:
subres_l.pop(m - 1)
return subres_l
def modified_subresultants_amv(p, q, x):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the modified subresultant prs of p and q in Z[x] or Q[x],
from the subresultant prs of p and q.
The coefficients of the polynomials in the two sequences differ only
in sign and the factor LC(p)**( deg(p)- deg(q)) as stated in
Theorem 2 of the reference.
The coefficients of the polynomials in the output sequence are
modified subresultants. That is, they are determinants of appropriately
selected submatrices of sylvester2, Sylvester's matrix of 1853.
If the modified subresultant prs is complete, and LC( p ) > 0, it coincides
with the (generalized) Sturm's sequence of the polynomials p, q.
References:
===========
1. <NAME>., <NAME> and <NAME>: ‘‘On the Remainders
Obtained in Finding the Greatest Common Divisor of Two Polynomials.''
Serdica Journal of Computing, to appear.
"""
# compute the subresultant prs
lst = subresultants_amv(p,q,x) ## any other method would do
# defensive
if lst == [] or len(lst) == 2:
return lst
# the coefficients in lst are subresultants and, hence, smaller than those
# of the corresponding modified subresultants by the factor
# LC(lst[0])**( deg(lst[0]) - deg(lst[1])); see Theorem 2.
lcf = LC(lst[0])**( degree(lst[0], x) - degree(lst[1], x) )
# Initialize the modified subresultant prs list
subr_seq = [lst[0], lst[1]]
# compute the degree sequences m_i and j_i of Theorem 2
deg_seq = [degree(Poly(poly, x), x) for poly in lst]
deg = deg_seq[0]
deg_seq_s = deg_seq[1:-1]
m_seq = [m-1 for m in deg_seq_s]
j_seq = [deg - m for m in m_seq]
# compute the AMV factors of Theorem 2
fact = [(-1)**( j*(j-1)/S(2) ) for j in j_seq]
# shortened list without the first two polys
lst_s = lst[2:]
# poly lst_s[k] is multiplied times fact[k] and times lcf
# and appended to the subresultant prs list
m = len(fact)
for k in range(m):
if sign(fact[k]) == -1:
subr_seq.append( simplify(-lst_s[k] * lcf) )
else:
subr_seq.append( simplify(lst_s[k] * lcf) )
return subr_seq
def correct_sign(deg_f, deg_g, s1, rdel, cdel):
"""
Used in various subresultant prs algorithms.
Evaluates the determinant, (a.k.a. subresultant) of a properly selected
submatrix of s1, Sylvester's matrix of 1840, to get the correct sign
and value of the leading coefficient of a given polynomial remainder.
deg_f, deg_g are the degrees of the original polynomials p, q for which the
matrix s1 = sylvester(p, q, x, 1) was constructed.
rdel denotes the expected degree of the remainder; it is the number of
rows to be deleted from each group of rows in s1 as described in the
reference below.
cdel denotes the expected degree minus the actual degree of the remainder;
it is the number of columns to be deleted --- starting with the last column
forming the square matrix --- from the matrix resulting after the row deletions.
References:
===========
<NAME>., <NAME> and <NAME>: ``Sturm Sequences
and Modified Subresultant Polynomial Remainder Sequences.''
Serdica Journal of Computing, Vol. 8, No 1, 29–46, 2014.
"""
M = s1[:, :] # copy of matrix s1
# eliminate rdel rows from the first deg_g rows
for i in range(M.rows - deg_f - 1, M.rows - deg_f - rdel - 1, -1):
M.row_del(i)
# eliminate rdel rows from the last deg_f rows
for i in range(M.rows - 1, M.rows - rdel - 1, -1):
M.row_del(i)
# eliminate cdel columns
for i in range(cdel):
M.col_del(M.rows - 1)
# define submatrix
Md = M[:, 0: M.rows]
return Md.det()
def subresultants_rem(p, q, x):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the subresultant prs of p and q in Z[x] or Q[x];
the coefficients of the polynomials in the sequence are
subresultants. That is, they are determinants of appropriately
selected submatrices of sylvester1, Sylvester's matrix of 1840.
To compute the coefficients polynomial divisions in Q[x] are
performed, using the function rem(p, q, x). The coefficients
of the remainders computed this way become subresultants by evaluating
one subresultant per remainder --- that of the leading coefficient.
This way we obtain the correct sign and value of the leading coefficient
of the remainder and we easily ``force'' the rest of the coefficients
to become subresultants.
If the subresultant prs is complete, then it coincides with the
Euclidean sequence of the polynomials p, q.
References:
===========
1. <NAME>.:``Three New Methods for Computing Subresultant
Polynomial Remainder Sequences (PRS’s).'' Serdica Journal of Computing,
to appear.
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
# make sure proper degrees
f, g = p, q
n = deg_f = degree(f, x)
m = deg_g = degree(g, x)
if n == 0 and m == 0:
return [f, g]
if n < m:
n, m, deg_f, deg_g, f, g = m, n, deg_g, deg_f, g, f
if n > 0 and m == 0:
return [f, g]
# initialize
s1 = sylvester(f, g, x, 1)
sr_list = [f, g] # subresultant list
# main loop
while deg_g > 0:
r = rem(p, q, x)
d = degree(r, x)
if d < 0:
return sr_list
# make coefficients subresultants evaluating ONE determinant
exp_deg = deg_g - 1 # expected degree
sign_value = correct_sign(n, m, s1, exp_deg, exp_deg - d)
r = simplify((r / LC(r, x)) * sign_value)
# append poly with subresultant coeffs
sr_list.append(r)
# update degrees and polys
deg_f, deg_g = deg_g, d
p, q = q, r
# gcd is of degree > 0 ?
m = len(sr_list)
if sr_list[m - 1] == nan or sr_list[m - 1] == 0:
sr_list.pop(m - 1)
return sr_list
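# A minimal usage sketch with arbitrary example polynomials; only one
# determinant (via correct_sign) is evaluated per remainder.
#
#   >>> x = var('x')
#   >>> subresultants_rem(x**3 - 7*x + 7, 3*x**2 - 7, x)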
def pivot(M, i, j):
'''
M is a matrix, and M[i, j] specifies the pivot element.
All elements below M[i, j], in the j-th column, will
be zeroed, if they are not already 0, according to
Dodgson-Bareiss' integer preserving transformations.
References:
===========
1. <NAME>.: ``A new method for computing polynomial greatest
common divisors and polynomial remainder sequences.''
Numerische Mathematik 52, 119-127, 1988.
2. <NAME>., <NAME> and <NAME>: ``On a Theorem
by Van Vleck Regarding Sturm Sequences.''
Serdica Journal of Computing, 7, No 4, 101–134, 2013.
'''
ma = M[:, :] # copy of matrix M
rs = ma.rows # No. of rows
cs = ma.cols # No. of cols
for r in range(i+1, rs):
if ma[r, j] != 0:
for c in range(j + 1, cs):
ma[r, c] = ma[i, j] * ma[r, c] - ma[i, c] * ma[r, j]
ma[r, j] = 0
return ma
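# A minimal sketch of one Dodgson-Bareiss step on a small integer matrix:
# the entry below the pivot M[0, 0] is zeroed using integer arithmetic only.
#
#   >>> pivot(Matrix([[2, 1], [4, 3]]), 0, 0)
#   Matrix([
#   [2, 1],
#   [0, 2]])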
def rotate_r(L, k):
'''
Rotates right by k. L is a row of a matrix or a list.
'''
ll = list(L)
if ll == []:
return []
for i in range(k):
el = ll.pop(len(ll) - 1)
ll.insert(0, el)
return ll if type(L) is list else Matrix([ll])
def rotate_l(L, k):
'''
Rotates left by k. L is a row of a matrix or a list.
'''
ll = list(L)
if ll == []:
return []
for i in range(k):
el = ll.pop(0)
ll.insert(len(ll), el)
return ll if type(L) is list else Matrix([ll])
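# A minimal sketch of the rotation helpers on a plain list (matrix rows are
# returned as 1-row matrices instead).
#
#   >>> rotate_r([1, 2, 3, 4], 1)
#   [4, 1, 2, 3]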
def row2poly(row, deg, x):
'''
Converts the row of a matrix to a poly of degree deg and variable x.
Some entries at the beginning and/or at the end of the row may be zero.
'''
k = 0
poly = []
leng = len(row)
# find the beginning of the poly ; i.e. the first
# non-zero element of the row
while row[k] == 0:
k = k + 1
# append the next deg + 1 elements to poly
for j in range( deg + 1):
if k + j <= leng:
poly.append(row[k + j])
return Poly(poly, x)
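# A minimal sketch of row2poly(): leading zeros are skipped and the next
# deg + 1 entries become the coefficients, highest degree first.
#
#   >>> x = var('x')
#   >>> row2poly([0, 0, 2, -3, 1], 2, x)
#   Poly(2*x**2 - 3*x + 1, x, domain='ZZ')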
def create_ma(deg_f, deg_g, row1, row2, col_num):
'''
Creates a ``small'' matrix M to be triangularized.
deg_f, deg_g are the degrees of the dividend and of the
divisor polynomials respectively; deg_f >= deg_g is required.
The coefficients of the dividend poly are the elements
in row2 and those of the divisor poly are the elements
in row1.
col_num defines the number of columns of the matrix M.
'''
if deg_g - deg_f >= 1:
print('Reverse degrees')
return
m = zeros(deg_f - deg_g + 2, col_num)
for i in range(deg_f - deg_g + 1):
m[i, :] = rotate_r(row1, i)
m[deg_f - deg_g + 1, :] = row2
return m
def find_degree(M, deg_f):
'''
Finds the degree of the poly corresponding (after triangularization)
to the _last_ row of the ``small'' matrix M, created by create_ma().
deg_f is the degree of the dividend poly.
If _last_ row is all 0's returns None.
'''
j = deg_f
for i in range(0, M.cols):
if M[M.rows - 1, i] == 0:
j = j - 1
else:
return j if j >= 0 else 0
def final_touches(s2, r, deg_g):
"""
s2 is sylvester2, r is the row pointer in s2,
deg_g is the degree of the poly last inserted in s2.
After a gcd of degree > 0 has been found with Van Vleck's
method, and was inserted into s2, if its last term is not
in the last column of s2, then it is inserted as many
times as needed, rotated right by one each time, until
the condition is met.
"""
R = s2.row(r-1)
# find the first non zero term
for i in range(s2.cols):
if R[0,i] == 0:
continue
else:
break
# missing rows until last term is in last column
mr = s2.cols - (i + deg_g + 1)
# insert them by replacing the existing entries in the row
i = 0
while mr != 0 and r + i < s2.rows :
s2[r + i, : ] = rotate_r(R, i + 1)
i += 1
mr -= 1
return s2
def subresultants_vv(p, q, x, method = 0):
"""
p, q are polynomials in Z[x] (intended) or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the subresultant prs of p, q by triangularizing,
in Z[x] or in Q[x], all the smaller matrices encountered in the
process of triangularizing sylvester2, Sylvester's matrix of 1853;
see references 1 and 2 for Van Vleck's method. With each remainder,
sylvester2 gets updated and is prepared to be printed if requested.
If sylvester2 has small dimensions and you want to see the final,
triangularized matrix use this version with method=1; otherwise,
use either this version with method=0 (default) or the faster version,
subresultants_vv_2(p, q, x), where sylvester2 is used implicitly.
Sylvester's matrix sylvester1 is also used to compute one
subresultant per remainder; namely, that of the leading
coefficient, in order to obtain the correct sign and to
force the remainder coefficients to become subresultants.
If the subresultant prs is complete, then it coincides with the
Euclidean sequence of the polynomials p, q.
If the final, triangularized matrix s2 is printed, then:
(a) if deg(p) - deg(q) > 1 or deg( gcd(p, q) ) > 0, several
of the last rows in s2 will remain unprocessed;
(b) if deg(p) - deg(q) == 0, p will not appear in the final matrix.
References:
===========
1. <NAME>.: ``A new method for computing polynomial greatest
common divisors and polynomial remainder sequences.''
Numerische Mathematik 52, 119-127, 1988.
2. <NAME>., <NAME> and <NAME>: ``On a Theorem
by Van Vleck Regarding Sturm Sequences.''
Serdica Journal of Computing, 7, No 4, 101–134, 2013.
3. <NAME>.:``Three New Methods for Computing Subresultant
Polynomial Remainder Sequences (PRS’s).'' Serdica Journal of Computing,
to appear.
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
# make sure proper degrees
f, g = p, q
n = deg_f = degree(f, x)
m = deg_g = degree(g, x)
if n == 0 and m == 0:
return [f, g]
if n < m:
n, m, deg_f, deg_g, f, g = m, n, deg_g, deg_f, g, f
if n > 0 and m == 0:
return [f, g]
# initialize
s1 = sylvester(f, g, x, 1)
s2 = sylvester(f, g, x, 2)
sr_list = [f, g]
col_num = 2 * n # columns in s2
# make two rows (row0, row1) of poly coefficients
row0 = Poly(f, x, domain = QQ).all_coeffs()
leng0 = len(row0)
for i in range(col_num - leng0):
row0.append(0)
row0 = Matrix([row0])
row1 = Poly(g,x, domain = QQ).all_coeffs()
leng1 = len(row1)
for i in range(col_num - leng1):
row1.append(0)
row1 = Matrix([row1])
# row pointer for deg_f - deg_g == 1; may be reset below
r = 2
# modify first rows of s2 matrix depending on poly degrees
if deg_f - deg_g > 1:
r = 1
# replacing the existing entries in the rows of s2,
# insert row0 (deg_f - deg_g - 1) times, rotated each time
for i in range(deg_f - deg_g - 1):
s2[r + i, : ] = rotate_r(row0, i + 1)
r = r + deg_f - deg_g - 1
# insert row1 (deg_f - deg_g) times, rotated each time
for i in range(deg_f - deg_g):
s2[r + i, : ] = rotate_r(row1, r + i)
r = r + deg_f - deg_g
if deg_f - deg_g == 0:
r = 0
# main loop
while deg_g > 0:
# create a small matrix M, and triangularize it;
M = create_ma(deg_f, deg_g, row1, row0, col_num)
# will need only the first and last rows of M
for i in range(deg_f - deg_g + 1):
M1 = pivot(M, i, i)
M = M1[:, :]
# treat last row of M as poly; find its degree
d = find_degree(M, deg_f)
if d is None:
break
exp_deg = deg_g - 1
# evaluate one determinant & make coefficients subresultants
sign_value = correct_sign(n, m, s1, exp_deg, exp_deg - d)
poly = row2poly(M[M.rows - 1, :], d, x)
temp2 = LC(poly, x)
poly = simplify((poly / temp2) * sign_value)
# update s2 by inserting first row of M as needed
row0 = M[0, :]
for i in range(deg_g - d):
s2[r + i, :] = rotate_r(row0, r + i)
r = r + deg_g - d
# update s2 by inserting last row of M as needed
row1 = rotate_l(M[M.rows - 1, :], deg_f - d)
row1 = (row1 / temp2) * sign_value
for i in range(deg_g - d):
s2[r + i, :] = rotate_r(row1, r + i)
r = r + deg_g - d
# update degrees
deg_f, deg_g = deg_g, d
# append poly with subresultant coeffs
sr_list.append(poly)
# final touches to print the s2 matrix
if method != 0 and s2.rows > 2:
s2 = final_touches(s2, r, deg_g)
pprint(s2)
elif method != 0 and s2.rows == 2:
s2[1, :] = rotate_r(s2.row(1), 1)
pprint(s2)
return sr_list
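# A minimal usage sketch with arbitrary example polynomials: method=1 also
# pretty-prints the triangularized sylvester2 matrix, the default method=0
# returns only the prs list.
#
#   >>> x = var('x')
#   >>> subresultants_vv(x**3 - 7*x + 7, 3*x**2 - 7, x, 1)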
def subresultants_vv_2(p, q, x):
"""
p, q are polynomials in Z[x] (intended) or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the subresultant prs of p, q by triangularizing,
in Z[x] or in Q[x], all the smaller matrices encountered in the
process of triangularizing sylvester2, Sylvester's matrix of 1853;
see references 1 and 2 for Van Vleck's method.
If the sylvester2 matrix has big dimensions use this version,
where sylvester2 is used implicitly. If you want to see the final,
triangularized matrix sylvester2, then use the first version,
subresultants_vv(p, q, x, 1).
sylvester1, Sylvester's matrix of 1840, is also used to compute
one subresultant per remainder; namely, that of the leading
coefficient, in order to obtain the correct sign and to
``force'' the remainder coefficients to become subresultants.
If the subresultant prs is complete, then it coincides with the
Euclidean sequence of the polynomials p, q.
References:
===========
1. <NAME>.: ``A new method for computing polynomial greatest
common divisors and polynomial remainder sequences.''
Numerische Mathematik 52, 119-127, 1988.
2. <NAME>., <NAME> and <NAME>klas: ``On a Theorem
by Van Vleck Regarding Sturm Sequences.''
Serdica Journal of Computing, 7, No 4, 101–134, 2013.
3. <NAME>.:``Three New Methods for Computing Subresultant
Polynomial Remainder Sequences (PRS’s).'' Serdica Journal of Computing,
to appear.
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
# make sure proper degrees
f, g = p, q
n = deg_f = degree(f, x)
m = deg_g = degree(g, x)
if n == 0 and m == 0:
return [f, g]
if n < m:
n, m, deg_f, deg_g, f, g = m, n, deg_g, deg_f, g, f
if n > 0 and m == 0:
return [f, g]
# initialize
s1 = sylvester(f, g, x, 1)
sr_list = [f, g] # subresultant list
col_num = 2 * n # columns in sylvester2
# make two rows (row0, row1) of poly coefficients
row0 = Poly(f, x, domain = QQ).all_coeffs()
leng0 = len(row0)
for i in range(col_num - leng0):
row0.append(0)
row0 = Matrix([row0])
row1 = Poly(g,x, domain = QQ).all_coeffs()
leng1 = len(row1)
for i in range(col_num - leng1):
row1.append(0)
row1 = Matrix([row1])
# main loop
while deg_g > 0:
# create a small matrix M, and triangularize it
M = create_ma(deg_f, deg_g, row1, row0, col_num)
for i in range(deg_f - deg_g + 1):
M1 = pivot(M, i, i)
M = M1[:, :]
# treat last row of M as poly; find its degree
d = find_degree(M, deg_f)
if d is None:
return sr_list
exp_deg = deg_g - 1
# evaluate one determinant & make coefficients subresultants
sign_value = correct_sign(n, m, s1, exp_deg, exp_deg - d)
poly = row2poly(M[M.rows - 1, :], d, x)
poly = simplify((poly / LC(poly, x)) * sign_value)
# append poly with subresultant coeffs
sr_list.append(poly)
# update degrees and rows
deg_f, deg_g = deg_g, d
row0 = row1
row1 = Poly(poly, x, domain = QQ).all_coeffs()
leng1 = len(row1)
for i in range(col_num - leng1):
row1.append(0)
row1 = Matrix([row1])
return sr_list
```
#### File: polys/tests/test_monomials.py
```python
from sympy.polys.monomials import (
itermonomials, monomial_count,
monomial_mul, monomial_div,
monomial_gcd, monomial_lcm,
monomial_max, monomial_min,
monomial_divides,
Monomial,
)
from sympy.polys.polyerrors import ExactQuotientFailed
from sympy.abc import a, b, c, x, y, z
from sympy.core import S
from sympy.utilities.pytest import raises
def test_monomials():
assert itermonomials([], 0) == {S(1)}
assert itermonomials([], 1) == {S(1)}
assert itermonomials([], 2) == {S(1)}
assert itermonomials([], 3) == {S(1)}
assert itermonomials([x], 0) == {S(1)}
assert itermonomials([x], 1) == {S(1), x}
assert itermonomials([x], 2) == {S(1), x, x**2}
assert itermonomials([x], 3) == {S(1), x, x**2, x**3}
assert itermonomials([x, y], 0) == {S(1)}
assert itermonomials([x, y], 1) == {S(1), x, y}
assert itermonomials([x, y], 2) == {S(1), x, y, x**2, y**2, x*y}
assert itermonomials([x, y], 3) == \
{S(1), x, y, x**2, x**3, y**2, y**3, x*y, x*y**2, y*x**2}
def test_monomial_count():
assert monomial_count(2, 2) == 6
assert monomial_count(2, 3) == 10
def test_monomial_mul():
assert monomial_mul((3, 4, 1), (1, 2, 0)) == (4, 6, 1)
def test_monomial_div():
assert monomial_div((3, 4, 1), (1, 2, 0)) == (2, 2, 1)
def test_monomial_gcd():
assert monomial_gcd((3, 4, 1), (1, 2, 0)) == (1, 2, 0)
def test_monomial_lcm():
assert monomial_lcm((3, 4, 1), (1, 2, 0)) == (3, 4, 1)
def test_monomial_max():
assert monomial_max((3, 4, 5), (0, 5, 1), (6, 3, 9)) == (6, 5, 9)
def test_monomial_min():
assert monomial_min((3, 4, 5), (0, 5, 1), (6, 3, 9)) == (0, 3, 1)
def test_monomial_divides():
assert monomial_divides((1, 2, 3), (4, 5, 6)) is True
assert monomial_divides((1, 2, 3), (0, 5, 6)) is False
def test_Monomial():
m = Monomial((3, 4, 1), (x, y, z))
n = Monomial((1, 2, 0), (x, y, z))
assert m.as_expr() == x**3*y**4*z
assert n.as_expr() == x**1*y**2
assert m.as_expr(a, b, c) == a**3*b**4*c
assert n.as_expr(a, b, c) == a**1*b**2
assert m.exponents == (3, 4, 1)
assert m.gens == (x, y, z)
assert n.exponents == (1, 2, 0)
assert n.gens == (x, y, z)
assert m == (3, 4, 1)
assert n != (3, 4, 1)
assert m != (1, 2, 0)
assert n == (1, 2, 0)
assert m[0] == m[-3] == 3
assert m[1] == m[-2] == 4
assert m[2] == m[-1] == 1
assert n[0] == n[-3] == 1
assert n[1] == n[-2] == 2
assert n[2] == n[-1] == 0
assert m[:2] == (3, 4)
assert n[:2] == (1, 2)
assert m*n == Monomial((4, 6, 1))
assert m/n == Monomial((2, 2, 1))
assert m*(1, 2, 0) == Monomial((4, 6, 1))
assert m/(1, 2, 0) == Monomial((2, 2, 1))
assert m.gcd(n) == Monomial((1, 2, 0))
assert m.lcm(n) == Monomial((3, 4, 1))
assert m.gcd((1, 2, 0)) == Monomial((1, 2, 0))
assert m.lcm((1, 2, 0)) == Monomial((3, 4, 1))
assert m**0 == Monomial((0, 0, 0))
assert m**1 == m
assert m**2 == Monomial((6, 8, 2))
assert m**3 == Monomial((9, 12, 3))
raises(ExactQuotientFailed, lambda: m/Monomial((5, 2, 0)))
```
|
{
"source": "jdtsmith/autoftp",
"score": 3
}
|
#### File: autoftp/example/main.py
```python
import sys
import wifi
wifi.start()
import uftpd
import mymod
def unload(mod):
if not isinstance(mod, (list, tuple)): mod = (mod,)
for m in mod:
mod_name = m if type(m) is str else m.__name__
if mod_name in sys.modules:
del sys.modules[mod_name]
return mod_name
# Called from autoftp: reloads and then stops the relevant module. MUST NOT BLOCK!
def reload_stop(mod):
if mod == 'mymod':
unload(mod)
else:
unload((mod,'mymod'))
global mymod
mymod = __import__('mymod')
if mObj: mObj.stop()
def start():
global mObj
mObj = mymod.myModule()
mObj.start()
print(">> myModule: Initial Startup!")
while True:
start()
print(">> myModule Stopped, Re-starting!")
```
|
{
"source": "jdtuck/scikit-fda",
"score": 3
}
|
#### File: exploratory/visualization/_boxplot.py
```python
from abc import ABC, abstractmethod
import math
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from ..depth import ModifiedBandDepth
from ..outliers import _envelopes
from ._utils import (_figure_to_svg, _get_figure_and_axes,
_set_figure_layout_for_fdata, _set_labels)
__author__ = "<NAME>"
__email__ = "<EMAIL>"
class FDataBoxplot(ABC):
"""Abstract class inherited by the Boxplot and SurfaceBoxplot classes.
It contains the data of the functional boxplot or surface boxplot of an
FDataGrid object, depending on the dimension of the domain, 1 or 2
respectively.
It forces both classes, Boxplot and SurfaceBoxplot, to contain at least
the median, the central and outlying envelopes and a colormap for their
graphical representation, obtained by calling the plot method.
"""
@abstractmethod
def __init__(self, factor=1.5):
if factor < 0:
raise ValueError("The number used to calculate the "
"outlying envelope must be positive.")
self._factor = factor
@property
def factor(self):
return self._factor
@property
def fdatagrid(self):
pass
@property
def median(self):
pass
@property
def central_envelope(self):
pass
@property
def non_outlying_envelope(self):
pass
@property
def colormap(self):
return self._colormap
@colormap.setter
def colormap(self, value):
if not isinstance(value, matplotlib.colors.LinearSegmentedColormap):
raise ValueError("colormap must be of type "
"matplotlib.colors.LinearSegmentedColormap")
self._colormap = value
@abstractmethod
def plot(self, chart=None, *, fig=None, axes=None,
n_rows=None, n_cols=None):
pass
def _repr_svg_(self):
fig = self.plot()
plt.close(fig)
return _figure_to_svg(fig)
class Boxplot(FDataBoxplot):
r"""Representation of the functional boxplot.
Class implementing the functional boxplot, which is an informative
exploratory tool for visualizing functional data, as well as its
generalization, the enhanced functional boxplot. Only supports 1
dimensional domain functional data.
Based on the center outward ordering induced by a :ref:`depth measure
<depth-measures>` for functional data, the descriptive statistics of a
functional boxplot are: the envelope of the 50% central region, the median
curve, and the maximum non-outlying envelope. In addition, outliers can be
detected in a functional boxplot by the 1.5 times the 50% central region
empirical rule, analogous to the rule for classical boxplots.
Args:
fdatagrid (FDataGrid): Object containing the data.
depth_method (:ref:`depth measure <depth-measures>`, optional):
Method used to order the data. Defaults to :func:`modified
band depth
<skfda.exploratory.depth.ModifiedBandDepth>`.
prob (list of float, optional): List of floats in (0, 1], given in
descending order, that indicate which central regions to
represent.
Defaults to [0.5], which represents the 50% central region.
factor (double): Number used to calculate the outlying envelope.
Attributes:
fdatagrid (FDataGrid): Object containing the data.
median (array, (fdatagrid.dim_codomain, ngrid_points)): contains
the median/s.
central_envelope (array, (fdatagrid.dim_codomain, 2, ngrid_points)):
contains the central envelope/s.
non_outlying_envelope (array, (fdatagrid.dim_codomain, 2,
ngrid_points)):
contains the non-outlying envelope/s.
colormap (matplotlib.colors.LinearSegmentedColormap): Colormap from
which the colors to represent the central regions are selected.
envelopes (array, (fdatagrid.dim_codomain * ncentral_regions, 2,
ngrid_points)): contains the region envelopes.
outliers (array, (fdatagrid.dim_codomain, fdatagrid.n_samples)):
contains the outliers.
barcol (string): Color of the envelopes and vertical lines.
outliercol (string): Color of the outliers.
mediancol (string): Color of the median.
show_full_outliers (boolean): If False (the default), only the part of
the outlier curves outside the box is shown. If True, complete
outlying curves are plotted.
Representation in a Jupyter notebook:
.. jupyter-execute::
from skfda.datasets import make_gaussian_process
from skfda.misc.covariances import Exponential
from skfda.exploratory.visualization import Boxplot
fd = make_gaussian_process(
n_samples=20, cov=Exponential(), random_state=3)
Boxplot(fd)
Examples:
Function :math:`f : \mathbb{R}\longmapsto\mathbb{R}`.
>>> from skfda import FDataGrid
>>> from skfda.exploratory.visualization import Boxplot
>>>
>>> data_matrix = [[1, 1, 2, 3, 2.5, 2],
... [0.5, 0.5, 1, 2, 1.5, 1],
... [-1, -1, -0.5, 1, 1, 0.5],
... [-0.5, -0.5, -0.5, -1, -1, -1]]
>>> grid_points = [0, 2, 4, 6, 8, 10]
>>> fd = FDataGrid(data_matrix, grid_points, dataset_name="dataset",
... argument_names=["x_label"],
... coordinate_names=["y_label"])
>>> Boxplot(fd)
Boxplot(
FDataGrid=FDataGrid(
array([[[ 1. ],
[ 1. ],
[ 2. ],
[ 3. ],
[ 2.5],
[ 2. ]],
[[ 0.5],
[ 0.5],
[ 1. ],
[ 2. ],
[ 1.5],
[ 1. ]],
[[-1. ],
[-1. ],
[-0.5],
[ 1. ],
[ 1. ],
[ 0.5]],
[[-0.5],
[-0.5],
[-0.5],
[-1. ],
[-1. ],
[-1. ]]]),
grid_points=(array([ 0., 2., 4., 6., 8., 10.]),),
domain_range=((0.0, 10.0),),
dataset_name='dataset',
argument_names=('x_label',),
coordinate_names=('y_label',),
...),
median=array([[ 0.5],
[ 0.5],
[ 1. ],
[ 2. ],
[ 1.5],
[ 1. ]]),
central envelope=(array([[-1. ],
[-1. ],
[-0.5],
[ 1. ],
[ 1. ],
[ 0.5]]), array([[ 0.5],
[ 0.5],
[ 1. ],
[ 2. ],
[ 1.5],
[ 1. ]])),
non-outlying envelope=(array([[-1. ],
[-1. ],
[-0.5],
[ 1. ],
[ 1. ],
[ 0.5]]), array([[ 0.5],
[ 0.5],
[ 1. ],
[ 2. ],
[ 1.5],
[ 1. ]])),
envelopes=[(array([[-1. ],
[-1. ],
[-0.5],
[ 1. ],
[ 1. ],
[ 0.5]]), array([[ 0.5],
[ 0.5],
[ 1. ],
[ 2. ],
[ 1.5],
[ 1. ]]))],
outliers=array([ True, False, False, True]))
References:
<NAME>., & <NAME>. (2011). Functional Boxplots. Journal of
Computational and Graphical Statistics, 20(2), 316-334.
https://doi.org/10.1198/jcgs.2011.09224
"""
def __init__(self, fdatagrid, depth_method=ModifiedBandDepth(), prob=[0.5],
factor=1.5):
"""Initialization of the Boxplot class.
Args:
fdatagrid (FDataGrid): Object containing the data.
depth_method (:ref:`depth measure <depth-measures>`, optional):
Method used to order the data. Defaults to :func:`modified
band depth
<skfda.exploratory.depth.ModifiedBandDepth>`.
prob (list of float, optional): List of floats in (0, 1], given in
descending order, that indicate which central regions to
represent.
Defaults to [0.5], which represents the 50% central region.
factor (double): Number used to calculate the outlying envelope.
"""
FDataBoxplot.__init__(self, factor)
if fdatagrid.dim_domain != 1:
raise ValueError(
"Function only supports FDataGrid with domain dimension 1.")
if sorted(prob, reverse=True) != prob:
raise ValueError(
"Probabilities required to be in descending order.")
if min(prob) < 0 or max(prob) > 1:
raise ValueError("Probabilities must be between 0 and 1.")
self._envelopes = [None] * len(prob)
depth = depth_method(fdatagrid)
indices_descending_depth = (-depth).argsort(axis=0)
# The median is the deepest curve
self._median = fdatagrid[indices_descending_depth[0]
].data_matrix[0, ...]
# Central region and envelope must be computed for outlier detection
central_region = _envelopes._compute_region(
fdatagrid, indices_descending_depth, 0.5)
self._central_envelope = _envelopes._compute_envelope(central_region)
# Non-outlying envelope
non_outlying_threshold = _envelopes._non_outlying_threshold(
self._central_envelope, factor)
predicted_outliers = _envelopes._predict_outliers(
fdatagrid, non_outlying_threshold)
inliers = fdatagrid[predicted_outliers == 0]
self._non_outlying_envelope = _envelopes._compute_envelope(inliers)
# Outliers
self._outliers = _envelopes._predict_outliers(
fdatagrid, self._non_outlying_envelope)
for i, p in enumerate(prob):
region = _envelopes._compute_region(
fdatagrid, indices_descending_depth, p)
self._envelopes[i] = _envelopes._compute_envelope(region)
self._fdatagrid = fdatagrid
self._prob = prob
self._colormap = plt.cm.get_cmap('RdPu')
self.barcol = "blue"
self.outliercol = "red"
self.mediancol = "black"
self._show_full_outliers = False
@property
def fdatagrid(self):
return self._fdatagrid
@property
def median(self):
return self._median
@property
def central_envelope(self):
return self._central_envelope
@property
def non_outlying_envelope(self):
return self._non_outlying_envelope
@property
def envelopes(self):
return self._envelopes
@property
def outliers(self):
return self._outliers
@property
def show_full_outliers(self):
return self._show_full_outliers
@show_full_outliers.setter
def show_full_outliers(self, boolean):
if not isinstance(boolean, bool):
raise ValueError("show_full_outliers must be boolean type")
self._show_full_outliers = boolean
def plot(self, chart=None, *, fig=None, axes=None,
n_rows=None, n_cols=None):
"""Visualization of the functional boxplot of the fdatagrid
(dim_domain=1).
Args:
fig (figure object, optional): figure over which the graphs are
plotted in case ax is not specified. If None and ax is also
None, the figure is initialized.
axes (list of axis objects, optional): axis over where the graphs
are plotted. If None, see param fig.
n_rows(int, optional): designates the number of rows of the figure
to plot the different dimensions of the image. Only specified
if fig and ax are None.
n_cols(int, optional): designates the number of columns of the
figure to plot the different dimensions of the image. Only
specified if fig and ax are None.
Returns:
fig (figure): figure object in which the graphs are plotted.
"""
fig, axes = _get_figure_and_axes(chart, fig, axes)
fig, axes = _set_figure_layout_for_fdata(
self.fdatagrid, fig, axes, n_rows, n_cols)
tones = np.linspace(0.1, 1.0, len(self._prob) + 1, endpoint=False)[1:]
color = self.colormap(tones)
if self.show_full_outliers:
var_zorder = 1
else:
var_zorder = 4
outliers = self.fdatagrid[self.outliers]
for m in range(self.fdatagrid.dim_codomain):
# Outliers
for o in outliers:
axes[m].plot(o.grid_points[0],
o.data_matrix[0, :, m],
color=self.outliercol,
linestyle='--', zorder=1)
for i in range(len(self._prob)):
# central regions
axes[m].fill_between(self.fdatagrid.grid_points[0],
self.envelopes[i][0][..., m],
self.envelopes[i][1][..., m],
facecolor=color[i], zorder=var_zorder)
# outlying envelope
axes[m].plot(self.fdatagrid.grid_points[0],
self.non_outlying_envelope[0][..., m],
self.fdatagrid.grid_points[0],
self.non_outlying_envelope[1][..., m],
color=self.barcol, zorder=4)
# central envelope
axes[m].plot(self.fdatagrid.grid_points[0],
self.central_envelope[0][..., m],
self.fdatagrid.grid_points[0],
self.central_envelope[1][..., m],
color=self.barcol, zorder=4)
# vertical lines
index = math.ceil(self.fdatagrid.ncol / 2)
x = self.fdatagrid.grid_points[0][index]
axes[m].plot([x, x],
[self.non_outlying_envelope[0][..., m][index],
self.central_envelope[0][..., m][index]],
color=self.barcol,
zorder=4)
axes[m].plot([x, x],
[self.non_outlying_envelope[1][..., m][index],
self.central_envelope[1][..., m][index]],
color=self.barcol, zorder=4)
# median sample
axes[m].plot(self.fdatagrid.grid_points[0], self.median[..., m],
color=self.mediancol, zorder=5)
_set_labels(self.fdatagrid, fig, axes)
return fig
def __repr__(self):
"""Return repr(self)."""
return (f"Boxplot("
f"\nFDataGrid={repr(self.fdatagrid)},"
f"\nmedian={repr(self.median)},"
f"\ncentral envelope={repr(self.central_envelope)},"
f"\nnon-outlying envelope={repr(self.non_outlying_envelope)},"
f"\nenvelopes={repr(self.envelopes)},"
f"\noutliers={repr(self.outliers)})").replace('\n', '\n ')
class SurfaceBoxplot(FDataBoxplot):
r"""Representation of the surface boxplot.
Class implementing the surface boxplot. Analogously to the functional
boxplot, it is an informative exploratory tool for visualizing functional
data with domain dimension 2. Nevertheless, it does not implement the
enhanced surface boxplot.
Based on the center outward ordering induced by a
:ref:`depth measure <depth-measures>`
for functional data, it represents the envelope of the
50% central region, the median curve, and the maximum non-outlying
envelope.
Args:
fdatagrid (FDataGrid): Object containing the data.
method (:ref:`depth measure <depth-measures>`, optional): Method
used to order the data. Defaults to :class:`modified band depth
<skfda.exploratory.depth.ModifiedBandDepth>`.
factor (double): Number used to calculate the outlying envelope.
Attributes:
fdatagrid (FDataGrid): Object containing the data.
median (array, (fdatagrid.dim_codomain, lx, ly)): contains
the median/s.
central_envelope (array, (fdatagrid.dim_codomain, 2, lx, ly)):
contains the central envelope/s.
non_outlying_envelope (array,(fdatagrid.dim_codomain, 2, lx, ly)):
contains the non-outlying envelope/s.
colormap (matplotlib.colors.LinearSegmentedColormap): Colormap from
which the colors to represent the central regions are selected.
boxcol (string): Color of the box, which includes median and central
envelope.
outcol (string): Color of the outlying envelope.
Examples:
Function :math:`f : \mathbb{R}^2\longmapsto\mathbb{R}`.
>>> from skfda import FDataGrid
>>> from skfda.exploratory.visualization import SurfaceBoxplot
>>> data_matrix = [[[[1], [0.7], [1]],
... [[4], [0.4], [5]]],
... [[[2], [0.5], [2]],
... [[3], [0.6], [3]]]]
>>> grid_points = [[2, 4], [3, 6, 8]]
>>> fd = FDataGrid(data_matrix, grid_points, dataset_name="dataset",
... argument_names=["x1_label", "x2_label"],
... coordinate_names=["y_label"])
>>> SurfaceBoxplot(fd)
SurfaceBoxplot(
FDataGrid=FDataGrid(
array([[[[ 1. ],
[ 0.7],
[ 1. ]],
[[ 4. ],
[ 0.4],
[ 5. ]]],
[[[ 2. ],
[ 0.5],
[ 2. ]],
[[ 3. ],
[ 0.6],
[ 3. ]]]]),
grid_points=(array([ 2., 4.]), array([ 3., 6., 8.])),
domain_range=((2.0, 4.0), (3.0, 8.0)),
dataset_name='dataset',
argument_names=('x1_label', 'x2_label'),
coordinate_names=('y_label',),
extrapolation=None,
...),
median=array([[[ 1. ],
[ 0.7],
[ 1. ]],
[[ 4. ],
[ 0.4],
[ 5. ]]]),
central envelope=(array([[[ 1. ],
[ 0.7],
[ 1. ]],
[[ 4. ],
[ 0.4],
[ 5. ]]]),
array([[[ 1. ],
[ 0.7],
[ 1. ]],
[[ 4. ],
[ 0.4],
[ 5. ]]])),
outlying envelope=(array([[[ 1. ],
[ 0.7],
[ 1. ]],
[[ 4. ],
[ 0.4],
[ 5. ]]]),
array([[[ 1. ],
[ 0.7],
[ 1. ]],
[[ 4. ],
[ 0.4],
[ 5. ]]])))
References:
<NAME>., & <NAME>. (2011). Functional Boxplots. Journal of
Computational and Graphical Statistics, 20(2), 316-334.
https://doi.org/10.1198/jcgs.2011.09224
"""
def __init__(self, fdatagrid, method=ModifiedBandDepth(), factor=1.5):
"""Initialization of the functional boxplot.
Args:
fdatagrid (FDataGrid): Object containing the data.
method (:ref:`depth measure <depth-measures>`, optional): Method
used to order the data. Defaults to :class:`modified band depth
<skfda.exploratory.depth.ModifiedBandDepth>`.
factor (double): Number used to calculate the outlying envelope.
"""
FDataBoxplot.__init__(self, factor)
if fdatagrid.dim_domain != 2:
raise ValueError(
"Class only supports FDataGrid with domain dimension 2.")
depth = method(fdatagrid)
indices_descending_depth = (-depth).argsort(axis=0)
# The median is the deepest curve
self._median = fdatagrid.data_matrix[indices_descending_depth[0]]
# Central region and envelope must be computed for outlier detection
central_region = _envelopes._compute_region(
fdatagrid, indices_descending_depth, 0.5)
self._central_envelope = _envelopes._compute_envelope(central_region)
# Non-outlying envelope
non_outlying_threshold = _envelopes._non_outlying_threshold(
self._central_envelope, factor)
predicted_outliers = _envelopes._predict_outliers(
fdatagrid, non_outlying_threshold)
inliers = fdatagrid[predicted_outliers == 0]
self._non_outlying_envelope = _envelopes._compute_envelope(inliers)
self._fdatagrid = fdatagrid
self.colormap = plt.cm.get_cmap('Greys')
self._boxcol = 1.0
self._outcol = 0.7
@property
def fdatagrid(self):
return self._fdatagrid
@property
def median(self):
return self._median
@property
def central_envelope(self):
return self._central_envelope
@property
def non_outlying_envelope(self):
return self._non_outlying_envelope
@property
def boxcol(self):
return self._boxcol
@boxcol.setter
def boxcol(self, value):
if value < 0 or value > 1:
raise ValueError(
"boxcol must be a number between 0 and 1.")
self._boxcol = value
@property
def outcol(self):
return self._outcol
@outcol.setter
def outcol(self, value):
if value < 0 or value > 1:
raise ValueError(
"outcol must be a number between 0 and 1.")
self._outcol = value
def plot(self, chart=None, *, fig=None, axes=None,
n_rows=None, n_cols=None):
"""Visualization of the surface boxplot of the fdatagrid (dim_domain=2).
Args:
fig (figure object, optional): figure over which the graphs are
plotted in case ax is not specified. If None and ax is also
None, the figure is initialized.
axes (list of axis objects, optional): axis over where the graphs
are plotted. If None, see param fig.
n_rows(int, optional): designates the number of rows of the figure
to plot the different dimensions of the image. Only specified
if fig and ax are None.
n_cols(int, optional): designates the number of columns of the
figure to plot the different dimensions of the image. Only
specified if fig and ax are None.
Returns:
fig (figure): figure object in which the graphs are plotted.
"""
fig, axes = _get_figure_and_axes(chart, fig, axes)
fig, axes = _set_figure_layout_for_fdata(
self.fdatagrid, fig, axes, n_rows, n_cols)
x = self.fdatagrid.grid_points[0]
lx = len(x)
y = self.fdatagrid.grid_points[1]
ly = len(y)
X, Y = np.meshgrid(x, y)
for m in range(self.fdatagrid.dim_codomain):
# mean sample
axes[m].plot_wireframe(X, Y, np.squeeze(self.median[..., m]).T,
rstride=ly, cstride=lx,
color=self.colormap(self.boxcol))
axes[m].plot_surface(X, Y, np.squeeze(self.median[..., m]).T,
color=self.colormap(self.boxcol), alpha=0.8)
# central envelope
axes[m].plot_surface(
X, Y, np.squeeze(self.central_envelope[0][..., m]).T,
color=self.colormap(self.boxcol), alpha=0.5)
axes[m].plot_wireframe(
X, Y, np.squeeze(self.central_envelope[0][..., m]).T,
rstride=ly, cstride=lx,
color=self.colormap(self.boxcol))
axes[m].plot_surface(
X, Y, np.squeeze(self.central_envelope[1][..., m]).T,
color=self.colormap(self.boxcol), alpha=0.5)
axes[m].plot_wireframe(
X, Y, np.squeeze(self.central_envelope[1][..., m]).T,
rstride=ly, cstride=lx,
color=self.colormap(self.boxcol))
# box vertical lines
for indices in [(0, 0), (0, ly - 1), (lx - 1, 0),
(lx - 1, ly - 1)]:
x_corner = x[indices[0]]
y_corner = y[indices[1]]
axes[m].plot(
[x_corner, x_corner], [y_corner, y_corner],
[
self.central_envelope[1][..., m][indices[0],
indices[1]],
self.central_envelope[0][..., m][indices[0],
indices[1]]],
color=self.colormap(self.boxcol))
# outlying envelope
axes[m].plot_surface(
X, Y,
np.squeeze(self.non_outlying_envelope[0][..., m]).T,
color=self.colormap(self.outcol), alpha=0.3)
axes[m].plot_wireframe(
X, Y,
np.squeeze(self.non_outlying_envelope[0][..., m]).T,
rstride=ly, cstride=lx,
color=self.colormap(self.outcol))
axes[m].plot_surface(
X, Y,
np.squeeze(self.non_outlying_envelope[1][..., m]).T,
color=self.colormap(self.outcol), alpha=0.3)
axes[m].plot_wireframe(
X, Y,
np.squeeze(self.non_outlying_envelope[1][..., m]).T,
rstride=ly, cstride=lx,
color=self.colormap(self.outcol))
# vertical lines from central to outlying envelope
x_index = math.floor(lx / 2)
x_central = x[x_index]
y_index = math.floor(ly / 2)
y_central = y[y_index]
axes[m].plot(
[x_central, x_central], [y_central, y_central],
[self.non_outlying_envelope[1][..., m][x_index, y_index],
self.central_envelope[1][..., m][x_index, y_index]],
color=self.colormap(self.boxcol))
axes[m].plot(
[x_central, x_central], [y_central, y_central],
[self.non_outlying_envelope[0][..., m][x_index, y_index],
self.central_envelope[0][..., m][x_index, y_index]],
color=self.colormap(self.boxcol))
_set_labels(self.fdatagrid, fig, axes)
return fig
def __repr__(self):
"""Return repr(self)."""
return ((f"SurfaceBoxplot("
f"\nFDataGrid={repr(self.fdatagrid)},"
f"\nmedian={repr(self.median)},"
f"\ncentral envelope={repr(self.central_envelope)},"
f"\noutlying envelope={repr(self.non_outlying_envelope)})")
.replace('\n', '\n '))
```
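Beyond the doctest above, a typical interactive use of `Boxplot` is to request several central regions and show full outlying curves before plotting. A hedged usage sketch based on the public attributes defined in this module:
```python
# Usage sketch (assumes the API shown above: prob, show_full_outliers, plot).
from skfda.datasets import make_gaussian_process
from skfda.exploratory.visualization import Boxplot

fd = make_gaussian_process(n_samples=20, random_state=0)

bplot = Boxplot(fd, prob=[0.75, 0.5, 0.25])  # three nested central regions
bplot.show_full_outliers = True              # draw complete outlying curves
fig = bplot.plot()
fig.savefig("functional_boxplot.png")
```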
#### File: exploratory/visualization/clustering.py
```python
from matplotlib.ticker import MaxNLocator
from mpldatacursor import datacursor
from sklearn.exceptions import NotFittedError
from sklearn.utils.validation import check_is_fitted
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
from ...ml.clustering import FuzzyCMeans
from ._utils import (_darken,
_get_figure_and_axes, _set_figure_layout_for_fdata,
_set_figure_layout, _set_labels)
__author__ = "<NAME>"
__email__ = "<EMAIL>"
def _check_if_estimator(estimator):
"""Checks the argument *estimator* is actually an estimator that
implements the *fit* method.
Args:
estimator (BaseEstimator object): estimator used to calculate the
clusters.
"""
msg = ("This %(name)s instance has no attribute \"fit\".")
if not hasattr(estimator, "fit"):
raise AttributeError(msg % {'name': type(estimator).__name__})
def _plot_clustering_checks(estimator, fdata, sample_colors, sample_labels,
cluster_colors, cluster_labels,
center_colors, center_labels):
"""Checks the arguments *sample_colors*, *sample_labels*, *cluster_colors*,
*cluster_labels*, *center_colors*, *center_labels*, passed to the plot
functions, have the correct dimensions.
Args:
estimator (BaseEstimator object): estimator used to calculate the
clusters.
fdata (FData object): contains the samples which are grouped
into different clusters.
sample_colors (list of colors): contains in order the colors of each
sample of the fdatagrid.
sample_labels (list of str): contains in order the labels of each
sample of the fdatagrid.
cluster_colors (list of colors): contains in order the colors of each
cluster the samples of the fdatagrid are classified into.
cluster_labels (list of str): contains in order the names of each
cluster the samples of the fdatagrid are classified into.
center_colors (list of colors): contains in order the colors of each
centroid of the clusters the samples of the fdatagrid are
classified into.
center_labels (list of str): contains in order the labels of each
centroid of the clusters the samples of the fdatagrid are
classified into.
"""
if sample_colors is not None and len(
sample_colors) != fdata.n_samples:
raise ValueError(
"sample_colors must contain a color for each sample.")
if sample_labels is not None and len(
sample_labels) != fdata.n_samples:
raise ValueError(
"sample_labels must contain a label for each sample.")
if cluster_colors is not None and len(
cluster_colors) != estimator.n_clusters:
raise ValueError(
"cluster_colors must contain a color for each cluster.")
if cluster_labels is not None and len(
cluster_labels) != estimator.n_clusters:
raise ValueError(
"cluster_labels must contain a label for each cluster.")
if center_colors is not None and len(
center_colors) != estimator.n_clusters:
raise ValueError(
"center_colors must contain a color for each center.")
if center_labels is not None and len(
center_labels) != estimator.n_clusters:
raise ValueError(
"centers_labels must contain a label for each center.")
def _plot_clusters(estimator, fdata, *, chart=None, fig=None, axes=None,
n_rows=None, n_cols=None,
labels, sample_labels, cluster_colors, cluster_labels,
center_colors, center_labels, center_width, colormap):
"""Implementation of the plot of the FDataGrid samples by clusters.
Args:
estimator (BaseEstimator object): estimator used to calculate the
clusters.
fdatagrid (FDataGrid object): contains the samples which are grouped
into different clusters.
fig (figure object): figure over which the graphs are plotted in
case ax is not specified. If None and ax is also None, the figure
is initialized.
axes (list of axes objects): axes over where the graphs are plotted.
If None, see param fig.
n_rows(int): designates the number of rows of the figure to plot the
different dimensions of the image. Only specified if fig and
ax are None.
n_cols(int): designates the number of columns of the figure to plot
the different dimensions of the image. Only specified if fig
and ax are None.
labels (numpy.ndarray, int: (n_samples, dim_codomain)): 2-dimensional
matrix where each row contains the number of the cluster
that observation belongs to.
sample_labels (list of str): contains in order the labels of each
sample of the fdatagrid.
cluster_colors (list of colors): contains in order the colors of each
cluster the samples of the fdatagrid are classified into.
cluster_labels (list of str): contains in order the names of each
cluster the samples of the fdatagrid are classified into.
center_colors (list of colors): contains in order the colors of each
centroid of the clusters the samples of the fdatagrid are
classified into.
center_labels (list of str): contains in order the labels of each
centroid of the clusters the samples of the fdatagrid are
classified into.
center_width (int): width of the centroids.
colormap(colormap): colormap from which the colors of the plot are
taken.
Returns:
(tuple): tuple containing:
fig (figure object): figure object in which the graphs are plotted
in case ax is None.
ax (axes object): axes in which the graphs are plotted.
"""
fig, axes = _get_figure_and_axes(chart, fig, axes)
fig, axes = _set_figure_layout_for_fdata(fdata, fig, axes, n_rows, n_cols)
_plot_clustering_checks(estimator, fdata, None, sample_labels,
cluster_colors, cluster_labels, center_colors,
center_labels)
if sample_labels is None:
sample_labels = [f'$SAMPLE: {i}$' for i in range(fdata.n_samples)]
if cluster_colors is None:
cluster_colors = colormap(
np.arange(estimator.n_clusters) / (estimator.n_clusters - 1))
if cluster_labels is None:
cluster_labels = [
f'$CLUSTER: {i}$' for i in range(estimator.n_clusters)]
if center_colors is None:
center_colors = [_darken(c, 0.5) for c in cluster_colors]
if center_labels is None:
center_labels = [
f'$CENTER: {i}$' for i in range(estimator.n_clusters)]
colors_by_cluster = cluster_colors[labels]
patches = []
for i in range(estimator.n_clusters):
patches.append(
mpatches.Patch(color=cluster_colors[i],
label=cluster_labels[i]))
for j in range(fdata.dim_codomain):
for i in range(fdata.n_samples):
axes[j].plot(fdata.grid_points[0],
fdata.data_matrix[i, :, j],
c=colors_by_cluster[i],
label=sample_labels[i])
for i in range(estimator.n_clusters):
axes[j].plot(fdata.grid_points[0],
estimator.cluster_centers_.data_matrix[i, :, j],
c=center_colors[i],
label=center_labels[i],
linewidth=center_width)
axes[j].legend(handles=patches)
datacursor(formatter='{label}'.format)
_set_labels(fdata, fig, axes)
return fig
def plot_clusters(estimator, X, chart=None, fig=None, axes=None,
n_rows=None, n_cols=None,
sample_labels=None, cluster_colors=None,
cluster_labels=None, center_colors=None,
center_labels=None,
center_width=3,
colormap=plt.cm.get_cmap('rainbow')):
"""Plot of the FDataGrid samples by clusters.
The clusters are calculated with the estimator passed as a parameter. If
the estimator is not fitted, the fit method is called.
Once each sample is assigned a label, the plotting can be done.
Each group is assigned a color described in a legend.
Args:
estimator (BaseEstimator object): estimator used to calculate the
clusters.
X (FDataGrid object): contains the samples which are grouped
into different clusters.
fig (figure object): figure over which the graphs are plotted in
case ax is not specified. If None and ax is also None, the figure
is initialized.
axes (list of axis objects): axis over where the graphs are plotted.
If None, see param fig.
n_rows (int): designates the number of rows of the figure to plot the
different dimensions of the image. Only specified if fig and
ax are None.
n_cols (int): designates the number of columns of the figure to plot
the different dimensions of the image. Only specified if fig
and ax are None.
sample_labels (list of str): contains in order the labels of each
sample of the fdatagrid.
cluster_colors (list of colors): contains in order the colors of each
cluster the samples of the fdatagrid are classified into.
cluster_labels (list of str): contains in order the names of each
cluster the samples of the fdatagrid are classified into.
center_colors (list of colors): contains in order the colors of each
centroid of the clusters the samples of the fdatagrid are
classified into.
center_labels (list of str): contains in order the labels of each
centroid of the clusters the samples of the fdatagrid are
classified into.
center_width (int): width of the centroid curves.
colormap(colormap): colormap from which the colors of the plot are
taken. Defaults to `rainbow`.
Returns:
(tuple): tuple containing:
fig (figure object): figure object in which the graphs are plotted
in case ax is None.
ax (axes object): axes in which the graphs are plotted.
"""
_check_if_estimator(estimator)
try:
check_is_fitted(estimator)
estimator._check_test_data(X)
except NotFittedError:
estimator.fit(X)
if isinstance(estimator, FuzzyCMeans):
labels = np.argmax(estimator.labels_, axis=1)
else:
labels = estimator.labels_
return _plot_clusters(estimator=estimator, fdata=X,
fig=fig, axes=axes, n_rows=n_rows, n_cols=n_cols,
labels=labels, sample_labels=sample_labels,
cluster_colors=cluster_colors,
cluster_labels=cluster_labels,
center_colors=center_colors,
center_labels=center_labels,
center_width=center_width,
colormap=colormap)
def _get_labels(x_label, y_label, title, xlabel_str):
"""Sets the arguments *xlabel*, *ylabel*, *title* passed to the plot
functions :func:`plot_cluster_lines
<skfda.exploratory.visualization.clustering_plots.plot_cluster_lines>` and
:func:`plot_cluster_bars
<skfda.exploratory.visualization.clustering_plots.plot_cluster_bars>`,
in case they are not set yet.
Args:
xlabel (str): Label for the x-axes.
ylabel (str): Label for the y-axes.
title (str): Title for the figure where the clustering results are
plotted.
xlabel_str (str): In case xlabel is None, string to use for the labels
in the x-axes.
Returns:
xlabel (str): Labels for the x-axes.
ylabel (str): Labels for the y-axes.
title (str): Title for the figure where the clustering results are
plotted.
"""
if x_label is None:
x_label = xlabel_str
if y_label is None:
y_label = "Degree of membership"
if title is None:
title = "Degrees of membership of the samples to each cluster"
return x_label, y_label, title
def plot_cluster_lines(estimator, X, chart=None, fig=None, axes=None,
sample_colors=None, sample_labels=None,
cluster_labels=None,
colormap=plt.cm.get_cmap('rainbow'),
x_label=None, y_label=None, title=None):
"""Implementation of the plotting of the results of the
:func:`Fuzzy K-Means <fda.clustering.fuzzy_kmeans>` method.
A kind of Parallel Coordinates plot is generated in this function with the
membership values obtained from the algorithm. A line is plotted for each
sample with the values for each cluster. See `Clustering Example
<../auto_examples/plot_clustering.html>`_.
Args:
estimator (BaseEstimator object): estimator used to calculate the
clusters.
X (FDataGrid object): contains the samples which are grouped
into different clusters.
fig (figure object, optional): figure over which the graph is
plotted in case ax is not specified. If None and ax is also None,
the figure is initialized.
axes (axes object, optional): axis over where the graph is plotted.
If None, see param fig.
sample_colors (list of colors, optional): contains in order the colors
of each sample of the fdatagrid.
sample_labels (list of str, optional): contains in order the labels
of each sample of the fdatagrid.
cluster_labels (list of str, optional): contains in order the names of
each cluster the samples of the fdatagrid are classified into.
colormap(colormap, optional): colormap from which the colors of the
plot are taken.
x_label (str): Label for the x-axis. Defaults to "Cluster".
y_label (str): Label for the y-axis. Defaults to
"Degree of membership".
title (str, optional): Title for the figure where the clustering
results are plotted.
Defaults to "Degrees of membership of the samples to each cluster".
Returns:
(tuple): tuple containing:
fig (figure object): figure object in which the graphs are plotted
in case ax is None.
ax (axes object): axes in which the graphs are plotted.
"""
fdata = X
_check_if_estimator(estimator)
if not isinstance(estimator, FuzzyCMeans):
raise ValueError("The estimator must be a FuzzyCMeans object.")
try:
check_is_fitted(estimator)
estimator._check_test_data(X)
except NotFittedError:
estimator.fit(X)
fig, axes = _get_figure_and_axes(chart, fig, axes)
fig, axes = _set_figure_layout(fig, axes)
_plot_clustering_checks(estimator, fdata, sample_colors, sample_labels,
None, cluster_labels, None, None)
x_label, y_label, title = _get_labels(x_label, y_label, title, "Cluster")
if sample_colors is None:
cluster_colors = colormap(np.arange(estimator.n_clusters) /
(estimator.n_clusters - 1))
labels_by_cluster = np.argmax(estimator.labels_, axis=1)
sample_colors = cluster_colors[labels_by_cluster]
if sample_labels is None:
sample_labels = ['$SAMPLE: {}$'.format(i) for i in
range(fdata.n_samples)]
if cluster_labels is None:
cluster_labels = ['${}$'.format(i) for i in
range(estimator.n_clusters)]
axes[0].get_xaxis().set_major_locator(MaxNLocator(integer=True))
for i in range(fdata.n_samples):
axes[0].plot(np.arange(estimator.n_clusters),
estimator.labels_[i],
label=sample_labels[i],
color=sample_colors[i])
axes[0].set_xticks(np.arange(estimator.n_clusters))
axes[0].set_xticklabels(cluster_labels)
axes[0].set_xlabel(x_label)
axes[0].set_ylabel(y_label)
datacursor(formatter='{label}'.format)
fig.suptitle(title)
return fig
def plot_cluster_bars(estimator, X, chart=None, fig=None, axes=None, sort=-1,
sample_labels=None, cluster_colors=None,
cluster_labels=None, colormap=plt.cm.get_cmap('rainbow'),
x_label=None, y_label=None, title=None):
"""Implementation of the plotting of the results of the
:func:`Fuzzy K-Means <fda.clustering.fuzzy_kmeans>` method.
A kind of barplot is generated in this function with the membership values
obtained from the algorithm. There is a bar for each sample whose height is
1 (the membership values of a sample add up to 1), and the part
proportional to each cluster is coloured with the corresponding color. See
`Clustering Example <../auto_examples/plot_clustering.html>`_.
Args:
estimator (BaseEstimator object): estimator used to calculate the
clusters.
X (FDataGrid object): contains the samples which are grouped
into different clusters.
fig (figure object, optional): figure over which the graph is
plotted in case ax is not specified. If None and ax is also None,
the figure is initialized.
axes (axes object, optional): axes over where the graph is plotted.
If None, see param fig.
sort(int, optional): Number in the range [-1, n_clusters) designating
the cluster whose labels are sorted in decreasing order.
Defaults to -1; in this case, no sorting is done.
sample_labels (list of str, optional): contains in order the labels
of each sample of the fdatagrid.
cluster_labels (list of str, optional): contains in order the names of
each cluster the samples of the fdatagrid are classified into.
cluster_colors (list of colors): contains in order the colors of each
cluster the samples of the fdatagrid are classified into.
colormap(colormap, optional): colormap from which the colors of the
plot are taken.
x_label (str): Label for the x-axis. Defaults to "Sample".
y_label (str): Label for the y-axis. Defaults to
"Degree of membership".
title (str): Title for the figure where the clustering results are
plotted.
Defaults to "Degrees of membership of the samples to each cluster".
Returns:
(tuple): tuple containing:
fig (figure object): figure object in which the graph is plotted
in case ax is None.
ax (axis object): axis in which the graph is plotted.
"""
fdata = X
_check_if_estimator(estimator)
if not isinstance(estimator, FuzzyCMeans):
raise ValueError("The estimator must be a FuzzyCMeans object.")
try:
check_is_fitted(estimator)
estimator._check_test_data(X)
except NotFittedError:
estimator.fit(X)
if sort < -1 or sort >= estimator.n_clusters:
raise ValueError(
"The sorting number must belong to the interval [-1, n_clusters)")
fig, axes = _get_figure_and_axes(chart, fig, axes)
fig, axes = _set_figure_layout(fig, axes)
_plot_clustering_checks(estimator, fdata, None, sample_labels,
cluster_colors, cluster_labels, None, None)
x_label, y_label, title = _get_labels(x_label, y_label, title, "Sample")
if sample_labels is None:
sample_labels = np.arange(fdata.n_samples)
if cluster_colors is None:
cluster_colors = colormap(
np.arange(estimator.n_clusters) / (estimator.n_clusters - 1))
if cluster_labels is None:
cluster_labels = [f'$CLUSTER: {i}$' for i in
range(estimator.n_clusters)]
patches = []
for i in range(estimator.n_clusters):
patches.append(
mpatches.Patch(color=cluster_colors[i], label=cluster_labels[i]))
if sort != -1:
sample_indices = np.argsort(-estimator.labels_[:, sort])
sample_labels = np.copy(sample_labels[sample_indices])
labels_dim = np.copy(estimator.labels_[sample_indices])
temp_labels = np.copy(labels_dim[:, 0])
labels_dim[:, 0] = labels_dim[:, sort]
labels_dim[:, sort] = temp_labels
temp_color = np.copy(cluster_colors[0])
cluster_colors[0] = cluster_colors[sort]
cluster_colors[sort] = temp_color
else:
labels_dim = estimator.labels_
conc = np.zeros((fdata.n_samples, 1))
labels_dim = np.concatenate((conc, labels_dim), axis=-1)
for i in range(estimator.n_clusters):
axes[0].bar(np.arange(fdata.n_samples),
labels_dim[:, i + 1],
bottom=np.sum(labels_dim[:, :(i + 1)], axis=1),
color=cluster_colors[i])
axes[0].set_xticks(np.arange(fdata.n_samples))
axes[0].set_xticklabels(sample_labels)
axes[0].set_xlabel(x_label)
axes[0].set_ylabel(y_label)
axes[0].legend(handles=patches)
fig.suptitle(title)
return fig
```
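The three public helpers in this module share the same calling convention: pass an estimator (fitted or not) and the functional data. A hedged sketch using `FuzzyCMeans`, which is the estimator required by the membership plots:
```python
# Usage sketch; FuzzyCMeans is imported in this module from skfda.ml.clustering.
from skfda.datasets import make_gaussian_process
from skfda.ml.clustering import FuzzyCMeans
from skfda.exploratory.visualization.clustering import (
    plot_clusters, plot_cluster_lines, plot_cluster_bars)

fd = make_gaussian_process(n_samples=30, random_state=0)
fcm = FuzzyCMeans(n_clusters=3, random_state=0)  # fitted on demand by the helpers

fig_curves = plot_clusters(fcm, fd)      # curves coloured by hard assignment
fig_lines = plot_cluster_lines(fcm, fd)  # membership degrees, one line per sample
fig_bars = plot_cluster_bars(fcm, fd)    # membership degrees, one bar per sample
```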
#### File: misc/operators/_identity.py
```python
import numpy as np
from ...representation import FDataGrid
from ...representation.basis import Basis
from ._operators import Operator, gramian_matrix_optimization
class Identity(Operator):
"""Identity operator.
Linear operator that returns its input.
.. math::
Ix = x
Can be applied to both functional and multivariate data.
"""
def __call__(self, f):
return f
@gramian_matrix_optimization.register
def basis_penalty_matrix_optimized(
linear_operator: Identity,
basis: Basis):
return basis.gram_matrix()
@gramian_matrix_optimization.register
def fdatagrid_penalty_matrix_optimized(
linear_operator: Identity,
basis: FDataGrid):
from ..metrics import lp_norm
return np.diag(lp_norm(basis)**2)
```
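With the identity operator, the registered optimizations above make the Gramian coincide with the Gram matrix of a basis (or a diagonal of squared norms for grids). A small hedged check of the functional case:
```python
# Hedged sketch: gramian_matrix with Identity should reduce to the basis
# Gram matrix via basis_penalty_matrix_optimized above.
import numpy as np
from skfda.misc.operators import Identity, gramian_matrix
from skfda.representation.basis import Monomial

basis = Monomial(n_basis=3)
np.testing.assert_allclose(gramian_matrix(Identity(), basis),
                           basis.gram_matrix())
```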
#### File: misc/regularization/_regularization.py
```python
from collections.abc import Iterable
import itertools
from skfda.misc.operators import gramian_matrix, Identity
import scipy.linalg
from sklearn.base import BaseEstimator
import numpy as np
class TikhonovRegularization(BaseEstimator):
r"""
Implements Tikhonov regularization.
The penalization term in this type of regularization is the square of the
:math:`L_2` (Euclidean) norm of a linear operator applied to the function
or vector
.. math::
\lambda \| \Gamma x \|_2^2
where :math:`\Gamma` is the so called Tikhonov operator
(matrix for finite vectors) and :math:`\lambda` is a positive real number.
This linear operator can be an arbitrary Python callable that corresponds
to a linear transformation. However, the
:doc:`operators </modules/misc/operators>` module
provides several common linear operators.
Parameters:
linear_operator: linear operator used for regularization.
regularization_parameter: scaling parameter (:math:`\lambda`) of the
penalization.
Examples:
Construct a regularization that penalizes the second derivative,
which is a measure of the curvature of the function.
>>> from skfda.misc.regularization import TikhonovRegularization
>>> from skfda.misc.operators import LinearDifferentialOperator
>>>
>>> regularization = TikhonovRegularization(
... LinearDifferentialOperator(2))
Construct a regularization that penalizes the identity operator,
that is, completely equivalent to the :math:`L_2` regularization (
:class:`L2Regularization`).
>>> from skfda.misc.regularization import TikhonovRegularization
>>> from skfda.misc.operators import Identity
>>>
>>> regularization = TikhonovRegularization(Identity())
Construct a regularization that penalizes the difference between
the points :math:`f(1)` and :math:`f(0)` of a function :math:`f`.
>>> from skfda.misc.regularization import TikhonovRegularization
>>>
>>> regularization = TikhonovRegularization(lambda x: x(1) - x(0))
Construct a regularization that penalizes the harmonic acceleration
operator :math:`Lf = \omega^2 D f + D^3 f`, that, when the
regularization parameter is large, forces the function to be
:math:`f(t) = c_1 + c_2 \sin \omega t + c_3 \cos \omega t`, where
:math:`\omega` is the angular frequency. This is useful for some
periodic functions.
>>> from skfda.misc.regularization import TikhonovRegularization
>>> from skfda.misc.operators import LinearDifferentialOperator
>>> import numpy as np
>>>
>>> period = 1
>>> w = 2 * np.pi / period
>>> regularization = TikhonovRegularization(
... LinearDifferentialOperator([0, w**2, 0, 1]))
"""
def __init__(self, linear_operator,
*, regularization_parameter=1):
self.linear_operator = linear_operator
self.regularization_parameter = regularization_parameter
def penalty_matrix(self, basis):
r"""
Return a penalty matrix for ordinary least squares.
"""
return self.regularization_parameter * gramian_matrix(
self.linear_operator, basis)
class L2Regularization(TikhonovRegularization):
r"""
Implements :math:`L_2` regularization.
The penalization term in this type of regularization is the square of the
:math:`L_2` (Euclidean) norm of the function or vector
.. math::
\lambda \| x \|_2^2
where :math:`\lambda` is a positive real number.
This is equivalent to Tikhonov regularization (
:class:`TikhonovRegularization`) using the identity operator (
:class:`Identity`).
Parameters:
regularization_parameter: scaling parameter (:math:`\lambda`) of the
penalization.
"""
def __init__(self, *, regularization_parameter=1):
return super().__init__(
linear_operator=Identity(),
regularization_parameter=regularization_parameter)
def compute_penalty_matrix(basis_iterable, regularization_parameter,
regularization):
"""
Computes the regularization matrix for a linear differential operator.
X can be a list of mixed data.
"""
# If there is no regularization, return 0 and rely on broadcasting
if regularization_parameter == 0 or regularization is None:
return 0
# Compute penalty matrix if not provided
if not isinstance(regularization, Iterable):
regularization = (regularization,)
if not isinstance(regularization_parameter, Iterable):
regularization_parameter = itertools.repeat(
regularization_parameter)
penalty_blocks = [
np.zeros((len(b), len(b))) if r is None else
a * r.penalty_matrix(b)
for b, r, a in zip(basis_iterable, regularization,
regularization_parameter)]
penalty_matrix = scipy.linalg.block_diag(*penalty_blocks)
return penalty_matrix
```
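`compute_penalty_matrix` stacks one penalty block per basis on the diagonal. A hedged sketch of what that assembly produces for two bases with different regularizations (using the function defined above in this module):
```python
# Hedged sketch: two bases, two regularizations -> block-diagonal penalty.
# compute_penalty_matrix is the function defined above in this module.
from skfda.misc.operators import LinearDifferentialOperator, Identity
from skfda.misc.regularization import TikhonovRegularization
from skfda.representation.basis import Monomial

bases = [Monomial(n_basis=4), Monomial(n_basis=3)]
regs = [TikhonovRegularization(LinearDifferentialOperator(2)),
        TikhonovRegularization(Identity())]

penalty = compute_penalty_matrix(bases, regularization_parameter=1,
                                 regularization=regs)
print(penalty.shape)  # (7, 7): a 4x4 block and a 3x3 block on the diagonal
```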
#### File: representation/basis/_coefficients_transformer.py
```python
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted
from ._fdatabasis import FDataBasis
class CoefficientsTransformer(BaseEstimator, TransformerMixin):
"""
Transformer returning the coefficients of FDataBasis objects as a matrix.
Attributes:
shape_ (tuple): original shape of coefficients per sample.
Examples:
>>> from skfda.representation.basis import (FDataBasis, Monomial,
... CoefficientsTransformer)
>>>
>>> basis = Monomial(n_basis=4)
>>> coefficients = [[0.5, 1, 2, .5], [1.5, 1, 4, .5]]
>>> fd = FDataBasis(basis, coefficients)
>>>
>>> transformer = CoefficientsTransformer()
>>> transformer.fit_transform(fd)
array([[ 0.5, 1. , 2. , 0.5],
[ 1.5, 1. , 4. , 0.5]])
"""
def fit(self, X: FDataBasis, y=None):
self.shape_ = X.coefficients.shape[1:]
return self
def transform(self, X, y=None):
check_is_fitted(self)
assert X.coefficients.shape[1:] == self.shape_
coefficients = X.coefficients.copy()
coefficients = coefficients.reshape((X.n_samples, -1))
return coefficients
```
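Because the transformer returns a plain 2-d array, it slots directly into scikit-learn pipelines. A hedged sketch (Ridge is just an arbitrary downstream estimator chosen for illustration):
```python
# Hedged sketch: feed basis coefficients to an ordinary sklearn estimator.
from sklearn.linear_model import Ridge
from sklearn.pipeline import make_pipeline
from skfda.representation.basis import (FDataBasis, Monomial,
                                        CoefficientsTransformer)

fd = FDataBasis(Monomial(n_basis=4), [[0.5, 1, 2, 0.5], [1.5, 1, 4, 0.5]])
y = [0.0, 1.0]

model = make_pipeline(CoefficientsTransformer(), Ridge())
model.fit(fd, y)          # Ridge sees the (2, 4) coefficient matrix
print(model.predict(fd))
```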
#### File: skfda/representation/evaluator.py
```python
from abc import ABC, abstractmethod
class Evaluator(ABC):
"""Structure of an evaluator.
An evaluator defines how to evaluate points of a functional object. It
can be used as an extrapolator to evaluate points outside the domain range
or as an interpolation in a :class:`FDataGrid`. The corresponding examples
of Interpolation and Extrapolation show the basic usage of this class.
The evaluator is called internally by :func:`evaluate`.
Should implement the methods :func:`evaluate` and
:func:`evaluate_composed`.
"""
@abstractmethod
def evaluate(self, fdata, eval_points, *, aligned=True):
"""Evaluation method.
Evaluates the samples at evaluation points. The evaluation
call will receive a 2-d array with the evaluation points, or
a 3-d array with the evaluation points per sample if ``aligned``
is ``False``.
Args:
eval_points (numpy.ndarray): Numpy array with shape
``(number_eval_points, dim_domain)`` with the
evaluation points.
Returns:
(numpy.darray): Numpy 3d array with shape
``(n_samples, number_eval_points, dim_codomain)`` with the
result of the evaluation. The entry ``(i,j,k)`` will contain
the value k-th image dimension of the i-th sample, at the
j-th evaluation point.
"""
pass
def __repr__(self):
return f"{type(self)}()"
def __eq__(self, other):
"""Equality operator between evaluators."""
return type(self) == type(other)
class GenericEvaluator(Evaluator):
"""Generic Evaluator.
Generic evaluator that receives a function used to perform the evaluation.
The function will receive an :class:`FData` as first argument, a numpy
array with the eval_points and the ``aligned`` parameter.
"""
def __init__(self, evaluate_function):
self.evaluate_function = evaluate_function
def evaluate(self, fdata, eval_points, *, aligned=True):
return self.evaluate_function(fdata, eval_points, aligned=aligned)
```
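A hedged sketch of `GenericEvaluator`: wrap any callable with the `(fdata, eval_points, aligned)` signature described above and call its `evaluate` method directly (names such as `constant_zero` are illustrative):
```python
# Hedged sketch of GenericEvaluator with a trivial evaluation function.
import numpy as np
from skfda import FDataGrid
from skfda.representation.evaluator import GenericEvaluator

def constant_zero(fdata, eval_points, *, aligned=True):
    # Must return shape (n_samples, number_eval_points, dim_codomain).
    return np.zeros((fdata.n_samples, len(eval_points), fdata.dim_codomain))

fd = FDataGrid([[1, 2, 3]], grid_points=[[0, 1, 2]])
evaluator = GenericEvaluator(constant_zero)

values = evaluator.evaluate(fd, np.array([[-1.0], [3.0]]))
print(values.shape)  # (1, 2, 1)
```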
#### File: scikit-fda/tests/test_pandas_fdatagrid.py
```python
import operator
import skfda
from pandas import Series
import pandas
from pandas.tests.extension import base
import pytest
import numpy as np
##############################################################################
# Fixtures
##############################################################################
@pytest.fixture
def dtype():
"""A fixture providing the ExtensionDtype to validate."""
return skfda.representation.grid.FDataGridDType(
grid_points=[
np.arange(10),
np.arange(10) / 10],
dim_codomain=3
)
@pytest.fixture
def data():
"""
Length-100 array for this type.
* data[0] and data[1] should both be non missing
* data[0] and data[1] should not be equal
"""
data_matrix = np.arange(1, 100 * 10 * 10 * 3 + 1).reshape(100, 10, 10, 3)
grid_points = [
np.arange(10),
np.arange(10) / 10]
return skfda.FDataGrid(data_matrix, grid_points=grid_points)
@pytest.fixture
def data_for_twos():
"""Length-100 array in which all the elements are two."""
data_matrix = np.full(
100 * 10 * 10 * 3, fill_value=2).reshape(100, 10, 10, 3)
grid_points = [
np.arange(10),
np.arange(10) / 10]
return skfda.FDataGrid(data_matrix, grid_points=grid_points)
@pytest.fixture
def data_missing():
"""Length-2 array with [NA, Valid]"""
data_matrix = np.arange(
2 * 10 * 10 * 3, dtype=np.float_).reshape(2, 10, 10, 3)
data_matrix[0, ...] = np.NaN
grid_points = [
np.arange(10),
np.arange(10) / 10]
return skfda.FDataGrid(data_matrix, grid_points=grid_points)
@pytest.fixture(params=["data", "data_missing"])
def all_data(request, data, data_missing):
"""Parametrized fixture giving 'data' and 'data_missing'"""
if request.param == "data":
return data
elif request.param == "data_missing":
return data_missing
@pytest.fixture
def data_repeated(data):
"""
Generate many datasets.
Parameters
----------
data : fixture implementing `data`
Returns
-------
Callable[[int], Generator]:
A callable that takes a `count` argument and
returns a generator yielding `count` datasets.
"""
def gen(count):
for _ in range(count):
yield data
return gen
@pytest.fixture
def data_for_sorting():
"""
Length-3 array with a known sort order.
This should be three items [B, C, A] with
A < B < C
"""
raise NotImplementedError
@pytest.fixture
def data_missing_for_sorting():
"""
Length-3 array with a known sort order.
This should be three items [B, NA, A] with
A < B and NA missing.
"""
raise NotImplementedError
@pytest.fixture
def na_cmp():
"""
Binary operator for comparing NA values.
Should return a function of two arguments that returns
True if both arguments are (scalar) NA for your type.
By default, uses ``operator.is_``
"""
def isna(x, y):
return ((x is pandas.NA or all(x.isna()))
and (y is pandas.NA or all(y.isna())))
return isna
@pytest.fixture
def na_value():
"""The scalar missing value for this type. Default 'None'"""
return pandas.NA
@pytest.fixture
def data_for_grouping():
"""
Data for factorization, grouping, and unique tests.
Expected to be like [B, B, NA, NA, A, A, B, C]
Where A < B < C and NA is missing
"""
raise NotImplementedError
@pytest.fixture(params=[True, False])
def box_in_series(request):
"""Whether to box the data in a Series"""
return request.param
@pytest.fixture(
params=[
lambda x: 1,
lambda x: [1] * len(x),
lambda x: Series([1] * len(x)),
lambda x: x,
],
ids=["scalar", "list", "series", "object"],
)
def groupby_apply_op(request):
"""
Functions to test groupby.apply().
"""
return request.param
@pytest.fixture(params=[True, False])
def as_frame(request):
"""
Boolean fixture to support Series and Series.to_frame() comparison testing.
"""
return request.param
@pytest.fixture(params=[True, False])
def as_series(request):
"""
Boolean fixture to support arr and Series(arr) comparison testing.
"""
return request.param
@pytest.fixture(params=[True, False])
def use_numpy(request):
"""
Boolean fixture to support comparison testing of ExtensionDtype array
and numpy array.
"""
return request.param
@pytest.fixture(params=["ffill", "bfill"])
def fillna_method(request):
"""
Parametrized fixture giving method parameters 'ffill' and 'bfill' for
Series.fillna(method=<method>) testing.
"""
return request.param
@pytest.fixture(params=[True, False])
def as_array(request):
"""
Boolean fixture to support ExtensionDtype _from_sequence method testing.
"""
return request.param
_all_arithmetic_operators = [
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul__",
# "__floordiv__",
# "__rfloordiv__",
"__truediv__",
"__rtruediv__",
# "__pow__",
# "__rpow__",
# "__mod__",
# "__rmod__",
]
@pytest.fixture(params=_all_arithmetic_operators)
def all_arithmetic_operators(request):
"""
Fixture for dunder names for common arithmetic operations.
"""
return request.param
@pytest.fixture(params=["__eq__", "__ne__",
# "__le__", "__lt__", "__ge__", "__gt__"
])
def all_compare_operators(request):
"""
Fixture for dunder names for common compare operations
"""
return request.param
_all_numeric_reductions = [
"sum",
# "max",
# "min",
"mean",
# "prod",
# "std",
# "var",
# "median",
# "kurt",
# "skew",
]
@pytest.fixture(params=_all_numeric_reductions)
def all_numeric_reductions(request):
"""
Fixture for numeric reduction names.
"""
return request.param
##############################################################################
# Tests
##############################################################################
class TestCasting(base.BaseCastingTests):
# Tries to construct dtype from string
@pytest.mark.skip(reason="Unsupported")
def test_astype_str(self):
pass
# Tries to construct dtype from string
@pytest.mark.skip(reason="Unsupported")
def test_astype_string(self):
pass
class TestConstructors(base.BaseConstructorsTests):
# Does not support scalars which are also ExtensionArrays
@pytest.mark.skip(reason="Unsupported")
def test_series_constructor_scalar_with_index(self):
pass
# Tries to construct dtype from string
@pytest.mark.skip(reason="Unsupported")
def test_from_dtype(self):
pass
class TestDtype(base.BaseDtypeTests):
# Tries to construct dtype from string
@pytest.mark.skip(reason="Unsupported")
def test_construct_from_string_own_name(self):
pass
# Tries to construct dtype from string
@pytest.mark.skip(reason="Unsupported")
def test_is_dtype_from_name(self):
pass
# Tries to construct dtype from string
@pytest.mark.skip(reason="Unsupported")
def test_eq_with_str(self):
pass
# Tries to construct dtype from string
@pytest.mark.skip(reason="Unsupported")
def test_construct_from_string(self, dtype):
pass
class TestGetitem(base.BaseGetitemTests):
pass
class TestInterface(base.BaseInterfaceTests):
# Does not support scalars which are also array_like
@pytest.mark.skip(reason="Unsupported")
def test_array_interface(self):
pass
# We do not implement setitem
@pytest.mark.skip(reason="Unsupported")
def test_copy(self, dtype):
pass
# We do not implement setitem
@pytest.mark.skip(reason="Unsupported")
def test_view(self, dtype):
pass
class TestArithmeticOps(base.BaseArithmeticOpsTests):
series_scalar_exc = None
# Does not convert properly a list of FData to a FData
@pytest.mark.skip(reason="Unsupported")
def test_arith_series_with_array(self, dtype):
pass
# Does not error on operations
@pytest.mark.skip(reason="Unsupported")
def test_error(self, dtype):
pass
class TestComparisonOps(base.BaseComparisonOpsTests):
# Cannot be compared with 0
@pytest.mark.skip(reason="Unsupported")
def test_compare_scalar(self, data, all_compare_operators):
pass
# Not sure how to pass it. Should it be reimplemented?
@pytest.mark.skip(reason="Unsupported")
def test_compare_array(self, data, all_compare_operators):
pass
class TestNumericReduce(base.BaseNumericReduceTests):
def check_reduce(self, s, op_name, skipna):
result = getattr(s, op_name)(skipna=skipna)
assert result.n_samples == 1
```
#### File: scikit-fda/tests/test_registration.py
```python
from skfda import FDataGrid
from skfda._utils import _check_estimator
from skfda.datasets import (make_multimodal_samples, make_multimodal_landmarks,
make_sinusoidal_process)
from skfda.exploratory.stats import mean
from skfda.preprocessing.registration import (
normalize_warping, invert_warping, landmark_shift_deltas, landmark_shift,
landmark_registration_warping, landmark_registration, ShiftRegistration)
from skfda.preprocessing.registration.validation import (
AmplitudePhaseDecomposition, LeastSquares,
SobolevLeastSquares, PairwiseCorrelation)
from skfda.representation.basis import Fourier
from skfda.representation.interpolation import SplineInterpolation
import unittest
from sklearn.exceptions import NotFittedError
import numpy as np
class TestWarping(unittest.TestCase):
"""Test warpings functions"""
def setUp(self):
"""Initialization of samples"""
self.time = np.linspace(-1, 1, 50)
interpolation = SplineInterpolation(3, monotone=True)
self.polynomial = FDataGrid([self.time**3, self.time**5],
self.time, interpolation=interpolation)
def test_invert_warping(self):
inverse = invert_warping(self.polynomial)
# Check if identity
id = self.polynomial.compose(inverse)
np.testing.assert_array_almost_equal([self.time, self.time],
id.data_matrix[..., 0],
decimal=3)
def test_standard_normalize_warping(self):
"""Test normalization to (0, 1)"""
normalized = normalize_warping(self.polynomial, (0, 1))
# Test new domain range (0, 1)
np.testing.assert_array_equal(normalized.domain_range, [(0, 1)])
np.testing.assert_array_almost_equal(normalized.grid_points[0],
np.linspace(0, 1, 50))
np.testing.assert_array_almost_equal(
normalized(0)[..., 0], [[0.], [0.]])
np.testing.assert_array_almost_equal(
normalized(1)[..., 0], [[1.], [1.]])
def test_standard_normalize_warping_default_value(self):
"""Test normalization """
normalized = normalize_warping(self.polynomial)
# Test new domain range (0, 1)
np.testing.assert_array_equal(normalized.domain_range, [(-1, 1)])
np.testing.assert_array_almost_equal(normalized.grid_points[0],
np.linspace(-1, 1, 50))
np.testing.assert_array_almost_equal(
normalized(-1)[..., 0], [[-1], [-1]])
np.testing.assert_array_almost_equal(
normalized(1)[..., 0], [[1.], [1.]])
def test_normalize_warping(self):
"""Test normalization to (a, b)"""
a = -4
b = 3
domain = (a, b)
normalized = normalize_warping(self.polynomial, domain)
# Test new domain range (0, 1)
np.testing.assert_array_equal(normalized.domain_range, [domain])
np.testing.assert_array_almost_equal(normalized.grid_points[0],
np.linspace(*domain, 50))
np.testing.assert_array_equal(normalized(a)[..., 0], [[a], [a]])
np.testing.assert_array_equal(normalized(b)[..., 0], [[b], [b]])
def test_landmark_shift_deltas(self):
fd = make_multimodal_samples(n_samples=3, random_state=1)
landmarks = make_multimodal_landmarks(n_samples=3, random_state=1)
landmarks = landmarks.squeeze()
shifts = landmark_shift_deltas(fd, landmarks).round(3)
np.testing.assert_almost_equal(shifts, [0.25, -0.25, -0.231])
def test_landmark_shift(self):
fd = make_multimodal_samples(n_samples=3, random_state=1)
landmarks = make_multimodal_landmarks(n_samples=3, random_state=1)
landmarks = landmarks.squeeze()
original_modes = fd(landmarks.reshape((3, 1, 1)),
aligned=False)
# Test default location
fd_registered = landmark_shift(fd, landmarks)
center = (landmarks.max() + landmarks.min()) / 2
reg_modes = fd_registered(center)
# Test callable location
np.testing.assert_almost_equal(reg_modes, original_modes, decimal=2)
fd_registered = landmark_shift(fd, landmarks, location=np.mean)
center = np.mean(landmarks)
reg_modes = fd_registered(center)
np.testing.assert_almost_equal(reg_modes, original_modes, decimal=2)
# Test integer location
fd_registered = landmark_shift(fd, landmarks, location=0)
center = np.mean(landmarks)
reg_modes = fd_registered(0)
np.testing.assert_almost_equal(reg_modes, original_modes, decimal=2)
# Test array location
fd_registered = landmark_shift(fd, landmarks, location=[0, 0.1, 0.2])
reg_modes = fd_registered([[0], [.1], [.2]], aligned=False)
np.testing.assert_almost_equal(reg_modes, original_modes, decimal=2)
def test_landmark_registration_warping(self):
fd = make_multimodal_samples(n_samples=3, n_modes=2, random_state=9)
landmarks = make_multimodal_landmarks(n_samples=3, n_modes=2,
random_state=9)
landmarks = landmarks.squeeze()
# Default location
warping = landmark_registration_warping(fd, landmarks)
center = (landmarks.max(axis=0) + landmarks.min(axis=0)) / 2
np.testing.assert_almost_equal(
warping(center)[..., 0], landmarks, decimal=1)
# Fixed location
center = [.3, .6]
warping = landmark_registration_warping(fd, landmarks, location=center)
np.testing.assert_almost_equal(
warping(center)[..., 0], landmarks, decimal=3)
def test_landmark_registration(self):
fd = make_multimodal_samples(n_samples=3, n_modes=2, random_state=9)
landmarks = make_multimodal_landmarks(n_samples=3, n_modes=2,
random_state=9)
landmarks = landmarks.squeeze()
original_values = fd(landmarks.reshape(3, 2), aligned=False)
# Default location
fd_reg = landmark_registration(fd, landmarks)
center = (landmarks.max(axis=0) + landmarks.min(axis=0)) / 2
np.testing.assert_almost_equal(fd_reg(center), original_values,
decimal=2)
# Fixed location
center = [.3, .6]
fd_reg = landmark_registration(fd, landmarks, location=center)
np.testing.assert_array_almost_equal(fd_reg(center), original_values,
decimal=2)
class TestShiftRegistration(unittest.TestCase):
"""Test shift registration"""
def setUp(self):
"""Initialization of samples"""
self.fd = make_sinusoidal_process(n_samples=2, error_std=0,
random_state=1)
self.fd.extrapolation = "periodic"
def test_fit_transform(self):
reg = ShiftRegistration()
# Test fit transform with FDataGrid
fd_reg = reg.fit_transform(self.fd)
# Check attributes fitted
self.assertTrue(hasattr(reg, 'deltas_'))
self.assertTrue(hasattr(reg, 'template_'))
self.assertTrue(hasattr(reg, 'n_iter_'))
self.assertTrue(isinstance(fd_reg, FDataGrid))
deltas = reg.deltas_.round(3)
np.testing.assert_array_almost_equal(deltas, [-0.022, 0.03])
# Test with Basis
fd = self.fd.to_basis(Fourier())
reg.fit_transform(fd)
deltas = reg.deltas_.round(3)
np.testing.assert_array_almost_equal(deltas, [-0.022, 0.03])
def test_fit_and_transform(self):
"""Test wrapper of shift_registration_deltas"""
fd = make_sinusoidal_process(n_samples=2, error_std=0, random_state=10)
reg = ShiftRegistration()
response = reg.fit(self.fd)
# Check attributes and returned value
self.assertTrue(hasattr(reg, 'template_'))
self.assertTrue(response is reg)
fd_registered = reg.transform(fd)
deltas = reg.deltas_.round(3)
np.testing.assert_allclose(deltas, [0.071, -0.072])
def test_inverse_transform(self):
reg = ShiftRegistration()
fd = reg.fit_transform(self.fd)
fd = reg.inverse_transform(fd)
np.testing.assert_array_almost_equal(fd.data_matrix,
self.fd.data_matrix, decimal=3)
def test_raises(self):
reg = ShiftRegistration()
# Test not fitted
with np.testing.assert_raises(NotFittedError):
reg.transform(self.fd)
reg.fit(self.fd)
reg.set_params(restrict_domain=True)
        # Test using fit or transform with restrict_domain=True
with np.testing.assert_raises(AttributeError):
reg.transform(self.fd)
with np.testing.assert_raises(AttributeError):
reg.fit(self.fd)
# Test inverse_transform without previous transformation
with np.testing.assert_raises(AttributeError):
reg.inverse_transform(self.fd)
reg.fit_transform(self.fd)
        # Test inverse transform with different number of samples
with np.testing.assert_raises(ValueError):
reg.inverse_transform(self.fd[:1])
fd = make_multimodal_samples(dim_domain=2, random_state=0)
with np.testing.assert_raises(ValueError):
reg.fit_transform(fd)
reg.set_params(initial=[0.])
# Wrong initial estimation
with np.testing.assert_raises(ValueError):
reg.fit_transform(self.fd)
def test_template(self):
reg = ShiftRegistration()
fd_registered_1 = reg.fit_transform(self.fd)
reg_2 = ShiftRegistration(template=reg.template_)
fd_registered_2 = reg_2.fit_transform(self.fd)
reg_3 = ShiftRegistration(template=mean)
fd_registered_3 = reg_3.fit_transform(self.fd)
reg_4 = ShiftRegistration(template=reg.template_)
fd_registered_4 = reg_4.fit(self.fd).transform(self.fd)
np.testing.assert_array_almost_equal(fd_registered_1.data_matrix,
fd_registered_3.data_matrix)
        # With the template fixed, the convergence may vary
np.testing.assert_array_almost_equal(fd_registered_1.data_matrix,
fd_registered_2.data_matrix,
decimal=3)
np.testing.assert_array_almost_equal(fd_registered_2.data_matrix,
fd_registered_4.data_matrix)
def test_restrict_domain(self):
reg = ShiftRegistration(restrict_domain=True)
fd_registered_1 = reg.fit_transform(self.fd)
np.testing.assert_array_almost_equal(
np.array(fd_registered_1.domain_range).round(3), [[0.022, 0.969]])
reg2 = ShiftRegistration(restrict_domain=True, template=reg.template_)
fd_registered_2 = reg2.fit_transform(self.fd)
np.testing.assert_array_almost_equal(
fd_registered_2.data_matrix, fd_registered_1.data_matrix,
decimal=3)
reg3 = ShiftRegistration(restrict_domain=True, template=mean)
fd_registered_3 = reg3.fit_transform(self.fd)
np.testing.assert_array_almost_equal(
fd_registered_3.data_matrix, fd_registered_1.data_matrix)
def test_initial_estimation(self):
reg = ShiftRegistration(initial=[-0.02161235, 0.03032652])
reg.fit_transform(self.fd)
# Only needed 1 iteration until convergence
self.assertEqual(reg.n_iter_, 1)
def test_custom_output_points(self):
reg = ShiftRegistration(output_points=np.linspace(0, 1, 50))
reg.fit_transform(self.fd)
class TestRegistrationValidation(unittest.TestCase):
"""Test shift registration"""
def setUp(self):
"""Initialization of samples"""
self.X = make_sinusoidal_process(error_std=0, random_state=0)
self.shift_registration = ShiftRegistration().fit(self.X)
def test_amplitude_phase_score(self):
scorer = AmplitudePhaseDecomposition()
score = scorer(self.shift_registration, self.X)
np.testing.assert_allclose(score, 0.972095, rtol=1e-6)
def test_amplitude_phase_score_with_output_points(self):
eval_points = self.X.grid_points[0]
scorer = AmplitudePhaseDecomposition(eval_points=eval_points)
score = scorer(self.shift_registration, self.X)
np.testing.assert_allclose(score, 0.972095, rtol=1e-6)
def test_amplitude_phase_score_with_basis(self):
scorer = AmplitudePhaseDecomposition()
X = self.X.to_basis(Fourier())
score = scorer(self.shift_registration, X)
np.testing.assert_allclose(score, 0.995087, rtol=1e-6)
def test_default_score(self):
score = self.shift_registration.score(self.X)
np.testing.assert_allclose(score, 0.972095, rtol=1e-6)
def test_least_squares_score(self):
scorer = LeastSquares()
score = scorer(self.shift_registration, self.X)
np.testing.assert_allclose(score, 0.795933, rtol=1e-6)
def test_sobolev_least_squares_score(self):
scorer = SobolevLeastSquares()
score = scorer(self.shift_registration, self.X)
np.testing.assert_allclose(score, 0.76124, rtol=1e-6)
def test_pairwise_correlation(self):
scorer = PairwiseCorrelation()
score = scorer(self.shift_registration, self.X)
np.testing.assert_allclose(score, 1.816228, rtol=1e-6)
def test_mse_decomposition(self):
fd = make_multimodal_samples(n_samples=3, random_state=1)
landmarks = make_multimodal_landmarks(n_samples=3, random_state=1)
landmarks = landmarks.squeeze()
warping = landmark_registration_warping(fd, landmarks)
fd_registered = fd.compose(warping)
scorer = AmplitudePhaseDecomposition(return_stats=True)
ret = scorer.score_function(fd, fd_registered, warping=warping)
np.testing.assert_allclose(ret.mse_amp, 0.0009866997121476962)
np.testing.assert_allclose(ret.mse_pha, 0.11576935495450151)
np.testing.assert_allclose(ret.r_squared, 0.9915489952877273)
np.testing.assert_allclose(ret.c_r, 0.999999, rtol=1e-6)
def test_raises_amplitude_phase(self):
scorer = AmplitudePhaseDecomposition()
# Inconsistent number of functions registered
with np.testing.assert_raises(ValueError):
scorer.score_function(self.X, self.X[:2])
        # Inconsistent number of warping functions
with np.testing.assert_raises(ValueError):
scorer.score_function(self.X, self.X, warping=self.X[:2])
if __name__ == '__main__':
print()
unittest.main()
```
|
{
"source": "jdtzmn/ChRIS_ultron_backEnd",
"score": 2
}
|
#### File: chris_backend/uploadedfiles/views.py
```python
from rest_framework import generics, permissions
from rest_framework.response import Response
from collectionjson import services
from core.renderers import BinaryFileRenderer
from .models import UploadedFile
from .serializers import UploadedFileSerializer
from .permissions import IsOwnerOrChris
class UploadedFileList(generics.ListCreateAPIView):
"""
A view for the collection of uploaded user files.
"""
queryset = UploadedFile.objects.all()
serializer_class = UploadedFileSerializer
permission_classes = (permissions.IsAuthenticated, IsOwnerOrChris)
def get_queryset(self):
"""
        Overridden to return a custom queryset comprising only the files
owned by the currently authenticated user.
"""
user = self.request.user
# if the user is chris then return all the files in the sandboxed filesystem
if (user.username == 'chris'):
return UploadedFile.objects.all()
return UploadedFile.objects.filter(owner=user)
def perform_create(self, serializer):
"""
        Overridden to associate an owner with the uploaded file before first
saving to the DB.
"""
request_data = serializer.context['request'].data
path = '/'
if 'upload_path' in request_data:
path = request_data['upload_path']
user = self.request.user
path = serializer.validate_file_upload_path(user, path)
serializer.save(owner=user, upload_path=path)
def list(self, request, *args, **kwargs):
"""
        Overridden to return the list of uploaded file instances.
A collection+json template is also added to the response.
"""
response = super(UploadedFileList, self).list(request, *args, **kwargs)
# append write template
template_data = {'upload_path': "", 'fname': ""}
return services.append_collection_template(response, template_data)
class UploadedFileDetail(generics.RetrieveUpdateDestroyAPIView):
"""
A feed's file view.
"""
queryset = UploadedFile.objects.all()
serializer_class = UploadedFileSerializer
permission_classes = (permissions.IsAuthenticated, IsOwnerOrChris)
def retrieve(self, request, *args, **kwargs):
"""
        Overridden to append a collection+json template.
"""
response = super(UploadedFileDetail, self).retrieve(request, *args, **kwargs)
template_data = {"upload_path": ""}
return services.append_collection_template(response, template_data)
class UploadedFileResource(generics.GenericAPIView):
"""
    A view to enable downloading of a file resource.
"""
queryset = UploadedFile.objects.all()
renderer_classes = (BinaryFileRenderer,)
permission_classes = (permissions.IsAuthenticated, IsOwnerOrChris)
def get(self, request, *args, **kwargs):
"""
        Overridden to be able to make a GET request to an actual file resource.
"""
user_file = self.get_object()
return Response(user_file.fname)
```
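The `IsOwnerOrChris` permission class is imported from `.permissions` but not included in this file. A minimal sketch of how such an object-level permission could be written with Django REST Framework follows; it is an assumption for illustration, not the repository's actual implementation:
```python
from rest_framework import permissions


class IsOwnerOrChris(permissions.BasePermission):
    """
    Hypothetical sketch: allow access only to the file's owner or to the
    special 'chris' user.
    """

    def has_object_permission(self, request, view, obj):
        # The 'chris' superuser may access any uploaded file.
        if request.user.username == 'chris':
            return True
        # Everyone else may only access objects they own.
        return obj.owner == request.user
```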
|
{
"source": "jdtzmn/kindle-news-assistant",
"score": 3
}
|
#### File: kindle_news_assistant/delivery/index.py
```python
from os import abort
from typing import List, cast, Optional
from simple_term_menu import TerminalMenu
from .template import DeliveryMethod
from .remarkable import RemarkableDelivery
from .kindle import KindleDelivery
DELIVERY_METHODS: List = [
cast(DeliveryMethod, RemarkableDelivery),
cast(DeliveryMethod, KindleDelivery),
]
class GeneralDeliveryMethod(DeliveryMethod):
"""General delivery method."""
methods: List[str] = [method.__str__(method) for method in DELIVERY_METHODS]
def __init__(self, method: Optional[str]):
"""Choose a delivery method from a list.
:param method: The method flag set through a command line interface argument.
"""
delivery_method_index = (
self.methods.index(method) if method in self.methods else -1
)
        if delivery_method_index == -1:
terminal_menu = TerminalMenu(self.methods)
print("Please select a delivery method:")
optional_index = terminal_menu.show()
delivery_method_index = (
cast(int, optional_index)
if isinstance(optional_index, int)
else abort()
)
self.delivery_method: DeliveryMethod = DELIVERY_METHODS[delivery_method_index]()
def deliver_issue(self, absolute_path: str):
"""Deliver the issue using the selected delivery method.
:param absolute_path: An absolute path for the epub file.
"""
self.delivery_method.deliver_issue(absolute_path)
```
#### File: kindle_news_assistant/delivery/template.py
```python
from abc import ABC, abstractmethod
from typing import NoReturn
class DeliveryMethod(ABC):
"""An abstract class template for a given issue delivery method."""
def __str__(self):
"""Get the display name of the current class. The recommendation is to set this in the \
child class, but it is not required.
:return: The display name of this class. Useful for choosing a delivery method from a list.
"""
return self.__name__ # pylint: disable=no-member
@abstractmethod
def deliver_issue(self, absolute_path: str) -> NoReturn:
"""Handle the delivery of a news issue.
:param absolute_path: An absolute path for the epub file.
"""
```
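A concrete delivery method only needs to subclass `DeliveryMethod` and implement `deliver_issue`. The sketch below is a hypothetical example (the real `RemarkableDelivery` and `KindleDelivery` classes live elsewhere in the package); the import path and destination directory are assumptions:
```python
import shutil

from kindle_news_assistant.delivery.template import DeliveryMethod


class LocalCopyDelivery(DeliveryMethod):
    """Hypothetical delivery method that copies the epub into a local folder."""

    def deliver_issue(self, absolute_path: str):
        # Copy the generated issue somewhere an e-reader app can pick it up.
        shutil.copy(absolute_path, "/tmp/issue.epub")
```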
#### File: kindle_news_assistant/model/send.py
```python
from typing import Optional
from sklearn.neural_network import MLPRegressor # type:ignore
from kindle_news_assistant.agent import Agent, DELIVERY_SIZE
from kindle_news_assistant.model_storage import load_model
from kindle_news_assistant.publisher.construct_book import construct_book_from
from kindle_news_assistant.delivery.index import GeneralDeliveryMethod
def send_articles(
language: Optional[str], thread_count: Optional[int], method: Optional[str]
):
"""Retrieve, filter, and send articles to the user.
:param language: The language to filter articles by
:param thread_count: The number of threads to use during article retrieval
:param method: The method flag set through a command line interface argument
"""
agent = Agent(None, thread_count)
articles = agent.fetch()
perceptron: MLPRegressor = load_model()
articles = agent.filter_by_model(articles, perceptron, language)
limited = articles[:DELIVERY_SIZE]
agent.download(limited)
print("The following articles were selected for delivery:")
print([article.title for article in limited])
print("Constructing epub issue...")
book_path = construct_book_from(limited)
print("Delivering news issue...")
delivery_method = GeneralDeliveryMethod(method)
delivery_method.deliver_issue(book_path)
print("Issue has been successfully delivered.")
```
#### File: kindle_news_assistant/model/test.py
```python
from typing import Optional, cast, Tuple, List
from sklearn.neural_network import MLPRegressor # type:ignore
from newspaper.article import Article
from kindle_news_assistant.agent import Agent
from kindle_news_assistant.model_storage import load_model
def start_test(language: Optional[str], thread_count: Optional[int]):
"""Start the assistant model's test.
:param language: The language to filter articles by
:param thread_count: The number of threads to use during article retrieval.
"""
agent = Agent(True, thread_count)
articles = agent.fetch()
# Filter and download articles
perceptron: MLPRegressor = load_model()
(filtered, complement) = cast(
Tuple[List[Article], List[Article]],
agent.filter_by_model(articles, perceptron, language, True),
)
agent.download(filtered)
agent.download(complement)
print("Model would not suggest: " + str(len(complement)))
print([article.title for article in complement])
print("")
print("Model would suggest: " + str(len(filtered)))
print([article.title for article in filtered])
```
#### File: kindle-news-assistant/tests/test_word_extractor.py
```python
import pytest
from kindle_news_assistant.word_extractor import article_to_frequency, numbers_to_words
@pytest.fixture
def example_article():
return """post solve option moral growth
cousin moral disappoint mole payment moral harmony
exile permission nomination driver experience
genuine broadcast parachute temple relative nomination"""
@pytest.fixture
def expected_frequency():
return [
136,
538,
565,
1312,
1697,
2023,
2926,
3855,
4337,
4903,
4952,
5017,
7423,
9153,
11132,
14378,
16327,
]
@pytest.fixture
def expected_words():
return [
"post",
"payment",
"experience",
"option",
"driver",
"permission",
"relative",
"broadcast",
"temple",
"moral",
"solve",
"genuine",
"harmony",
"nomination",
"cousin",
"exile",
"mole",
]
def test_article_to_frequency(example_article, expected_frequency):
word_frequency = article_to_frequency(example_article)
filtered = [index for (index, value) in enumerate(word_frequency) if not value == 0]
assert filtered == expected_frequency
def test_numbers_to_words(example_article, expected_words):
word_frequency = article_to_frequency(example_article)
words = numbers_to_words(word_frequency)
assert words == expected_words
```
|
{
"source": "jduan/cosmos",
"score": 2
}
|
#### File: rules/files/files.bzl
```python
def _impl(ctx):
"""This rule generates a text file."""
out = ctx.actions.declare_file(ctx.label.name)
ctx.actions.write(
output = out,
# You access attributes via "ctx.attr.username"
content = "Hello {}!\n".format(ctx.attr.username),
)
return [DefaultInfo(files = depset([out]))]
write_file = rule(
implementation = _impl,
attrs = {
# "attr.string()" says the type of the username attr is a string!
"username": attr.string(default = "John", values = ["John", "Jack", "Jake"]),
}
)
```
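Assuming the rule above lives at `rules/files/files.bzl`, a BUILD file could load and invoke it roughly as follows (target name and load label are illustrative); note that `username` only accepts the values declared in the rule:
```python
load("//rules/files:files.bzl", "write_file")

# Produces a file named "greeting" whose content is "Hello Jack!\n".
write_file(
    name = "greeting",
    username = "Jack",
)
```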
#### File: core_bazel/bazel/count_items.bzl
```python
load("//:bazel/counter.bzl", "Counter")
def _count_items(ctx):
item_count = len(ctx.attr.items)
return [Counter(count = item_count)]
count_items = rule(
implementation = _count_items,
attrs = {
"items": attr.int_list(),
},
)
```
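The `Counter` symbol loaded from `bazel/counter.bzl` is not shown in this dump; it is presumably declared with Starlark's `provider()` function, roughly like this (a hedged reconstruction, not the repository's actual file):
```python
# bazel/counter.bzl (assumed contents)
Counter = provider(
    doc = "Carries the number of items counted by the count_items rule.",
    fields = ["count"],
)
```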
#### File: cosmos/misc_scripts/mega_million.py
```python
from random import randint
def play():
print("white balls")
for i in range(0, 5):
print(randint(1, 70))
print("mega ball")
print(randint(1, 25))
play()
```
#### File: python_sandbox/python_sandbox/application-spec-dedup.py
```python
import sys
import yaml
def find_duplicate(filename):
# print("Processing file %s" % filename)
with open(filename) as f:
# use safe_load instead load
data_map = yaml.safe_load(f)
service_idl = data_map.get("service_idl", list())
service_dependency = data_map.get("service_dependency", list())
intersection = set(service_idl or list()) & set(service_dependency or list())
if intersection:
# print("found circular dependencies in %s" % filename)
print(filename)
def main():
for filename in sys.argv[1:]:
find_duplicate(filename)
if __name__ == '__main__':
main()
```
#### File: python_sandbox/effective_python/item25.py
```python
class MyBaseClass(object):
def __init__(self, value):
self.value = value
class TimesFive(MyBaseClass):
def __init__(self, value):
# this is equivalent to python's syntax:
# super(TimesFive, self).__init__(value)
# the python3's way works because python3 lets you reliabily reference the current
# class in methods using the __class__ variable.
print("__class__", __class__)
super().__init__(value)
self.value *= 5
class PlusTwo(MyBaseClass):
def __init__(self, value):
super().__init__(value)
self.value += 2
class GoodWay(TimesFive, PlusTwo):
def __init__(self, value):
super().__init__(value)
```
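The MRO of `GoodWay` is `GoodWay -> TimesFive -> PlusTwo -> MyBaseClass -> object`, so the `super()` chain applies `PlusTwo`'s `+= 2` before `TimesFive`'s `*= 5`. A quick check (not part of the original file; the import path follows the package layout used by the repo's tests):
```python
from python_sandbox.effective_python.item25 import GoodWay

foo = GoodWay(5)
# MyBaseClass sets 5, PlusTwo adds 2, TimesFive multiplies by 5: (5 + 2) * 5
assert foo.value == 35
print([cls.__name__ for cls in GoodWay.mro()])
# ['GoodWay', 'TimesFive', 'PlusTwo', 'MyBaseClass', 'object']
```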
#### File: python_sandbox/effective_python/item26.py
```python
from pprint import pprint
import json
class ToDictMixin(object):
def to_dict(self):
return self._traverse_dict(self.__dict__)
def _traverse_dict(self, instance_dict):
output = {}
for key, value in instance_dict.items():
output[key] = self._traverse(key, value)
return output
def _traverse(self, key, value):
if isinstance(value, ToDictMixin):
return value.to_dict()
elif isinstance(value, dict):
return self._traverse_dict(value)
elif isinstance(value, list):
return [self._traverse(key, i) for i in value]
elif hasattr(value, '__dict__'):
return self._traverse_dict(value.__dict__)
else:
return value
# Note how the JsonMixin class defines both instance methods and class methods. Mix-ins let you add
# either kind of behavior.
class JsonMixin:
@classmethod
def from_json(cls, data):
kwargs = json.loads(data)
return cls(**kwargs)
def to_json(self):
# this mixin depends on the ToDictMixin to work
return json.dumps(self.to_dict())
class BinaryTree(ToDictMixin):
def __init__(self, value, left=None, right=None):
self.value = value
self.left = left
self.right = right
class BinaryTreeWithParent(BinaryTree):
def __init__(self, value, left=None, right=None, parent=None):
super().__init__(value, left=left, right=right)
self.parent = parent
def _traverse(self, key, value):
# Don't visit parent otherwise you will run into a loop
if isinstance(value, BinaryTreeWithParent) and key == 'parent':
return value.value
else:
return super()._traverse(key, value)
# Mix-ins can be composed together.
class DatacenterPack(ToDictMixin, JsonMixin):
def __init__(self, switch=None, machines=None):
self.switch = Switch(**switch)
self.machines = [Machine(**kwargs) for kwargs in machines]
class Switch(ToDictMixin, JsonMixin):
def __init__(self, ports, speed):
self.ports = ports
self.speed = speed
class Machine(ToDictMixin, JsonMixin):
def __init__(self, cores, ram, disk):
self.cores = cores
self.ram = ram
self.disk = disk
def main():
tree = BinaryTree(10,
left=BinaryTree(7, right=BinaryTree(9)),
right=BinaryTree(13, left=BinaryTree(11)))
pprint(tree.to_dict())
root = BinaryTreeWithParent(10)
root.left = BinaryTreeWithParent(7, parent=root)
root.left.right = BinaryTreeWithParent(9, parent=root.left)
pprint(root.to_dict())
serialized = """{
"switch": {"ports": 5, "speed": 1e9},
"machines": [
{"cores": 8, "ram": 32e9, "disk": 5e12},
{"cores": 4, "ram": 16e9, "disk": 1e12},
{"cores": 2, "ram": 4e9, "disk": 500e9}
]
}"""
datacenter_pack = DatacenterPack.from_json(serialized)
roundtrip = datacenter_pack.to_json()
assert json.loads(serialized) == json.loads(roundtrip)
if __name__ == '__main__':
main()
```
#### File: python_sandbox/effective_python/item27.py
```python
from pprint import pprint
class MyObject:
def __init__(self):
self.public_field = 5
self.__private_field = 10
def get_private_field(self):
return self.__private_field
# Class methods have access to private attributes because they are declared
# within the surrounding class block.
@classmethod
def get_private_field_of_instance(cls, instance):
return instance.__private_field
class MyChildObject(MyObject):
# subclass can't access its parent class' private fields
# this would throw:
# AttributeError: 'MyChildObject' object has no attribute '_MyChildObject__private_field'
def get_private_field(self):
return self.__private_field
def main():
foo = MyObject()
assert foo.public_field == 5
assert foo.get_private_field() == 10
# the following throw an exception:
# AttributeError: 'MyObject' object has no attribute '__private_field'
# foo.__private_field
assert MyObject.get_private_field_of_instance(foo) == 10
child = MyChildObject()
# the following throw an exception:
# AttributeError: 'MyChildObject' object has no attribute '_MyChildObject__private_field'
# child.get_private_field()
# but this works!
assert child._MyObject__private_field == 10
# this reveals private attributes:
# {
# '_MyObject__private_field': 10,
# 'public_field': 5
# }
pprint(child.__dict__)
if __name__ == '__main__':
main()
```
#### File: python_sandbox/misc/find_owners.py
```python
import os
import sys
import yaml
def find_owner(file):
fullpath = "/Users/jingjing_duan/repos2/treehouse_worktree3/%s/_infra/project.yml" % file
if os.path.exists(fullpath):
with open(fullpath) as fd:
data = yaml.load(fd, Loader=yaml.FullLoader)
email = data.get("email")
print("%s,%s,%s" % (file, email, data["teams"][0]))
else:
print("%s,," % file)
def main():
for arg in sys.argv[1:]:
if find_owner(arg):
print(arg)
if __name__ == '__main__':
main()
```
#### File: python_sandbox/misc/parse_ipv6_address.py
```python
valid_hexes = set()
for i in range(0, 10):
valid_hexes.add(str(i))
valid_hexes.add('a')
valid_hexes.add('b')
valid_hexes.add('c')
valid_hexes.add('d')
valid_hexes.add('e')
valid_hexes.add('f')
class IPv6Validator(object):
def is_valid(self, part: str) -> bool:
length = len(part)
if length < 1 or length > 4:
return False
return all([ch.lower() in valid_hexes for ch in part])
def validate_addr(self, ip: str) -> bool:
parts = ip.split(":")
if len(parts) != 8:
return False
return all([self.is_valid(part) for part in parts])
def main():
valid_addrs = [
"2001:0db8:85a3:0000:0000:8a2e:0370:7334",
"2001:db8:85a3:0:0:8A2E:0370:7334",
]
invalid_addrs = [
"2001:0db8:85a3::8A2E:037j:7334",
"02001:0db8:85a3:0000:0000:8a2e:0370:7334",
]
validator = IPv6Validator()
print("checking valid addresses")
for addr in valid_addrs:
print(validator.validate_addr(addr))
print("checking invalid addresses")
for addr in invalid_addrs:
print(validator.validate_addr(addr))
if __name__ == '__main__':
main()
```
#### File: tests/effective_python/test_item10.py
```python
import unittest
class TestItem10(unittest.TestCase):
"""
enumerate provides concise syntax for looping over an iterator and getting the index of each
item from the iterator as you go.
"""
def test1(self):
flavor_list = ['vanilla', 'chocolate', 'pecan', 'strawberry']
mapping = {}
for i, flavor in enumerate(flavor_list):
mapping[i+1] = flavor
expected = {
1: 'vanilla',
2: 'chocolate',
3: 'pecan',
4: 'strawberry',
}
self.assertEqual(expected, mapping)
def test2(self):
"""
This is similar to test1. The only difference is that you can pass
the initial index to enumerate!
:return:
:rtype:
"""
flavor_list = ['vanilla', 'chocolate', 'pecan', 'strawberry']
mapping = {}
for i, flavor in enumerate(flavor_list, 1):
mapping[i] = flavor
expected = {
1: 'vanilla',
2: 'chocolate',
3: 'pecan',
4: 'strawberry',
}
self.assertEqual(expected, mapping)
```
#### File: tests/effective_python/test_item16.py
```python
import unittest
from python_sandbox.effective_python.item16 import index_words, index_words2
class TestItem16(unittest.TestCase):
def test1(self):
address = "Four score and seven years ago..."
result = index_words(address)
self.assertEqual([0, 5, 11, 15, 21, 27], result)
def test2(self):
address = "Four score and seven years ago..."
result = index_words2(address)
self.assertEqual([0, 5, 11, 15, 21, 27], list(result))
```
#### File: python_sandbox/tests/test_template.py
```python
import unittest
from python_sandbox.effective_python.item17 import normalize
class TestItem17(unittest.TestCase):
def test1(self):
self.assertEqual(1, 1)
```
|
{
"source": "jduanen/avuePTZ",
"score": 3
}
|
#### File: jduanen/avuePTZ/avueLogger.py
```python
import argparse
import json
import logging
import os
import re
import signal
import socket
import subprocess
import sys
from threading import Timer
import time
import yaml
from yaml import Loader
import paho.mqtt.client as mqtt
from systemd.daemon import notify, Notification
import vcgencmd
DEFAULTS = {
'logLevel': "INFO", #"DEBUG" #"WARNING"
'sampleInterval': 60, # sample interval (1 min)
'mqttBroker': "gpuServer1"
}
MQTT_TOPIC = "/sensors/avue"
APPL_NAME = "AvuePTZ"
APPL_VERSION = "1.1.0"
DEV_TYPE = "Rpi3"
#### FIXME import this from SensorNet.py and add another dependency
SUB_TOPIC_DATA = 0
SUB_TOPIC_COMMAND = 1
SUB_TOPIC_RESPONSE = 2
SUB_TOPIC_ERROR = 3
SUB_TOPIC_STARTUP = 4
#### FIXME import this from SensorNet.py and add another dependency
SUB_TOPICS = {
SUB_TOPIC_DATA: "data",
SUB_TOPIC_COMMAND: "cmd",
SUB_TOPIC_RESPONSE: "response",
SUB_TOPIC_ERROR: "error",
SUB_TOPIC_STARTUP: "startup",
}
vcgen = None
running = True
def _shutdownHandler(signum, frame):
"""????
"""
global running
logging.debug(f"Caught signal: {signum}")
running = False
#### TODO put this in a common file
def macAddress(hostname):
"""Return the MAC address and interface name used to reach the given host
Returns usable strings on error.
Inputs:
hostname: string name of the target host
Returns: string name and string form of MAC address of interface used to
reach the given host
"""
ipAddr = socket.gethostbyname(hostname)
match = re.search(r"^.* dev (.*?) .*$",
str(subprocess.check_output(["ip", "route", "get", ipAddr])))
if not match:
logging.warning(f"Unable to find interface for {ipAddr}")
return "n/a", "00:00:00:00:00:00"
intf = match.group(1)
try:
macAddr = subprocess.check_output(["cat", f"/sys/class/net/{intf}/address"]).strip().decode("utf-8")
except Exception as ex:
logging.warning("Failed to get MAC address for '{hostname}': {ex}")
return "n/a", "00:00:00:00:00:00"
return intf, macAddr
#### TODO put this in a common file
def wifiQuality(interface):
"""Return a quality indicator and RSSI of the given WiFi interface
Returns usable strings on error.
Inputs:
interface: string name of WiFi interface for which to get signal quality
Returns: float value from 0.0 to 1.0 indicating worst to best signal
quality, respectively, and the RSSI value in dBm
"""
match = re.search(r'.* Quality=([0-9]+)/([0-9]+) Signal level=(.*) dBm .*$',
str(subprocess.check_output(["iwlist", interface, "scan"])))
if not match:
logging.warning(f"Unable to scan interface '{interface}'")
return "-0.0", "0"
vals = match.groups()
if len(vals) != 3:
logging.warning(f"Unable to get WiFi quality: {match.groups()}")
return "-0.0", "0"
return int(vals[0])/int(vals[1]), vals[2]
#### TODO put this in a common file
def deviceTemperature():
"""Return the temperature of the device in degrees C
Returns: float value of device temperature in degrees C
"""
global vcgen
if not vcgen:
vcgen = vcgencmd.Vcgencmd()
return vcgen.measure_temp()
#### TODO put this in a common file
class Watchdog():
@staticmethod
def notification(typ):
try:
notify(typ)
except Exception as ex:
logging.warning(f"Systemd notification ({typ}) failed: {ex}")
def __init__(self, timeout):
self.timeout = timeout
self.timer = None
if timeout:
self.timer = Timer(self.timeout, self.handler)
self.timer.start()
Watchdog.notification(Notification.READY)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.stop()
Watchdog.notification(Notification.STOPPING)
def stop(self):
if self.timer:
self.timer.cancel()
self.timer = None
def reset(self):
self.stop()
self.timer = Timer(self.timeout, self.handler)
self.timer.start()
def handler(self):
Watchdog.notification(Notification.WATCHDOG)
if self.timer:
self.reset()
def run(options):
#### FIXME
'''
def _onConnect(client, userdata, flags, rc):
"""????
"""
if rc == 0:
logging.info("Connected to MQTT broker")
else:
logging.error(f"Failed to connect to MQTT broker: {rc}")
def _onMessage(client, userData, message):
parts = message.topic.split('/')
if len(parts) < 5 or len(parts) > 6 or parts[1] != 'sensors' or parts[-1] not in SUB_TOPICS.values():
logging.warning(f"Unrecognized message: {message}")
topic = "/".join(parts)
self.msgQ.put((topic, message.payload.decode("utf-8"), message.timestamp))
'''
for s in ('TERM', 'HUP', 'INT'):
sig = getattr(signal, 'SIG'+s)
signal.signal(sig, _shutdownHandler)
client = mqtt.Client("Alien Font Display Logger")
client.enable_logger(logging.getLogger(__name__))
if client.connect(options.mqttBroker):
logging.error(f"Failed to connect to MQTT broker '{options.mqttBroker}'")
raise Exception("Failed to connect to MQTT broker")
#### FIXME
'''
client.on_message = _onMessage
client.on_connect = _onConnect
'''
client.loop_start()
intfName, macAddr = macAddress(options.mqttBroker)
topicBase = f"{MQTT_TOPIC}/{macAddr}"
quality, rssi = wifiQuality(intfName)
logging.info("Starting")
cmdTopic = f"{topicBase}/{SUB_TOPICS[SUB_TOPIC_COMMAND]}"
msg = f"Startup,{DEV_TYPE},{APPL_NAME},{APPL_VERSION},temp:.1f,q:.4f,rssi:d,{rssi}"
logging.info(f"{cmdTopic},{msg}")
res = client.publish(cmdTopic, payload=msg)
if res.rc:
logging.warning(f"Failed to publish startup message: {res}")
sys.exit(1)
dataTopic = f"{topicBase}/{SUB_TOPICS[SUB_TOPIC_DATA]}"
while running:
temperature = deviceTemperature()
quality, rssi = wifiQuality(intfName)
msg = f"{temperature},{quality:.4f},{rssi}"
logging.info(f"{dataTopic},{msg}")
res = client.publish(dataTopic, payload=msg)
if res.rc:
logging.warning(f"Failed to publish sample message: {res}")
sys.exit(1)
#### TODO think about decoupling sampling interval and watchdog timer
time.sleep(options.sampleInterval)
logging.info("Exiting")
client.loop_stop()
return(0)
def getOpts():
usage = f"Usage: {sys.argv[0]} [-v] [-L <logLevel>] [-l <logFile>] " + \
"[-D] [-m <mqttHost>] [-s <sampleInterval>]"
ap = argparse.ArgumentParser()
ap.add_argument(
"-D", "--disableWatchdog", action="store_true", default=False,
help="Disable watchdog function")
ap.add_argument(
"-L", "--logLevel", action="store", type=str,
default=DEFAULTS['logLevel'],
choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
help="Logging level")
ap.add_argument(
"-l", "--logFile", action="store", type=str,
help="Path to location of logfile (create it if it doesn't exist)")
ap.add_argument(
"-m", "--mqttBroker", action="store", type=str,
default=DEFAULTS['mqttBroker'],
help="Hostname for where the MQTT broker is running")
ap.add_argument(
"-s", "--sampleInterval", action="store", type=int, default=DEFAULTS['sampleInterval'],
help="secs between data samples")
ap.add_argument(
"-v", "--verbose", action="count", default=0,
help="Enable printing of debug info")
opts = ap.parse_args()
if opts.logFile:
logging.basicConfig(filename=opts.logFile,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=opts.logLevel)
else:
logging.basicConfig(level=opts.logLevel,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
if opts.sampleInterval <= 0:
logging.error(f"Invalid sample interval: '{opts.sampleInterval}' secs")
sys.exit(1)
if opts.verbose:
print(f" MQTT Broker: {opts.mqttBroker}")
print(f" Sample Interval: {opts.sampleInterval} secs")
if opts.disableWatchdog:
print(" Watchdog not enabled")
return opts
if __name__ == '__main__':
opts = getOpts()
with Watchdog(opts.sampleInterval) as dog:
r = run(opts)
sys.exit(r)
```
|
{
"source": "jduanen/cnc_pendant",
"score": 2
}
|
#### File: jduanen/cnc_pendant/grblPendant.py
```python
import argparse
import json
import logging
import os
import signal
import sys
import threading
import time
import yaml
from yaml import Loader
from Controller import Controller
from Host import Host
from Pendant import Pendant
from Processor import Processor
DEFAULTS = {
'logLevel': "INFO", #"DEBUG" #"WARNING"
'macroPath': "./whb04b.yml"
}
def run(options):
"""????
"""
def stop():
logging.debug(f"Active Threads: {threading.enumerate()}")
if proc:
logging.debug("Shutting down Processor")
proc.shutdown()
if host:
logging.debug("Shutting down Host")
host.shutdown(False)
if ctlr:
logging.debug("Shutting down Controller")
ctlr.shutdown()
if pend:
logging.debug("Shutting down Pendant")
pend.shutdown()
def shutdownHandler(signum, frame):
logging.debug(f"Caught signal: {signum}")
stop()
for s in ('TERM', 'HUP', 'INT'):
sig = getattr(signal, 'SIG'+s)
signal.signal(sig, shutdownHandler)
def reloadHandler(signum, frame):
logging.debug(f"Caught signal: {signum}")
macros = {}
if os.path.exists(options.macroPath):
with open(options.macroPath, "r") as f:
macros = yaml.load(f, Loader=Loader)
proc.defineMacros(macros)
if options.verbose:
print("Reload Macros:")
json.dump(macros, sys.stdout, indent=4, sort_keys=True)
print("")
else:
logging.warning(f"Macros file '{options.macroPath}' does not exist")
signal.signal(signal.SIGUSR1, reloadHandler)
macros = {}
with open(options.macroPath, "r") as f:
macros = yaml.load(f, Loader=Loader)
if options.verbose:
print("Initial Macros:")
json.dump(macros, sys.stdout, indent=4, sort_keys=True)
print("")
pend = Pendant()
ctlr = Controller()
host = Host()
proc = Processor(pend, ctlr, host, macros)
if proc:
if options.magicCommands:
magicCmdNames = proc.magicCommandNames()
if options.verbose:
print("Magic Commands:")
json.dump(magicCmdNames, sys.stdout, indent=4, sort_keys=True)
print("")
else:
print(f"Magic Commands: {magicCmdNames}")
else:
while proc.isAlive():
#### FIXME do something here
print("running...")
time.sleep(30)
stop()
sys.exit(0)
def getOpts():
usage = f"Usage: {sys.argv[0]} [-v] [-L <logLevel>] [-l <logFile>] " + \
"[-m <macroPath>] [-M]"
ap = argparse.ArgumentParser()
ap.add_argument(
"-L", "--logLevel", action="store", type=str,
default=DEFAULTS['logLevel'],
choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
help="Logging level")
ap.add_argument(
"-l", "--logFile", action="store", type=str,
help="Path to location of logfile (create it if it doesn't exist)")
ap.add_argument(
"-M", "--magicCommands", action="store_true", default=False,
help="Print names of magic commands and exit")
ap.add_argument(
"-m", "--macroPath", action="store", type=str, default=DEFAULTS['macroPath'],
help="Path to YAML file containing macro key definitions")
ap.add_argument(
"-v", "--verbose", action="count", default=0,
help="Enable printing of debug info")
opts = ap.parse_args()
if opts.logFile:
logging.basicConfig(filename=opts.logFile,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=opts.logLevel)
else:
logging.basicConfig(level=opts.logLevel,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
if not os.path.exists(opts.macroPath):
logging.error(f"Macro key definitions file not found: {opts.macroPath}")
sys.exit(1)
if opts.verbose:
print(f" Macro definitions file: {opts.macroPath}")
return opts
if __name__ == '__main__':
opts = getOpts()
r = run(opts)
sys.exit(r)
```
#### File: jduanen/cnc_pendant/Processor.py
```python
import logging
import threading
from parse import parse
from Controller import Controller
from Host import Host
import Pendant
JOG_SPEED = 500 #### FIXME
MAX_SPEED = 1000 #### FIXME
MAX_NUM_MACROS = 10
STATUS_POLL_INTERVAL = 0.5
assert JOG_SPEED <= MAX_SPEED
# N.B. This can be global as there's a single writer (PendantInput)
moveMode = Pendant.MotionMode.STEP
# N.B. This can be global as there's a single writer (PendantInput)
axisMode = None
class ControllerInput(threading.Thread):
"""????
<gets inputs from controller and does something with them>
"""
def __init__(self, runningEvent, controller):
self.running = runningEvent
self.ctlr = controller
super().__init__()
def run(self):
logging.debug("Starting controllerInput thread")
self.ctlr.start()
while self.running.isSet():
print("wait for ctlr input")
inputs = self.ctlr.getInput()
print("CIN:", inputs)
#### TODO if type 7 or 1, reset the display
logging.debug("Exited ControllerInput")
class ControllerStatus(threading.Thread):
"""????
<gets status from controller and updates the pendant display>
"""
COORDINATE_SPACE_MAP = {
'MPos': Pendant.CoordinateSpace.MACHINE,
'WPos': Pendant.CoordinateSpace.WORKPIECE
}
def __init__(self, runningEvent, controller, pendant):
self.running = runningEvent
self.ctlr = controller
self.pendant = pendant
super().__init__()
self.feedSpeed = 0
self.spindleSpeed = 0
def _parseStatus(self, status):
status = status[1:-1].split('|')
parts = {p.split(':')[0]: p.split(':')[1] for p in status[1:]}
parsedStatus = {'state': status[0]}
for name, part in parts.items():
if name.endswith('Pos'):
parsedStatus['coordinateSpace'] = ControllerStatus.COORDINATE_SPACE_MAP[name]
parsedStatus['coordinates'] = list(map(float, part.split(',')))
elif name == "Bf":
parsedStatus['planBuffers'] = part.split(',')[0]
parsedStatus['rxBuffers'] = part.split(',')[1]
elif name == "Ln":
parsedStatus['lineNumber'] = int(part)
elif name == "FS":
parsedStatus['feedSpeed'] = float(part.split(',')[0])
parsedStatus['spindleSpeed'] = int(part.split(',')[1])
elif name == "F":
parsedStatus['feedSpeed'] = int(part)
elif name == "WCO":
#### TODO parse this further
parsedStatus['workCoordinateOffset'] = part
elif name == "A":
#### TODO parse this further
parsedStatus['accessoryState'] = part
elif name == "Ov":
#### TODO parse this further
parsedStatus['overrides'] = part
elif name == "Pn":
#### TODO parse this further
parsedStatus['pinStates'] = part
else:
logging.error(f"Unimplemented status field: {name}: {part}")
return parsedStatus
def run(self):
global moveMode # N.B. read-only in this thread
global axisMode # N.B. read-only in this thread
logging.debug("Starting controllerStatus thread")
self.ctlr.start()
while self.running.isSet():
status = self._parseStatus(self.ctlr.getStatus())
logging.info(f"Status: {status}")
self.pendant.updateDisplay(moveMode,
status['coordinateSpace'],
status['coordinates'] if axisMode == Pendant.AxisMode.XYZ else [0.0, 0.0, 0.0],
status['feedSpeed'] if status['feedSpeed'] != self.feedSpeed else 0,
status.get('spindleSpeed', 0))
self.feedSpeed = status['feedSpeed']
self.spindleSpeed = status.get('spindleSpeed', 0)
logging.debug("Exited ControllerStatus")
class StatusPolling(threading.Thread):
"""????
"""
def __init__(self, stopEvent, controller):
self.stop = stopEvent
self.ctlr = controller
super().__init__()
def run(self):
logging.debug("Starting StatusPolling Thread")
while not self.stop.wait(STATUS_POLL_INTERVAL):
logging.debug("StatusPolling: Poll Status")
self.ctlr.realtimeCommand("STATUS")
#### TODO make use of Event objects consistent with other modules
class Processor():
"""????
"""
def __init__(self, pendant, controller, host, macros={}):
assert isinstance(pendant, Pendant.Pendant), f"pendant is not an instance of Pendant: {type(pendant)}"
self.pendant = pendant
assert isinstance(controller, Controller), f"controller is not an instance of Controller: {type(controller)}"
self.controller = controller
self.spindleState = False
self.magicCommands = self._initMagic()
self.macros = self._defineMacros(macros)
self.p2cRunning = threading.Event()
self.p2cRunning.set()
self.p2cThread = threading.Thread(target=self.pendantInput, name="p2c")
self.c2pRunning = threading.Event()
self.c2pRunning.set()
self.c2piThread = ControllerInput(self.c2pRunning, self.controller)
self.c2psThread = ControllerStatus(self.c2pRunning, self.controller, self.pendant)
self.statusStop = threading.Event()
self.statusStop.clear()
self.statusThread = StatusPolling(self.statusStop, self.controller)
self.p2cThread.start()
self.c2piThread.start()
self.c2psThread.start()
self.statusThread.start()
#### TODO hook up the host
def _executeMagic(self, commands):
results = ""
for cmd in commands:
results += self.magicCommands[cmd]() + '\n'
return results
def _initMagic(self):
def dumpState():
state = f"Running threads: {threading.enumerate()}\n"
state += f"Globals: moveMode={moveMode}, axisMode={axisMode}\n"
#### TODO add more state info
return state
def commandClosure(cmdType, cmdName):
def closure():
if cmdType == "dollar":
cmd = self.controller.dollarCommand(cmdName)
elif cmdType == "realtime":
cmd = self.controller.realtimeCommand(cmdName)
return cmd
return closure
return {
'VIEW_SETTINGS': commandClosure("dollar", "VIEW_SETTINGS"),
'VIEW_PARAMETERS': commandClosure("dollar", "VIEW_PARAMETERS"),
'VIEW_PARSER': commandClosure("dollar", "VIEW_PARSER"),
'VIEW_BUILD': commandClosure("dollar", "VIEW_BUILD"),
'VIEW_STARTUPS': commandClosure("dollar", "VIEW_STARTUPS"),
'HELP': commandClosure("dollar", "HELP"),
'KILL_ALARM': commandClosure("dollar", "KILL_ALARM"),
'CYCLE_START': commandClosure("realtime", "CYCLE_START"),
'FEED_HOLD': commandClosure("realtime", "FEED_HOLD"),
'STATUS': commandClosure("realtime", "STATUS"),
'RESET': commandClosure("realtime", "RESET"),
'JOG_CANCEL': commandClosure("realtime", "JOG_CANCEL"),
'DUMP_STATE': dumpState
}
def _defineMacros(self, macros):
##assert isinstance(macros, dict) and all([isinstance(k, int) and isinstance(v, dict) for k, v in dict.items()]), f"Invalid macros -- must be dict of dicts with integer keys and dict values: {macros}"
##assert all(['commands' in m.keys() and 'description' in m.keys() for m in macros]), f"Invalid macros -- each definition must have 'commands' and 'description' keys"
'''
for m in macros:
m.update((k, v.split()) for k, v in m.items() if k in ('before', 'after') and isinstance(v, str))
'''
macroList = [None for _ in range(0, MAX_NUM_MACROS + 1)]
for name, macro in macros.items():
res = parse("Macro-{num:d}", name)
if res:
num = res['num']
else:
logging.warning(f"Invalid macro name '{name}': ignoring")
continue
if num <= 0 or num > MAX_NUM_MACROS:
logging.warning(f"Invalid macro number '{num}': ignoring")
continue
#### TODO validate macro -- turn off motion and run through grbl to see if good
#### TODO validate magic commands in before and after fields
#assert cmd in MAGIC_COMMANDS, f"Invalid magic command: {cmd}"
macro.update((k, v.split()) for k, v in macro.items() if k in ('before', 'after') and isinstance(v, str))
macroList[num] = macro
return macroList
def defineMacros(self, macros):
self.macros = self._defineMacros(macros)
def magicCommandNames(self):
return list(self.magicCommands.keys())
def shutdown(self):
if self.statusStop.isSet():
logging.warning("ControllerStatus thread not running")
else:
self.statusStop.set()
logging.debug("Waiting for ControllerStatus thread to end")
self.statusThread.join()
logging.debug("ControllerStatus thread done")
if self.p2cRunning.isSet():
self.p2cRunning.clear()
logging.debug("Waiting for P2C thread to end")
self.p2cThread.join()
logging.debug("P2C thread done")
else:
logging.warning("Pendant to Controller thread not running")
if self.c2pRunning.isSet():
self.c2pRunning.clear()
logging.debug("Shutting down ControllerInput")
self.controller.shutdown()
assert self.controller.isShutdown(), "Controller not shut down"
logging.debug("Waiting for C2P threads to end")
self.c2piThread.join()
self.c2psThread.join()
logging.debug("C2P threads done")
else:
logging.warning("Controller to Pendant thread not running")
def isAlive(self):
"""????
"""
return self.p2cThread.is_alive()
def pendantInput(self):
"""????
"""
global moveMode # N.B. this thread is the single writer
global axisMode # N.B. this thread is the single writer
logging.debug("Starting pendantInput thread")
self.pendant.start()
while self.p2cRunning.isSet():
inputs = self.pendant.getInput()
if not inputs:
continue
inputs = inputs['data']
axisMode = Pendant.AxisMode.OFF if inputs['axis'] == 6 else Pendant.AxisMode.XYZ if inputs['axis'] < 20 else Pendant.AxisMode.ABC
logging.info(f"PendantInput: {inputs}")
key = Pendant.KEYMAP[inputs['key1']] if inputs['key2'] == 0 else Pendant.FN_KEYMAP[inputs['key2']] if inputs['key1'] == Pendant.KEYNAMES_MAP['Fn'] else None
if key:
if key == "Reset":
logging.debug("PI -- Reset and unlock GRBL")
self.controller.realtimeCommand("RESET")
self.controller.killAlarmLock()
elif key == "Stop":
logging.debug("PI -- Stop: feed hold")
self.controller.realtimeCommand("FEED_HOLD")
elif key == "StartPause":
logging.debug("PI -- StartPause: cycle start")
self.controller.realtimeCommand("CYCLE_START")
elif key.startswith("Feed"):
#### FIXME select 100/10/1 increment based on feed switch setting
if key == "Feed+":
logging.debug("PI -- Feed+: TBD")
elif key == "Feed-":
logging.debug("PI -- Feed-: TBD")
elif key.startswith("Spindle"):
#### FIXME select 100/10/1 increment based on feed switch setting
if key == "Spindle+":
logging.debug("PI -- Spindle+: TBD")
if key == "Spindle-":
logging.debug("PI -- Spindle-: TBD")
elif key == "M-Home":
logging.debug("PI -- M-Home: TBD")
elif key == "Safe-Z":
logging.debug("PI -- Save-Z: TBD")
elif key == "W-Home":
logging.debug("PI -- W-Home: TBD")
elif key == "S-on/off":
if self.spindleState:
logging.debug(f"PI -- Spindle: off")
self.spindleState = False
self.controller.streamCmd("M5")
else:
logging.debug(f"PI -- Spindle: on")
self.spindleState = True
self.controller.streamCmd("M3")
elif key == "Fn":
logging.debug("PI -- Fn")
elif key == "Probe-Z":
logging.debug("PI -- Probe-Z: TBD")
elif key == "Continuous":
moveMode = Pendant.MotionMode.CONT
logging.debug(f"PI -- Continuous: set moveMode to {moveMode}")
elif key == "Step":
moveMode = Pendant.MotionMode.STEP
logging.debug(f"PI -- Step: set moveMode to {moveMode}")
elif key == "PendantReset":
# hard-coded as key to press after Pendant power-on
logging.debug("PI -- PendantReset: bring out of reset")
self.pendant.reset(moveMode)
break
elif key == "ApplicationExit":
# hard-coded as application shutdown key
logging.debug("PI -- ApplicationExit: SHUTDOWN")
self.p2cRunning.clear()
break
elif key.startswith("Macro-"):
res = parse("Macro-{num:d}", key)
if res:
num = res['num']
if not self.macros[num]:
logging.error(f"Undefined macro: Macro-{num}")
else:
logging.debug(f"PI -- Macro-{num}: {self.macros[num]['description']}")
magic = self.macros[num]['before'] if 'before' in self.macros[num] else []
logging.debug(f"PI -- Before Magic Commands: {magic}")
res = self._executeMagic(magic)
logging.info(res)
if self.macros[num]['commands']:
self.controller.streamCmd(self.macros[num]['commands'])
magic = self.macros[num]['after'] if 'after' in self.macros[num] else []
logging.debug(f"PI -- After Magic Commands: {magic}")
res = self._executeMagic(magic)
logging.info(res)
else:
logging.error(f"Failed to parse Macro number: {key}")
else:
logging.warning(f"Unimplemented Key: {key}")
if inputs['jog']:
if axisMode == Pendant.AxisMode.XYZ:
incr = Pendant.Pendant.INCR[moveMode][inputs['incr']]
if incr:
if moveMode == Pendant.MotionMode.STEP:
distance = inputs['jog'] * incr
speed = JOG_SPEED
elif moveMode == Pendant.MotionMode.CONT:
distance = 1 #### FIXME
speed = MAX_SPEED * incr * (1 if inputs['jog'] > 0 else -1)
axis = Pendant.AXIS[inputs['axis']]
logging.debug(f"PI -- Jog: $J={axis}{distance} F{speed}")
self.controller.jogIncrementalAxis(axis, distance, speed)
elif axisMode == Pendant.AxisMode.ABC:
logging.error("TBD")
self.pendant.shutdown()
logging.debug("Exit PendantInput")
#
# TEST
#
if __name__ == '__main__':
import sys
import time
#### FIXME add real tests
logging.basicConfig(level="DEBUG",
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
print("START")
p = Pendant.Pendant()
print("p")
c = Controller()
print("c")
h = Host()
print("h")
proc = Processor(p, c, h)
print("RUN")
while proc.isAlive():
print("running...")
time.sleep(10)
print("SHUTTING DOWN")
proc.shutdown()
print("DONE")
sys.exit(0)
```
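`Processor._defineMacros` expects the YAML file passed to `grblPendant.py` to decode into a dict keyed by `Macro-<n>` (n from 1 to `MAX_NUM_MACROS`), where each entry has `commands` and `description` plus optional `before`/`after` strings naming magic commands. An illustrative, hypothetical definition expressed as the dict `yaml.load()` would produce (the G-code itself is only an example):
```python
macros = {
    "Macro-1": {
        "description": "Spindle warm-up cycle",
        # G-code streamed to the controller when the macro key is pressed.
        "commands": "M3 S6000\nG4 P5\nM5",
        # Magic commands run before/after streaming; names must match
        # Processor.magicCommandNames(), e.g. KILL_ALARM, STATUS, FEED_HOLD.
        "before": "KILL_ALARM",
        "after": "STATUS",
    },
}
# proc.defineMacros(macros)  # would install these definitions at runtime
```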
|
{
"source": "jduanen/cnc_video",
"score": 2
}
|
#### File: jduanen/cnc_video/video.py
```python
import logging
import math
import cv2
MAX_CROSSHAIR_THICKNESS = 5
MAX_VIDEO_WIDTH = (4 * 1024)
MAX_VIDEO_HEIGHT = (2 * 1024)
# scale = 1, thickness = 1
FONT_FACE_0 = cv2.FONT_HERSHEY_SIMPLEX # 27 close to fixed
FONT_FACE_1 = cv2.FONT_HERSHEY_PLAIN # 15 small, fixed
FONT_FACE_2 = cv2.FONT_HERSHEY_DUPLEX # 27 close to fixed
FONT_FACE_3 = cv2.FONT_HERSHEY_COMPLEX # 27 light serif
FONT_FACE_4 = cv2.FONT_HERSHEY_TRIPLEX # 27 heavy serif
FONT_FACE_5 = cv2.FONT_HERSHEY_COMPLEX_SMALL # 19 small serif
FONT_FACE_6 = cv2.FONT_HERSHEY_SCRIPT_SIMPLEX # 27 script-like
FONT_FACE_7 = cv2.FONT_HERSHEY_SCRIPT_COMPLEX # 27 script-like
class Crosshair(object):
"""
Crosshair to be alpha-blended over video.
    Horizontal and vertical parts can be (independently) highlighted, e.g., to
indicate alignment with selected feature.
"""
def __init__(self, width, height, confVals, adjustments=False):
"""
Instantiate Crosshair object.
@param width Crosshair image width in pixels
        @param height Crosshair image height in pixels
@param confVals See 'crosshair' field in config dict
@param adjustments Enable real-time value adjustment inputs
The given width and height must be the same as that of the video image.
"""
self.hiH = False
self.hiV = False
self.width = width
self.height = height
self.color = confVals['color']
self.thick = confVals['thickness']
self.alpha = confVals['alpha']
self.highlightColor = confVals['highlightColor']
self.adjustments = adjustments
if not self._validate():
raise ValueError
def _validate(self):
if self.width < 0 or self.width > MAX_VIDEO_WIDTH:
logging.error("Invalid crosshair image width: %d", self.width)
return False
if self.height < 0 or self.height > MAX_VIDEO_HEIGHT:
logging.error("Invalid crosshair image height: %d", self.height)
return False
if self.thick < 1 or self.thick > MAX_CROSSHAIR_THICKNESS:
logging.error("Invalid crosshair thickness: %d", self.thick)
return False
if ((len(self.color) != 3) or (min(self.color) < 0) or
(max(self.color) > 255)):
logging.error("Invalid crosshair color: %s", self.color)
return False
if self.color == self.highlightColor:
logging.warning("Crosshair and highlight colors are the same")
if self.alpha < 0.0 or self.alpha > 1.0:
logging.error("Invalid crosshair alpha: %f", self.alpha)
return False
return True
def _render(self, img, hiliteH=False, hiliteV=False):
        # Integer pixel coordinates required by cv2.line
        hStart = (0, (self.height // 2))
        hEnd = (self.width, (self.height // 2))
        vStart = ((self.width // 2), 0)
        vEnd = ((self.width // 2), self.height)
if hiliteH:
color = self.highlightColor
else:
color = self.color
cv2.line(img, hStart, hEnd, color, self.thick)
if hiliteV:
color = self.highlightColor
else:
color = self.color
cv2.line(img, vStart, vEnd, color, self.thick)
return img
def setHighlightH(self, val):
"""
Turn highlight on/off for horizontal line of crosshair.
        @param val If True, highlight horizontal line of crosshair
"""
if not isinstance(val, bool):
raise ValueError
self.hiH = val
def setHighlightV(self, val):
"""
Turn highlight on/off for vertical line of crosshair.
        @param val If True, highlight vertical line of crosshair
"""
if not isinstance(val, bool):
raise ValueError
self.hiV = val
def overlay(self, img):
"""
Alpha-blend the crosshairs onto the (processed) video frame.
@param img Image onto which crosshair is overlayed
"""
ovrly = self._render(img.copy(), self.hiH, self.hiV)
if self.adjustments:
self.alpha = (cv2.getTrackbarPos('alpha', 'view') / 100.0)
cv2.addWeighted(ovrly, self.alpha, img, (1.0 - self.alpha), 0, img)
class OnScreenDisplay(object):
"""
On-screen display ????
<(optionally) put stuff in all four corners>
"""
MAX_LINES = 3
MAX_CHARS = 10
TOP_LEFT, TOP_RIGHT, BOTTOM_LEFT, BOTTOM_RIGHT = range(4)
def __init__(self, config):
"""
Instantiate OSD object.
        @param config Configuration dict; its 'osd' field holds the OSD settings
"""
confVals = config['osd']
self.fontFace = confVals['face']
self.fontColor = confVals['color']
self.fontScale = confVals['scale']
self.fontThickness = confVals['thickness']
txtSize, baseline = cv2.getTextSize("M" * OnScreenDisplay.MAX_CHARS,
self.fontFace, self.fontScale,
self.fontThickness)
self.txtHeight = txtSize[1]
self.maxStrWidth = txtSize[0]
imgWidth = config['imgWidth']
imgHeight = config['imgHeight']
yOffset = 5
xLeftOffset = 5
xRightOffset = (imgWidth - (self.maxStrWidth + 1))
lineSpacing = int(self.txtHeight / 3.0) + 3
lineHeight = (self.txtHeight + lineSpacing)
self.lineOrigins = [
[ # TOP_LEFT
(xLeftOffset, yOffset + self.txtHeight),
(xLeftOffset, yOffset + (lineHeight * 2)),
(xLeftOffset, yOffset + (lineHeight * 3))
],
[ # TOP_RIGHT
(xRightOffset, yOffset + self.txtHeight),
(xRightOffset, yOffset + (lineHeight * 2)),
(xRightOffset, yOffset + (lineHeight * 3))
],
[ # BOTTOM_LEFT
(xLeftOffset, imgHeight - yOffset),
(xLeftOffset, imgHeight - (yOffset + lineHeight)),
(xLeftOffset, imgHeight - (yOffset + (lineHeight * 2)))
],
[ # BOTTOM_RIGHT
(xRightOffset, imgHeight - yOffset),
(xRightOffset, imgHeight - (yOffset + lineHeight)),
(xRightOffset, imgHeight - (yOffset + (lineHeight * 2)))
]
]
def overlay(self, img, corner, lineNum, text):
"""
Overlay text on image at given location.
"""
if corner < OnScreenDisplay.TOP_LEFT or \
corner > OnScreenDisplay.BOTTOM_RIGHT:
logging.error("Invalid OSD location value: %d", corner)
raise ValueError
origTextLen = len(text)
(txtWidth, txtHeight), b = cv2.getTextSize(text, self.fontFace,
self.fontScale,
self.fontThickness)
while (txtWidth > self.maxStrWidth):
text = text[:-1]
(txtWidth, txtHeight), b = cv2.getTextSize(text, self.fontFace,
self.fontScale,
self.fontThickness)
if len(text) != origTextLen:
logging.warning("Text too long; truncated")
if lineNum < 0 or lineNum >= OnScreenDisplay.MAX_LINES:
logging.error("Invalid OSD line number: %d", lineNum)
raise ValueError
bottomLeft = self.lineOrigins[corner][lineNum]
cv2.putText(img, text, bottomLeft, self.fontFace, self.fontScale,
self.fontColor, self.fontThickness, cv2.LINE_AA, False)
return img
class Measurement(object):
def __init__(self, width, height, calData):
self.deltaX = None # distance to X axis in mm (float)
self.deltaY = None # distance to Y axis in mm (float)
self.distance = None # distance to origin in mm (float)
self.width = width # width of image in pixels (int)
self.height = height # height of image in pixels (int)
        self.originX = (width // 2) # horiz center of image in pixels (int)
        self.originY = (height // 2) # vertical center of image in pixels (int)
self.calib = calData
def getValues(self):
return self.deltaX, self.deltaY, self.distance
# take x/y in pixel coordinates and save distances
def setValues(self, x, y):
#### FIXME compute distances in mm (using calibration)
self.deltaX = (x - self.originX)
self.deltaY = (self.originY - y)
self.distance = math.sqrt(self.deltaX**2 + self.deltaY**2)
def getDeltaX(self):
return self.deltaX
def getDeltaY(self):
return self.deltaY
def getDistance(self):
return self.distance
# Object that encapsulates all video processing to be done on the given
# input video image stream.
class VideoProcessing(object):
def __init__(self):
pass
def processFrame(self, img):
#### TODO run img through camera calibration correction matrix
output = {}
#### Detection Pipeline:
#### * cvt2gray
#### * cvSmooth
#### * cvThreshold
#### * cvCanny
#### * cvFindContours
#### * cvApproxPoly
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
output['variance'] = cv2.Laplacian(gray, cv2.CV_64F).var()
"""
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
####################
# Line detection (with threshold sliders)
if config['adjustments']:
thrs1 = cv2.getTrackbarPos('thrs1', 'view')
thrs2 = cv2.getTrackbarPos('thrs2', 'view')
else:
#### FIXME make these be reasonable values/variables
thrs1 = 3000
thrs2 = 4500
edges = cv2.Canny(gray, thrs1, thrs2, apertureSize = 5)
lines = cv2.HoughLinesP(edges, 1, np.pi/180, 2, None, 30, 1)
if lines is not None:
for line in lines[0]:
pt1 = (line[0], line[1])
pt2 = (line[2], line[3])
cv2.line(img, pt1, pt2, (0, 0, 255), 2)
###############
# Corner detection
gray = np.float32(gray)
blkSize = cv2.getTrackbarPos('blkSize', 'view')
kSize = (cv2.getTrackbarPos('kernelSize', 'view') | 1)
k = (cv2.getTrackbarPos('kVal', 'view') / 100.0)
dst = cv2.cornerHarris(gray, blkSize, kSize, k)
#dst = cv2.cornerHarris(gray, 2, 3, 0.04) # img, blockSize, ksize, k
#result is dilated for marking the corners, not important
dst = cv2.dilate(dst, None)
# Threshold for an optimal value, it may vary depending on the image.
img[dst > 0.01 * dst.max()] = [0, 0, 255]
###############
# FAST detector
# Initiate FAST object with default values
fast = cv2.FastFeatureDetector_create()
####fast.setNonmaxSuppression(False)
# find and draw the keypoints
kp = fast.detect(img, None)
cv2.drawKeypoints(img, kp, img, color=(255,0,0))
################
cv2.goodFeaturesToTrack(blurred, # img
500, # maxCorners
0.03, # qualityLevel
10, # minDistance
None, # corners,
None, # mask,
2, # blockSize,
useHarrisDetector=True, # useHarrisDetector,
k=0.04 # k
)
###############
cornerMap = cv.CreateMat(im.height, im.width, cv.CV_32FC1)
cv.CornerHarris(imgray, cornerMap,3)
for y in range(0, imgray.height):
for x in range (0, imgray.width):
harris = cv.Get2D(cornerMap, y, x)
if harris[0] > 10e-06:
temp = cv.Circle(im, (x,y), 2, cv.RGB(115,0,25))
###############
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
gray = np.float32(gray)
dst = cv2.cornerHarris(gray, 2, 3, 0.04)
for y in range(0, gray.shape[0]):
for x in range(0, gray.shape[1]):
harris = cv2.Get2D(cv2.fromarray(dst), y, x) # get the x,y value
# check the corner detector response
if harris[0] > (0.01 * dst.max()):
print x,y # these are the locations of the matches
print 'Distance in pixels from origin: %d' % math.sqrt(x**2+y**2)
# draw a small circle on the original image
cv2.circle(img, (x,y), 2, (155, 0, 25))
###############
corners = cv2.goodFeaturesToTrack(img, 4, 0.5, 10)
"""
return output
def getNearestFeature(self, x, y):
#### TODO implement this
return x, y
#
# TEST
#
if __name__ == '__main__':
print("TBD")
```
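Only the variance-of-the-Laplacian sharpness measure is live in `processFrame()` above; the rest of the detection pipeline is parked in the commented-out block. A minimal standalone sketch of that metric, with an assumed file name and threshold that are not from the repo:
```python
import cv2

def sharpness(imagePath):
    """Return the variance of the Laplacian; higher values mean a sharper image."""
    img = cv2.imread(imagePath)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return cv2.Laplacian(gray, cv2.CV_64F).var()

if __name__ == '__main__':
    # 100.0 is an arbitrary example threshold, not a value taken from cnc_video
    print("blurry" if sharpness("frame.jpg") < 100.0 else "sharp")
```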
#### File: jduanen/cnc_video/xcarve.py
```python
import logging
from grbl import GrblDevice
# X-Carve travel limits (in mm)
MAX_X = 260.00
MAX_Y = 260.00
MAX_Z = 100.00
class XCarve(GrblDevice):
def __init__(self, config):
"""
????
@param serialDev ?
"""
cnc = config['cnc']
if 'device' not in cnc:
logging.error("Must provide serial device name")
raise RuntimeError
serialDevice = cnc['device']
super(XCarve, self).__init__(serialDevice)
def home(self):
#### FIXME
return
def probe(self):
#### FIXME
return
def gotoMaxZ(self):
#### FIXME
return
def getPosition(self):
#### FIXME
status = self.getCurrentStatus()
print("Status: {0}".format(status))
x = y = z = None
return x, y, z
#
# TEST
#
if __name__ == '__main__':
#### FIXME
print("TBD")
```
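`XCarve` is still mostly stubs, but the constructor contract is fixed: a config dict with a `cnc.device` entry naming the GRBL serial port. A hypothetical usage sketch (the device path and the behavior of the inherited `GrblDevice` are assumptions):
```python
# Hypothetical usage; the motion methods above are still FIXME stubs.
from xcarve import XCarve

config = {'cnc': {'device': '/dev/ttyACM0'}}  # assumed serial port name
xc = XCarve(config)
x, y, z = xc.getPosition()  # currently prints the raw GRBL status and returns None values
```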
|
{
"source": "jduanen/Enlighten",
"score": 3
}
|
#### File: jduanen/Enlighten/Enlighten.py
```python
import logging
import os
import requests
import sys
import time
URL_PREFIX = "https://api.enphaseenergy.com/api/v2/systems"
API_CMDS = ('consumptionStats',
'envoys',
'inventory',
'productionMeters',
'rgmStats',
'stats',
'summary',
'systems')
#### TODO document this
class Enlighten():
"""An object that encapsulates the Enphase Enlighten service's REST API.
"""
@staticmethod
def _rest(url):
r = requests.get(url)
if r.status_code == 200:
response = r.json()
else:
logging.error(f"Failed REST call: {r.json()}")
response = None
logging.info(response)
return response
def __init__(self, uid, apiKey, sysId=None, hitsPerMin=10):
"""An object that encapsulates the Enphase Enlighten service's REST API.
Parameters
uid: string user Id from Enphase account
apiKey: string API key from Enphase Enlighten
sysId: optional string that identifies a specific system
"""
self.uid = uid
self.apiKey = apiKey
self.hitsPerMin = hitsPerMin
self.secsBetweenHits = 60 / self.hitsPerMin
self.lastHit = None
self.allSystems = self.systems()
if sysId:
self.sysId = sysId
else:
self.sysId = self.allSystems['systems'][0]['system_id']
logging.warning("System Id not given, using first system found")
self.urlPrefix = f"{URL_PREFIX}/{self.sysId}/"
self.urlArgs = f"?key={self.apiKey}&user_id={self.uid}"
#### FIXME try to run back-to-back calls faster, but stay under minute-granularity rates
def rateLimitedREST(self, url):
"""Make a rate-limited call on the Enlighten server with the given URL
Waits until at least the specified amount of time has elapsed since
the last call.
Parameters
url: string with the url to use to call the Enlighten server
Returns: JSON object with result of the REST invocation
"""
if self.lastHit:
delta = time.time() - self.lastHit
if delta < self.secsBetweenHits:
time.sleep(self.secsBetweenHits - delta)
self.lastHit = time.time()
return Enlighten._rest(url)
def systems(self, isoFmt=False):
"""Returns information on all systems assoicated with the given user
N.B. This gets called during init so has to be handled differently
Parameters
isoFmt: optional boolean that returns datetimes in iso8601 format
if True, otherwise times are in seconds since Unix epoch
Returns: JSON object containing data about the systems associated
with the user
"""
fmt = "&datetime_format=iso8601" if isoFmt else ""
url = f"{URL_PREFIX}?key={self.apiKey}&user_id={self.uid}"
return self.rateLimitedREST(url)
def consumptionStats(self, startAt=None, endAt=None, isoFmt=False):
"""Return performance statistics from a system's consumption meter
        If more than one month's worth of intervals is requested, then this
returns a single month's worth of intervals.
Intervals are 15 minutes long and start at the top of the hour.
Requested times are rounded down to the nearest preceding interval.
Returned data are tagged with the interval's end time -- therefore
        the first interval will have a timestamp that is up to five minutes
        after the requested start time.
Empty interval array is returned if no consumption meters are installed.
Parameters
startAt: optional starting interval time in Unix epoch seconds
            endAt: optional ending interval time in Unix epoch seconds
isoFmt: optional boolean that returns datetimes in iso8601 format
if True, otherwise times are in seconds since Unix epoch
Returns: JSON object that contains summary of consumption meter ????
"""
fmt = "&datetime_format=iso8601" if isoFmt else ""
start = f"&start_at={startAt}" if startAt else ""
end = f"&end_at={endAt}" if endAt else ""
url = f"{self.urlPrefix}consumption_stats{self.urlArgs}{start}{end}{fmt}"
return self.rateLimitedREST(url)
#### TODO DRY up these methods
def envoys(self, isoFmt=False):
"""Returns information about all Envoy devices in the system
Parameters
isoFmt: optional boolean that returns datetimes in iso8601 format
if True, otherwise times are in seconds since Unix epoch
Returns: JSON object with list of objects with details about each of
the Envoys in the system
"""
fmt = "&datetime_format=iso8601" if isoFmt else ""
return self.rateLimitedREST(f"{self.urlPrefix}envoys{self.urlArgs}{fmt}")
def inventory(self, isoFmt=False):
"""Returns information about all devices in the system
Parameters
isoFmt: optional boolean that returns datetimes in iso8601 format
if True, otherwise times are in seconds since Unix epoch
Returns: JSON object with lists of objects with details about each of
        the inverters and meters in the system
"""
fmt = "&datetime_format=iso8601" if isoFmt else ""
return self.rateLimitedREST(f"{self.urlPrefix}inventory{self.urlArgs}{fmt}")
def productionMeters(self, readAt=None, isoFmt=False):
"""Return the last reading of each production meter in the system
Parameters
readAt: optional time to read meter in seconds from Unix epoch
isoFmt: optional boolean that returns datetimes in iso8601 format
if True, otherwise times are in seconds since Unix epoch
Returns: JSON object with list of objects containing data for each
production meter -- serial number, value in Wh, time when
reading was taken (before or at the given readAt time), and
                metadata -- status, last report, last energy, operational
"""
fmt = "&datetime_format=iso8601" if isoFmt else ""
day = f"&end_at={readAt}" if readAt else ""
url = f"{self.urlPrefix}production_meter_readings{self.urlArgs}{fmt}{day}"
return self.rateLimitedREST(url)
def rgmStats(self, startAt=None, endAt=None, isoFmt=False):
"""Return performance statistics from a system's Revenue-Grade Meters (RGMs)
        If more than one month's worth of intervals is requested, then this
returns a single month's worth of intervals.
Intervals are 15 minutes long and start at the top of the hour.
Requested times are rounded down to the nearest preceding interval.
Returned data are tagged with the interval's end time -- therefore
        the first interval will have a timestamp that is up to five minutes
        after the requested start time.
Empty interval array is returned if no RGMs are installed.
Parameters
startAt: optional starting interval time in Unix epoch seconds
            endAt: optional ending interval time in Unix epoch seconds
isoFmt: optional boolean that returns datetimes in iso8601 format
if True, otherwise times are in seconds since Unix epoch
Returns: JSON object that contains summary of production from all
RGMs in the system
"""
fmt = "&datetime_format=iso8601" if isoFmt else ""
start = f"&start_at={startAt}" if startAt else ""
end = f"&end_at={endAt}" if endAt else ""
url = f"{self.urlPrefix}rgm_stats{self.urlArgs}{start}{end}{fmt}"
return self.rateLimitedREST(url)
def stats(self, startAt=None, endAt=None, isoFmt=False):
"""Return performance statistics as reported by microinverters
        If more than one day of intervals is requested, then this returns
a single day's worth of intervals.
Intervals are five minutes long and start at the top of the hour.
Requested times are rounded down to the nearest preceding interval.
Returned data are tagged with the interval's end time -- therefore
        the first interval will have a timestamp that is up to five minutes
        after the requested start time.
Parameters
startAt: optional starting interval time in Unix epoch seconds
            endAt: optional ending interval time in Unix epoch seconds
isoFmt: optional boolean that returns datetimes in iso8601 format
if True, otherwise times are in seconds since Unix epoch
Returns: JSON object that contains summary of production from all
reporting microinverters during each requested interval
"""
fmt = "&datetime_format=iso8601" if isoFmt else ""
start = f"&start_at={startAt}" if startAt else ""
end = f"&end_at={endAt}" if endAt else ""
url = f"{self.urlPrefix}stats{self.urlArgs}{start}{end}{fmt}"
return self.rateLimitedREST(url)
def summary(self, summaryDate=None, isoFmt=False):
"""Returns summary information for a system
        If no date is provided, then the current day at midnight site-local
time is used.
Parameters
summaryDate: optional string indicating the day for which a summary
is requested, given in "YYYY-mm-dd" format in the system's
timezone
isoFmt: optional boolean that returns datetimes in iso8601 format
if True, otherwise times are in seconds since Unix epoch
Returns: JSON object containing system-level summary of the system at
the requested date
"""
fmt = "&datetime_format=iso8601" if isoFmt else ""
date = f"&summary_date={summaryDate}" if summaryDate else ""
url = f"{self.urlPrefix}summary{self.urlArgs}{date}{fmt}"
return self.rateLimitedREST(url)
#
# TEST
#
if __name__ == '__main__':
raise NotImplementedError("TBD")
```
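A short usage sketch for the class above. The user id and API key are placeholders; real calls are throttled by `rateLimitedREST()` to `hitsPerMin` requests per minute:
```python
import json
import sys

from Enlighten import Enlighten

# Placeholder credentials -- substitute real Enlighten API key and user id values.
nliten = Enlighten(uid="1234567", apiKey="<KEY>")  # sysId omitted: first system is used

today = nliten.summary()            # site-level summary for today
meters = nliten.productionMeters()  # latest production meter readings
json.dump({'summary': today, 'meters': meters}, sys.stdout, indent=4)
```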
#### File: jduanen/Enlighten/monitor.py
```python
import argparse
import json
import logging
import os
import signal
import sys
from threading import Event
import time
import yaml
from blink1.blink1 import Blink1
from Enlighten import Enlighten
from Indicators import Indicators
WATCHDOG_INTERVAL = 60 * 1000 # poke watchdog every minute
DEF_CONF_FILE = "./.enphase.yml"
DEF_LOG_LEVEL = "WARNING"
DEF_INTERVAL = 6 # default: poll every 6 hours
#### TODO move this to the library
FACTORY_BLINK_PATTERN = [(255, 0, 0, 500), (255, 0, 0, 500), (0, 0, 0, 500),
(0, 255, 0, 500), (0, 255, 0, 500), (0, 0, 0, 500),
(0, 0, 255, 500), (0, 0, 255, 500), (0, 0, 0, 500),
(128, 128, 128, 1000), (0, 0, 0, 1000),
(255, 255, 255, 500), (0, 0, 0, 500),
(255, 255, 255, 500), (0, 0, 0, 1000), (0, 0, 0, 1000)]
DEF_WATCHDOG_PATTERN = FACTORY_BLINK_PATTERN #### FIXME make a better default watchdog pattern
CELLULAR_INTERVAL = 6 * 60 * 60 # Envoy updates server every six hours on Cellular
WIFI_INTERVAL = 15 * 60 # Envoy updates server every 15 mins on WiFi
UPDATE_INTERVAL = CELLULAR_INTERVAL # using Cellular
STATS_RATE = 5 * 60 # stats are sampled in 5 min intervals
RETRY_DELAY = 60 * 5 # retry every 5 mins
#CATCH_SIGNALS = ("INT", "HUP", "ILL", "TRAP", "ABRT", "KILL")
CATCH_SIGNALS = ("INT",)
exitLoop = Event()
def signalHandler(sig, frame):
''' Catch SIGINT and clean up before exiting
'''
if sig == signal.SIGINT:
logging.info("SIGINT")
else:
logging.debug("Signal:", sig)
exitLoop.set()
def run(options):
for s in CATCH_SIGNALS:
signal.signal(getattr(signal, f"SIG{s}"), signalHandler)
leds = Indicators(WATCHDOG_INTERVAL)
nliten = Enlighten(options['uid'], options['apiKey'], options['sysId'])
if options['verbose']:
json.dump(nliten.allSystems, sys.stdout, indent=4)
pollInterval = (60 * 60) * options['rate'] # number of seconds between polls
pollInterval = 3 #### TMP TMP TMP
logging.info(f"Start polling: polling interval={pollInterval} secs")
while not exitLoop.is_set():
current = False
normal = False
while not (current and normal) and not exitLoop.is_set():
summary = nliten.summary()
logging.info(f"Summary: power={summary['current_power']}, status={summary['status']}, lastReport={summary['last_report_at']}, lastInterval={summary['last_interval_end_at']}")
            current = summary['last_report_at'] > (time.time() - UPDATE_INTERVAL)
normal = summary['status'] == "normal"
if not current:
leds.staleData()
logging.debug("Stale Data")
continue
else:
leds.currentData(normal)
if normal:
logging.debug("Good Data")
break
else:
logging.debug("Abnormal Data")
time.sleep(RETRY_DELAY)
stats = nliten.stats()
logging.debug(f"Stats: {stats['meta']}")
now = time.time()
if stats['meta']['last_report_at'] < now - UPDATE_INTERVAL:
logging.info(f"Reporting Late: last report={stats['meta']['last_report_at']}, now={now}")
leds.staleData()
elif stats['meta']['status'] != "normal":
logging.info(f"Abnormal Report: {stats['meta']}")
leds.abnormalData()
continue
exitLoop.wait(pollInterval)
logging.info("Shutting down")
#### TODO add flag to set watchdog pattern, and take it from the config file
def getOps():
usage = f"Usage: {sys.argv[0]} [-v] " + \
"[-c <confFile>] [-L <logLevel>] [-l <logFile>] " + \
"[-i] [-r <rate>]" + \
"[-a <apiKey>] [-u <uid>] [-s <sysId>]"
ap = argparse.ArgumentParser()
ap.add_argument(
"-a", "--apiKey", action="store", type=str,
help="Enphase Enlighten Systems API key")
ap.add_argument(
"-c", "--confFile", action="store", type=str,
default=DEF_CONF_FILE, help="Path to YAML file with configuration information")
ap.add_argument(
"-i", "--isoFormat", action="store_true", default=False,
help="Print datetime values in iso8601 format")
ap.add_argument(
"-L", "--logLevel", action="store", type=str, default=DEF_LOG_LEVEL,
choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
help="Logging level")
ap.add_argument(
"-l", "--logFile", action="store", type=str,
help="Path to location of logfile (create it if it doesn't exist)")
ap.add_argument(
"-r", "--rate", action="store", type=int, choices=range(1, 13),
default=DEF_INTERVAL,
help="Interval at which to poll the Enlighten server (number of hours per call)")
ap.add_argument(
"-s", "--sysId", action="store", type=str,
help="Enphase Enlighten System Id")
ap.add_argument(
"-u", "--uid", action="store", type=str,
help="Enphase Enlighten User Id")
ap.add_argument(
"-v", "--verbose", action="count", default=0, help="Print debug info")
opts = ap.parse_args()
if not os.path.exists(opts.confFile):
print(f"Error: Configuration file not found: {opts.confFile}")
sys.exit(1)
with open(opts.confFile, "r") as f:
confs = list(yaml.load_all(f, Loader=yaml.Loader))
if len(confs) < 1:
print(f"Error: Empty configuration file: {opts.confFile}")
sys.exit(1)
elif len(confs) > 1:
print(f"Warning: Using the first document in configuration file: {opts.confFile}")
conf = confs[0]
if opts.logLevel:
conf['logLevel'] = opts.logLevel
elif 'logLevel' not in conf:
conf['logLevel'] = DEF_LOG_LEVEL
logLevel = conf['logLevel']
if opts.logFile:
conf['logFile'] = opts.logFile
logFile = conf.get('logFile')
if opts.verbose:
print(f"Logging to: {logFile}")
if logFile:
logging.basicConfig(filename=logFile, level=logLevel)
else:
logging.basicConfig(level=logLevel)
if opts.apiKey:
conf['apiKey'] = opts.apiKey
elif 'apiKey' not in conf:
apiKey = os.environ.get("ENPHASE_API_KEY")
if not apiKey:
logging.error("Must provide API Key")
sys.exit(1)
conf['apiKey'] = apiKey
if opts.uid:
conf['uid'] = opts.uid
elif 'uid' not in conf:
uid = os.environ.get("ENPHASE_UID")
if not uid:
logging.error("Must provide User Id")
sys.exit(1)
conf['uid'] = uid
    if opts.sysId:
        conf['sysId'] = opts.sysId
    elif 'sysId' not in conf or not conf['sysId']:
        conf['sysId'] = os.environ.get("ENPHASE_SYSID")
if 'apiKey' not in conf:
logging.Error("Must supply API Key")
sys.exit(1)
if 'uid' not in conf:
logging.Error("Must supply User Id")
sys.exit(1)
options = vars(opts)
options.update(conf)
return(options)
if __name__ == '__main__':
opts = getOps()
run(opts)
```
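Both monitor.py and nliten.py read the same YAML configuration file (default `./.enphase.yml`), and `getOps()` only uses the first document in it. The keys below are inferred from the option handling above; the values are placeholders, shown as the dict the YAML parser would return:
```python
# Hypothetical .enphase.yml contents, expressed as the parsed Python dict.
conf = {
    'apiKey': '<KEY>',      # or set the ENPHASE_API_KEY environment variable
    'uid': '1234567',       # or set ENPHASE_UID
    'sysId': '2222222',     # optional; falls back to ENPHASE_SYSID
    'logLevel': 'INFO',     # overridden by -L on the command line
    'logFile': '/var/log/enphase-monitor.log',  # optional; logs to stderr if omitted
}
```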
#### File: jduanen/Enlighten/nliten.py
```python
import argparse
from datetime import datetime
import json
import logging
import os
import requests
import sys
import time
import yaml
from Enlighten import Enlighten, API_CMDS
DEF_CONF_FILE = "./.enphase.yml"
DEF_LOG_LEVEL = "WARNING"
DEF_CMDS = ["systems"]
TIME_FORMAT = "%d-%m-%Y %H:%M" # e.g., "08-02-2021 17:30"
ESC_TIME_FORMAT = TIME_FORMAT.replace('%', '%%')
#### TODO document this
def run(options):
if options['verbose'] > 1:
json.dump(options, sys.stdout, indent=4)
print("")
nliten = Enlighten(options['uid'], options['apiKey'], options['sysId'])
results = {}
if 'consumptionStats' in options['commands']:
results['consumptionsStats'] = nliten.consumptionStats(options['start'], options['end'], options['isoFormat'])
if 'envoys' in options['commands']:
results['envoys'] = nliten.envoys(options['isoFormat'])
if 'inventory' in options['commands']:
results['inventory'] = nliten.inventory(options['isoFormat'])
if 'productionMeters' in options['commands']:
results['productionMeters'] = nliten.productionMeters(options['day'], options['isoFormat'])
if 'rgmStats' in options['commands']:
results['rgmStats'] = nliten.rgmStats(options['start'], options['end'], options['isoFormat'])
if 'stats' in options['commands']:
results['stats'] = nliten.stats(options['start'], options['end'], options['isoFormat'])
if 'summary' in options['commands']:
results['summary'] = nliten.summary(options['day'], options['isoFormat'])
if 'systems' in options['commands']:
results['systems'] = nliten.systems(options['isoFormat'])
json.dump(results, sys.stdout, indent=4)
print("")
def getOps():
usage = f"Usage: {sys.argv[0]} [-v] " + \
"[-c <confFile>] [-L <logLevel>] [-l <logFile>] " + \
"[-i] [-b <time>] [-e <time>] [-d <time>]" + \
"[-a <apiKey>] [-u <uid>] [-s <sysId>] [-C <cmd>{,<cmd'>}*]"
ap = argparse.ArgumentParser()
ap.add_argument(
"-a", "--apiKey", action="store", type=str,
help="Enphase Enlighten Systems API key")
ap.add_argument(
"-b", "--beginTime", action="store", type=str,
help=f"Start of time interval of interest (format={ESC_TIME_FORMAT})")
ap.add_argument(
"-C", "--cmdsList", action="store", type=str, nargs="+",
choices=API_CMDS, default=DEF_CMDS,
help="Path to YAML file with configuration information")
ap.add_argument(
"-c", "--confFile", action="store", type=str,
default=DEF_CONF_FILE, help="Path to YAML file with configuration information")
ap.add_argument(
"-d", "--dayTime", action="store", type=str,
help=f"Day of interest (format={ESC_TIME_FORMAT})")
ap.add_argument(
"-e", "--endTime", action="store", type=str,
help=f"End of time interval of interest (format={ESC_TIME_FORMAT})")
ap.add_argument(
"-i", "--isoFormat", action="store_true", default=False,
help="Print datetime values in iso8601 format")
ap.add_argument(
"-L", "--logLevel", action="store", type=str, default=DEF_LOG_LEVEL,
choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
help="Logging level")
ap.add_argument(
"-l", "--logFile", action="store", type=str,
help="Path to location of logfile (create it if it doesn't exist)")
ap.add_argument(
"-s", "--sysId", action="store", type=str,
help="Enphase Enlighten System Id")
ap.add_argument(
"-u", "--uid", action="store", type=str,
help="Enphase Enlighten User Id")
ap.add_argument(
"-v", "--verbose", action="count", default=0, help="Print debug info")
#### read list of calls to make
opts = ap.parse_args()
if not os.path.exists(opts.confFile):
print(f"Error: Configuration file not found: {opts.confFile}")
sys.exit(1)
with open(opts.confFile, "r") as f:
confs = list(yaml.load_all(f, Loader=yaml.Loader))
if len(confs) < 1:
print(f"Error: Empty configuration file: {opts.confFile}")
sys.exit(1)
elif len(confs) > 1:
print(f"Warning: Using the first document in configuration file: {opts.confFile}")
conf = confs[0]
if opts.logLevel:
conf['logLevel'] = opts.logLevel
elif 'logLevel' not in conf:
conf['logLevel'] = DEF_LOG_LEVEL
logLevel = conf['logLevel']
if opts.logFile:
conf['logFile'] = opts.logFile
logFile = conf.get('logFile')
if opts.verbose:
print(f"Logging to: {logFile}")
if logFile:
logging.basicConfig(filename=logFile, level=logLevel)
else:
logging.basicConfig(level=logLevel)
if opts.apiKey:
conf['apiKey'] = opts.apiKey
elif 'apiKey' not in conf:
apiKey = os.environ.get("ENPHASE_API_KEY")
if not apiKey:
logging.error("Must provide API Key")
sys.exit(1)
conf['apiKey'] = apiKey
if opts.uid:
conf['uid'] = opts.uid
elif 'uid' not in conf:
uid = os.environ.get("ENPHASE_UID")
if not uid:
logging.error("Must provide User Id")
sys.exit(1)
conf['uid'] = uid
    if opts.sysId:
        conf['sysId'] = opts.sysId
    elif 'sysId' not in conf or not conf['sysId']:
        conf['sysId'] = os.environ.get("ENPHASE_SYSID")
if 'apiKey' not in conf:
logging.error("Must supply API Key")
sys.exit(1)
if 'uid' not in conf:
logging.error("Must supply User Id")
sys.exit(1)
day = int(datetime.strptime(opts.dayTime, TIME_FORMAT).timestamp()) if opts.dayTime else None
start = int(datetime.strptime(opts.beginTime, TIME_FORMAT).timestamp()) if opts.beginTime else None
end = int(datetime.strptime(opts.endTime, TIME_FORMAT).timestamp()) if opts.endTime else None
if start and end and start > end:
logging.error(f"Begin time ({opts.beginTime}) must be before End time ({opts.endTime})")
sys.exit(1)
options = vars(opts)
options['day'] = day
options['start'] = start
options['end'] = end
options['commands'] = opts.cmdsList
options.update(conf)
return(options)
if __name__ == '__main__':
opts = getOps()
run(opts)
```
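`run()` only needs the options dict that `getOps()` normally assembles, so the tool can also be driven programmatically. A sketch with placeholder credentials, assuming the file imports as the module `nliten`:
```python
# Hypothetical direct use of run(); getOps() normally builds this dict from the
# command line plus the YAML config file.
from nliten import run

options = {
    'verbose': 0,
    'uid': '1234567',     # placeholder Enlighten user id
    'apiKey': '<KEY>',    # placeholder API key
    'sysId': None,        # None lets Enlighten pick the first system
    'commands': ['summary', 'stats'],
    'day': None,
    'start': None,
    'end': None,
    'isoFormat': True,
}
run(options)
```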
|
{
"source": "jduanen/laserTest",
"score": 3
}
|
#### File: jduanen/laserTest/laserTest.py
```python
import argparse
import datetime
import os
import sys
def makeInputStr(minVal, maxVal, count):
"""
Create a default input string for a given test dimension.
"""
return "{0}:{1},{2}".format(minVal, maxVal, count)
class GcodeOutput(object):
"""
Encapsulates all the supported means of emitting GCodes.
Currently can write to a file or stream to GRBL.
"""
#### TODO make this Thread-safe as it's not
# Supported output modes
OUTPUT_MODES = ('FILE', 'GRBL')
def __init__(self, mode, file_):
"""
Take the output mode (currently, you can emit to a file/stdout or
drive the Arduino-based GRBL controller), and the name of the file
to write to (for file mode) or the port to which the GRBL controller
is attached and instantiate an output object that can be used to
batch up output and then emit it
"""
self.gcodes = []
mode = mode.upper()
if mode not in GcodeOutput.OUTPUT_MODES:
raise ValueError("Invalid output mode {0}, must be one of {1}".
format(mode, GcodeOutput.OUTPUT_MODES))
self.mode = mode
if self.mode == 'FILE':
if file_ == '-':
self.output = sys.stdout
else:
try:
self.output = open(file_, 'w')
except Exception as ex:
raise ValueError("Unable to write output to {0}; {1}".
format(file_, ex))
t = datetime.datetime.now()
p = os.path.basename(sys.argv[0])
self.header = "( Generated by: {0} @ {1} )\n".format(p, t)
elif self.mode == 'GRBL':
pass #### FIXME implement this
def hdr(self, str_):
"""
Add the given string to the header block that is to be emitted at the
start of the output in FILE mode.
"""
if verbosity > 0:
sys.stderr.write("{0}\n".format(str_))
if comment and self.mode == 'FILE':
if not self.header:
raise RuntimeError("Cannot add to headers after first emit")
self.header += "( {0} )\n".format(str_)
def compose(self, gcodes):
"""
        Build up the GCodes to be (optionally post-processed) and emitted later
"""
#### TODO see if there are checks or modifications needed on compose
if verbosity > 1:
print gcodes
self.gcodes += gcodes
def getLen(self):
"""
Return the current number of GCodes in the buffer waiting to be post-
processed or emitted.
"""
return len(self.gcodes)
def postProcess(self):
"""
Run whatever post-processing is desired on the current collection of
GCodes.
"""
pass #### FIXME implement this
def emit(self):
"""
Emit the current contents of the GCode buffer in the chosen method, and
leave the buffer empty.
"""
if self.mode == 'FILE':
if self.header:
self.output.write(self.header)
self.header = None
outStr = "\n".join(self.gcodes)
self.output.write(outStr)
elif self.mode == 'GRBL':
pass #### FIXME
self.gcodes = []
class InputArgumentError(ValueError):
"""
Exception to indicate bad input string.
"""
pass
class Dimension(object):
"""
Base class for the three test dimensions -- i.e., speed, power, and
distance.
Takes a string of the form: <min>:<max>,<cnt> and returns an object
that holds the validated state of that input.
Throws ValueError if bad input given.
"""
def __init__(self, inputStr):
self.name = "UNINITIALIZED"
self.val = None
try:
rangeStr, countStr = inputStr.split(',')
minStr, maxStr = rangeStr.split(":")
self.minVal = float(minStr)
self.maxVal = float(maxStr)
self.count = int(countStr)
except Exception:
raise
if ((self.minVal == self.maxVal and self.count != 1) or
(self.minVal != self.maxVal and self.count == 1)):
sys.stderr.write("Warning: inconsistent fixed spec {0}\n".
format(inputStr))
if self.count > 1:
self.incr = (self.maxVal - self.minVal) / (self.count - 1)
else:
self.incr = 0.0
self.fixed = self.count == 1 or self.minVal == self.maxVal
self.reset()
def reset(self):
"""
Reset the next value counter to the start and the current value to
minVal.
"""
self.indx = 0
self.val = self.minVal
def next(self):
"""
Return the next value in the range and bump the counter 'indx'.
Returns minVal (and doesn't bump the value) if the dimension is fixed.
Throws exception if ask for more values after having reached the max.
(The caller should be looping on the count and not relying on this
for loop termination conditions.)
Compute next val based on count (as opposed to adding 'incr') to avoid
accumulating errors.
"""
if self.fixed:
return self.minVal
self.indx += 1
if self.indx >= self.count:
raise ValueError("Asked for too many next values")
self.val = round(((self.indx * ((self.maxVal - self.minVal) /
(self.count - 1))) + self.minVal), 2)
return self.val
def __str__(self):
incr = "%.1f" % round(self.incr, 1)
return "{0}: \tmin = {1}, \tmax = {2}, \tcount = {3}, \tincr = {4}, \t{5}". \
format(self.name, self.minVal, self.maxVal, self.count, incr,
('Varies', 'Fixed')[self.fixed])
def __repr__(self):
return self.__str__()
class SpeedDim(Dimension):
"""
Encapsulates the Speed test dimension.
This defines the speed (in mm/min) that the laser moves during cuts.
"""
def __init__(self, speedStr):
self.name = "Speed"
try:
super(self.__class__, self).__init__(speedStr)
except Exception:
raise
if self.minVal < TestParams.MIN_XY_SPEED:
raise InputArgumentError("Minimum speed to slow (< {0})".
format(TestParams.MIN_XY_SPEED))
if self.maxVal > TestParams.MAX_XY_SPEED:
raise InputArgumentError("Maximum speed to fast (> {0})".
format(TestParams.MAX_XY_SPEED))
class PowerDim(Dimension):
"""
Encapsulates the Power test dimension.
This is the PWM value that defines the strength of the laser.
In the case of the J-Tech laser with the Arduino GRBL, this value
ranges from 0 (i.e., off) to 10000 (i.e., max power) -- this is
all defined in the GRBL source code constants.
"""
def __init__(self, powerStr):
self.name = "Power"
try:
super(self.__class__, self).__init__(powerStr)
except Exception:
raise
if self.minVal < TestParams.MIN_POWER:
raise InputArgumentError("Minimum power too low (< {0})".
format(TestParams.MIN_POWER))
if self.maxVal > TestParams.MAX_POWER:
raise InputArgumentError("Maximum power too high (> {0})".
                                     format(TestParams.MAX_POWER))
class DistanceDim(Dimension):
"""
Encapsulates the Distance test dimension.
This is the Z-axis distance from the cutting surface (aka: focus).
"""
def __init__(self, distanceStr):
self.name = "Distance"
try:
super(self.__class__, self).__init__(distanceStr)
except Exception:
raise
if self.minVal < TestParams.MIN_Z_DISTANCE:
raise InputArgumentError("Minimum distance too close (< {0})".
format(TestParams.MIN_Z_DISTANCE))
if self.maxVal > TestParams.MAX_Z_DISTANCE:
raise InputArgumentError("Maximum distance too far (> {0})".
format(TestParams.MAX_Z_DISTANCE))
class Shapeoko2(object):
"""
Encapsulates the specifics of a particular CNC machine.
This is the Shapeoko2 with Acme Z axis and the J-Tech 3.8W Laser Diode.
"""
# X-/Y-axis cutting speed constants (in mm/min)
DEF_XY_SPEED = 750.0
MIN_XY_SPEED = 100.0
MAX_XY_SPEED = 5000.0
# Power constants (in spindle RPM) -- J-Line 2.8W/GRBL 0.9+
DEF_POWER = 1000.0
MIN_POWER = 0.0
MAX_POWER = 10000.0
# Z-axis cutting distance constants (in mm)
DEF_Z_DISTANCE = 10.0
MIN_Z_DISTANCE = 5.0
MAX_Z_DISTANCE = 100.0
# Movement (non-cutting) speed in all axes
XY_MOVE_SPEED = 4000.0
Z_MOVE_SPEED = 400.0
# Max distance in X that the test can span (in mm)
MAX_X_DISTANCE = 150.0
# Max distance in Y that the test can span (in mm)
MAX_Y_DISTANCE = 150.0
# Max distance in Z that the test can span (in mm)
MAX_Z_DISTANCE = 50.0
class TestParams(Shapeoko2):
"""
Encapsulates the parameters for the given tests on a specific machine type.
"""
# width of laser cut (in mm) for J-Tech 3.8W 445nm laser
KERF = .38
# Min/max distance between individual test lines in X (in mm)
MIN_X_SPACING = 1.0
MAX_X_SPACING = 10.0
# Min/max distance between rows of test lines in Y (in mm)
MIN_Y_SPACING = 5.0
MAX_Y_SPACING = 10.0
# Def height of test lines (in mm)
DEF_LINE_HEIGHT = 20.0
# Default test counts
DEF_SPEED_COUNT = 1
DEF_POWER_COUNT = 1
DEF_DISTANCE_COUNT = 1
def __init__(self, speedTuple, powerTuple, distanceTuple,
lineHeight=DEF_LINE_HEIGHT):
try:
self.speed = SpeedDim(speedTuple)
self.power = PowerDim(powerTuple)
self.distance = DistanceDim(distanceTuple)
except Exception:
raise
self.lineHeight = lineHeight
self.dims = [self.speed, self.power, self.distance]
numDims = len(self.dims)
self.varDims = [dim for dim in self.dims if not dim.fixed]
self.numFixedDims = sum([self.speed.fixed, self.power.fixed,
self.distance.fixed])
self.numVarDims = numDims - self.numFixedDims
if self.numVarDims >= numDims:
# can't (effectively) plot three variable dimensions on 2D surface
raise InputArgumentError("Too many free dimensions, at least one must be fixed")
elif self.numVarDims < 2:
# at most one variable dim, so just one row
self.numRows = 1
self.yDim = None
if self.numVarDims == 1:
# only one row along the one variable dim
self.xDim = self.varDims[0]
self.numCols = self.varDims[0].count
else:
# no variable dims, so just one test cut
self.numCols = 1
self.xDim = None
else:
# max count of all variable dims
self.xDim = max(self.varDims, key=lambda item: item.count)
# not the xDim, but the other variable one
self.yDim = [dim for dim in self.varDims if dim != self.xDim][0]
self.numRows = self.yDim.count
self.numCols = self.xDim.count
# increment to move X for each new column
if self.numCols <= 1:
self.xIncr = 0.0
else:
self.xIncr = self.MAX_X_DISTANCE / (self.numCols - 1)
if (self.xIncr - self.KERF) < self.MIN_X_SPACING:
raise InputArgumentError("Too many columns; reduce {0} count".
format(self.xDim.name))
if self.xIncr > self.MAX_X_SPACING:
self.xIncr = self.MAX_X_SPACING
# increment to move Y for each new row (from base of previous row)
if self.numRows < 2:
self.yIncr = 0.0
else:
self.yIncr = ((self.MAX_Y_DISTANCE - self.lineHeight) /
(self.numRows - 1))
if (self.yIncr - self.lineHeight) < self.MIN_Y_SPACING:
raise InputArgumentError("Too many rows; reduct count of {0}".
format(self.yDim.name))
if self.yIncr > self.MAX_Y_SPACING:
self.yIncr = self.lineHeight + self.MAX_Y_SPACING
# dimensions of complete test pattern
if self.numCols > 1:
self.width = (self.xIncr * (self.numCols - 1)) + self.KERF
else:
self.width = self.KERF
if self.numRows > 1:
self.height = (self.yIncr * (self.numRows - 1)) + self.lineHeight
else:
self.height = self.lineHeight
def nextX(self):
return self.xDim.next()
def nextY(self):
self.xDim.reset()
return self.yDim.next()
def __str__(self):
s = ""
        for dim in self.dims:
            s += "{0}\n".format(str(dim))
s += "xDim: {0}, numRows: {1}, yDim: {2}, numCols: {3}\n".\
format(self.xDim, self.numRows, self.yDim, self.numCols)
return s
# Instantiate the defaults
defSpeed = makeInputStr(TestParams.DEF_XY_SPEED, TestParams.DEF_XY_SPEED,
TestParams.DEF_SPEED_COUNT)
defPower = makeInputStr(TestParams.DEF_POWER, TestParams.DEF_POWER,
TestParams.DEF_POWER_COUNT)
defDistance = makeInputStr(TestParams.DEF_Z_DISTANCE,
TestParams.DEF_Z_DISTANCE,
TestParams.DEF_DISTANCE_COUNT)
#
# MAIN
#
if __name__ == '__main__':
prog = sys.argv[0]
u1 = "[-v] -s <min:max,cnt> -p <min:max,cnt> -d <min:max,cnt> [-n]"
u2 = "[-m <outputMode>] [-o {<outPath>}] [-c]"
    usage = prog + " " + u1 + " " + u2
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", action="count", default=0,
dest="verbosity", help="increase output verbosity")
parser.add_argument("-s", "--speed", type=str, default=defSpeed,
dest="speed",
help="cutting speed in X-/Y-axis (min:max,cnt)")
parser.add_argument("-p", "--power", type=str, default=defPower,
dest="power", help="cutting power (min:max,cnt)")
parser.add_argument("-d", "--distance", type=str, default=defDistance,
dest="distance", help="Z-axis distance (min:max,cnt)")
parser.add_argument("-m", "--output_mode", type=str, dest="outMode",
default="FILE",
choices=GcodeOutput.OUTPUT_MODES, help="output mode")
parser.add_argument("-o", "--output_path", type=str, dest="outPath",
default="-",
help="output path (filename, '-' for stdout, or USB port name)")
parser.add_argument("-n", "--dry_run", action="store_true", default=False,
dest="dryRun",
help="suppress output and just calculate values")
parser.add_argument("-c", "--comment", action="store_true", default=False,
dest="comment",
help="add header comments to gcode output")
args = parser.parse_args()
verbosity = args.verbosity
comment = args.comment
gcOut = GcodeOutput(args.outMode, args.outPath)
try:
parms = TestParams(args.speed, args.power, args.distance)
except Exception as ex:
sys.stderr.write("Error: failed to initialize -- {0}".format(ex))
sys.exit(1)
gcOut.hdr("Laser Cut Test Pattern Generator")
gcOut.hdr(" {0}".format(str(parms.speed)))
gcOut.hdr(" {0}".format(str(parms.power)))
gcOut.hdr(" {0}".format(str(parms.distance)))
if parms.xDim:
c = parms.xDim
incr = "%.1f" % round(c.incr, 1)
gcOut.hdr(" X Axis -> {0}: {1} cuts from {2} to {3} in increments of {4}".
format(c.name, c.count, c.minVal, c.maxVal, incr))
if parms.yDim:
r = parms.yDim
incr = "%.1f" % round(r.incr, 1)
gcOut.hdr(" Y Axis -> {0}: {1} rows of cuts from {2} to {3} in increments of {4}".
format(r.name, r.count, r.minVal, r.maxVal, incr))
if not parms.xDim and not parms.yDim:
gcOut.hdr(" One cut: speed={0}mm/min, power={1}, distance={2}mm".
format(parms.speed.minVal, parms.power.minVal,
parms.distance.minVal))
gcOut.hdr(" Line Height: {0}mm".format(parms.lineHeight))
gcOut.hdr(" Line Width: {0}mm".format(parms.KERF))
if parms.numCols > 1:
gcOut.hdr(" Horizontal Line Spacing: {0}mm".format(parms.xIncr))
if parms.numRows > 1:
gcOut.hdr(" Vertical Line Spacing: {0}mm".format(parms.yIncr))
gcOut.hdr(" Generating {0} row{1}with {2} column{3}of test cuts".
format(parms.numRows, ('s ', ' ')[parms.numRows < 2],
parms.numCols, ('s ', ' ')[parms.numCols < 2]))
gcOut.hdr(" Test Pattern Area: Width = {0}mm, Height = {1}mm".
format(parms.width, parms.height))
if args.outPath == '-':
oPath = 'stdout'
else:
oPath = args.outPath
gcOut.hdr(" Output: mode = {0}, path = {1}".format(args.outMode,
oPath))
if args.dryRun:
sys.stdout.write("\nDry run: exiting\n")
sys.exit(1)
#### TODO make option to cut in same direction or zig-zag
#### TODO calculate how much time it was on and off, and turn it off to meet the desired duty cycle
#### TODO make option to put in delay after cut to cool down laser -- duty cycle input
#### TODO compute laser cut metric (like chip load) - product of speed and power
#### (figure out what the difference is in speed vs. power)
#### TODO consider drawing legends for each value (x and y axis labels)
# generate G-Code preamble
# (set coordinates to metric and absolute mode (so errors don't accumulate)
# N.B. This assumes that the laser starts at the origin, so no initial moves
# are needed.
#### TODO decide if need to go to absolute Z position or if everything is relative to the starting point
startX = 0.0
startY = 0.0
startZ = parms.distance.val
x = startX
y = startY
z = startZ
preamble = ["G21",
"G90",
"G00 X{0} Y{1} Z{2}".format(x, y, z)]
gcOut.compose(preamble)
# generate tests (rows of columns)
rowBase = 0.0
for rowNum in xrange(parms.numRows):
if verbosity > 2:
print "Row: {0}".format(rowNum + 1)
rowBase = rowNum * parms.yIncr
for colNum in xrange(parms.numCols):
if verbosity > 2:
print "Column: {0}".format(colNum + 1)
# turn on laser (at power), cut vertical line, and turn laser off
# (each cut has constant Z/distance so leave it where it is)
x = colNum * parms.xIncr
y = rowBase + parms.lineHeight
p = parms.power.val
s = parms.speed.val
cutLine = ["M03 S{0}".format(p),
"G01 X{0} Y{1} F{2}".format(x, y, s),
"M05"]
gcOut.compose(cutLine)
if colNum < (parms.numCols - 1):
# bump the X dimension value
nx = parms.nextX()
if verbosity > 3:
print "Next X: {0}".format(nx)
# rapid move to baseline position for the next line in this row
x += parms.xIncr
y = rowBase
z = parms.distance.val
nextPos = ["G00 X{0} Y{1} Z{2}".format(x, y, z)]
gcOut.compose(nextPos)
if rowNum < (parms.numRows - 1):
# bump the Y dimension value
ny = parms.nextY()
if verbosity > 3:
print "Next Y: {0}".format(ny)
# rapid move to the baseline of the first line on the next row
rowBase += parms.yIncr
x = 0.0
y = rowBase
z = parms.distance.val
nextRow = ["G00 X{0} Y{1} Z{2}".format(x, y, z)]
gcOut.compose(nextRow)
#### move to (0, M*row base)
# rapid move to the origin
gotoStart = ["G00 X{0} Y{1} Z{2}".format(startX, startY, startZ)]
gcOut.compose(gotoStart)
gcOut.postProcess()
gcOut.emit()
"""
G21 (coordinates: mm)
G90 (coordinates: absolute XYZ)
G1 Z3.810 F228.6 (motion: straight line move up to starting position at 228.6mm/min)
G0 X10.000 Y0.000 (rapid motion: move to XY starting)
G1 Z-0.711 F228.6 (motion: move down to cutting position at 228.6mm/min)
G1 X10.000 Y0.000 F750.0 (motion: same position)
G1 X10.000 Y20.000 F750.0 (motion: cut 20mm in Y axis at 750mm/min)
G1 X10.000 Y20.000 F750.0 (motion: same position)
G1 Z-1.000 F228.6 (motion: raise cutter above work)
G1 X10.000 Y20.000 F750.0 (motion: same position)
G1 X10.000 Y0.000 F750.0 (motion: return to start of previous cut)
G1 Z3.810 F228.6 (motion: go back up to starting position)
G0 X20.000 Y0.000 (rapid motion: go to start of next cut)
G1 Z-0.711 F228.6 (motion: plunge cutter down into work)
G1 X20.000 Y0.000 F750.0 (motion: same position)
G1 X20.000 Y20.000 F750.0 (motion: cut 20mm in Y at 750mm/min)
G1 X20.000 Y20.000 F750.0 (motion: same position)
G1 Z-1.000 F228.6 (motion: raise cutter above work)
G1 X20.000 Y20.000 F750.0 (motion: same position)
G1 X20.000 Y0.000 F750.0 (motion: return to start of previous cut)
G21 (coordinates: mm)
"""
```
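The parameter classes can be exercised on their own to sanity-check the generated grid before emitting any G-code. A small sketch, assuming the file imports as the module `laserTest` and using illustrative values:
```python
from laserTest import TestParams

# Sweep power across 5 cuts while holding speed and focus distance fixed.
params = TestParams("750:750,1", "1000:5000,5", "10:10,1")
print("{0} row(s) x {1} column(s)".format(params.numRows, params.numCols))   # 1 x 5
print(params.xDim)   # Power: min = 1000.0, max = 5000.0, count = 5, ...
print("pattern: {0}mm wide x {1}mm high".format(params.width, params.height))
```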
|
{
"source": "jduanen/nestcamV2",
"score": 2
}
|
#### File: jduanen/nestcamV2/nestcam_capture.py
```python
import argparse
import collections
from datetime import datetime
import glob
import json
import os
import sys
import time
import nestcam
import yaml
# N.B. Without a NestAware subscription, Google limits snapshots to 2 per minute (per-camera or per-site?)
GOOGLE_RATE_LIMIT = 30 * 1000 # 30 secs
# Initialize the default configuration
config = {
"testing": True,
"cameraNames": [], # use all cameras
"delay": 10 * 60, # 10 mins in between captures
"maxFrames": 10, # keep last 10 frames
"numFrames": 0, # capture forever
"outputPath": "/tmp/imgs/", # save frames in /tmp/imgs/<camName>/<time>.jpg
"productId": None, # required
"productSecret": None # required
}
# Merge a new dict into an old one, updating the old one (recursively).
def dictMerge(old, new):
for k in new.keys():
if (k in old and isinstance(old[k], dict) and
isinstance(new[k], collections.Mapping)):
dictMerge(old[k], new[k])
else:
old[k] = new[k]
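# Example with throwaway values: dictMerge({'a': 1, 'b': {'x': 1}}, {'b': {'y': 2}, 'c': 3})
# updates the first dict in place to {'a': 1, 'b': {'x': 1, 'y': 2}, 'c': 3};
# nested dicts are merged recursively, everything else is overwritten.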
#
# MAIN
#
def main():
# Print error and exit
def fatalError(msg):
sys.stderr.write("Error: {0}\n".format(msg))
sys.stderr.write("Usage: {0}\n".format(usage))
sys.exit(1)
usage = sys.argv[0] + "[-v] [-L] [-S [-Q <query>]] [-n <names>] " + \
"[-c <confFile>] [-d <secs>] [-f <numFrames>] [-m <maxFrames>] " + \
"[-o <outPath>] [-p <productId>] [-s <secret>]"
ap = argparse.ArgumentParser()
ap.add_argument(
'-L', '--list', action='store_true', default=False,
help="list info on selected cameras (and return)")
ap.add_argument(
'-S', '--status', action='store_true', default=False,
help="print info for the selected cameras (and don't capture images)")
ap.add_argument(
'-Q', '--query', action='store', type=str,
help="jq-like query string to apply to Status output (defaults to '.' if not given")
ap.add_argument(
'-c', '--configFile', action='store',
help="configuration input file path (defaults to './nestcam.conf'")
ap.add_argument(
'-d', '--delay', action='store', type=int,
help="number of seconds to delay between sets of image grabs")
ap.add_argument(
'-f', '--numFrames', action='store', type=int,
help="number of frames to capture (0=infinite)")
ap.add_argument(
'-m', '--maxFrames', action='store', type=int,
help="maximum number of frames to save")
ap.add_argument(
'-n', '--names', action='store', type=str,
help="comma-separated list of camera names")
ap.add_argument(
'-o', '--outputPath', action='store', type=str,
help="base directory for output image files")
ap.add_argument(
'-p', '--productId', action='store', type=str,
help="Nest Home product ID")
ap.add_argument(
'-s', '--secret', action='store', type=str,
help="Nest Home product secret")
ap.add_argument(
'-v', '--verbose', action='count', default=0,
help="increase verbosity")
options = ap.parse_args()
# get the config file and merge with the defaults
confFilePath = None
if options.configFile:
if not os.path.isfile(options.configFile):
sys.stderr.write("Error: config file not found\n")
sys.exit(1)
confFilePath = options.configFile
else:
defaultPath = "./nestcam.conf"
if os.path.isfile(defaultPath):
confFilePath = defaultPath
else:
sys.stderr.write("Error: config file '%s' not found\n",
defaultPath)
sys.exit(1)
if confFilePath is not None:
with open(confFilePath, 'r') as ymlFile:
confFile = yaml.load(ymlFile)
if confFile:
dictMerge(config, confFile)
# overwrite values from defaults and config file with cmd line options
if options.names:
config['cameraNames'] = options.names.strip().split(",")
if options.delay:
config['delay'] = options.delay
if options.numFrames:
config['numFrames'] = options.numFrames
if options.maxFrames:
config['maxFrames'] = options.maxFrames
if options.outputPath:
config['outputPath'] = options.outputPath
if options.productId:
config['productId'] = options.productId
if options.secret:
config['productSecret'] = options.secret
# validate config values
if config['numFrames'] < 0:
fatalError("Number of frames to capture must be non-negative")
if config['maxFrames'] < 0:
fatalError("Number of frames to retain must be non-negative")
if config['delay'] < 0:
fatalError("Inter-frame delay must be non-negative")
if not config['outputPath']:
fatalError("Must provide output path")
if not config['productId'] or not config['productSecret']:
fatalError("Must provide Nest Home product ID and Secret")
# instantiate the NestCam interface object
tries = 3
while tries > 0:
try:
nest = nestcam.NestAccount(config['productId'], config['productSecret'])
break
except Exception as e:
if options.verbose > 0:
sys.stderr.write("Warning: Failed to attach to NestCam server: {0}".
format(e))
tries -= 1
if tries <= 0:
fatalError("Unable to attach to NestCam server")
# get ids for all of the selected cameras
if not config['cameraNames']:
config['cameraNames'] = nest.cameraNames()
config['cameraIds'] = nest.cameraIds()
else:
config['cameraIds'] = []
for camName in config['cameraNames']:
camIds = nest.cameraIdLookup(camName)
if camIds is None:
fatalError("Non-existant camera '{0}'".format(camName))
if len(camIds) != 1:
fatalError("Ambiguous camera name '{0}': {1}".format(camName, camIds))
config['cameraIds'].append(camIds[0])
# validate and init the directories for all of the cameras' images
if not os.path.exists(config['outputPath']):
os.makedirs(config['outputPath'])
for camId, camName in nest.camerasNameMap().iteritems():
path = os.path.join(config['outputPath'], camName + camId)
if not os.path.exists(path):
os.makedirs(path)
if options.verbose > 1:
print("Configuration:")
json.dump(config, sys.stdout, indent=4, sort_keys=True)
print("")
# get the current state of all the cameras associated with this account
if options.verbose > 2:
allCamIds = nest.cameraIds()
print("All Camera Ids: {0}".format(allCamIds))
allCamNames = nest.cameraNames()
print("All Camera Names: {0}".format(allCamNames))
allCamsMap = nest.camerasNameMap()
print("Map of all Camera IDs to Names:")
json.dump(allCamsMap, sys.stdout, indent=4, sort_keys=True)
print("")
camerasInfo = {k: v for k, v in nest.cameras().iteritems() if k in config['cameraIds']}
if options.list:
print("Cameras Info:")
json.dump(camerasInfo, sys.stdout, indent=4, sort_keys=True)
print("")
sys.exit(0)
# capture a frame from each camera in the list, writing the images to
# files in the given directory, wait the given amount of time, and repeat
count = 0
while True:
for camId in config['cameraIds']:
info = nest.cameraInfo(camId)
name = info['name_long']
ts = datetime.utcnow().isoformat()
if options.verbose > 2:
print("Timestamp: {0}".format(ts))
if options.status:
# get the status and don't capture an image
if options.verbose:
print("Camera {0} Status:".format(name))
#### TODO if there's a query filter, apply it (else emit the whole thing)
json.dump(info, sys.stdout, indent=4, sort_keys=True)
continue
# capture an image
if options.verbose:
print("Capture image from camera {0}".format(name))
try:
img = nest.getSnapshot(camId)
except Exception:
continue
if not img:
continue
# delete oldest frame if there are more than the max number of them
            camOutPath = os.path.join(config['outputPath'], info['name'] + camId)
camOutGlob = os.path.join(camOutPath, "*.jpg")
files = glob.glob(camOutGlob)
if len(files) > config['maxFrames']:
files.sort()
try:
if options.verbose > 2:
print("Removing file '{0}'".format(files[0]))
os.remove(files[0])
except Exception:
print("FIXME")
fPath = os.path.join(camOutPath, ts + ".jpg")
with open(fPath, "w+") as f:
if options.verbose > 2:
print("Writing frame to file '{0}'".format(fPath))
f.write(img)
if config['numFrames'] > 0:
count += 1
if count >= config['numFrames']:
if options.verbose > 3:
print("Completed capture of {0} frames per camera".
format(count))
break
if options.verbose > 2:
print("Delaying {0} secs".format(config['delay']))
print("")
time.sleep(config['delay'])
if __name__ == '__main__':
main()
```
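The script merges `./nestcam.conf` (YAML) over the defaults near the top of the file. A hypothetical configuration, expressed as the dict that `yaml.load()` would produce; the product id and secret are placeholders from a Nest developer account:
```python
# Placeholder values only -- productId/productSecret come from a Nest developer account.
config = {
    'cameraNames': ['Driveway', 'Backyard'],   # empty list means all cameras
    'delay': 300,                              # seconds between capture rounds
    'maxFrames': 20,                           # keep at most this many images per camera
    'numFrames': 0,                            # 0 means capture forever
    'outputPath': '/var/lib/nestcam/imgs/',
    'productId': '<PRODUCT_ID>',
    'productSecret': '<PRODUCT_SECRET>',
}
```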
#### File: jduanen/nestcamV2/nestcam.py
```python
import json
import os
import requests
import sys
import urllib
import urllib2
NEST_AUTH_URL = "https://home.nest.com/login/oauth2"
NEST_ACCESS_TOKEN_URL = "https://api.home.nest.com/oauth2/access_token"
NEST_API_URL = "https://developer-api.nest.com"
#### TODO remove this when the using function is removed -- in the mean time, make it part of the config
TOKEN_FILE_PATH = "./token.txt"
class NestCamLibError(Exception):
"""Base class for exceptions in this module."""
pass
class APIError(NestCamLibError):
"""Error class for exceptions in calling the Nest API Server."""
def __init__(self, result):
self.result = {"Error": result}
class NestAccount(object):
"""Encapsulation of access to Nest API Server for a given Nest account."""
@staticmethod
def _err(msg, fatal=False):
sys.stderr.write("Error: %s\n", msg)
if fatal:
sys.exit(1)
def _updateCameras(self, validate=True):
# query the API server
#### FIXME handle 307 REDIRECT returns
req = urllib2.Request(NEST_API_URL, None, self.headers)
#### FIXME handle the 429 ERROR (Too Many Requests) here
response = urllib2.urlopen(req, cafile=self.caFile)
data = json.loads(response.read())
if validate and 'devices' not in data:
raise APIError("Nest account has no devices")
devices = data["devices"]
if validate and 'cameras' not in devices:
raise APIError("Nest account has no cameras")
self.cams = devices["cameras"]
# verify the account has at least one Nest Camera
if validate and len(self.cams.keys()) < 1:
raise APIError("Nest account has no cameras")
def __init__(self, productId, productSecret, caFile=None):
""" Create connection to the NestCam API server.
Args:
productId: ID of Nest Developer product.
productSecret: Secret for Nest Developer product.
caFile: path to CA file (if None, look in default location).
Returns:
Newly created NestAccount object
"""
self.caFile = caFile
# Login to get the access token
def _login():
#### FIXME find a way to do the login automatically
# login to the NestCam API server and get auth code
queryStr = {
'client_id': productId,
'state': 'STATE'
}
response = requests.get(NEST_AUTH_URL, params=queryStr)
####response = requests.get(AUTH_URL)
print("{0}".format(response))
print("C: {0}".format(response.content))
####authCode = request.args.get("code")
authCode = None
# get the access token
data = urllib.urlencode({
'client_id': productId,
'client_secret': productSecret,
'code': authCode,
'grant_type': 'authorization_code'
})
req = urllib2.Request(NEST_ACCESS_TOKEN_URL, data)
response = urllib2.urlopen(req, cafile=self.caFile)
data = json.loads(response.read())
token = data['access_token']
return token
# Read the access token from a file
#### TODO this is a temp hack, remove it when I figure out how to login
def _readTokenFile(filePath):
token = None
with open(filePath, "r") as tokenFile:
token = tokenFile.readline().strip()
return token
if False:
token = _login()
else:
token = _readTokenFile(TOKEN_FILE_PATH)
self.headers = {
'Authorization': "Bearer {0}".format(token)
}
self._updateCameras()
def cameras(self):
""" Return info on all Nest cameras for the logged-in account.
Args:
None
Returns:
JSON object with info for all of the cameras
"""
self._updateCameras()
return self.cams
def cameraIds(self):
""" Return the IDs of all Nest cameras for the logged-in account.
Args:
None
Returns:
List of IDs for all of the cameras
"""
self._updateCameras()
return self.cams.keys()
def cameraNames(self):
""" Return the (long) names of all Nest cameras for the logged-in account.
N.B. There's no requirement that camera names be unique
Args:
None
Returns:
List of long names of cameras
"""
self._updateCameras()
return [self.cams[c]['name'] for c in self.cams]
def camerasNameMap(self):
""" Return a map of the (unique) IDs of all Nest cameras for the logged-in account to their (long) names.
Args:
None
Returns:
Dict mapping camera names to their IDs
"""
self._updateCameras()
return {k: self.cams[k]['name'] for k in self.cams.keys()}
def cameraNameLookup(self, camId):
""" Get the name for the camera with the given ID.
Args:
camId: ID of the camera of interest
Returns:
(Long) name of the given camera
"""
self._updateCameras()
return self.getInfo(camId)['name_long']
def cameraIdLookup(self, namePrefix):
""" Get the ID(s) for the camera(s) who's name starts with a given string.
Args:
namePrefix: prefix for the name of the camera(s) of interest
Returns:
List of IDs for camera(s) with given name
"""
self._updateCameras()
return [v['device_id'] for k, v in self.cams.iteritems() if v['name'].lower().startswith(namePrefix.lower())]
def snapshotUrlLookup(self, camId):
""" Get the Snapshot URL for a given camera.
Args:
camId: ID of the camera of interest
Returns:
Snapshot URL for the camera with the given ID
"""
info = self.getInfo(camId)
return info['snapshot_url']
def cameraInfo(self, camId):
""" Return info for the given camera.
Args:
camId: ID of the camera of interest
Returns:
JSON object containing information about the given camera
"""
self._updateCameras()
if camId not in self.cams:
raise APIError("Camera with ID {0} not found".format(camId))
info = self.cams[camId]
return info
def getSnapshot(self, camId):
""" Capture an image from the given camera.
Args:
camId: ID of the camera of interest
Returns:
JPEG image
"""
url = self.snapshotUrlLookup(camId)
r = requests.get(url)
r.raise_for_status()
if r.headers['content-length'] == 0:
# got empty image with success code, so throw an exception
raise requests.ConnectionError("Unable to get image from camera")
if r.headers['Content-Type'] != 'image/jpeg':
raise ValueError("Did not return a JPEG Image")
image = r.content
return image
#
# TEST CODE
#
if __name__ == '__main__':
from test_config import PRODUCT_ID, PRODUCT_SECRET, CA_FILE, CAM_NAMES_MAP, IMG_DIR
#### from test_config import AUTH_URL
nums = []
nest = NestAccount(PRODUCT_ID, PRODUCT_SECRET, CA_FILE)
cams = nest.cameras()
num = len(cams)
nums.append(num)
print("Cameras: {0}".format(num))
for camId, camInfo in cams.iteritems():
print(" Camera: {0}".format(camInfo['name_long']))
json.dump(camInfo, sys.stdout, indent=4, sort_keys=True)
print("")
camNames = nest.cameraNames()
num = len(camNames)
nums.append(num)
print("CameraNames: {0}".format(num))
json.dump(camNames, sys.stdout, indent=4, sort_keys=True)
print("")
camsNameMap = nest.camerasNameMap()
num = len(camsNameMap)
nums.append(num)
print("CamerasNameMap: {0}".format(num))
json.dump(camsNameMap, sys.stdout, indent=4, sort_keys=True)
print("")
if len(set(nums)) != 1:
print("ERROR: mismatch in number of cameras found {0}".format(nums))
sys.exit(1)
print("Snapshot URL:")
for camId, camName in camsNameMap.iteritems():
url = nest.snapshotUrlLookup(camId)
print(" {0}: {1}".format(camName, url))
print("CameraIdsLookup:")
for name, numIds in CAM_NAMES_MAP.iteritems():
ids = nest.cameraIdLookup(name)
print(" {0}: {1}".format(name, ids))
if len(ids) != numIds:
print("ERROR: got {0} IDs, wanted {1}".format(len(ids), numIds))
sys.exit(1)
print("Camera Info:")
for camId, camName in camsNameMap.iteritems():
info = nest.cameraInfo(camId)
print(" {0}:".format(camName))
json.dump(info, sys.stdout, indent=4, sort_keys=True)
print("")
break
print("Snapshot:")
for camId, camName in camsNameMap.iteritems():
path = os.path.join(IMG_DIR, camName + camId)
img = nest.getSnapshot(camId)
with open(path, "w") as outFile:
outFile.write(img)
print(" {0}".format(path))
print("SUCCESS")
```
|
{
"source": "jduanen/SensorNet",
"score": 3
}
|
#### File: sensors/PurpleAir/PurpleAir.py
```python
import argparse
import ast
import json
import logging
import os
import random
import re
import signal
import socket
import subprocess
import sys
import threading
import time
import yaml
from yaml import Loader
import paho.mqtt.client as mqtt
from purpleair.sensor import Sensor
from SensorNet import SensorNet, SubTopic, SUB_TOPICS
DEFAULTS = {
'logLevel': "INFO", #"DEBUG" #"WARNING",
'mqttBroker': "localhost",
'sensorsFile': "./sensors.yml"
}
DEF_SAMPLE_INTERVAL = 120 # 2mins between samples
MQTT_TOPIC_BASE = "/sensors/PurpleAir"
APP_VERSION = "1.0.0"
def getMacAddress(hostname):
"""Return the MAC address for the interface used to reach the given host
Inputs:
hostname: string name of the target host
Returns: string form of MAC address of interface used to reach given host
"""
ipAddr = socket.gethostbyname(hostname)
match = re.search(r"^.* dev (.*?) .*$", str(subprocess.check_output(["ip", "route", "get", ipAddr])))
assert match, f"Unable to find interface for {ipAddr}"
intf = match.group(1)
return subprocess.check_output(["cat", f"/sys/class/net/{intf}/address"]).strip().decode("utf-8")
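# Illustrative usage (hypothetical host name and MAC value):
#   getMacAddress("mqtt-broker.local")  ->  "aa:bb:cc:dd:ee:ff"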
class PurpleAir():
"""????
"""
def __init__(self, sensors, mqttBroker, sampleInterval=DEF_SAMPLE_INTERVAL, retries=3):
self.sensorsInput = sensors
self.mqttBroker = mqttBroker
self.sampleInterval = sampleInterval
self.retries = retries
self.sensorIds = list(sensors.keys())
self.sensors = {sensorId: Sensor(sensorId, parse_location=True) for sensorId in self.sensorIds}
        for sensorId, sensor in list(self.sensors.items()):
            if sensor.location_type != 'outside':
                logging.warning(f"Sensor '{sensorId}' not outside, removing this sensor from the list")
                del self.sensors[sensorId]
self.secureRandom = random.SystemRandom()
self.baseMsg = f"{MQTT_TOPIC_BASE}/{getMacAddress(self.mqttBroker)}"
self.cmdTopic = f"{self.baseMsg}/{SUB_TOPICS[SubTopic.COMMAND]}"
self.dataTopic = f"{self.baseMsg}/{SUB_TOPICS[SubTopic.DATA]}"
RSSI = 0
msgSpec = f"ID:int,Label:s,tempF:d3,pressure:f4.2,humidity:d3," + \
f"v:f3.2,v1:f3.2,v2:f3.2,v3:f3.2,v4:f3.2,v5:f3.2,v6:f3.2," + \
f"pm:f3.2,lastModified:d13,timeSinceModified:d"
self.startupMsg = f"Startup,{socket.gethostname()},PurpleAir,{APP_VERSION},{msgSpec},{RSSI}"
self.client = mqtt.Client("PurpleAir sensor proxy")
self.client.enable_logger(logging.getLogger(__name__))
if self.client.connect(self.mqttBroker):
logging.error(f"Failed to connect to MQTT broker '{self.mqttBroker}'")
raise Exception("Failed to connect to MQTT broker")
result, msgId = self.client.subscribe(f"{self.cmdTopic}")
if result:
logging.error(f"Failed to subscribe to topic '{self.cmdTopic}'")
raise Exception("Subscription failure")
self.client.on_message = self._onMessage
self.client.loop_start()
self.running = threading.Event()
self.running.clear()
self.thread = threading.Thread(target=self.run, name="PurpleAir")
def _onMessage(self, client, userData, message):
parts = message.topic.split('/')
print(f"MSG: {parts}")
        if len(parts) < 5 or len(parts) > 6 or parts[1] != 'sensors' or parts[-1] != SUB_TOPICS[SubTopic.COMMAND]:
            logging.warning(f"Unrecognized message: {message}")
            return
msg = message.payload.decode("utf-8")
#### TODO implement the command handler here
print("TBD")
def start(self):
"""????
"""
if self.running.isSet():
logging.debug("PurpleAir thread already running")
else:
logging.debug("Starting PurpleAir thread")
self.running.set()
self.thread.start()
def shutdown(self):
"""????
"""
logging.debug("Shutting down PurpleAir thread")
self.running.clear()
while self.isRunning():
time.sleep(1)
logging.debug("PurpleAir thread done")
def isRunning(self):
return self.thread.is_alive()
def run(self):
"""????
"""
self.client.publish(self.cmdTopic, payload=self.startupMsg)
logging.info(self.startupMsg)
RSSI = 0
while self.running.isSet():
tryNumber = 0
while True:
sensorId = self.secureRandom.choice(self.sensorIds)
logging.debug(f"Sampling sensor id #{sensorId}")
sensor = self.sensors[sensorId]
if sensor.is_useful():
data = sensor.parent_data
stats = ast.literal_eval(data['Stats'])
assert stats['timeSinceModified'] < 180000, f"Sensor {sensorId} not seen for {stats['timeSinceModified']}"
msg = f"{data['ID']},{data['Label']},{data['temp_f']},{data['pressure']},{data['humidity']},"
msg += ",".join([str(s) for s in stats.values()]) + f",{RSSI}"
#### TODO publish sensor data
self.client.publish(self.dataTopic, payload=msg)
logging.info(msg)
break
else:
logging.warning(f"Sensor '{sensorId}' is not useful, skipping")
tryNumber += 1
if tryNumber >= self.retries:
logging.warning(f"Failed to get good sample in {tryNumber} attempts, giving up")
break
logging.debug(f"Sleep {self.sampleInterval} seconds before sampling again")
time.sleep(self.sampleInterval)
'''
sn = SensorNet(options.deviceFile)
results = {}
with SensorManager(options.mqttBroker) as mgr:
msg = f"{options.cmd}={options.val}" if options.val else options.cmd
for devName in options.deviceNames:
results[devName] = {}
cmdTopic = sn.buildTopic(SubTopic.COMMAND, devName)
logging.debug(f"{devName}: {cmdTopic}, {msg}")
results[devName] = mgr.issueCommand(cmdTopic, msg)
json.dump(results, sys.stdout, indent=4)
print("")
'''
'''
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.client.loop_stop()
def _getResponse(self, cmdTopic):
"""Get the corresponding result for a given command topic.
"""
tmp = cmdTopic.split('/')
tmp[-1] = "response"
responseTopic = "/".join(tmp)
result, msgId = self.client.subscribe(responseTopic)
if result:
logging.error(f"Failed to subscribe to MQTT topic '{responseTopic}'")
raise Exception("Failed to subscribe to MQTT topic")
logging.debug(f"Subscribed to: {responseTopic}")
if self.msgQ.empty():
try:
response = self.msgQ.get(block=True, timeout=self.responseWaitTime)
if response[0] != responseTopic:
logging.error(f"Response mismatch: {response} != {responseTopic}")
return None
except Exception as ex:
logging.error(f"Get MQTT topic '{responseTopic}' timed out")
return None
else:
response = self.msgQ.get()
if response[0] != responseTopic:
logging.error(f"Mismatched response: {response} != {responseTopic}")
return None
payload = response[1] if response else None
result, msgId = self.client.unsubscribe(responseTopic)
if result:
logging.warning(f"Failed to unsubscribe to MQTT topic '{responseTopic}'")
else:
logging.debug(f"Unsubscribed from: {responseTopic}")
return payload
def issueCommand(self, cmdTopic, msg, retries=3):
"""Issue a given command to a device and get the response.
N.B. no response is expected when a 'reset' command is issued.
Inputs
cmdTopic: string with command topic for desired device
msg: string in either the form '<cmd>' or '<cmd>=<val>'
Returns
Response message or None if none is received after 'retries' attempts
"""
self.client.publish(cmdTopic, payload=msg)
response = None
if msg != "reset":
for _ in range(retries):
response = self._getResponse(cmdTopic)
if not response:
logging.debug("No response, retrying...")
continue
break
return response
'''
def run(options):
pa = PurpleAir(options.sensors, options.mqttBroker, sampleInterval=10) #### TMP TMP TMP
def shutdownHandler(signum, frame):
logging.debug(f"Caught signal: {signum}")
pa.shutdown()
for s in ('TERM', 'HUP', 'INT'):
sig = getattr(signal, 'SIG'+s)
signal.signal(sig, shutdownHandler)
pa.start()
while pa.isRunning():
time.sleep(10)
pa.shutdown()
def getOpts():
usage = f"Usage: {sys.argv[0]} [-v] [-L <logLevel>] [-l <logFile>] " + \
"[-m <mqttHost>] [-s <sensorsFile>]"
ap = argparse.ArgumentParser()
ap.add_argument(
"-L", "--logLevel", action="store", type=str,
default=DEFAULTS['logLevel'],
choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
help="Logging level")
ap.add_argument(
"-l", "--logFile", action="store", type=str,
help="Path to location of logfile (create it if it doesn't exist)")
ap.add_argument(
"-m", "--mqttBroker", action="store", type=str,
default=DEFAULTS['mqttBroker'],
help="Hostname for where the MQTT broker is running")
ap.add_argument(
"-s", "--sensorsFile", action="store", type=str,
default=DEFAULTS['sensorsFile'],
help="Path to file containing identifiers for local Purple Air sensors")
ap.add_argument(
"-v", "--verbose", action="count", default=0,
help="Enable printing of debug info")
opts = ap.parse_args()
if opts.logFile:
logging.basicConfig(filename=opts.logFile,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=opts.logLevel)
else:
logging.basicConfig(level=opts.logLevel,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
if not os.path.exists(opts.sensorsFile):
logging.error(f"Invalid sensor file: {opts.sensorsFile}")
sys.exit(1)
with open(opts.sensorsFile, "r") as f:
opts.sensors = yaml.load(f, Loader=Loader)
#### TODO read and parse the sensors file -- yml with description/location and id
if opts.verbose:
print(f" MQTT Broker: {opts.mqttBroker}")
print(f" Sensors File: {opts.sensorsFile}")
if opts.verbose > 1:
print(f" Sensors: {json.dumps(opts.sensors, indent=4, sort_keys=True)}")
else:
print(f" Sensors: {list(opts.sensors.keys())}")
return opts
if __name__ == '__main__':
opts = getOpts()
r = run(opts)
sys.exit(r)
```
#### File: SensorNet/tools/SensorNet.py
```python
from enum import Enum
import logging
import re
import sys
import yaml
from yaml import Loader
PREFIX = "/sensors"
class SubTopic(Enum):
DATA = 0
COMMAND = 1
RESPONSE = 2
ERROR = 3
STARTUP = 4
SUB_TOPICS = {
SubTopic.DATA: "data",
SubTopic.COMMAND: "cmd",
SubTopic.RESPONSE: "response",
SubTopic.ERROR: "error",
SubTopic.STARTUP: "startup",
}
SUB_TOPICS_MAP = {v: k for k, v in SUB_TOPICS.items()}
class SensorNet():
def __init__(self, path):
"""????
"""
self.path = path
with open(path, "r") as f:
self.devices = yaml.load(f, Loader=Loader)
#### TODO validate file contents
'''
for addr in opts.deviceAddrs:
if not re.match("[0-9a-f]{2}([-:]?)[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$", addr.lower()):
logging.error(f"Invalid device address: {addr}")
sys.exit(1)
'''
self.nicknames = {info['MACaddress']: name for name, info in self.devices.items()}
def getDevices(self):
"""Return ????
"""
return self.devices
def getDeviceInfo(self, nickname):
"""Return information about a given device.
Inputs
nickname: short string uniquely identifying a device
Returns
dict with information about the given device
"""
return self.devices[nickname]
def getNickname(self, macAddr):
"""Return the device nickname associated with a given WiFi MAC address
Inputs
macAddr: string in form of six hex bytes separated by colons
Returns
short string identifier for the device with the given MAC address
"""
return self.nicknames[macAddr]
def buildTopic(self, subTopic, nickname):
"""Create a topic of a given type for a given device
Inputs
subTopic: member of SubTopic enum class
nickname: short string uniquely identifying a device
Returns
string containing desired topic
"""
assert isinstance(subTopic, SubTopic), "subTopic arg must be a member of the SubTopic enum class"
applName = self.devices[nickname]['application']
macAddr = self.devices[nickname]['MACaddress']
return f"{PREFIX}/{applName}/{macAddr}/{SUB_TOPICS[subTopic]}"
def parseSample(self, sampleParts):
"""Parse an sample/event string logged by a SensorNet device
Inputs
sampleParts: list of the comma-separated parts of a sample line
(i.e., timestamp, topic, payload)
Returns
dict with each of the components of the given sample broken out
"""
timestamp = sampleParts[0]
topic = sampleParts[1]
subparts = topic.split('/')
if subparts[0] != "" or subparts[1] != PREFIX.strip('/'):
logging.error(f"Unrecognized topic: {sampleParts}")
raise Exception("Unrecognized topic")
macAddr = subparts[-2]
appl = "/".join(subparts[2:-2])
values = None
if subparts[-1] == "data":
sampleType = "data"
subType = None
values = sampleParts[2:]
elif subparts[-1] == "cmd":
sampleType = "cmd"
if sampleParts[2] == "Startup":
subType = "startup"
values = {
'HW': sampleParts[3],
'applName': sampleParts[4],
'version': sampleParts[5],
'schema': sampleParts[6:-1],
'RSSI': sampleParts[-1]
}
elif sampleParts[2].find('=') > 1:
subType = "setCmd"
assignment = sampleParts[2].split('=')
values = {'cmd': assignment[0], 'value': assignment[1]}
else:
subType = "getCmd"
values = {'cmd': sampleParts[2]}
else:
logging.error(f"Unrecognized sample type: {sampleParts}")
raise Exception("Unrecognized sample type")
return {
'timestamp': timestamp,
'topic': topic,
'application': appl,
'macAddr': macAddr,
'nickname': self.nicknames[macAddr],
'type': sampleType,
'subtype': subType,
'values': values
}
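    # Sketch of the expected result for a hypothetical data sample:
    #   parseSample(["2021-01-01 12:00:00", "/sensors/sensorB/12:34:56:78:9a:de/data", "1", "2"])
    #   -> {'type': "data", 'application': "sensorB", 'macAddr': "12:34:56:78:9a:de",
    #       'nickname': <lookup from the devices file>, 'values': ["1", "2"], ...}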
#
# TESTS
#
if __name__ == '__main__':
sn = SensorNet("./exampleDevices.yml")
devs = sn.getDevices()
assert len(devs) == 4, f"Wrong number of devices"
b2 = sn.getDeviceInfo('b2')
assert set(b2.keys()) == set(['application', 'MACaddress', 'location']), f"Mismatched device fields"
dataTopic = sn.buildTopic(SubTopic.DATA, 'b1')
assert dataTopic == "/sensors/sensorB/12:34:56:78:9a:de/data", f"Incorrect data topic: {dataTopic}"
cmdTopic = sn.buildTopic(SubTopic.COMMAND, 'c')
assert cmdTopic == "/sensors/sensorC/12:34:56:78:9a:12/cmd", f"Incorrect command topic: {cmdTopic}"
print("All tests: PASSED")
```
#### File: SensorNet/tools/SensorPlot.py
```python
import argparse
import csv
from datetime import datetime
from io import StringIO
import json
import logging
import os
import sys
import matplotlib.pylab as plt
import numpy as np
import pandas as pd
DEFAULTS = {
'logLevel': "INFO", #"DEBUG" #"WARNING",
'samplesFile': "/home/jdn/Data/SensorNet/sensornet.csv"
}
RESAMPLE_FREQS = {
'minutes': "T",
'hours': "H",
'days': "D",
'weeks': "W",
'months': "M"
}
def radPlotter(df):
## consider dropping CPM and just go with uSv/h
return df.plot(secondary_y='Vcc')
def spsPlotter(df):
return df.plot(secondary_y='tps')
def pmsPlotter(df):
return df.plot.line()
def bnnPlotter(df):
## consider dropping volts
return df.plot(secondary_y='grams')
def whPlotter(df):
return df.plot.line()
SENSORS = {
'Radiation': {
'topic': "/sensors/Radiation/",
'description': "Radiation sensor (SBT-11A)",
'plotters': {
'1.0.0': radPlotter
}
},
'AirQualityPMS': {
'topic': "/sensors/AirQuality/PMS/",
'description': "Air quality sensor (PMS7003)",
'plotters': {
'1.0.0': pmsPlotter
}
},
'AirQualitySPS': {
'topic': "/sensors/AirQuality/SPS/",
'description': "Air quality sensor (SPS30)",
'plotters': {
'1.0.0': spsPlotter
}
},
'BirdyNumNum': {
'topic': "/sensors/BirdyNumNum",
'description': "Hummingbird feeder sensor",
'plotters': {
'1.0.2': bnnPlotter
}
},
'WaterHeater': {
'topic': "/sensors/WaterHeater",
'description': "Water heater temperature sensor",
'plotters': {
'0.0.0': whPlotter
}
}
}
#### TODO do filtering/data-prep/stats with Pandas, pass off DFs to plotters
#### TODO create separate DFs for each sensor/device/schema and handle each separately
def run(options):
    def _convDtype(typeStr):
        if typeStr.endswith('d'):
            typ = "int"
        elif typeStr.endswith('f'):
            typ = "float"
        elif typeStr.endswith('s'):
            typ = "str"
        else:
            logging.warning(f"Unknown data type: {typeStr}")
            typ = "str"
        return typ
streams = {}
with open(options.samplesFile, 'r') as f:
reader = csv.reader(f)
for row in reader:
if len(row) < 3:
logging.warning(f"Bad line in csv file: {row}")
continue
parts = row[1].split('/')
appName = parts[2] if len(parts) == 5 else "".join(parts[2:-2])
if (appName not in options.sensors) or ((options.devices != None) and (parts[3] not in options.devices)):
continue
streamName = "_".join(parts[2:-1])
if row[1].endswith("/cmd"):
if streamName not in streams:
dataTypes = {'time': "str", 'topic': "str"}
dataTypes.update({h.split(':')[0]: _convDtype(h.split(':')[1]) for h in row[6:]})
streams[streamName] = {
'sensor': row[1].split('/')[2],
'device': row[1].split('/')[-2],
'appName': row[4],
'version': row[5],
'header': list(dataTypes.keys()),
'dataTypes': dataTypes,
'file': StringIO()
}
continue
if streamName not in streams:
continue
streams[streamName]['file'].write(",".join(row) + "\n")
if not streams:
logging.error("No data streams matching the given sensor/device specs")
sys.exit(1)
logging.info(f"Read data streams: {[k + '_' + v['version'] for k, v in streams.items()]}")
for name in list(streams.keys()):
streams[name]['file'].flush()
streams[name]['file'].seek(0)
if (streams[name]['appName'] not in options.sensors) or (options.devices and streams[name]['device'] not in options.devices):
streams[name]['file'].close()
del streams[name]
axs = []
for name in streams.keys():
stream = streams[name]
header = stream['header']
df = pd.read_csv(stream['file'],
sep=',',
names=header,
dtype=stream['dataTypes'],
index_col=0,
parse_dates=[0])
if options.verbose:
buf = StringIO()
df.info(options.verbose > 1, buf=buf, max_cols=len(header), show_counts=True)
print(buf.getvalue())
if False: #### TMP TMP TMP
df.to_csv(f"/tmp/{name}.csv", index=False)
if len(df.index) < 1:
logging.warning(f"No data for stream '{name}', skipping it")
continue
firstDatetime = df.first_valid_index().to_pydatetime()
lastDatetime = df.last_valid_index().to_pydatetime()
startDatetime = firstDatetime
if options.startDate:
if options.startDate < firstDatetime or options.startDate > lastDatetime:
logging.error(f"Invalid start date: {options.startDate} not between {firstDatetime} and {lastDatetime}, skipping {name}")
continue
startDatetime = options.startDate
endDatetime = lastDatetime
if options.endDate:
if options.endDate < firstDatetime or options.endDate > lastDatetime:
logging.error(f"Invalid end date: {options.endDate} not between {firstDatetime} and {lastDatetime}")
sys.exit(1)
if options.endDate < options.startDate:
logging.error(f"Invalid end date: {options.endDate} not after {options.startDate}")
sys.exit(1)
endDatetime = options.endDate
duration = endDatetime - startDatetime
samples = df[startDatetime:endDatetime]
if options.resample:
# samples = samples.rolling(options.avgSamples).mean()
samples = samples.resample(RESAMPLE_FREQS[options.resample]).mean()
numSamples = len(samples)
samplesPerMin = (numSamples / duration.total_seconds()) * 60.0
if options.verbose:
print(f" Sensor: {name}")
print(f" Start date: {startDatetime}")
print(f" End date: {endDatetime}")
print(f" Duration: {duration}")
print(f" Number of samples: {numSamples}")
print(f" Samples/min: {samplesPerMin:.6f}")
#### TODO slice df based on start/end times
axs.append(SENSORS[stream['appName']]['plotters'][stream['version']](samples))
plt.show()
def validDate(dateStr):
try:
return datetime.fromisoformat(dateStr)
except ValueError:
raise argparse.ArgumentTypeError(f"Invalid date: {dateStr}")
def getOpts():
usage = f"Usage: {sys.argv[0]} [-v] [-L <logLevel>] [-l <logFile>] " + \
"[-s <samplesFile>] [-r <resample>] [-S <isodate>] [-E <isodate>] [-d <device>]* {<sensor>}+"
ap = argparse.ArgumentParser()
ap.add_argument(
"-r", "--resample", action="store", choices=RESAMPLE_FREQS.keys(),
default="minutes", help="Resample to the given frequency")
ap.add_argument(
"-d", "--devices", action="append", type=str,
help="MAC address of device to explore")
ap.add_argument(
"-E", "--endDate", action="store", type=validDate,
help="End date (in ISO8016 format) -- defaults to last date in data log")
ap.add_argument(
"-L", "--logLevel", action="store", type=str,
default=DEFAULTS['logLevel'],
choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
help="Logging level")
ap.add_argument(
"-l", "--logFile", action="store", type=str,
help="Path to location of logfile (create it if it doesn't exist)")
ap.add_argument(
"-S", "--startDate", action="store", type=validDate,
help="Start date (in ISO8016 format) -- defaults to first date in data log")
ap.add_argument(
"-s", "--samplesFile", action="store", type=str,
default=DEFAULTS['samplesFile'],
help="Path to location of file with sensor samples")
ap.add_argument(
"-v", "--verbose", action="count", default=0,
help="Enable printing of debug info")
ap.add_argument(
"sensors", nargs="+", type=str, choices=SENSORS.keys(),
help="Type of sensor to explore")
opts = ap.parse_args()
if opts.logFile:
logging.basicConfig(filename=opts.logFile,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=opts.logLevel)
else:
logging.basicConfig(level=opts.logLevel,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
if not os.path.exists(opts.samplesFile):
logging.error(f"Samples file '{opts.samplesFile}' doesn't exist")
sys.exit(1)
if opts.devices:
for dev in opts.devices:
if len(dev.split(":")) != 6:
logging.error(f"Invalid device MAC address: {dev}")
sys.exit(1)
if opts.verbose:
print(f" Sensor Type(s): {[s + ': ' + SENSORS[s]['description'] for s in opts.sensors]}")
if opts.devices:
print(f" Devices: {opts.devices}")
print(f" Samples File: {opts.samplesFile}")
print(f" Resample Freq.: {opts.resample}")
return opts
if __name__ == '__main__':
opts = getOpts()
r = run(opts)
sys.exit(r)
```
|
{
"source": "jduanen/teslawatch",
"score": 2
}
|
#### File: teslawatch/notifiers/sms.py
```python
import sys
'''
--------------------
import requests
requests.post('https://textbelt.com/text', {
'phone': '5557727420',
'message': 'Hello world',
'key': 'textbelt',
})
$ curl -X POST https://textbelt.com/text \
--data-urlencode phone='5557727420' \
--data-urlencode message='Hello world' \
-d key=textbelt
{"success":true,"textId":"2861516228856794","quotaRemaining":249}[jdn@jdnLinux teslawatch]$
$ curl https://textbelt.com/status/2861516228856794
{"success":true,"status":"DELIVERED"}
'''
'''
TODO:
* look at how I did the JBOD drivers and make a subdir with Event Function
* instantiate an EventHandler object that binds event types to Event Functions
* create structure with Event types (and args)
'''
def main(args):
if len(args) != 2:
sys.stderr.write("Error: invalid number of args '{0}' != 2\n".format(len(args)))
sys.exit(1)
print("SMS: '{0}'".format(args[1]))
#
# MAIN
#
if __name__ == '__main__':
main(sys.argv)
```
#### File: jduanen/teslawatch/tracker.py
```python
import queue
import sys
import time
import traceback
from geopy import distance
from teslawatch import geocoder, dictDiff
from regions import Region
'''
TODO
* generate parked->moving/stopped and parked/moving->stopped events
* look for geofence entry/exit events
* look for temperature events (too hot/cold)
* look for doors/windows open/unlocked for period of time events
* look for battery going below a threshold events
* add arg with dict of all the callbacks for the events of interest and the parameters for that type of event
* make hooks that can call arbitrary functions in addition to sending messages on events
- allow automation of locking/shutting/turning on/off climate, opening/closing stuff
* specify functions to call (potentially external/shell cmds, in addition to those in events.py) in configs file
* up the poll rate for drive state when moving to more frequent, less when stopped/parked
* poll frequency: driveState (D/R vs P), vehicleState, chargeState, {climateSettings, guiSettings}
* make the tracker keep track of previous states, detect transitions, and call the event object (with args relevent to the event)
- create list of event/arg tuples and send to eventHandler object for the car
* instantiate per-car eventHandler object in main loop and pass in as arg to the tracker
- generate all events when this first starts up
* instantiate a new location object everytime get a new location from the driveState API
- check if new location entered/exited a region
- create (home/work) regions from config spec, add new regions through various means
- location object encapsulates lat/lon, computes distances, give it list of region objects and get back boolean vector (inside/outside)
- keep track of regions here and detect transitions when new location given
* do compression -- keep last contents of each table, and suppress db store if no change (ignore/mask timestamps)
Questions
* make an event object that gets instantiated with params from the config file?
* allow change of events/event parameters while continuing to run the tracker, or start a new tracker?
EVENT TYPES: all events qualified by day-of-week and time-of-day ranges
* location state transition events
- parked->moving/stopped
- parked/moving->stopped
* geofence events (specify set of different types of geofence regions)
- enter region
- exit region
* temperature excursions
- dropped below low threshold
- rose above high threshold
* door/window/sunroof open/unlocked while parked, for greater than threshold of time
* battery level goes below a given threshold
'''
# event types recognized by the application
EVENT_TYPES = (
"STOPPED_MOVING",
"STARTED_MOVING",
"ENTER_REGION", # arg: regionId
"EXIT_REGION", # arg: regionId
)
# commands that can be sent on the trackers' command Queue
TRACKER_CMDS = ("PAUSE", "RESUME", "STOP")
class Tracker(object):
''' Object that encapsulates all of the state associated with a car that is being tracked
'''
def __init__(self, carObj, carDB, tables, settings, regions, notifier, inQ, outQ):
''' Construct a tracker object
Inputs
carObj: Car object for the car to track
carDB: CarDB object to log the data for the car being tracked
            tables: iterable of Tesla API table names to poll (e.g., 'driveState')
            settings: dict with per-table polling 'intervals' and event 'thresholds'
            regions: Region objects used to detect geofence entry/exit events
            notifier: object used to emit notifications for detected events
inQ: MP Queue object for receiving commands from the master
outQ: MP Queue object for returning status
'''
self.car = carObj
self.db = carDB
self.tables = tables
self.settings = settings
self.regions = regions
self.notifier = notifier
self.inQ = inQ
self.outQ = outQ
self.samples = {t: {'sample': {}, 'time': None} for t in tables}
def run(self):
''' Per-car process that polls the Tesla API, logs the data, and emits
notifications.
This is intended to be called by Multiprocessing.Process()
'''
CHANGING_FIELDS = set(['timestamp', 'gps_as_of'])
now = int(time.time())
try:
state = self.car.getCarState()
if self.db:
self.db.insertState(state)
prevLoc = geocoder.reverse((state['driveState']['latitude'],
state['driveState']['longitude']))
for tableName in self.samples:
self.samples[tableName]['sample'] = state[tableName]
self.samples[tableName]['time'] = now
carName = self.car.getName()
self.outQ.put("TRACKING {0}".format(carName))
while True:
try:
cmd = self.inQ.get_nowait()
if cmd == "STOP":
self.outQ.put("STOPPING {0}".format(carName))
print("Exiting:", self.car.vin) #### TMP TMP TMP
break
elif cmd == "PAUSE":
cmd = self.inQ.get()
while cmd != "RESUME":
cmd = self.inQ.get()
elif cmd == "RESUME":
pass
else:
sys.stderr.write("WARNING: unknown tracker command '{0}'".format(cmd))
except queue.Empty:
pass
#### TODO implement the poll loop
#### TODO get current Location object, create vector of In-Region booleans, compute other events, call notifier for events
events = {}
curTime = int(time.time())
for tableName in self.samples:
if curTime >= self.samples[tableName]['time'] + self.settings['intervals'][tableName]:
sample = self.car.getTable(tableName)
print(f"Sample: {tableName}; VIN: {self.car.vin}") #### TMP TMP TMP
add, rem, chg, _ = dictDiff(self.samples[tableName]['sample'], sample)
if self.db:
if add or rem:
self.outQ.put("Table {0} Schema Change: ADD={1}, REM={2}".
format(tableName, add, rem))
if not chg - CHANGING_FIELDS:
print(f"Write to DB: {self.car.vin}") #### TMP TMP TMP
self.db.insertRow(tableName, sample)
if tableName == 'driveState':
newLoc = geocoder.reverse((sample['latitude'],
sample['longitude']))
dist = distance.distance(prevLoc.point, newLoc.point).km
print(f"Distance: {dist} km; VIN: {self.car.vin}") #### TMP TMP TMP
if dist > self.settings['thresholds']['distance']:
print(f"Moved: {dist}")
#### TODO implement the logic to detect state transitions -- i.e., keep state, note the change, update accordingly
'''
if self.moving:
self.notifier.notify("STOPPED_MOVING", arg)
else:
self.notifier.notify("STARTED_MOVING", arg)
'''
self.samples[tableName]['sample'] = sample
self.samples[tableName]['time'] = curTime
#### call notifier and pass it events
print("Call Notifier:", self.car.vin) #### TMP TMP TMP
pass
#### TODO make the polling interval a function of the car's state
#### poll more frequently when driving and less when parked
print(f"Sleep: {self.car.vin}")
nextInterval = 1.0 #### TMP TMP TMP
time.sleep(nextInterval)
except Exception as e:
traceback.print_exc()
self.outQ.put("BAILING {0}: {1}".format(self.car.vin, e))
if self.db:
self.db.close()
#
# TESTING
#
if __name__ == '__main__':
#### TODO implement test cases
pass
```
|
{
"source": "jduan/saleor",
"score": 2
}
|
#### File: saleor/account/emails.py
```python
from urllib.parse import urlencode
from django.conf import settings
from django.contrib.auth.tokens import default_token_generator
from django.urls import reverse
from templated_email import send_templated_mail
from ..account import events as account_events
from ..celeryconf import app
from ..core.emails import get_email_base_context
from ..core.utils import build_absolute_uri
def send_user_password_reset_email(redirect_url, user):
"""Trigger sending a password reset email for the given user."""
token = default_token_generator.make_token(user)
send_password_reset_email_with_url.delay(redirect_url, user.email, token, user.pk)
def _send_password_reset_email(reset_url, recipient_email, user_id):
context = get_email_base_context()
context["reset_url"] = reset_url
send_templated_mail(
template_name="account/password_reset",
from_email=settings.DEFAULT_FROM_EMAIL,
recipient_list=[recipient_email],
context=context,
)
account_events.customer_password_reset_link_sent_event(user_id=user_id)
@app.task
def send_password_reset_email(context, recipient_email, user_id):
reset_url = build_absolute_uri(
reverse(
"account:reset-password-confirm",
kwargs={"uidb64": context["uid"], "token": context["token"]},
)
)
_send_password_reset_email(reset_url, recipient_email, user_id)
@app.task
def send_password_reset_email_with_url(redirect_url, recipient_email, token, user_id):
params = urlencode({"email": recipient_email, "token": token})
reset_url = "%(redirect_url)s?%(params)s" % {
"redirect_url": redirect_url,
"params": params,
}
_send_password_reset_email(reset_url, recipient_email, user_id)
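# For illustration (hypothetical values): redirect_url="https://shop.example.com/reset-password"
# and recipient "user@example.com" produce a reset_url of the form
# "https://shop.example.com/reset-password?email=user%40example.com&token=<token>".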
@app.task
def send_account_delete_confirmation_email(token, recipient_email):
delete_url = build_absolute_uri(
reverse("account:delete-confirm", kwargs={"token": token})
)
ctx = get_email_base_context()
ctx["delete_url"] = delete_url
send_templated_mail(
template_name="account/account_delete",
from_email=settings.DEFAULT_FROM_EMAIL,
recipient_list=[recipient_email],
context=ctx,
)
```
#### File: core/utils/url.py
```python
from urllib.parse import urlparse
from django.conf import settings
from django.core.exceptions import ValidationError
from django.http.request import validate_host
def validate_storefront_url(url):
"""Validate the storefront URL.
Raise ValidationError if URL isn't in RFC 1808 format
or it isn't allowed by ALLOWED_STOREFRONT_HOSTS in settings.
"""
try:
parsed_url = urlparse(url)
except ValueError as error:
raise ValidationError({"redirectUrl": str(error)})
if not validate_host(parsed_url.netloc, settings.ALLOWED_STOREFRONT_HOSTS):
raise ValidationError(
{
"redirectUrl": "%s this is not valid storefront address."
% parsed_url.netloc
}
)
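# Minimal usage sketch (hypothetical host list): with ALLOWED_STOREFRONT_HOSTS = ["shop.example.com"],
# validate_storefront_url("https://shop.example.com/cart") passes, while any URL with a different
# netloc raises ValidationError keyed on "redirectUrl".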
```
#### File: graphql/product/enums.py
```python
import graphene
from ...product import AttributeInputType
from ..core.enums import to_enum
AttributeInputTypeEnum = to_enum(AttributeInputType)
class AttributeTypeEnum(graphene.Enum):
PRODUCT = "PRODUCT"
VARIANT = "VARIANT"
class AttributeValueType(graphene.Enum):
COLOR = "COLOR"
GRADIENT = "GRADIENT"
URL = "URL"
STRING = "STRING"
class StockAvailability(graphene.Enum):
IN_STOCK = "AVAILABLE"
OUT_OF_STOCK = "OUT_OF_STOCK"
class ProductOrderField(graphene.Enum):
NAME = "name"
PRICE = "price"
DATE = "updated_at"
@property
def description(self):
if self == ProductOrderField.NAME:
return "Sort products by name."
if self == ProductOrderField.PRICE:
return "Sort products by price."
if self == ProductOrderField.DATE:
return "Sort products by update date."
raise ValueError("Unsupported enum value: %s" % self.value)
class OrderDirection(graphene.Enum):
ASC = ""
DESC = "-"
@property
def description(self):
if self == OrderDirection.ASC:
return "Specifies an ascending sort order."
if self == OrderDirection.DESC:
return "Specifies a descending sort order."
raise ValueError("Unsupported enum value: %s" % self.value)
class CollectionPublished(graphene.Enum):
PUBLISHED = "published"
HIDDEN = "hidden"
class ProductTypeConfigurable(graphene.Enum):
CONFIGURABLE = "configurable"
SIMPLE = "simple"
class ProductTypeEnum(graphene.Enum):
DIGITAL = "digital"
SHIPPABLE = "shippable"
class AttributeSortField(graphene.Enum):
NAME = "name"
SLUG = "slug"
VALUE_REQUIRED = "value_required"
IS_VARIANT_ONLY = "is_variant_only"
VISIBLE_IN_STOREFRONT = "visible_in_storefront"
FILTERABLE_IN_STOREFRONT = "filterable_in_storefront"
FILTERABLE_IN_DASHBOARD = "filterable_in_dashboard"
DASHBOARD_VARIANT_POSITION = "dashboard_variant_position"
DASHBOARD_PRODUCT_POSITION = "dashboard_product_position"
STOREFRONT_SEARCH_POSITION = "storefront_search_position"
AVAILABLE_IN_GRID = "available_in_grid"
@property
def description(self):
if self == AttributeSortField.NAME:
return "Sort attributes by name."
if self == AttributeSortField.SLUG:
return "Sort attributes by slug."
if self == AttributeSortField.VALUE_REQUIRED:
return "Sort attributes by the value required flag."
if self == AttributeSortField.IS_VARIANT_ONLY:
return "Sort attributes by the variant only flag."
if self == AttributeSortField.VISIBLE_IN_STOREFRONT:
return "Sort attributes by visibility in the storefront."
if self == AttributeSortField.FILTERABLE_IN_STOREFRONT:
return "Sort attributes by the filterable in storefront flag."
if self == AttributeSortField.FILTERABLE_IN_DASHBOARD:
return "Sort attributes by the filterable in dashboard flag."
if self == AttributeSortField.DASHBOARD_VARIANT_POSITION:
return "Sort variant attributes by their position in dashboard."
if self == AttributeSortField.DASHBOARD_PRODUCT_POSITION:
return "Sort product attributes by their position in dashboard."
if self == AttributeSortField.STOREFRONT_SEARCH_POSITION:
return "Sort attributes by their position in storefront."
if self == AttributeSortField.AVAILABLE_IN_GRID:
return (
"Sort attributes based on whether they can be displayed "
"or not in a product grid."
)
raise ValueError("Unsupported enum value: %s" % self.value)
```
#### File: graphql/product/utils.py
```python
from collections import defaultdict
from typing import Dict, List
import graphene
from django.utils.text import slugify
from ...product import AttributeInputType, models
from .interfaces import ResolvedAttributeInput
attribute_input_type = List[ResolvedAttributeInput]
attribute_map_type = Dict[str, models.Attribute]
def validate_attribute_input(instance: models.Attribute, values: List[str]):
if not values:
if not instance.value_required:
return
raise ValueError(f"{instance.slug} expects a value but none were given")
if instance.input_type != AttributeInputType.MULTISELECT and len(values) != 1:
raise ValueError(f"A {instance.input_type} attribute must take only one value")
def _resolve_attributes_input(
attribute_input: List[dict],
attributes_map_by_slug: attribute_map_type,
attributes_map_by_id: attribute_map_type,
) -> attribute_input_type:
"""This resolves a raw GraphQL input to proper attribute. Its job is to ensure
a backward compatibility with passing attributes by slug."""
resolved_input = [] # type: List[ResolvedAttributeInput]
    for attr_input in attribute_input:
        if "slug" in attr_input:
            attribute = attributes_map_by_slug.get(attr_input["slug"])
        elif "id" in attr_input:
            type_, attribute_id = graphene.Node.from_global_id(attr_input["id"])
            if type_ != "Attribute":
                raise ValueError(f"Couldn't resolve to a node: {attr_input['id']}")
            attribute = attributes_map_by_id.get(attribute_id)
        else:
            raise ValueError("The value ID or slug was not provided")
        if not attribute:
            raise ValueError(
                "The given attribute doesn't belong to the given product type."
            )
        values = attr_input.get("values")
        validate_attribute_input(attribute, values)
        resolved_input.append(ResolvedAttributeInput(instance=attribute, values=values))
return resolved_input
def attributes_to_json(
raw_input: List[dict], attributes_queryset
) -> Dict[str, List[str]]:
"""Transform attributes to the HStore representation.
Attributes configuration per product is stored in a HStore field as
a dict of IDs. This function transforms the list of `AttributeValueInput`
objects to this format.
"""
attributes_json = defaultdict(list)
passed_slugs = set()
attributes_map_by_slug = {} # type: attribute_map_type
attributes_map_by_id = {} # type: attribute_map_type
for attr in attributes_queryset:
attributes_map_by_slug[attr.slug] = attr
attributes_map_by_id[str(attr.id)] = attr
resolved_input = _resolve_attributes_input(
raw_input, attributes_map_by_slug, attributes_map_by_id
)
values_map = {}
for attr in attributes_queryset:
for value in attr.values.all():
values_map[value.slug] = value.id
for item in resolved_input:
passed_slugs.add(item.instance.slug)
for value in item.values:
value_id = values_map.get(value)
if value_id is None:
# `value_id` was not found; create a new AttributeValue
# instance from the provided `value`.
obj = item.instance.values.get_or_create(
name=value, slug=slugify(value)
)[0]
value_id = obj.pk
attributes_json[str(item.instance.pk)].append(str(value_id))
# Check that all required attributes were passed
for missing_slug in attributes_map_by_slug.keys() ^ passed_slugs:
attribute = attributes_map_by_slug[missing_slug] # type: models.Attribute
validate_attribute_input(attribute, [])
return attributes_json
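# Illustrative result of attributes_to_json (hypothetical primary keys):
#   raw_input = [{"slug": "color", "values": ["red"]}]
#   -> {"<color attribute pk>": ["<red value pk>"]}   # HStore-style dict of ID lists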
```
|
{
"source": "jdubansky/openstates.org",
"score": 3
}
|
#### File: openstates.org/testutils/factories.py
```python
import random
from openstates.data.models import Organization, Bill, LegislativeSession
def create_test_bill(
session,
chamber,
*,
sponsors=0,
actions=0,
votes=0,
versions=0,
documents=0,
sources=0,
subjects=None,
identifier=None,
):
chamber = Organization.objects.get(classification=chamber)
session = LegislativeSession.objects.get(identifier=session)
b = Bill.objects.create(
identifier=identifier or ("Bill " + str(random.randint(1000, 9000))),
title="Random Bill",
legislative_session=session,
from_organization=chamber,
subject=subjects or [],
)
for n in range(sponsors):
b.sponsorships.create(name="Someone")
for n in range(actions):
b.actions.create(
description="Something", order=n, organization=chamber, date="2020-06-01"
)
for n in range(votes):
b.votes.create(
identifier="A Vote Occurred",
organization=chamber,
legislative_session=session,
)
for n in range(versions):
b.versions.create(note="Version")
for n in range(documents):
b.documents.create(note="Document")
for n in range(sources):
b.sources.create(url="http://example.com")
return b
def create_test_vote(bill, *, yes_count=0, no_count=0, yes_votes=None, no_votes=None):
vote = bill.votes.create(
identifier="test vote",
organization=bill.from_organization,
legislative_session=bill.legislative_session,
)
vote.counts.create(option="yes", value=yes_count)
vote.counts.create(option="no", value=no_count)
for name in yes_votes or []:
vote.votes.create(option="yes", voter_name=name)
for name in no_votes or []:
vote.votes.create(option="no", voter_name=name)
```
|
{
"source": "jdubkim/Self-play-on-Multi-Sankes-Environment",
"score": 2
}
|
#### File: Self-play-on-Multi-Sankes-Environment/src/dqn2015.py
```python
import numpy as np
import tensorflow as tf
import gym
from gym import wrappers
import random
from collections import deque
from pathlib import Path
from typing import List
import dqn
import setting
DISCOUNT_RATE = 0.99
REPLAY_MEMORY = 50000
BATCH_SIZE = 64
TARGET_UPDATE_FREQUENCY = 5
MAX_EPISODES = 5000
class DQN2015:
def __init__(self, env: gym.Env):
self.env = env # environment
        self.input_size = np.empty((env.observation_space.shape[0], env.observation_space.shape[1], 3))  # 224 * 256 * 3 (placeholder array carrying the observation shape)
self.output_size = 6 # Num of Arrow Keys
def replay_train(self, mainDQN: dqn.DQN, targetDQN: dqn.DQN, train_batch: list) -> float:
x_stack = np.empty(0).reshape(0, mainDQN.input_size)
y_stack = np.empty(0).reshape(0, mainDQN.output_size)
# Get stored information from the buffer
for state, action, reward, next_state, done in train_batch:
if state is None:
print("None State, ", action, " , ", reward, " , ", next_state, " , ", done)
else:
Q = mainDQN.predict(state)
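                # Bellman target: r for terminal transitions, otherwise r + gamma * max_a' Q_target(s', a')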
if done:
Q[0, action] = reward
else:
Q[0, action] = reward + DISCOUNT_RATE * np.max(targetDQN.predict(next_state))
y_stack = np.vstack([y_stack, Q])
x_stack = np.vstack([x_stack, state.reshape(-1, mainDQN.input_size)]) # to fit for super mario
# Train our network using target and predicted Q values on each episode
return mainDQN.update(x_stack, y_stack)
def get_copy_var_ops(self, *, dest_scope_name="target", src_scope_name="main"):
# Copy variables in mainDQN to targetDQN
# Update weights in mainDQN to targetDQN
op_holder = []
src_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=src_scope_name)
dest_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=dest_scope_name)
for src_var, dest_var in zip(src_vars, dest_vars):
op_holder.append(dest_var.assign(src_var.value()))
return op_holder
def bot_play(self, mainDQN):
# See our trained network in action
state = self.env.reset()
reward_sum = 0
while True:
self.env.render()
action = np.argmax(mainDQN.predict(state))
state, reward, done, _ = self.env.step(action)
reward_sum += reward
if done:
print("Total score: {}".format(reward_sum))
break
def run(self):
print(self.env.observation_space.shape[0], self.env.observation_space.shape[1], self.output_size)
# store the previous observations in replay memory
replay_buffer = deque()
with tf.Session() as sess:
mainDQN = dqn.DQN(sess, self.input_size, self.output_size, name="main")
targetDQN = dqn.DQN(sess, self.input_size, self.output_size, name="target")
self.saver = tf.train.Saver()
model_stored = Path("/models/")
if model_stored.is_file():
self.saver.restore(sess, "/models/model.ckpt")
print("Existing model restored")
else:
tf.global_variables_initializer().run()
# initial copy q_net -> target_net
copy_ops = self.get_copy_var_ops(dest_scope_name="target", src_scope_name="main")
sess.run(copy_ops)
for episode in range(MAX_EPISODES):
e = 1. / ((episode / 10) + 1)
done = False
max_distance = 0
prev_pos = 0
state = self.env.reset()
while not done:
if np.random.rand(1) < e or state is None or state.size == 1:
action = self.env.action_space.sample()
else:
# action = np.argmax(mainDQN.predict(state))
                        action = mainDQN.predict(state).flatten().tolist()  # flatten the prediction and convert it to a list
                        for i in range(len(action)):  # the action list must contain only integer 1s and 0s
if action[i] > 0.5:
action[i] = 1 #integer 1 only, no 1.0
else:
action[i] = 0 #integer 0 only, no 0.0
# Get new state and reward from environment
next_state, reward, done, info = self.env.step(action)
#(info)
current_distance = info['distance']
if done: # Death or stayed more than 10000 steps
reward -= 3
if current_distance > prev_pos: # Move right
reward += 0.5
elif current_distance < prev_pos: # Move left
reward -= 1.0
if current_distance - prev_pos > 8: # Move right fast
reward += 1.0
elif current_distance - prev_pos < -8: # Move left fast
reward -= 1.5
#if is_level_cleared:
# reward += 2
prev_pos = current_distance
# Save the experience to our buffer
replay_buffer.append((state, action, reward, next_state, done))
if len(replay_buffer) > REPLAY_MEMORY:
replay_buffer.popleft()
state = next_state
# Check if second level is unlocked
if not info['locked_levels'][1]:
pass
if episode % 10 == 1: # train every 10 episode
# Get a random batch of experiences
for _ in range(50):
minibatch = random.sample(replay_buffer, 10)
loss, _ = self.replay_train(mainDQN, targetDQN, minibatch)
print("Loss: ", loss)
# copy q_net -> target_net
sess.run(copy_ops)
save_path = self.saver.save(sess, 'models/model.ckpt')
print("Model saved in file: %s" % save_path)
print("Episode %d: Maximum distance: %d"%(episode, current_distance))
# See our trained bot in action
env2 = wrappers.Monitor(self.env, 'gym-results', force=True)
for i in range(200):
self.bot_play(mainDQN)
env2.close()
```
#### File: Self-play-on-Multi-Sankes-Environment/src/dqn.py
```python
import numpy as np
import tensorflow as tf
class DQN:
def __init__(self, session: tf.Session, input: np.ndarray, output: int, name: str = "main") -> None:
self.session = session
self.input = input
self.input_size = input.size
self.output_size = output
self.net_name = name
self._build_network()
def _build_network(self, h_size = 16, l_rate = 0.001) -> None:
with tf.variable_scope(self.net_name):
self._X = tf.placeholder(tf.float32, [None, self.input_size], name = "input_x")
x, y, z = self.input.shape
net = tf.reshape(self._X, [-1, x, y, z]) # 224 * 256 * 3
padding = 'SAME'
activation = tf.nn.relu
init = tf.truncated_normal_initializer(mean=0.0, stddev=2e-2)
#1st cnn layer
net = tf.layers.conv2d(inputs=net, name='layer_conv1',
filters=32, kernel_size=5, strides=1,
padding=padding,
kernel_initializer=init, activation=activation)
#2nd cnn layer
net = tf.layers.conv2d(inputs=net, name='layer_conv2',
filters=32, kernel_size=5, strides=2,
padding=padding,
kernel_initializer=init, activation=activation)
#3rd cnn layer
net = tf.layers.conv2d(inputs=net, name='layer_conv3',
filters=64, kernel_size=5, strides=4,
padding=padding,
kernel_initializer=init, activation=activation)
net = tf.contrib.layers.flatten(net)
# First fully-connected (aka. dense) layer.
net = tf.layers.dense(inputs=net, name='layer_fc1', units=1024,
kernel_initializer=init, activation=activation)
net = tf.layers.dense(inputs=net, name='layer_fc2', units=1024,
kernel_initializer=init, activation=activation)
# Final fully-connected layer.
net = tf.layers.dense(inputs=net, name='layer_fc_out', units=self.output_size,
kernel_initializer=init, activation=None)
self._Qpred = net
self._Y = tf.placeholder(tf.float32, shape=[None, self.output_size])
self._loss = tf.losses.mean_squared_error(self._Y, self._Qpred)
self._train = tf.train.AdamOptimizer(learning_rate = l_rate).minimize(self._loss)
def predict(self, state: np.ndarray) -> np.ndarray:
""" Returns Q(s, a)
"""
x = np.reshape(state, [-1, self.input_size])
return self.session.run(self._Qpred, feed_dict={self._X: x})
def update(self, x_stack: np.ndarray, y_stack: np.ndarray) -> list:
feed = {
self._X: x_stack,
self._Y: y_stack
}
return self.session.run([self._loss, self._train], feed)
```
#### File: Self-play-on-Multi-Sankes-Environment/src/evaluate_snake.py
```python
import gym
import numpy as np
from policies import CnnPolicy
import time
import joblib
import multiprocessing
import tensorflow as tf
import sys
import argparse
import gym_snake
from config import Config
import utils
from utils import WarpFrame
parser = argparse.ArgumentParser(description='Visualize trained snakes in the Slitherin environment')
parser.add_argument("--n-snakes", default=1, help="number of snakes in the environment")
parser.add_argument("--file1", type=str, default="example_model.pkl", help="file to load")
parser.add_argument("--file2", type=str, default="example_model.pkl", help="file to load")
def load_act_model(load_file, model_scope, env, nenvs=1, num_actions=5):
print('Loading from...', load_file)
ob_shape = utils.get_shape(env.observation_space)
ac_space = env.action_space
sess = tf.get_default_session()
act = CnnPolicy(sess, ob_shape, ac_space, nenvs, 1, model_scope, reuse=False)
with tf.variable_scope(model_scope):
params = tf.trainable_variables(model_scope)
loaded_params = joblib.load(Config.MODEL_DIR + load_file)
restores = []
for p, loaded_p in zip(params, loaded_params):
restores.append(p.assign(loaded_p))
sess.run(restores)
return act
def main():
args = parser.parse_args()
num_snakes = int(args.n_snakes)
Config.set_num_snakes(num_snakes)
print("Num snakes is ", num_snakes)
agent_file1 = args.file1
agent_file2 = args.file2
tf.Session().__enter__()
env = gym.make('snake-new-multiple-v0')
env = WarpFrame(env)
act0 = load_act_model(agent_file1, 'model', env)
act1 = load_act_model(agent_file2, 'model_2', env)
act2 = None
if num_snakes == 3:
act2 = load_act_model(agent_file2, 'model_3', env)
while True:
time.sleep(0.5)
obs, done = env.reset(), False
episode_rew = 0
t_step = 0
has_transitioned = False
last_done = False
states0s = act0.initial_state
states1s = act1.initial_state
states2s = None
if act2 is not None:
states2s = act2.initial_state
while True:
obs = obs.__array__()
obs = obs.reshape((1,) + np.shape(obs))
obs0 = obs[:,:,:,0:3]
obs1 = obs[:,:,:,3:6]
obs2 = None
if num_snakes == 3:
obs2 = obs[:,:,:,6:9]
action0, _, states0s, _ = act0.step(obs0, states0s, [last_done])
action1, _, states1s, _ = act1.step(obs1, states1s, [last_done])
if num_snakes == 3:
action2, _, states2s, _ = act2.step(obs2, states2s, [last_done])
action = [action0[0], action1[0]]
if num_snakes == 3:
action.append(action2[0])
obs, rew, done, info = env.step(action)
# print("reward: {0}, done: {1}, info: {2}".format(rew, done, info))
last_done = done
episode_rew += rew
sleep_time = 0
if info["num_snakes"] <= 1:
sleep_time = .05
if not has_transitioned:
has_transitioned = True
else:
sleep_time = .05
env.render()
if sleep_time > 0:
time.sleep(sleep_time)
t_step += 1
if info["num_snakes"] <= 0:
break
        return episode_rew, t_step
if __name__ == '__main__':
main()
```
#### File: gym_snake/envs/snake_multiple_env_new.py
```python
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
from gym.envs.classic_control import rendering
from gym_snake.core.new_world import Snake, World
class NewMultipleSnakes(gym.Env):
def __init__(self, size=(10, 10), n_snakes=2, n_fruits=4, screen_res=300):
self.SIZE = size
self.dim = size[0]
self.current_step = 0
self.n_snakes = n_snakes
self.n_fruits = n_fruits
self.screen_res = screen_res
self.seed()
# Create the world
self.world = World(size, n_snakes=self.n_snakes, n_fruits=self.n_fruits, seed=self.np_rand)
self.action_space = spaces.Discrete(5)
self.viewer = None
def seed(self, seed=None):
self.np_rand, seed = seeding.np_random(seed)
return [seed]
def reset(self):
self.current_step = 0
# Create world
# self.world.free()
self.world = World(self.SIZE, n_snakes=self.n_snakes, n_fruits=self.n_fruits, seed=self.np_rand)
self.steps_beyond_done = None
return self.world.get_multi_snake_obs()
def step(self, actions):
reward, done = self.world.move_snakes(actions)
if done:
reward = -1
self.current_step += 1
done = self.current_step >= 2000 or done
n_alives = len(self.world.snakes) - len(self.world.dead_snakes)
# print("n snakes is ", self.world.snakes)
# print("dead snakes is ", self.world.dead_snakes)
return self.world.get_multi_snake_obs(), reward, done, {"ale.lives": 1, "num_snakes": n_alives}
def render(self, mode='human'):
dim = self.dim
screen_dim = self.screen_res
view_dim = dim + 2
grid_res = screen_dim / view_dim
if self.viewer is None:
self.viewer = rendering.Viewer(screen_dim, screen_dim)
self.grids = []
for i in range(view_dim):
for j in range(view_dim):
l = grid_res * i
r = grid_res * (i + 1)
b = grid_res * j
t = grid_res * (j + 1)
grid = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
self.viewer.add_geom(grid)
self.grids.append(grid)
obs = self.world.get_obs_world()
for i in range(view_dim):
for j in range(view_dim):
ik = i * view_dim + j
pixel = obs[i][j]
self.grids[ik].set_color(pixel[0]/255, pixel[1]/255, pixel[2]/255)
return self.viewer.render(return_rgb_array=mode == 'rgb_array')
def close(self):
if self.viewer:
self.viewer.close()
```
#### File: Self-play-on-Multi-Sankes-Environment/src/ppo_multi_agent.py
```python
import os
import time
import sys
import datetime
import joblib
import numpy as np
import os.path as osp
import tensorflow as tf
sys.path.append('../')
from baselines import logger
from collections import deque
from baselines.common import explained_variance
import multiprocessing
import sys
from gym import spaces
import random
import gym
from config import Config
import utils
class MultiModel(object):
def __init__(self, main_model, opponent_model1, opponent_model2=None):
self.opponent_model1 = opponent_model1
self.opponent_model2 = opponent_model2
def multi_step(obs, opponent_obs1, opponent_obs2, states, dones):
actions, values, ret_states, neglogpacs = main_model.step(obs, states, dones)
if self.opponent_model1 is None:
opponent_actions1 = [1 for i in range(len(actions))]
else:
opponent_actions1, _, _, _ = self.opponent_model1.step(opponent_obs1, states, dones)
if self.opponent_model2 is None:
opponent_actions2 = [1 for i in range(len(actions))]
else:
opponent_actions2, _, _, _ = self.opponent_model2.step(opponent_obs2, states, dones)
if self.opponent_model2 is None:
self.full_actions = list(zip(actions, opponent_actions1))
else:
self.full_actions = list(zip(actions, opponent_actions1, opponent_actions2))
print("---- TEST ----")
print("action is : ", actions)
print("opponent actions1 is : ", opponent_actions1)
print("opponent actions2 is : ", opponent_actions2)
print("full actions is : ", self.full_actions)
return actions, values, ret_states, neglogpacs
self.multi_step = multi_step
self.value = main_model.value
class Model(object):
def __init__(self, *, policy, ob_shape, ac_space, nbatch_act, nbatch_train,
nsteps, ent_coef, vf_coef, max_grad_norm, scope_name):
sess = tf.get_default_session()
act_model = policy(sess, ob_shape, ac_space, nbatch_act, 1, scope_name, reuse=False)
train_model = policy(sess, ob_shape, ac_space, nbatch_train, nsteps, scope_name, reuse=True)
A = train_model.pdtype.sample_placeholder([None])
ADV = tf.placeholder(tf.float32, [None])
R = tf.placeholder(tf.float32, [None])
OLDNEGLOGPAC = tf.placeholder(tf.float32, [None])
OLDVPRED = tf.placeholder(tf.float32, [None])
LR = tf.placeholder(tf.float32, [])
CLIPRANGE = tf.placeholder(tf.float32, [])
neglogpac = train_model.pd.neglogp(A)
entropy = tf.reduce_mean(train_model.pd.entropy())
vpred = train_model.vf
vpredclipped = OLDVPRED + tf.clip_by_value(train_model.vf - OLDVPRED, - CLIPRANGE, CLIPRANGE)
vf_losses1 = tf.square(vpred - R)
vf_losses2 = tf.square(vpredclipped - R)
vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2))
ratio = tf.exp(OLDNEGLOGPAC - neglogpac)
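        # PPO clipped surrogate: maximize E[min(r*A, clip(r, 1-eps, 1+eps)*A)];
        # implemented below by minimizing the maximum of the negated terms.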
pg_losses = -ADV * ratio
pg_losses2 = -ADV * tf.clip_by_value(ratio, 1.0 - CLIPRANGE, 1.0 + CLIPRANGE)
pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2))
approxkl = .5 * tf.reduce_mean(tf.square(neglogpac - OLDNEGLOGPAC))
clipfrac = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), CLIPRANGE)))
loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef
with tf.variable_scope(scope_name):
params = tf.trainable_variables(scope_name)
grads = tf.gradients(loss, params)
if max_grad_norm is not None:
grads, _grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
grads = list(zip(grads, params))
trainer = tf.train.AdamOptimizer(learning_rate=LR, epsilon=1e-5)
_train = trainer.apply_gradients(grads)
def train(lr, cliprange, obs, returns, masks, actions, values, neglogpacs, states=None):
advs = returns - values
advs = (advs - advs.mean()) / (advs.std() + 1e-8)
td_map = {train_model.X:obs, A:actions, ADV:advs, R:returns, LR:lr,
CLIPRANGE:cliprange, OLDNEGLOGPAC:neglogpacs, OLDVPRED:values}
if states is not None:
td_map[train_model.S] = states
td_map[train_model.M] = masks
return sess.run(
[pg_loss, vf_loss, entropy, approxkl, clipfrac, _train],
td_map
)[:-1]
self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac']
def save(save_file):
ps = sess.run(params)
joblib.dump(ps, Config.MODEL_DIR + Config.EXPR_DIR + save_file)
update_placeholders = []
update_ops = []
for p in params:
update_placeholder = tf.placeholder(p.dtype, shape=p.get_shape())
update_placeholders.append(update_placeholder)
update_op = p.assign(update_placeholder)
update_ops.append(update_op)
def load(load_file):
loaded_params = joblib.load(Config.MODEL_DIR + Config.EXPR_DIR + load_file)
feed_dict = {}
for update_placeholder, loaded_p in zip(update_placeholders, loaded_params):
feed_dict[update_placeholder] = loaded_p
sess.run(update_ops, feed_dict=feed_dict)
self.train = train
self.train_model = train_model
self.act_model = act_model
self.step = act_model.step
self.value = act_model.value
self.initial_state = act_model.initial_state
self.save = save
self.load = load
tf.global_variables_initializer().run(session=sess) #pylint: disable=E1101
class Runner(object):
def __init__(self, *, env, model, opponent_model1, opponent_model2=None, nsteps, gamma, lam):
self.env = env
self.model = MultiModel(model, opponent_model1, opponent_model2)
nenv = env.num_envs
input_shape = utils.get_shape(env.observation_space)
self.primary_obs = np.zeros((nenv,) + input_shape, dtype=model.train_model.X.dtype.name)
self.opponent_obs1 = np.zeros((nenv,) + input_shape, dtype=model.train_model.X.dtype.name)
self.opponent_obs2 = None
if Config.NUM_SNAKES == 3:
self.opponent_obs2 = np.zeros((nenv,) + input_shape, dtype=model.train_model.X.dtype.name)
multi_agent_obs = env.reset()
self.use_multi_agent_obs(multi_agent_obs)
self.gamma = gamma
self.lam = lam
self.nsteps = nsteps
self.states = model.initial_state
self.dones = [False for _ in range(nenv)]
def use_multi_agent_obs(self, multi_agent_obs):
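        # The stacked observation carries one 3-channel frame per snake:
        # channels [0:3] for the primary agent, [3:6] for opponent 1, [6:9] for opponent 2.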
self.primary_obs[:] = multi_agent_obs[:,:,:,0:3]
self.opponent_obs1[:] = multi_agent_obs[:,:,:,3:6]
if Config.NUM_SNAKES == 3:
self.opponent_obs2[:] = multi_agent_obs[:, :, :, 6:9]
def run(self):
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [],[],[],[],[],[]
mb_states = self.states
epinfos = []
for _ in range(self.nsteps):
time.sleep(0.1)
actions, values, self.states, neglogpacs = \
self.model.multi_step(self.primary_obs, self.opponent_obs1, self.opponent_obs2, self.states, self.dones)
mb_obs.append(self.primary_obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(self.dones)
multi_agent_obs, rewards, self.dones, infos = self.env.step(self.model.full_actions)
self.use_multi_agent_obs(multi_agent_obs)
for info in infos:
maybeepinfo = info.get('episode')
if maybeepinfo: epinfos.append(maybeepinfo)
mb_rewards.append(rewards)
#batch of steps to batch of rollouts
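# stack the per-step lists into arrays of shape (nsteps, nenv, ...); sf01 later flattens them to (nsteps * nenv, ...) for minibatching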
mb_obs = np.asarray(mb_obs, dtype=self.primary_obs.dtype)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=bool)
last_values = self.model.value(self.primary_obs, self.states, self.dones)
#discount/bootstrap off value fn
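# Generalized Advantage Estimation (GAE), computed backwards through the rollout:
#   delta_t = r_t + gamma * V(s_{t+1}) * (1 - done_{t+1}) - V(s_t)
#   A_t     = delta_t + gamma * lam * (1 - done_{t+1}) * A_{t+1}
# the value targets are then returns = advantages + values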
mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
for t in reversed(range(self.nsteps)):
if t == self.nsteps - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[t+1]
nextvalues = mb_values[t+1]
delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
mb_advs[t] = lastgaelam = delta + self.gamma * self.lam * nextnonterminal * lastgaelam
mb_returns = mb_advs + mb_values
return (*map(sf01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs)),
mb_states, epinfos)
def sf01(arr):
"""
swap and then flatten axes 0 and 1
"""
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
def constfn(val):
def f(_):
return val
return f
def learn(*, policy, env, nsteps, total_timesteps, ent_coef, lr,
vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95,
log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2,
save_interval=0):
if isinstance(lr, float):
lr = constfn(lr)
else:
assert callable(lr)
if isinstance(cliprange, float):
cliprange = constfn(cliprange)
else:
assert callable(cliprange)
total_timesteps = int(total_timesteps)
csv_writer = logger.CSVOutputFormat('{0}.csv'.format(Config.EXPR_NAME))
tensorboard_writer = logger.TensorBoardOutputFormat('./tensorboard/ppo/')
nenvs = env.num_envs
ob_shape = utils.get_shape(env.observation_space)
ac_space = env.action_space
nbatch = nenvs * nsteps
nbatch_train = nbatch // nminibatches
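# each update collects nbatch = nenvs * nsteps transitions and optimizes them in nminibatches slices of nbatch_train samples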
make_model = lambda scope_name: Model(policy=policy, ob_shape=ob_shape, ac_space=ac_space, nbatch_act=nenvs,
nbatch_train=nbatch_train,
nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm, scope_name=scope_name)
if save_interval and logger.get_dir():
import cloudpickle
with open(osp.join(logger.get_dir(), 'make_model.pkl'), 'wb') as fh:
fh.write(cloudpickle.dumps(make_model))
model = make_model(Config.PRIMARY_MODEL_SCOPE)
opponent_model1 = None
opponent_model2 = None
baseline_file = None
# baseline_file = 'dual_snake_3.pkl'
if Config.NUM_SNAKES > 1:
opponent_model1 = make_model(Config.OPPONENT_MODEL_SCOPE)
if Config.NUM_SNAKES > 2:
opponent_model2 = make_model(Config.OPPONENT_MODEL2_SCOPE)
if baseline_file is not None:
model.load(baseline_file)
if opponent_model1 is not None:
opponent_model1.load(baseline_file)
if opponent_model2 is not None:
opponent_model2.load(baseline_file)
runner = Runner(env=env, model=model, opponent_model1=opponent_model1,
opponent_model2=opponent_model2, nsteps=nsteps, gamma=gamma, lam=lam)
maxlen = 100
epinfobuf = deque(maxlen=maxlen)
tfirststart = time.time()
next_highscore = 5
highscore_interval = 1
opponent_save_interval = Config.OPPONENT_SAVE_INTERVAL
max_saved_opponents = Config.MAX_SAVED_OPPONENTS
opponent1_idx = 0
num_opponents1 = 0
opponent2_idx = 0
num_opponents2 = 0
model_idx = 0
model.save(utils.get_opponent1_file(opponent1_idx))
opponent1_idx += 1
num_opponents1 += 1
model.save(utils.get_opponent2_file(opponent2_idx))
opponent2_idx += 1
num_opponents2 += 1
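# self-play bookkeeping: the primary model is periodically snapshotted into a rotating pool of opponent checkpoints,
# and each update loads a uniformly sampled checkpoint into the opponent models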
nupdates = total_timesteps // nbatch
for update in range(1, nupdates + 1):
if opponent_model1 is not None:
selected_opponent1_idx = random.randint(0, max(num_opponents1 - 1, 0))
print('Loading checkpoint ' + str(selected_opponent1_idx) + '...')
opponent_model1.load(utils.get_opponent1_file(selected_opponent1_idx))
if opponent_model2 is not None:
selected_opponent2_idx = random.randint(0, max(num_opponents2 - 1, 0))
print('Loading checkpoint ' + str(selected_opponent2_idx) + '...')
opponent_model2.load(utils.get_opponent2_file(selected_opponent2_idx))
assert nbatch % nminibatches == 0
nbatch_train = nbatch // nminibatches
tstart = time.time()
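# frac decays linearly from 1 to 0 over training; lr(frac) and cliprange(frac) let callable schedules anneal both hyperparameters (constants are wrapped by constfn)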
frac = 1.0 - (update - 1.0) / nupdates
lrnow = lr(frac)
cliprangenow = cliprange(frac)
obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run() # pylint: disable=E0632
epinfobuf.extend(epinfos)
mblossvals = []
inds = np.arange(nbatch)
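# run noptepochs passes over the collected batch, reshuffling indices each epoch and training on contiguous nbatch_train-sized minibatches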
for _ in range(noptepochs):
np.random.shuffle(inds)
for start in range(0, nbatch, nbatch_train):
end = start + nbatch_train
mbinds = inds[start:end]
slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mblossvals.append(model.train(lrnow, cliprangenow, *slices))
lossvals = np.mean(mblossvals, axis=0)
tnow = time.time()
fps = int(nbatch / (tnow - tstart))
ep_rew_mean = safemean([epinfo['r'] for epinfo in epinfobuf])
if update % opponent_save_interval == 0 and opponent_model1 is not None:
print('Saving opponent model1 ' + str(opponent1_idx) + '...')
model.save(utils.get_opponent1_file(opponent1_idx))
opponent1_idx += 1
num_opponents1 = max(opponent1_idx, num_opponents1)
opponent1_idx = opponent1_idx % max_saved_opponents
if update % opponent_save_interval == 0 and opponent_model2 is not None:
print('Saving opponent model2 ' + str(opponent2_idx) + '...')
model.save(utils.get_opponent2_file(opponent2_idx))
opponent2_idx += 1
num_opponents2 = max(opponent2_idx, num_opponents2)
opponent2_idx = opponent2_idx % max_saved_opponents
if update % log_interval == 0 or update == 1:
if (Config.NUM_SNAKES == 1):
pass
# logger.logkv('next_highscore', next_highscore) # TODO: Change it to 100 ep mean score
else:
logger.logkv('num_opponents', num_opponents1)
ev = explained_variance(values, returns)
logger.logkv("serial_timesteps", update * nsteps)
logger.logkv("nupdates", update)
logger.logkv("total_timesteps", update * nbatch)
# logger.logkv("fps", fps)
logger.logkv("explained_variance", float(ev))
logger.logkv('eprewmean ' + str(maxlen), ep_rew_mean)
logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf]))
logger.logkv('time_elapsed', tnow - tfirststart)
# logger.logkv('nenvs nsteps nmb nopte', [nenvs, nsteps, nminibatches, noptepochs])
logger.logkv('ep_rew_mean', ep_rew_mean)
for (lossval, lossname) in zip(lossvals, model.loss_names):
logger.logkv(lossname, lossval)
kvs = logger.getkvs()
csv_writer.writekvs(kvs)
tensorboard_writer.writekvs(kvs)
logger.dumpkvs()
if save_interval and (update % save_interval == 0 or update == 1) and logger.get_dir():
model.save('snake_model_num{0}_{1}.pkl'.format(Config.NUM_SNAKES, model_idx))
model_idx += 1
# Highscores are only a meaningful measure of improvement in the single-agent setting, where performance does not depend on opponent agents
if (ep_rew_mean > next_highscore) and Config.NUM_SNAKES == 1:
print('saving agent with new highscore ', next_highscore, '...')
next_highscore += highscore_interval
model.save('highscore_model.pkl')
model.save('snake_model_num{0}.pkl'.format(Config.NUM_SNAKES))
env.close()
def safemean(xs):
return np.nan if len(xs) == 0 else np.mean(xs)
```
#### File: src/test/keyboard_test.py
```python
import gym
from time import sleep
import numpy as np
import gym_snake
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='snake-single-v0',
help="""\
Select environment ID.
""")
FLAGS, unparsed = parser.parse_known_args()
env = gym.make(FLAGS.env)
def keyboard_input():
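# read one line from stdin and map h -> 3, j -> 2, k -> 0, anything else -> 1 (the action indices are interpreted by the snake environment)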
inpt = input()
if inpt == "h" or inpt == "H":
return 3
elif inpt == "j" or inpt == "J":
return 2
elif inpt == "k" or inpt == "K":
return 0
else:
return 1
env.reset()
for _ in range(10):
obs = env.reset()
done = False
r = 0
print('example action: {}'.format(env.action_space.sample()))
while not done:
env.render(mode='human')
action = keyboard_input()
if action != 0:
print("good")
obs, reward, done, info = env.step(action)
print('reward: {} done: {}'.format(reward, done))
sleep(0.01)
```
#### File: src/test/single_test.py
```python
import gym
import itertools
# import os
# os.environ['CUDA_VISIBLE_DEVICES'] = "-1"
import argparse
import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as layers
import sys
sys.path.append('../')
import baselines.common.tf_util as U
from baselines import logger
from baselines import deepq
from baselines.deepq.replay_buffer import ReplayBuffer
from baselines.deepq.utils import ObservationInput
from baselines.common.schedules import LinearSchedule
import gym_snake
BATCH_SIZE = 32
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--env', help='environment ID', default='snake-single-v0')
parser.add_argument('--seed', help='Random seed', type=int, default=0)
parser.add_argument('--prioritized', type=int, default=1)
parser.add_argument('--prioritized-replay-alpha', type=float, default=0.6)
parser.add_argument('--dueling', type=int, default=1)
parser.add_argument('--num-timesteps', type=int, default=int(10e6))
parser.add_argument('--checkpoint-freq', type=int, default=10000)
parser.add_argument('--checkpoint-path', type=str, default='./single-dqn/')
args = parser.parse_args()
# the first argument to make_session is the number of CPUs
with U.make_session(8):
env = gym.make(args.env)
print("observation space is ", env.observation_space)
print("action space is ", env.action_space)
model = deepq.models.cnn_to_mlp(
convs=[(32, 5, 1), (64, 5, 1)],
hiddens=[256],
dueling=bool(args.dueling)
)
act = deepq.learn(env,
q_func=model,
lr=1e-4,
max_timesteps=2000000,
buffer_size=50000,
train_freq=10,
exploration_fraction=0.1,
exploration_final_eps=0.02,
gamma=0.99,
print_freq=10,
checkpoint_freq=args.checkpoint_freq,
checkpoint_path=args.checkpoint_path,
param_noise=True)
act.save("../models/single-dqn/single_dqn_model_final.pkl")
env.close()
if __name__ == '__main__':
main()
# import gym
# import sys
# import gym_snake
# sys.path.append('../model/')
# import dqn2015
# from time import sleep
# import argparse
#
# parser = argparse.ArgumentParser()
# parser.add_argument('--env', type=str, default='snake-single-v0',
# help="""\
# Select environment ID.
# """)
# FLAGS, unparsed = parser.parse_known_args()
#
# env = gym.make(FLAGS.env)
# env.reset()
# qNet = dqn2015.DQN2015(env)
# qNet.run()
# #env = gym.wrappers.Monitor(env, 'tmp_video')
#
# # for e in range(500):
# # obs = env.reset()
# # done = False
# # r = 0
# # while not done:
# # action = env.action_space.sample()
# # obs, reward, done, info = env.step(action)
# # r += reward
# # env.render(mode='human')
# # sleep(0.01)
#
# env.close()
```
|
{
"source": "jducastel/sardines",
"score": 3
}
|
#### File: sardines/tests/python.py
```python
from django.test import TestCase
# NOTE: requires_authorization is assumed to be defined elsewhere in the project; it is not provided in this file.
@requires_authorization
def somefunc(param1='', param2=0):
r'''A docstring'''
if param1 > param2: # interesting
print('Gre\'ater')
return (param2 - param1 + 1) or None
class SomeClass:
def cats(arg):
abc = 1
deg = [1, 2, '3']
def another(arg):
9.0 / 12.0
pass
def method(self, arg3, arg2='default'):
self.arg2 = arg3
if __name__ == '__main__':
SomeClass.another('hello')
# doctest-style interpreter sample, kept as a comment so the module parses:
# >>> message = '''interpreter
# ... prompt'''
```
|