#!/usr/bin/env python
# Source: scionrep/scioncc, src/ion/service/test/test_governance.py
__author__ = 'Stephen P. Henrie'
import unittest, os, gevent, platform, simplejson
from mock import Mock, patch
from pyon.util.int_test import IonIntegrationTestCase
from pyon.util.containers import get_ion_ts
from nose.plugins.attrib import attr
from pyon.util.context import LocalContextMixin
from pyon.datastore.datastore import DatastoreManager
from pyon.ion.event import EventRepository
from pyon.core.exception import BadRequest, Conflict, Inconsistent, NotFound, Unauthorized
from pyon.public import PRED, RT, IonObject, CFG, log, OT, LCS, LCE, AS
from pyon.ion.resregistry import ResourceRegistryServiceWrapper
from pyon.core.governance.negotiation import Negotiation
from ion.process.bootstrap.load_system_policy import LoadSystemPolicy
from pyon.core.governance import MODERATOR_ROLE, MEMBER_ROLE, SUPERUSER_ROLE, OPERATOR_ROLE, get_system_actor, get_system_actor_header
from pyon.core.governance import get_actor_header
from pyon.net.endpoint import RPCClient, BidirClientChannel
from interface.services.core.iresource_registry_service import ResourceRegistryServiceProcessClient
from interface.services.core.iorg_management_service import OrgManagementServiceProcessClient
from interface.services.core.iidentity_management_service import IdentityManagementServiceProcessClient
from interface.services.core.iexchange_management_service import ExchangeManagementServiceProcessClient
from interface.services.core.ipolicy_management_service import PolicyManagementServiceProcessClient
from interface.services.core.isystem_management_service import SystemManagementServiceProcessClient
from interface.objects import AgentCommand, ProposalOriginatorEnum, ProposalStatusEnum, NegotiationStatusEnum, ComputedValueAvailability
ORG2 = 'Org 2'
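# XACML rule templates used by the tests below. Each template is filled in with a rule id
# and description when the corresponding policy is created.

# Denies all calls to the exchange_management service.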
DENY_EXCHANGE_TEXT = '''
<Rule RuleId="%s" Effect="Deny">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">exchange_management</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
</Target>
</Rule>
'''
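# Permits the anonymous actor to invoke the create_exchange_space operation.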
TEST_POLICY_TEXT = '''
<Rule RuleId="%s" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Subjects>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">anonymous</AttributeValue>
<SubjectAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
</Subjects>
<Actions>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">create_exchange_space</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
</Actions>
</Target>
</Rule>
'''
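# Denies all requests from the anonymous actor; used below as an Org boundary policy.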
TEST_BOUNDARY_POLICY_TEXT = '''
<Rule RuleId="%s" Effect="Deny">
<Description>
%s
</Description>
<Target>
<Subjects>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">anonymous</AttributeValue>
<SubjectAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
</Subjects>
</Target>
</Rule>
'''
###########
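# Agent precondition rules: each permits set_resource on any agent resource only while the
# embedded policy_func finds params['INTERVAL'] at or below the stated limit (50, 30 or 10).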
DENY_PARAM_50_RULE = '''
<Rule RuleId="%s:" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-regexp-match">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">.*$</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">agent</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:receiver-type" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
<Actions>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">set_resource</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
</Actions>
</Target>
<Condition>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:evaluate-code">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string"><![CDATA[def policy_func(process, message, headers):
params = message['params']
if params['INTERVAL'] <= 50:
return True, ''
return False, 'The value for SBE37Parameter.INTERVAL cannot be greater than 50'
]]>
</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:param-dict" DataType="http://www.w3.org/2001/XMLSchema#dict"/>
</Apply>
</Condition>
</Rule>
'''
DENY_PARAM_30_RULE = '''
<Rule RuleId="%s:" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-regexp-match">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">.*$</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">agent</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:receiver-type" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
<Actions>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">set_resource</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
</Actions>
</Target>
<Condition>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:evaluate-code">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string"><![CDATA[def policy_func(process, message, headers):
params = message['params']
if params['INTERVAL'] <= 30:
return True, ''
return False, 'The value for SBE37Parameter.INTERVAL cannot be greater than 30'
]]>
</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:param-dict" DataType="http://www.w3.org/2001/XMLSchema#dict"/>
</Apply>
</Condition>
</Rule>
'''
DENY_PARAM_10_RULE = '''
<Rule RuleId="%s:" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-regexp-match">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">.*$</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">agent</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:receiver-type" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
<Actions>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">set_resource</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
</Actions>
</Target>
<Condition>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:evaluate-code">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string"><![CDATA[def policy_func(process, message, headers):
params = message['params']
if params['INTERVAL'] <= 10:
return True, ''
return False, 'The value for SBE37Parameter.INTERVAL cannot be greater than 10'
]]>
</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:param-dict" DataType="http://www.w3.org/2001/XMLSchema#dict"/>
</Apply>
</Condition>
</Rule>
'''
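# Verifies that the ION endpoint layer sets the 'resource-id' governance header on resource
# registry calls, by patching BidirClientChannel._send and capturing the outgoing headers.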
@attr('INT', group='coi')
class TestGovernanceHeaders(IonIntegrationTestCase):
def setUp(self):
# Start container and services
self._start_container()
self.container.start_rel_from_url('res/deploy/basic.yml')
#Instantiate a process to represent the test
process = GovernanceTestProcess()
self.rr_client = ResourceRegistryServiceProcessClient(process=process)
#Get info on the ION System Actor
self.system_actor = get_system_actor()
log.info('system actor:' + self.system_actor._id)
self.system_actor_header = get_system_actor_header()
self.resource_id_header_value = ''
def test_governance_message_headers(self):
'''
This test is used to make sure the ION endpoint code is properly setting the
resource-id governance header on outgoing service messages.
'''
#Get function pointer to send function
old_send = BidirClientChannel._send
# Define a replacement send that wraps the original and records the resource-id header
def patched_send(*args, **kwargs):
#Only duplicate the message send from the initial client call
msg_headers = kwargs['headers']
if (self.resource_id_header_value == '') and 'resource-id' in msg_headers:
self.resource_id_header_value = msg_headers['resource-id']
return old_send(*args, **kwargs)
# Patch it into place with auto-cleanup so the message headers can be interrogated
patcher = patch('pyon.net.endpoint.BidirClientChannel._send', patched_send)
patcher.start()
self.addCleanup(patcher.stop)
# Instantiate an object
obj = IonObject("ActorIdentity", name="name")
# Can't call update with object that hasn't been persisted
with self.assertRaises(BadRequest) as cm:
self.rr_client.update(obj)
# self.assertTrue(cm.exception.message.startswith("Object does not have required '_id' or '_rev' attribute"))
self.resource_id_header_value = ''
# Persist object and read it back
obj_id, obj_rev = self.rr_client.create(obj)
log.debug('The id of the created object is %s', obj_id)
self.assertEqual(self.resource_id_header_value, '' )
self.resource_id_header_value = ''
read_obj = self.rr_client.read(obj_id)
self.assertEqual(self.resource_id_header_value, obj_id )
# Cannot create object with _id and _rev fields pre-set
self.resource_id_header_value = ''
with self.assertRaises(BadRequest) as cm:
self.rr_client.create(read_obj)
#self.assertTrue(cm.exception.message.startswith("Doc must not have '_id'"))
self.assertEqual(self.resource_id_header_value, '' )
# Update object
read_obj.name = "John Doe"
self.resource_id_header_value = ''
self.rr_client.update(read_obj)
self.assertEqual(self.resource_id_header_value, obj_id )
# Update should fail with revision mismatch
self.resource_id_header_value = ''
with self.assertRaises(Conflict) as cm:
self.rr_client.update(read_obj)
#self.assertTrue(cm.exception.message.startswith("Object not based on most current version"))
self.assertEqual(self.resource_id_header_value, obj_id )
# Re-read and update object
self.resource_id_header_value = ''
read_obj = self.rr_client.read(obj_id)
self.assertEqual(self.resource_id_header_value, obj_id )
self.resource_id_header_value = ''
self.rr_client.update(read_obj)
self.assertEqual(self.resource_id_header_value, obj_id )
#Create second object
obj = IonObject("ActorIdentity", name="Babs Smith")
self.resource_id_header_value = ''
# Persist object and read it back
obj2_id, obj2_rev = self.rr_client.create(obj)
log.debug('The id of the created object is %s', obj2_id)
self.assertEqual(self.resource_id_header_value, '' )
#Test for multi-read
self.resource_id_header_value = ''
objs = self.rr_client.read_mult([obj_id, obj2_id])
self.assertAlmostEquals(self.resource_id_header_value, [obj_id, obj2_id])
self.assertEqual(len(objs),2)
# Delete object
self.resource_id_header_value = ''
self.rr_client.delete(obj_id)
self.assertEqual(self.resource_id_header_value, obj_id )
# Delete object
self.resource_id_header_value = ''
self.rr_client.delete(obj2_id)
self.assertEqual(self.resource_id_header_value, obj2_id )
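# Minimal stand-in process used as the 'process' argument for the service process clients.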
class GovernanceTestProcess(LocalContextMixin):
name = 'gov_test'
id='gov_client'
process_type = 'simple'
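# Integration tests for governance enforcement: service access and operation precondition
# policies, policy cache resets, Org boundaries, and Org negotiations (enrollment, role
# requests, resource acquisition).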
@attr('INT', group='coi')
class TestGovernanceInt(IonIntegrationTestCase):
def setUp(self):
from unittest import SkipTest
raise SkipTest("Need to rework governance tests")
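# NOTE: the tests below still reference clients and constants that are never defined in
# this module (e.g. self.ssclient, self.ims_client, self.obs_client,
# self.apache_actor_header, USER1_CERTIFICATE, self.SLEEP_TIME), which is why the class
# is skipped above.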
# Start container
self._start_container()
#Load a deploy file
self.container.start_rel_from_url('res/deploy/basic.yml')
#Instantiate a process to represent the test
process=GovernanceTestProcess()
#Load system policies after container has started all of the services
policy_loaded = CFG.get_safe('system.load_policy', False)
if not policy_loaded:
log.debug('Loading policy')
LoadSystemPolicy.op_load_system_policies(process)
gevent.sleep(self.SLEEP_TIME*2) # Wait for events to be fired and policy updated
self.rr_msg_client = ResourceRegistryServiceProcessClient(process=process)
self.rr_client = ResourceRegistryServiceWrapper(self.container.resource_registry, process)
self.id_client = IdentityManagementServiceProcessClient(process=process)
self.pol_client = PolicyManagementServiceProcessClient(process=process)
self.org_client = OrgManagementServiceProcessClient(process=process)
self.ems_client = ExchangeManagementServiceProcessClient(process=process)
self.sys_management = SystemManagementServiceProcessClient(process=process)
#Get info on the ION System Actor
self.system_actor = get_system_actor()
log.info('system actor:' + self.system_actor._id)
self.system_actor_header = get_system_actor_header()
self.anonymous_actor_headers = {'ion-actor-id':'anonymous'}
self.ion_org = self.org_client.find_org()
# Setup access to event repository
dsm = DatastoreManager()
ds = dsm.get_datastore("events")
self.event_repo = EventRepository(dsm)
def tearDown(self):
policy_list, _ = self.rr_client.find_resources(restype=RT.Policy)
# Must remove the policies in the reverse order they were added
for policy in sorted(policy_list, key=lambda p: p.ts_created, reverse=True):
self.pol_client.delete_policy(policy._id, headers=self.system_actor_header)
gevent.sleep(self.SLEEP_TIME) # Wait for events to be fired and policy updated
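# Exercises create/enable/disable/delete of service access and operation precondition
# policies, checking that anonymous requests are permitted or denied accordingly.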
def test_basic_policy_operations(self):
#Make sure that the system policies have been loaded
policy_list,_ = self.rr_client.find_resources(restype=RT.Policy, id_only=True)
self.assertNotEqual(len(policy_list),0,"The system policies have not been loaded into the Resource Registry")
log.debug('Begin testing with policies')
#First check existing policies to see if they are in place to keep an anonymous user from creating things
with self.assertRaises(Unauthorized) as cm:
test_org_id = self.org_client.create_org(org=IonObject(RT.Org, name='test_org', description='A test Org'))
self.assertIn( 'org_management(create_org) has been denied',cm.exception.message)
with self.assertRaises(NotFound) as cm:
test_org = self.org_client.find_org(name='test_org')
#Add a new policy to deny all operations to the exchange_management by default .
test_policy_id = self.pol_client.create_service_access_policy('exchange_management', 'Exchange_Management_Deny_Policy',
'Deny all operations in Exchange Management Service by default',
DENY_EXCHANGE_TEXT, headers=self.system_actor_header)
gevent.sleep(self.SLEEP_TIME) # Wait for events to be fired and policy updated
#Attempt to access an operation in service which does not have specific policies set
es_obj = IonObject(RT.ExchangeSpace, description= 'ION test XS', name='ioncore2' )
with self.assertRaises(Unauthorized) as cm:
self.ems_client.create_exchange_space(es_obj, headers=self.anonymous_actor_headers)
self.assertIn( 'exchange_management(create_exchange_space) has been denied',cm.exception.message)
#Add a new policy to allow the above service call.
test_policy_id = self.pol_client.create_service_access_policy('exchange_management', 'Exchange_Management_Test_Policy',
'Allow specific operations in the Exchange Management Service for anonymous user',
TEST_POLICY_TEXT, headers=self.system_actor_header)
gevent.sleep(self.SLEEP_TIME) # Wait for events to be fired and policy updated
#The previous attempt at this operation should now be allowed.
es_obj = IonObject(RT.ExchangeSpace, description= 'ION test XS', name='ioncore2' )
with self.assertRaises(BadRequest) as cm:
self.ems_client.create_exchange_space(es_obj, headers=self.anonymous_actor_headers)
self.assertIn( 'Arguments not set',cm.exception.message)
#disable the test policy to try again
self.pol_client.disable_policy(test_policy_id, headers=self.system_actor_header)
gevent.sleep(self.SLEEP_TIME) # Wait for events to be published and policy updated
#The same request that previously was allowed should now be denied
es_obj = IonObject(RT.ExchangeSpace, description= 'ION test XS', name='ioncore2' )
with self.assertRaises(Unauthorized) as cm:
self.ems_client.create_exchange_space(es_obj, headers=self.anonymous_actor_headers)
self.assertIn( 'exchange_management(create_exchange_space) has been denied',cm.exception.message)
#now enable the test policy to try again
self.pol_client.enable_policy(test_policy_id, headers=self.system_actor_header)
gevent.sleep(self.SLEEP_TIME) # Wait for events to be published and policy updated
#The previous attempt at this operation should now be allowed.
es_obj = IonObject(RT.ExchangeSpace, description= 'ION test XS', name='ioncore2' )
with self.assertRaises(BadRequest) as cm:
self.ems_client.create_exchange_space(es_obj, headers=self.anonymous_actor_headers)
self.assertIn( 'Arguments not set',cm.exception.message)
#Now test service operation specific policies - specifically that there can be more than one on the same operation.
pol1_id = self.pol_client.add_process_operation_precondition_policy(process_name='policy_management', op='disable_policy', policy_content='func1_pass', headers=self.system_actor_header )
gevent.sleep(self.SLEEP_TIME) # Wait for events to be published and policy updated
#try to disable the test policy again
self.pol_client.disable_policy(test_policy_id, headers=self.system_actor_header)
gevent.sleep(self.SLEEP_TIME) # Wait for events to be published and policy updated
#The same request that previously was allowed should now be denied
es_obj = IonObject(RT.ExchangeSpace, description= 'ION test XS', name='ioncore2' )
with self.assertRaises(Unauthorized) as cm:
self.ems_client.create_exchange_space(es_obj, headers=self.anonymous_actor_headers)
self.assertIn( 'exchange_management(create_exchange_space) has been denied',cm.exception.message)
#now enable the test policy to try again
self.pol_client.enable_policy(test_policy_id, headers=self.system_actor_header)
gevent.sleep(self.SLEEP_TIME) # Wait for events to be published and policy updated
#The previous attempt at this operation should now be allowed.
es_obj = IonObject(RT.ExchangeSpace, description= 'ION test XS', name='ioncore2' )
with self.assertRaises(BadRequest) as cm:
self.ems_client.create_exchange_space(es_obj, headers=self.anonymous_actor_headers)
self.assertIn( 'Arguments not set',cm.exception.message)
pol2_id = self.pol_client.add_process_operation_precondition_policy(process_name='policy_management', op='disable_policy', policy_content='func2_deny', headers=self.system_actor_header )
gevent.sleep(self.SLEEP_TIME) # Wait for events to be published and policy updated
#try to disable the test policy again
with self.assertRaises(Unauthorized) as cm:
self.pol_client.disable_policy(test_policy_id, headers=self.system_actor_header)
self.assertIn( 'Denied for no reason',cm.exception.message)
self.pol_client.delete_policy(pol2_id, headers=self.system_actor_header)
gevent.sleep(self.SLEEP_TIME) # Wait for events to be published and policy updated
#try to disable the test policy again
self.pol_client.disable_policy(test_policy_id, headers=self.system_actor_header)
gevent.sleep(self.SLEEP_TIME) # Wait for events to be published and policy updated
#The same request that previously was allowed should now be denied
es_obj = IonObject(RT.ExchangeSpace, description= 'ION test XS', name='ioncore2' )
with self.assertRaises(Unauthorized) as cm:
self.ems_client.create_exchange_space(es_obj, headers=self.anonymous_actor_headers)
self.assertIn( 'exchange_management(create_exchange_space) has been denied',cm.exception.message)
#try to enable the test policy again
self.pol_client.enable_policy(test_policy_id, headers=self.system_actor_header)
gevent.sleep(self.SLEEP_TIME) # Wait for events to be published and policy updated
#The previous attempt at this operation should now be allowed.
es_obj = IonObject(RT.ExchangeSpace, description= 'ION test XS', name='ioncore2' )
with self.assertRaises(BadRequest) as cm:
self.ems_client.create_exchange_space(es_obj, headers=self.anonymous_actor_headers)
self.assertIn( 'Arguments not set',cm.exception.message)
pre_func1 =\
"""def precondition_func(process, msg, headers):
if headers['op'] == 'disable_policy':
return False, 'Denied for no reason again'
else:
return True, ''
"""
#Create a dynamic precondition function to deny calls to disable policy
pre_func1_id = self.pol_client.add_process_operation_precondition_policy(process_name='policy_management', op='disable_policy', policy_content=pre_func1, headers=self.system_actor_header )
gevent.sleep(self.SLEEP_TIME) # Wait for events to be published and policy updated
#try to disable the test policy again
with self.assertRaises(Unauthorized) as cm:
self.pol_client.disable_policy(test_policy_id, headers=self.system_actor_header)
self.assertIn( 'Denied for no reason again',cm.exception.message)
#Now delete the most recent precondition policy
self.pol_client.delete_policy(pre_func1_id, headers=self.system_actor_header)
gevent.sleep(self.SLEEP_TIME) # Wait for events to be published and policy updated
#The previous attempt at this operation should now be allowed.
es_obj = IonObject(RT.ExchangeSpace, description= 'ION test XS', name='ioncore2' )
with self.assertRaises(BadRequest) as cm:
self.ems_client.create_exchange_space(es_obj, headers=self.anonymous_actor_headers)
self.assertIn( 'Arguments not set',cm.exception.message)
#Now test that a precondition function can be enabled and disabled
pre_func2 =\
"""def precondition_func(process, msg, headers):
if headers['op'] == 'create_exchange_space':
return False, 'Denied for from a operation precondition function'
else:
return True, ''
"""
#Create a dynamic precondition function to deny calls to create_exchange_space
pre_func2_id = self.pol_client.add_process_operation_precondition_policy(process_name='exchange_management', op='create_exchange_space', policy_content=pre_func2, headers=self.system_actor_header )
gevent.sleep(self.SLEEP_TIME) # Wait for events to be published and policy updated
#The same request that previously was allowed should now be denied
es_obj = IonObject(RT.ExchangeSpace, description= 'ION test XS', name='ioncore2' )
with self.assertRaises(Unauthorized) as cm:
self.ems_client.create_exchange_space(es_obj, headers=self.anonymous_actor_headers)
self.assertIn( 'Denied for from a operation precondition function',cm.exception.message)
#Disable the precondition policy
self.pol_client.disable_policy(pre_func2_id, headers=self.system_actor_header)
gevent.sleep(self.SLEEP_TIME) # Wait for events to be published and policy updated
#The previous attempt at this operation should now be allowed.
es_obj = IonObject(RT.ExchangeSpace, description= 'ION test XS', name='ioncore2' )
with self.assertRaises(BadRequest) as cm:
self.ems_client.create_exchange_space(es_obj, headers=self.anonymous_actor_headers)
self.assertIn( 'Arguments not set',cm.exception.message)
#try to enable the precondition policy
self.pol_client.enable_policy(pre_func2_id, headers=self.system_actor_header)
gevent.sleep(self.SLEEP_TIME) # Wait for events to be published and policy updated
#The same request that previously was allowed should now be denied
es_obj = IonObject(RT.ExchangeSpace, description= 'ION test XS', name='ioncore2' )
with self.assertRaises(Unauthorized) as cm:
self.ems_client.create_exchange_space(es_obj, headers=self.anonymous_actor_headers)
self.assertIn( 'Denied for from a operation precondition function',cm.exception.message)
#Delete the precondition policy
self.pol_client.delete_policy(pre_func2_id, headers=self.system_actor_header)
gevent.sleep(self.SLEEP_TIME) # Wait for events to be published and policy updated
#The previous attempt at this operation should now be allowed.
es_obj = IonObject(RT.ExchangeSpace, description= 'ION test XS', name='ioncore2' )
with self.assertRaises(BadRequest) as cm:
self.ems_client.create_exchange_space(es_obj, headers=self.anonymous_actor_headers)
self.assertIn( 'Arguments not set',cm.exception.message)
self.pol_client.delete_policy(test_policy_id, headers=self.system_actor_header)
gevent.sleep(self.SLEEP_TIME) # Wait for events to be published and policy updated
#The same request that previously was allowed should now be denied
es_obj = IonObject(RT.ExchangeSpace, description= 'ION test XS', name='ioncore2' )
with self.assertRaises(Unauthorized) as cm:
self.ems_client.create_exchange_space(es_obj, headers=self.anonymous_actor_headers)
self.assertIn( 'exchange_management(create_exchange_space) has been denied',cm.exception.message)
###########
### Now test access to service create* operations based on roles...
#Anonymous users should not be allowed
with self.assertRaises(Unauthorized) as cm:
id = self.ssclient.create_interval_timer(start_time="now", event_origin="Interval_Timer_233", headers=self.anonymous_actor_headers)
self.assertIn( 'scheduler(create_interval_timer) has been denied',cm.exception.message)
#now try creating a new user with a valid actor
actor_id, valid_until, registered = self.id_client.signon(USER1_CERTIFICATE, True, headers=self.apache_actor_header)
log.info( "actor id=" + actor_id)
actor_header = get_actor_header(actor_id)
#User without OPERATOR or MANAGER role should not be allowed
with self.assertRaises(Unauthorized) as cm:
id = self.ssclient.create_interval_timer(start_time="now", event_origin="Interval_Timer_233", headers=actor_header)
self.assertIn( 'scheduler(create_interval_timer) has been denied',cm.exception.message)
#Grant the MODERATOR_ROLE to the user.
self.org_client.grant_role(self.ion_org._id, actor_id, MODERATOR_ROLE, headers=self.system_actor_header)
#Refresh headers with new role
actor_header = get_actor_header(actor_id)
#User with proper role should now be allowed to access this service operation.
id = self.ssclient.create_interval_timer(start_time="now", end_time="-1", event_origin="Interval_Timer_233", headers=actor_header)
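# Clears the container policy caches, verifies enforcement is gone, then triggers a policy
# cache reset through the system management service and verifies enforcement returns.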
@patch.dict(CFG, {'container':{'org_boundary':True}})
def test_policy_cache_reset(self):
before_policy_set = self.container.governance_controller.get_active_policies()
#First clear all of the policies to test that failures will be caught due to missing policies
self.container.governance_controller._clear_container_policy_caches()
empty_policy_set = self.container.governance_controller.get_active_policies()
self.assertEqual(len(empty_policy_set['service_access'].keys()), 0)
self.assertEqual(len(empty_policy_set['resource_access'].keys()), 0)
#With policies gone, an anonymous user should be able to create an object
test_org_id = self.org_client.create_org(org=IonObject(RT.Org, name='test_org1', description='A test Org'))
test_org = self.org_client.find_org(name='test_org1')
self.assertEqual(test_org._id, test_org_id)
#Trigger the event to reset the policy caches
self.sys_management.reset_policy_cache()
gevent.sleep(20) # Wait for events to be published and policy reloaded for all running processes
after_policy_set = self.container.governance_controller.get_active_policies()
#With policies refreshed, an anonymous user should NOT be able to create an object
with self.assertRaises(Unauthorized) as cm:
test_org_id = self.org_client.create_org(org=IonObject(RT.Org, name='test_org2', description='A test Org'))
self.assertIn( 'org_management(create_org) has been denied',cm.exception.message)
with self.assertRaises(NotFound) as cm:
test_org = self.org_client.find_org(name='test_org2')
self.assertEqual(len(before_policy_set.keys()), len(after_policy_set.keys()))
self.assertEqual(len(before_policy_set['service_access'].keys()), len(after_policy_set['service_access'].keys()))
self.assertEqual(len(before_policy_set['resource_access'].keys()), len(after_policy_set['resource_access'].keys()))
self.assertEqual(len(before_policy_set['service_operation'].keys()), len(after_policy_set['service_operation'].keys()))
#If the number of keys for service operations were equal, then check each set of operation precondition functions
for key in before_policy_set['service_operation']:
self.assertEqual(len(before_policy_set['service_operation'][key]), len(after_policy_set['service_operation'][key]))
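# Forces the container into the boundary of a second Org and verifies that a deny-anonymous
# policy on that Org blocks anonymous resource registry access while an enrolled actor
# still gets through.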
@patch.dict(CFG, {'container':{'org_boundary':True}})
def test_org_boundary(self):
with self.assertRaises(NotFound) as nf:
org2 = self.org_client.find_org(ORG2)
self.assertIn('The Org with name Org 2 does not exist',nf.exception.message)
#Create a second Org
org2 = IonObject(RT.Org, name=ORG2, description='A second Org')
org2_id = self.org_client.create_org(org2, headers=self.system_actor_header)
org2 = self.org_client.find_org(ORG2)
self.assertEqual(org2_id, org2._id)
#First try to get a list of Users by hitting the RR anonymously - should be allowed.
actors,_ = self.rr_msg_client.find_resources(restype=RT.ActorIdentity)
self.assertEqual(len(actors),2) #Should include the ION System Actor, Web auth actor.
log.debug('Begin testing with policies')
#Create a new actor - should be denied for anonymous access
with self.assertRaises(Unauthorized) as cm:
actor_id, valid_until, registered = self.id_client.signon(USER1_CERTIFICATE, True, headers=self.anonymous_actor_headers)
self.assertIn( 'identity_management(signon) has been denied',cm.exception.message)
#Now try creating a new actor with a valid actor header
actor_id, valid_until, registered = self.id_client.signon(USER1_CERTIFICATE, True, headers=self.apache_actor_header)
log.info( "actor id=" + actor_id)
actor_header = get_actor_header(actor_id)
#First try to get a list of Users by hitting the RR anonymously - should be allowed.
actors,_ = self.rr_msg_client.find_resources(restype=RT.ActorIdentity)
self.assertEqual(len(actors),3) #Should include the ION System Actor and web auth actor as well.
#Now enroll the actor as a member of the Second Org
self.org_client.enroll_member(org2_id,actor_id, headers=self.system_actor_header)
actor_header = get_actor_header(actor_id)
#Add a new Org boundary policy which denies all anonymous access
test_policy_id = self.pol_client.create_resource_access_policy( org2_id, 'Org_Test_Policy',
'Deny all access for anonymous actor',
TEST_BOUNDARY_POLICY_TEXT, headers=self.system_actor_header)
gevent.sleep(self.SLEEP_TIME) # Wait for events to be fired and policy updated
#Hack to force container into an Org Boundary for second Org
self.container.governance_controller._container_org_name = org2.org_governance_name
self.container.governance_controller._is_container_org_boundary = True
#First try to get a list of Users by hitting the RR anonymously - should be denied.
with self.assertRaises(Unauthorized) as cm:
actors,_ = self.rr_msg_client.find_resources(restype=RT.ActorIdentity, headers=self.anonymous_actor_headers)
self.assertIn( 'resource_registry(find_resources) has been denied',cm.exception.message)
#Now try to hit the RR with a real user and should now be allowed
actors,_ = self.rr_msg_client.find_resources(restype=RT.ActorIdentity, headers=actor_header)
self.assertEqual(len(actors),3) #Should include the ION System Actor and web auth actor as well.
#TODO - figure out how to write an XACML rule that also requires membership in the specific Org
#Hack to force container back to default values
self.container.governance_controller._container_org_name = 'ION'
self.container.governance_controller._is_container_org_boundary = False
self.container.governance_controller._container_org_id = None
self.pol_client.delete_policy(test_policy_id, headers=self.system_actor_header)
gevent.sleep(self.SLEEP_TIME) # Wait for events to be published and policy updated
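# Walks an enrollment negotiation end to end: proposal, rejection, a second proposal that
# is accepted, and the resulting membership, extension views and negotiation status events.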
def test_org_enroll_negotiation(self):
#Make sure that the system policies have been loaded
policy_list,_ = self.rr_client.find_resources(restype=RT.Policy)
self.assertNotEqual(len(policy_list),0,"The system policies have not been loaded into the Resource Registry")
with self.assertRaises(BadRequest) as cm:
myorg = self.org_client.read_org()
self.assertTrue(cm.exception.message == 'The org_id parameter is missing')
log.debug('Begin testing with policies')
#Create a new user - should be denied for anonymous access
with self.assertRaises(Unauthorized) as cm:
actor_id, valid_until, registered = self.id_client.signon(USER1_CERTIFICATE, True, headers=self.anonymous_actor_headers)
self.assertIn( 'identity_management(signon) has been denied',cm.exception.message)
#Now create user with proper credentials
actor_id, valid_until, registered = self.id_client.signon(USER1_CERTIFICATE, True, headers=self.apache_actor_header)
log.info( "actor id=" + actor_id)
#Build the message headers used with this user
actor_header = get_actor_header(actor_id)
#Get the associated user id
user_info = IonObject(RT.UserInfo, name='Test User')
actor_user_id = self.id_client.create_user_info(actor_id=actor_id, user_info=user_info, headers=actor_header)
#Attempt to enroll a user anonymously - should not be allowed
with self.assertRaises(Unauthorized) as cm:
self.org_client.enroll_member(self.ion_org._id,actor_id, headers=self.anonymous_actor_headers)
self.assertIn( 'org_management(enroll_member) has been denied',cm.exception.message)
#Attempt to let a user enroll themselves - should not be allowed
with self.assertRaises(Unauthorized) as cm:
self.org_client.enroll_member(self.ion_org._id,actor_id, headers=actor_header)
self.assertIn( 'org_management(enroll_member) has been denied',cm.exception.message)
#Attempt to enroll the user in the ION Root org as a manager - should not be allowed since
#registration with the system implies membership in the ROOT Org.
with self.assertRaises(BadRequest) as cm:
self.org_client.enroll_member(self.ion_org._id,actor_id, headers=self.system_actor_header)
self.assertTrue(cm.exception.message == 'A request to enroll in the root ION Org is not allowed')
#Verify that anonymous user cannot find a list of enrolled users in an Org
with self.assertRaises(Unauthorized) as cm:
actors = self.org_client.list_enrolled_actors(self.ion_org._id, headers=self.anonymous_actor_headers)
self.assertIn('org_management(list_enrolled_actors) has been denied',cm.exception.message)
#Verify that a user without the proper Org Manager role cannot find a list of enrolled users in an Org
with self.assertRaises(Unauthorized) as cm:
actors = self.org_client.list_enrolled_actors(self.ion_org._id, headers=actor_header)
self.assertIn( 'org_management(list_enrolled_actors) has been denied',cm.exception.message)
actors = self.org_client.list_enrolled_actors(self.ion_org._id, headers=self.system_actor_header)
self.assertEqual(len(actors),3) # Will include the ION system actor
#Create a second Org
with self.assertRaises(NotFound) as nf:
org2 = self.org_client.find_org(ORG2)
self.assertIn('The Org with name Org 2 does not exist',nf.exception.message)
org2 = IonObject(RT.Org, name=ORG2, description='A second Org')
org2_id = self.org_client.create_org(org2, headers=self.system_actor_header)
org2 = self.org_client.find_org(ORG2)
self.assertEqual(org2_id, org2._id)
negotiations = self.org_client.find_org_negotiations(org2_id, headers=self.system_actor_header)
self.assertEqual(len(negotiations),0)
#Build the Service Agreement Proposal for enrollment request
sap = IonObject(OT.EnrollmentProposal,consumer=actor_id, provider=org2_id )
sap_response = self.org_client.negotiate(sap, headers=actor_header )
negotiations = self.org_client.find_org_negotiations(org2_id, headers=self.system_actor_header)
self.assertEqual(len(negotiations),1)
negotiations = self.org_client.find_user_negotiations(actor_id, org2_id, headers=actor_header)
self.assertEqual(len(negotiations),1)
#Build the Service Agreement Proposal for enrollment request
sap2 = IonObject(OT.EnrollmentProposal,consumer=actor_id, provider=org2_id )
#User tried proposing an enrollment again - this should fail
with self.assertRaises(BadRequest) as cm:
self.org_client.negotiate(sap2, headers=actor_header )
self.assertIn('A precondition for this request has not been satisfied: not is_enroll_negotiation_open',cm.exception.message)
#Manager tries to reject the proposal but incorrectly
negotiations = self.org_client.find_org_negotiations(org2_id, proposal_type=OT.EnrollmentProposal,
negotiation_status=NegotiationStatusEnum.OPEN, headers=self.system_actor_header)
sap_response = Negotiation.create_counter_proposal(negotiations[0], ProposalStatusEnum.REJECTED, ProposalOriginatorEnum.PROVIDER)
sap_response.sequence_num -= 1
#Should fail because the proposal sequence was not incremented
with self.assertRaises(Inconsistent) as cm:
self.org_client.negotiate(sap_response, headers=actor_header )
self.assertIn('The Service Agreement Proposal does not have the correct sequence_num value (0) for this negotiation (1)',cm.exception.message)
#Manager now tries to reject the proposal but with the correct proposal sequence
sap_response.sequence_num += 1
sap_response2 = self.org_client.negotiate(sap_response, headers=self.system_actor_header )
negotiations = self.org_client.find_org_negotiations(org2_id, headers=self.system_actor_header)
self.assertEqual(len(negotiations),1)
self.assertEqual(negotiations[0].negotiation_status, NegotiationStatusEnum.REJECTED)
gevent.sleep(self.SLEEP_TIME) # Wait for events to be published
#Check that there are the correct number of events
events_r = self.event_repo.find_events(origin=sap_response2.negotiation_id, event_type=OT.EnrollmentNegotiationStatusEvent)
self.assertEquals(len(events_r), 2)
self.assertEqual(events_r[-1][2].description, ProposalStatusEnum._str_map[ProposalStatusEnum.REJECTED])
#Create a new enrollment proposal
#Build the Service Agreement Proposal to enroll
sap = IonObject(OT.EnrollmentProposal,consumer=actor_id, provider=org2_id, description='Enrollment request for test user' )
sap_response = self.org_client.negotiate(sap, headers=actor_header )
negotiations = self.org_client.find_org_negotiations(org2_id, headers=self.system_actor_header)
self.assertEqual(len(negotiations),2)
negotiations = self.org_client.find_user_negotiations(actor_id, org2_id, headers=actor_header)
self.assertEqual(len(negotiations),2)
actors = self.org_client.list_enrolled_actors(org2_id, headers=self.system_actor_header)
self.assertEqual(len(actors),0)
#Use get_marine_facility_extension to check the open and closed negotiations as seen by a normal user
ext_mf = self.obs_client.get_marine_facility_extension(org_id=org2_id,user_id=actor_user_id, headers=actor_header)
self.assertEqual(len(ext_mf.closed_requests), 0)
self.assertEqual(len(ext_mf.open_requests), 0)
#Use get_marine_facility_extension to check the open and closed negotiations as seen by a privileged user
ext_mf = self.obs_client.get_marine_facility_extension(org_id=org2_id,user_id=self.system_actor._id, headers=self.system_actor_header)
self.assertEqual(len(ext_mf.closed_requests), 1)
self.assertEqual(len(ext_mf.open_requests), 1)
#Manager approves proposal
negotiations = self.org_client.find_org_negotiations(org2_id, proposal_type=OT.EnrollmentProposal,
negotiation_status=NegotiationStatusEnum.OPEN, headers=self.system_actor_header)
#Make sure the Negotiation object has the proper description set from the initial SAP
self.assertEqual(negotiations[0].description, sap.description)
sap_response = Negotiation.create_counter_proposal(negotiations[0], ProposalStatusEnum.ACCEPTED, ProposalOriginatorEnum.PROVIDER)
sap_response2 = self.org_client.negotiate(sap_response, headers=self.system_actor_header )
actors = self.org_client.list_enrolled_actors(org2_id, headers=self.system_actor_header)
self.assertEqual(len(actors),1)
#User tried requesting enrollment again - this should fail
with self.assertRaises(BadRequest) as cm:
sap = IonObject(OT.EnrollmentProposal,consumer=actor_id, provider=org2_id )
neg_id = self.org_client.negotiate(sap, headers=actor_header )
self.assertIn('A precondition for this request has not been satisfied: not is_enrolled',cm.exception.message)
#Use get_marine_facility_extension to check the open and closed negotiations as seen by a normal user
ext_mf = self.obs_client.get_marine_facility_extension(org_id=org2_id,user_id=actor_user_id, headers=actor_header)
self.assertEqual(len(ext_mf.closed_requests), 0)
self.assertEqual(len(ext_mf.open_requests), 0)
#Use get_marine_facility_extension to check the open and closed negotiations as seen by a privileged user
ext_mf = self.obs_client.get_marine_facility_extension(org_id=org2_id,user_id=self.system_actor._id, headers=self.system_actor_header)
self.assertEqual(len(ext_mf.closed_requests), 2)
self.assertEqual(len(ext_mf.open_requests), 0)
gevent.sleep(self.SLEEP_TIME) # Wait for events to be published
#Check that there are the correct number of events
events_r = self.event_repo.find_events(origin=sap_response2.negotiation_id, event_type=OT.EnrollmentNegotiationStatusEvent)
self.assertEquals(len(events_r), 4)
self.assertEqual(events_r[-1][2].description, ProposalStatusEnum._str_map[ProposalStatusEnum.GRANTED])
events_c = self.event_repo.find_events(origin=org2_id, event_type=OT.OrgMembershipGrantedEvent)
self.assertEquals(len(events_c), 1)
events_i = self.event_repo.find_events(origin=org2_id, event_type=OT.OrgNegotiationInitiatedEvent)
self.assertEquals(len(events_i), 2)
ret = self.org_client.is_enrolled(org_id=org2_id, actor_id=actor_id, headers=self.system_actor_header)
self.assertEquals(ret, True)
self.org_client.cancel_member_enrollment(org_id=org2_id, actor_id=actor_id, headers=self.system_actor_header)
ret = self.org_client.is_enrolled(org_id=org2_id, actor_id=actor_id, headers=self.system_actor_header)
self.assertEquals(ret, False)
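# Walks a role request negotiation: enrollment, a rejected role proposal, a second proposal
# that is accepted, and the operations (instrument agent creation) the new role permits.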
def test_org_role_negotiation(self):
#Make sure that the system policies have been loaded
policy_list,_ = self.rr_client.find_resources(restype=RT.Policy)
self.assertNotEqual(len(policy_list),0,"The system policies have not been loaded into the Resource Registry")
with self.assertRaises(BadRequest) as cm:
myorg = self.org_client.read_org()
self.assertTrue(cm.exception.message == 'The org_id parameter is missing')
log.debug('Begin testing with policies')
#Create a new user - should be denied for anonymous access
with self.assertRaises(Unauthorized) as cm:
actor_id, valid_until, registered = self.id_client.signon(USER1_CERTIFICATE, True, headers=self.anonymous_actor_headers)
self.assertIn( 'identity_management(signon) has been denied',cm.exception.message)
#Now create user with proper credentials
actor_id, valid_until, registered = self.id_client.signon(USER1_CERTIFICATE, True, headers=self.apache_actor_header)
log.info( "actor id=" + actor_id)
#Build the message headers used with this user
actor_header = get_actor_header(actor_id)
actors = self.org_client.list_enrolled_actors(self.ion_org._id, headers=self.system_actor_header)
self.assertEqual(len(actors),3) # Will include the ION system actor and the non-user actor from setup
## test_org_roles and policies
roles = self.org_client.list_org_roles(self.ion_org._id)
self.assertEqual(len(roles),3)
self.assertItemsEqual([r.governance_name for r in roles], [MODERATOR_ROLE, MEMBER_ROLE, SUPERUSER_ROLE])
roles = self.org_client.list_enrolled_actors(self.ion_org._id, self.system_actor._id, headers=self.system_actor_header)
self.assertEqual(len(roles),3)
self.assertItemsEqual([r.governance_name for r in roles], [MEMBER_ROLE, MODERATOR_ROLE, SUPERUSER_ROLE])
roles = self.org_client.list_enrolled_actors(self.ion_org._id, actor_id, headers=self.system_actor_header)
self.assertEqual(len(roles),1)
self.assertItemsEqual([r.governance_name for r in roles], [MEMBER_ROLE])
#Create a second Org
with self.assertRaises(NotFound) as nf:
org2 = self.org_client.find_org(ORG2)
self.assertIn('The Org with name Org 2 does not exist',nf.exception.message)
org2 = IonObject(RT.Org, name=ORG2, description='A second Org')
org2_id = self.org_client.create_org(org2, headers=self.system_actor_header)
org2 = self.org_client.find_org(ORG2)
self.assertEqual(org2_id, org2._id)
roles = self.org_client.list_org_roles(org2_id)
self.assertEqual(len(roles),2)
self.assertItemsEqual([r.governance_name for r in roles], [MODERATOR_ROLE, MEMBER_ROLE])
#Create the Instrument Operator Role
operator_role = IonObject(RT.UserRole, governance_name=OPERATOR_ROLE,name='Instrument Operator', description='Instrument Operator')
#First try to add the user role anonymously
with self.assertRaises(Unauthorized) as cm:
self.org_client.add_org_role(org2_id, operator_role, headers=self.anonymous_actor_headers)
self.assertIn('org_management(add_org_role) has been denied',cm.exception.message)
self.org_client.add_org_role(org2_id, operator_role, headers=self.system_actor_header)
roles = self.org_client.list_org_roles(org2_id)
self.assertEqual(len(roles),3)
self.assertItemsEqual([r.governance_name for r in roles], [MODERATOR_ROLE, MEMBER_ROLE, OPERATOR_ROLE])
#Add the same role to the first Org as well
self.org_client.add_org_role(self.ion_org._id, operator_role, headers=self.system_actor_header)
# Test role request proposals.
#First try to find user requests anonymously
with self.assertRaises(Unauthorized) as cm:
requests = self.org_client.find_org_negotiations(org2_id, headers=self.anonymous_actor_headers)
self.assertIn('org_management(find_org_negotiations) has been denied',cm.exception.message)
#Next try to find user requests as a basic member
with self.assertRaises(Unauthorized) as cm:
requests = self.org_client.find_org_negotiations(org2_id, headers=actor_header)
self.assertIn('org_management(find_org_negotiations) has been denied',cm.exception.message)
#Should not be denied for user with Org Manager role or ION System manager role
requests = self.org_client.find_org_negotiations(org2_id, headers=self.system_actor_header)
self.assertEqual(len(requests),0)
#Build the Service Agreement Proposal for assigning a role to a user
sap = IonObject(OT.RequestRoleProposal,consumer=actor_id, provider=org2_id, role_name=OPERATOR_ROLE )
# First try to request a role anonymously
with self.assertRaises(Unauthorized) as cm:
sap_response = self.org_client.negotiate(sap, headers=self.anonymous_actor_headers)
self.assertIn('org_management(negotiate) has been denied',cm.exception.message)
# Next try to propose to assign a role without being a member
with self.assertRaises(BadRequest) as cm:
sap_response = self.org_client.negotiate(sap, headers=actor_header )
self.assertIn('A precondition for this request has not been satisfied: is_enrolled',cm.exception.message)
negotiations = self.org_client.find_org_negotiations(org2_id, headers=self.system_actor_header)
self.assertEqual(len(negotiations),0)
#Build the Service Agreement Proposal to enroll
sap = IonObject(OT.EnrollmentProposal,consumer=actor_id, provider=org2_id )
sap_response = self.org_client.negotiate(sap, headers=actor_header )
negotiations = self.org_client.find_org_negotiations(org2_id, headers=self.system_actor_header)
self.assertEqual(len(negotiations),1)
negotiations = self.org_client.find_user_negotiations(actor_id, org2_id, headers=actor_header)
self.assertEqual(len(negotiations),1)
actors = self.org_client.list_enrolled_actors(org2_id, headers=self.system_actor_header)
self.assertEqual(len(actors),0)
#Manager approves proposal
negotiations = self.org_client.find_org_negotiations(org2_id, proposal_type=OT.EnrollmentProposal,
negotiation_status=NegotiationStatusEnum.OPEN, headers=self.system_actor_header)
sap_response = Negotiation.create_counter_proposal(negotiations[0], ProposalStatusEnum.ACCEPTED, ProposalOriginatorEnum.PROVIDER)
sap_response2 = self.org_client.negotiate(sap_response, headers=self.system_actor_header )
actors = self.org_client.list_enrolled_actors(org2_id, headers=self.system_actor_header)
self.assertEqual(len(actors),1)
#Create a proposal to add a role to a user
sap = IonObject(OT.RequestRoleProposal,consumer=actor_id, provider=org2_id, role_name=OPERATOR_ROLE )
sap_response = self.org_client.negotiate(sap, headers=actor_header )
ret = self.org_client.has_role(org2_id, actor_id,OPERATOR_ROLE, headers=actor_header )
self.assertEqual(ret, False)
#Run through a series of different finds to ensure the various parameter filters are working.
negotiations = self.org_client.find_org_negotiations(org2_id, headers=self.system_actor_header)
self.assertEqual(len(negotiations),2)
negotiations = self.org_client.find_org_negotiations(org2_id,negotiation_status=NegotiationStatusEnum.OPEN, headers=self.system_actor_header)
self.assertEqual(len(negotiations),1)
negotiations = self.org_client.find_user_negotiations(actor_id, org2_id, headers=actor_header)
self.assertEqual(len(negotiations),2)
negotiations = self.org_client.find_user_negotiations(actor_id, org2_id, proposal_type=OT.RequestRoleProposal, headers=actor_header)
self.assertEqual(len(negotiations),1)
negotiations = self.org_client.find_user_negotiations(actor_id, org2_id, negotiation_status=NegotiationStatusEnum.OPEN, headers=actor_header)
self.assertEqual(len(negotiations),1)
#Manager rejects the initial role proposal
negotiations = self.org_client.find_org_negotiations(org2_id, proposal_type=OT.RequestRoleProposal,
negotiation_status=NegotiationStatusEnum.OPEN, headers=self.system_actor_header)
sap_response = Negotiation.create_counter_proposal(negotiations[0], ProposalStatusEnum.REJECTED, ProposalOriginatorEnum.PROVIDER)
sap_response2 = self.org_client.negotiate(sap_response, headers=self.system_actor_header )
negotiations = self.org_client.find_org_negotiations(org2_id, headers=self.system_actor_header)
self.assertEqual(len(negotiations),2)
negotiations = self.org_client.find_org_negotiations(org2_id,negotiation_status=NegotiationStatusEnum.REJECTED, headers=self.system_actor_header)
self.assertEqual(len(negotiations),1)
self.assertEqual(negotiations[0].negotiation_status, NegotiationStatusEnum.REJECTED)
#Make sure the user still does not have the requested role
ret = self.org_client.has_role(org2_id, actor_id,OPERATOR_ROLE, headers=actor_header )
self.assertEqual(ret, False)
gevent.sleep(self.SLEEP_TIME) # Wait for events to be published
#Check that there are the correct number of events
events_r = self.event_repo.find_events(origin=sap_response2.negotiation_id, event_type=OT.RequestRoleNegotiationStatusEvent)
self.assertEquals(len(events_r), 2)
self.assertEqual(events_r[-1][2].description, ProposalStatusEnum._str_map[ProposalStatusEnum.REJECTED])
#Create a second proposal to add a role to a user
sap = IonObject(OT.RequestRoleProposal,consumer=actor_id, provider=org2_id, role_name=OPERATOR_ROLE )
sap_response = self.org_client.negotiate(sap, headers=actor_header )
negotiations = self.org_client.find_org_negotiations(org2_id, headers=self.system_actor_header)
self.assertEqual(len(negotiations),3)
closed_negotiations = self.org_client.find_org_closed_negotiations(org2_id, headers=self.system_actor_header)
self.assertEqual(len(closed_negotiations),2)
#Create an instrument resource
ia_list,_ = self.rr_client.find_resources(restype=RT.InstrumentAgent)
self.assertEqual(len(ia_list),0)
ia_obj = IonObject(RT.InstrumentAgent, name='Instrument Agent1', description='The first Instrument Agent')
#Instruments should not be able to be created by anonymous users
with self.assertRaises(Unauthorized) as cm:
self.ims_client.create_instrument_agent(ia_obj, headers=self.anonymous_actor_headers)
self.assertIn('instrument_management(create_instrument_agent) has been denied',cm.exception.message)
#Instruments should not be able to be created by users that are not Instrument Operators
with self.assertRaises(Unauthorized) as cm:
self.ims_client.create_instrument_agent(ia_obj, headers=actor_header)
self.assertIn('instrument_management(create_instrument_agent) has been denied',cm.exception.message)
#Manager approves proposal for role request
negotiations = self.org_client.find_org_negotiations(org2_id, proposal_type=OT.RequestRoleProposal,
negotiation_status=NegotiationStatusEnum.OPEN, headers=self.system_actor_header)
sap_response = Negotiation.create_counter_proposal(negotiations[0], ProposalStatusEnum.ACCEPTED, ProposalOriginatorEnum.PROVIDER)
sap_response2 = self.org_client.negotiate(sap_response, headers=self.system_actor_header )
#Make sure there are no more open negotiations
negotiations = self.org_client.find_user_negotiations(actor_id, org2_id, negotiation_status=NegotiationStatusEnum.OPEN, headers=actor_header)
self.assertEqual(len(negotiations),0)
#Verify the user has been assigned the requested role in the second Org
ret = self.org_client.has_role(org2_id, actor_id,OPERATOR_ROLE, headers=actor_header )
self.assertEqual(ret, True)
#Verify the user has only been assigned the requested role in the second Org and not in the first Org
ret = self.org_client.has_role(self.ion_org._id, actor_id,OPERATOR_ROLE, headers=actor_header )
self.assertEqual(ret, False)
#Refresh headers with new role
actor_header = get_actor_header(actor_id)
#now try to request the same role for the same user - should be denied
with self.assertRaises(BadRequest) as cm:
sap = IonObject(OT.RequestRoleProposal,consumer=actor_id, provider=org2_id, role_name=OPERATOR_ROLE )
sap_response = self.org_client.negotiate(sap, headers=actor_header )
self.assertIn('A precondition for this request has not been satisfied: not has_role',cm.exception.message)
#Now the user with the proper role should be able to create an instrument.
self.ims_client.create_instrument_agent(ia_obj, headers=actor_header)
gevent.sleep(self.SLEEP_TIME) # Wait for events to be published
#Check that there are the correct number of events
events_r = self.event_repo.find_events(origin=sap_response2.negotiation_id, event_type=OT.RequestRoleNegotiationStatusEvent)
self.assertEquals(len(events_r), 4)
self.assertEqual(events_r[-1][2].description, ProposalStatusEnum._str_map[ProposalStatusEnum.GRANTED])
self.assertEqual(events_r[-1][2].role_name, sap_response2.role_name)
events_c = self.event_repo.find_events(origin=org2_id, event_type=OT.UserRoleGrantedEvent)
self.assertEquals(len(events_c), 2)
events_i = self.event_repo.find_events(origin=org2_id, event_type=OT.OrgNegotiationInitiatedEvent)
self.assertEquals(len(events_i), 3)
def test_org_acquire_resource_negotiation(self):
#Make sure that the system policies have been loaded
policy_list,_ = self.rr_client.find_resources(restype=RT.Policy)
self.assertNotEqual(len(policy_list),0,"The system policies have not been loaded into the Resource Registry")
with self.assertRaises(BadRequest) as cm:
myorg = self.org_client.read_org()
self.assertTrue(cm.exception.message == 'The org_id parameter is missing')
log.debug('Begin testing with policies')
#Create a new user - should be denied for anonymous access
with self.assertRaises(Unauthorized) as cm:
actor_id, valid_until, registered = self.id_client.signon(USER1_CERTIFICATE, True, headers=self.anonymous_actor_headers)
self.assertIn( 'identity_management(signon) has been denied',cm.exception.message)
#Now create user with proper credentials
actor_id, valid_until, registered = self.id_client.signon(USER1_CERTIFICATE, True, headers=self.apache_actor_header)
log.info( "actor id=" + actor_id)
#Create a second Org
org2 = IonObject(RT.Org, name=ORG2, description='A second Org')
org2_id = self.org_client.create_org(org2, headers=self.system_actor_header)
org2 = self.org_client.find_org(ORG2)
self.assertEqual(org2_id, org2._id)
roles = self.org_client.list_org_roles(org2_id)
self.assertEqual(len(roles),2)
self.assertItemsEqual([r.governance_name for r in roles], [MODERATOR_ROLE, MEMBER_ROLE])
#Create the Instrument Operator Role
operator_role = IonObject(RT.UserRole, governance_name=OPERATOR_ROLE,name='Instrument Operator', description='Instrument Operator')
#And add it to all Orgs
self.org_client.add_org_role(self.ion_org._id, operator_role, headers=self.system_actor_header)
self.org_client.add_org_role(org2_id, operator_role, headers=self.system_actor_header)
#Add the OPERATOR_ROLE to the User for the ION Org
self.org_client.grant_role(self.ion_org._id, actor_id, OPERATOR_ROLE, headers=self.system_actor_header)
#Enroll the user in the second Org - do without Negotiation for test
self.org_client.enroll_member(org2_id, actor_id,headers=self.system_actor_header )
#Build the message headers used with this user
actor_header = get_actor_header(actor_id)
#Test the invitation process
#Create a invitation proposal to add a role to a user
sap = IonObject(OT.RequestRoleProposal,consumer=actor_id, provider=org2_id, role_name=OPERATOR_ROLE,
originator=ProposalOriginatorEnum.PROVIDER )
sap_response = self.org_client.negotiate(sap, headers=self.system_actor_header )
ret = self.org_client.has_role(org2_id, actor_id,OPERATOR_ROLE, headers=actor_header )
self.assertEqual(ret, False)
#User creates proposal to approve
negotiations = self.org_client.find_user_negotiations(actor_id, org2_id, proposal_type=OT.RequestRoleProposal,
negotiation_status=NegotiationStatusEnum.OPEN, headers=actor_header)
sap_response = Negotiation.create_counter_proposal(negotiations[0], ProposalStatusEnum.ACCEPTED)
sap_response2 = self.org_client.negotiate(sap_response, headers=actor_header )
#Verify the user has been assigned the requested role in the second Org
ret = self.org_client.has_role(org2_id, actor_id,OPERATOR_ROLE, headers=actor_header )
self.assertEqual(ret, True)
#Build the message headers used with this user
actor_header = get_actor_header(actor_id)
gevent.sleep(self.SLEEP_TIME) # Wait for events to be published
#Check that there are the correct number of events
events_r = self.event_repo.find_events(origin=sap_response2.negotiation_id, event_type=OT.RequestRoleNegotiationStatusEvent)
self.assertEquals(len(events_r), 4)
self.assertEqual(events_r[-1][2].description, ProposalStatusEnum._str_map[ProposalStatusEnum.GRANTED])
#Create the instrument agent with the user that has the proper role
ia_obj = IonObject(RT.InstrumentAgent, name='Instrument Agent1', description='The Instrument Agent')
self.ims_client.create_instrument_agent(ia_obj, headers=actor_header)
#Ensure the instrument agent has been created
ia_list,_ = self.rr_client.find_resources(restype=RT.InstrumentAgent)
self.assertEqual(len(ia_list),1)
self.assertEquals(ia_list[0].lcstate, LCS.DRAFT)
self.assertEquals(ia_list[0].availability, AS.PRIVATE)
#Advance the Life cycle to planned. Must be OPERATOR so anonymous user should fail
with self.assertRaises(Unauthorized) as cm:
self.ims_client.execute_instrument_agent_lifecycle(ia_list[0]._id, LCE.PLAN, headers=self.anonymous_actor_headers)
self.assertIn( 'instrument_management(execute_instrument_agent_lifecycle) has been denied',cm.exception.message)
#Advance the Life cycle to planned. Must be OPERATOR
self.ims_client.execute_instrument_agent_lifecycle(ia_list[0]._id, LCE.PLAN, headers=actor_header)
ia = self.rr_client.read(ia_list[0]._id)
self.assertEquals(ia.lcstate, LCS.PLANNED)
        #First make an acquire resource request with a non-enrolled user.
with self.assertRaises(BadRequest) as cm:
sap = IonObject(OT.AcquireResourceProposal,consumer=self.system_actor._id, provider=org2_id, resource_id=ia_list[0]._id )
sap_response = self.org_client.negotiate(sap, headers=self.system_actor_header )
self.assertIn('A precondition for this request has not been satisfied: is_enrolled',cm.exception.message)
        #Make a proposal to acquire a resource with an enrolled user that has the right role but the resource is not shared with the Org
with self.assertRaises(BadRequest) as cm:
sap = IonObject(OT.AcquireResourceProposal,consumer=actor_id, provider=org2_id, resource_id=ia_list[0]._id)
sap_response = self.org_client.negotiate(sap, headers=actor_header )
self.assertIn('A precondition for this request has not been satisfied: is_resource_shared',cm.exception.message)
#So share the resource
self.org_client.share_resource(org_id=org2_id, resource_id=ia_list[0]._id, headers=self.system_actor_header )
#Verify the resource is shared
res_list,_ = self.rr_client.find_objects(org2,PRED.hasResource)
self.assertEqual(len(res_list), 1)
self.assertEqual(res_list[0]._id, ia_list[0]._id)
        #First try to acquire the resource exclusively but it should fail since the user cannot do this without first
        #having acquired the resource
with self.assertRaises(BadRequest) as cm:
sap = IonObject(OT.AcquireResourceExclusiveProposal,consumer=actor_id, provider=org2_id, resource_id=ia_list[0]._id)
sap_response = self.org_client.negotiate(sap, headers=actor_header )
self.assertIn('A precondition for this request has not been satisfied: is_resource_acquired',cm.exception.message)
        #Make a proposal to acquire a resource with an enrolled user that has the right role, now that the resource is shared
sap = IonObject(OT.AcquireResourceProposal,consumer=actor_id, provider=org2_id, resource_id=ia_list[0]._id)
sap_response = self.org_client.negotiate(sap, headers=actor_header )
negotiations = self.org_client.find_org_negotiations(org2_id, headers=self.system_actor_header)
self.assertEqual(len(negotiations),2)
negotiations = self.org_client.find_user_negotiations(actor_id, org2_id, headers=actor_header)
self.assertEqual(len(negotiations),2)
negotiations = self.org_client.find_user_negotiations(actor_id, org2_id, proposal_type=OT.AcquireResourceProposal, headers=actor_header)
self.assertEqual(len(negotiations),1)
negotiations = self.org_client.find_user_negotiations(actor_id, org2_id, negotiation_status=NegotiationStatusEnum.OPEN, headers=actor_header)
self.assertEqual(len(negotiations),1)
self.assertEqual(negotiations[0]._id, sap_response.negotiation_id)
#Manager Creates a counter proposal
negotiations = self.org_client.find_org_negotiations(org2_id, proposal_type=OT.AcquireResourceProposal,
negotiation_status=NegotiationStatusEnum.OPEN, headers=self.system_actor_header)
#Counter proposals for demonstration only
#Calculate one week from now in milliseconds
cur_time = int(get_ion_ts())
week_expiration = cur_time + ( 7 * 24 * 60 * 60 * 1000 )
sap_response = Negotiation.create_counter_proposal(negotiations[0], originator=ProposalOriginatorEnum.PROVIDER)
sap_response.expiration = str(week_expiration)
sap_response2 = self.org_client.negotiate(sap_response, headers=self.system_actor_header )
#User Creates a counter proposal
negotiations = self.org_client.find_user_negotiations(actor_id, org2_id, proposal_type=OT.AcquireResourceProposal,
negotiation_status=NegotiationStatusEnum.OPEN, headers=actor_header)
cur_time = int(get_ion_ts())
month_expiration = cur_time + ( 30 * 24 * 60 * 60 * 1000 )
sap_response = Negotiation.create_counter_proposal(negotiations[0])
sap_response.expiration = str(month_expiration)
sap_response2 = self.org_client.negotiate(sap_response, headers=self.system_actor_header )
gevent.sleep(self.SLEEP_TIME+1) # Wait for events to be published
#Check that there are the correct number of events
events_r = self.event_repo.find_events(origin=sap_response2.negotiation_id, event_type=OT.AcquireResourceNegotiationStatusEvent)
self.assertEquals(len(events_r), 3)
self.assertEqual(events_r[-1][2].description, ProposalStatusEnum._str_map[ProposalStatusEnum.COUNTER])
self.assertEqual(events_r[-1][2].resource_id, ia_list[0]._id)
#Manager approves Instrument resource proposal
negotiations = self.org_client.find_org_negotiations(org2_id, proposal_type=OT.AcquireResourceProposal,
negotiation_status=NegotiationStatusEnum.OPEN, headers=self.system_actor_header)
sap_response = Negotiation.create_counter_proposal(negotiations[0], ProposalStatusEnum.ACCEPTED, ProposalOriginatorEnum.PROVIDER)
sap_response2 = self.org_client.negotiate(sap_response, headers=self.system_actor_header )
negotiations = self.org_client.find_user_negotiations(actor_id, org2_id, negotiation_status=NegotiationStatusEnum.OPEN, headers=actor_header)
self.assertEqual(len(negotiations),0) #Should be no more open negotiations for a user because auto-accept is enabled
#The following are no longer needed with auto-accept enabled for acquiring a resource
'''
self.assertEqual(len(negotiations),1)
#User accepts proposal in return
negotiations = self.org_client.find_user_negotiations(actor_id, org2_id, proposal_type=OT.AcquireResourceProposal,
negotiation_status=NegotiationStatusEnum.OPEN, headers=actor_header)
sap_response = Negotiation.create_counter_proposal(negotiations[0], ProposalStatusEnum.ACCEPTED)
sap_response2 = self.org_client.negotiate(sap_response, headers=actor_header )
'''
negotiations = self.org_client.find_user_negotiations(actor_id, org2_id, negotiation_status=NegotiationStatusEnum.OPEN, headers=actor_header)
self.assertEqual(len(negotiations),0)
#Check commitment to be active
commitments, _ = self.rr_client.find_objects(ia_list[0]._id,PRED.hasCommitment, RT.Commitment)
self.assertEqual(len(commitments),1)
resource_commitment, _ = self.rr_client.find_objects(actor_id,PRED.hasCommitment, RT.Commitment)
self.assertEqual(len(resource_commitment),1)
self.assertNotEqual(resource_commitment[0].lcstate, LCS.DELETED)
subjects, _ = self.rr_client.find_subjects(None,PRED.hasCommitment, commitments[0]._id)
self.assertEqual(len(subjects),3)
contracts, _ = self.rr_client.find_subjects(RT.Negotiation,PRED.hasContract, commitments[0]._id)
self.assertEqual(len(contracts),1)
cur_time = int(get_ion_ts())
        invalid_expiration = cur_time + ( 13 * 60 * 60 * 1000 ) # 13 hours from now, exceeding the 12 hour limit
#Now try to acquire the resource exclusively for longer than 12 hours
sap = IonObject(OT.AcquireResourceExclusiveProposal,consumer=actor_id, provider=org2_id, resource_id=ia_list[0]._id,
expiration=str(invalid_expiration))
sap_response = self.org_client.negotiate(sap, headers=actor_header )
#make sure the negotiation was rejected for being too long.
negotiation = self.rr_client.read(sap_response.negotiation_id)
self.assertEqual(negotiation.negotiation_status, NegotiationStatusEnum.REJECTED)
#Now try to acquire the resource exclusively for 20 minutes
cur_time = int(get_ion_ts())
        valid_expiration = cur_time + ( 20 * 60 * 1000 ) # 20 minutes from now
sap = IonObject(OT.AcquireResourceExclusiveProposal,consumer=actor_id, provider=org2_id, resource_id=ia_list[0]._id,
expiration=str(valid_expiration))
sap_response = self.org_client.negotiate(sap, headers=actor_header )
#Check commitment to be active
commitments, _ = self.rr_client.find_objects(ia_list[0]._id,PRED.hasCommitment, RT.Commitment)
self.assertEqual(len(commitments),2)
exclusive_contract, _ = self.rr_client.find_objects(sap_response.negotiation_id,PRED.hasContract, RT.Commitment)
        self.assertEqual(len(exclusive_contract),1)
#Now try to acquire the resource exclusively again - should fail
with self.assertRaises(BadRequest) as cm:
sap = IonObject(OT.AcquireResourceExclusiveProposal,consumer=actor_id, provider=org2_id, resource_id=ia_list[0]._id)
sap_response = self.org_client.negotiate(sap, headers=actor_header )
self.assertIn('A precondition for this request has not been satisfied: not is_resource_acquired_exclusively',cm.exception.message)
#Release the exclusive commitment to the resource
self.org_client.release_commitment(exclusive_contract[0]._id, headers=actor_header)
#Check exclusive commitment to be inactive
commitments, _ = self.rr_client.find_resources(restype=RT.Commitment, lcstate=LCS.DELETED)
self.assertEqual(len(commitments),1)
self.assertEqual(commitments[0].commitment.exclusive, True)
        #Shared commitment is still active
commitments, _ = self.rr_client.find_objects(ia_list[0],PRED.hasCommitment, RT.Commitment)
self.assertEqual(len(commitments),1)
self.assertNotEqual(commitments[0].lcstate, LCS.DELETED)
#Now release the shared commitment
self.org_client.release_commitment(resource_commitment[0]._id, headers=actor_header)
#Check for both commitments to be inactive
commitments, _ = self.rr_client.find_resources(restype=RT.Commitment, lcstate=LCS.DELETED)
self.assertEqual(len(commitments),2)
commitments, _ = self.rr_client.find_objects(ia_list[0],PRED.hasCommitment, RT.Commitment)
self.assertEqual(len(commitments),0)
#Now check some negative cases...
#Attempt to acquire the same resource from the ION Org which is not sharing it - should fail
with self.assertRaises(BadRequest) as cm:
sap = IonObject(OT.AcquireResourceProposal,consumer=actor_id, provider=self.ion_org._id, resource_id=ia_list[0]._id)
sap_response = self.org_client.negotiate(sap, headers=actor_header )
self.assertIn('A precondition for this request has not been satisfied: is_resource_shared',cm.exception.message)
#Remove the OPERATOR_ROLE from the user.
self.org_client.revoke_role(org2_id, actor_id, OPERATOR_ROLE, headers=self.system_actor_header)
#Refresh headers with new role
actor_header = get_actor_header(actor_id)
#Make a proposal to acquire a resource with an enrolled user that does not have the right role
with self.assertRaises(BadRequest) as cm:
sap = IonObject(OT.AcquireResourceProposal,consumer=actor_id, provider=org2_id, resource_id=ia_list[0]._id )
sap_response = self.org_client.negotiate(sap, headers=actor_header )
self.assertIn('A precondition for this request has not been satisfied: has_role',cm.exception.message)
gevent.sleep(self.SLEEP_TIME+1) # Wait for events to be published
#Check that there are the correct number of events
events_r = self.event_repo.find_events(origin=sap_response2.negotiation_id, event_type=OT.AcquireResourceNegotiationStatusEvent)
self.assertEquals(len(events_r), 6)
self.assertEqual(events_r[-1][2].description, ProposalStatusEnum._str_map[ProposalStatusEnum.GRANTED])
self.assertEqual(events_r[-1][2].resource_id, ia_list[0]._id)
events_c = self.event_repo.find_events(origin=org2_id, event_type=OT.ResourceCommitmentCreatedEvent)
self.assertEquals(len(events_c), 2)
events_i = self.event_repo.find_events(origin=org2_id, event_type=OT.OrgNegotiationInitiatedEvent)
self.assertEquals(len(events_i), 4)
ret = self.org_client.is_resource_shared(org_id=org2_id, resource_id=ia_list[0]._id, headers=self.system_actor_header )
self.assertEquals(ret, True)
#So unshare the resource
self.org_client.unshare_resource(org_id=org2_id, resource_id=ia_list[0]._id, headers=self.system_actor_header )
ret = self.org_client.is_resource_shared(org_id=org2_id, resource_id=ia_list[0]._id, headers=self.system_actor_header )
self.assertEquals(ret, False)
| bsd-2-clause | 6,302,384,183,825,388,000 | 51.039851 | 205 | 0.675876 | false |
stephenlienharrell/roster-dns-management | test/dnsrmacl_test.py | 1 | 8124 | #!/usr/bin/python
# Copyright (c) 2009, Purdue University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# Neither the name of the Purdue University nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Regression test for dnsrmacl
Make sure you are running this against a database that can be destroyed.
DO NOT EVER RUN THIS TEST AGAINST A PRODUCTION DATABASE.
"""
__copyright__ = 'Copyright (C) 2009, Purdue University'
__license__ = 'BSD'
__version__ = '#TRUNK#'
import os
import sys
import socket
import threading
import time
import getpass
import unittest
import roster_core
import roster_server
from roster_user_tools import roster_client_lib
USER_CONFIG = 'test_data/roster_user_tools.conf'
CONFIG_FILE = 'test_data/roster.conf' # Example in test_data
SCHEMA_FILE = '../roster-core/data/database_schema.sql'
DATA_FILE = 'test_data/test_data.sql'
HOST = u'localhost'
USERNAME = u'sharrell'
PASSWORD = u'test'
KEYFILE=('test_data/dnsmgmt.key.pem')
CERTFILE=('test_data/dnsmgmt.cert.pem')
CREDFILE='%s/.dnscred' % os.getcwd()
EXEC='../roster-user-tools/scripts/dnsrmacl'
class options(object):
password = u'test'
username = u'sharrell'
server = None
ldap = u'ldaps://ldap.cs.university.edu:636'
credfile = CREDFILE
view_name = None
ip_address = None
target = u'machine1'
ttl = 64
class DaemonThread(threading.Thread):
def __init__(self, config_instance, port):
threading.Thread.__init__(self)
self.config_instance = config_instance
self.port = port
self.daemon_instance = None
def run(self):
self.daemon_instance = roster_server.Server(self.config_instance, KEYFILE,
CERTFILE)
self.daemon_instance.Serve(port=self.port)
class Testdnsrmacl(unittest.TestCase):
def setUp(self):
def PickUnusedPort():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, 0))
addr, port = s.getsockname()
s.close()
return port
self.config_instance = roster_core.Config(file_name=CONFIG_FILE)
db_instance = self.config_instance.GetDb()
db_instance.CreateRosterDatabase()
data = open(DATA_FILE, 'r').read()
db_instance.StartTransaction()
db_instance.cursor.execute(data)
db_instance.EndTransaction()
db_instance.close()
self.port = PickUnusedPort()
self.server_name = 'https://%s:%s' % (HOST, self.port)
self.daemon_thread = DaemonThread(self.config_instance, self.port)
self.daemon_thread.daemon = True
self.daemon_thread.start()
self.core_instance = roster_core.Core(USERNAME, self.config_instance)
self.password = 'test'
time.sleep(1)
roster_client_lib.GetCredentials(USERNAME, u'test', credfile=CREDFILE,
server_name=self.server_name)
def tearDown(self):
if( os.path.exists(CREDFILE) ):
os.remove(CREDFILE)
def testRemoveAcl(self):
self.core_instance.MakeACL(u'acl1', u'192.168.1.0/24')
self.assertEqual(self.core_instance.ListACLs(),
{u'acl1': [{'cidr_block': u'192.168.1.0/24'}],
u'any': [{'cidr_block': None}]})
command = os.popen('python %s --force -a acl1 -u %s -p %s --config-file %s '
'-s %s -c %s' % (
EXEC, USERNAME, self.password, USER_CONFIG, self.server_name, CREDFILE))
self.assertEqual(command.read(),
'REMOVED ACL: acl: acl1\n')
command.close()
self.assertEqual(self.core_instance.ListACLs(),
{u'any': [{'cidr_block': None}]})
def testRemoveCIDRFromAcl(self):
self.core_instance.MakeACL(u'acl1', u'192.168.1.0/24')
self.core_instance.MakeACL(u'acl1', u'192.168.2.0/24')
self.assertEqual(self.core_instance.ListACLs(),
{u'acl1': [{'cidr_block': u'192.168.1.0/24'},
{'cidr_block': u'192.168.2.0/24'}],
u'any': [{'cidr_block': None}]})
command = os.popen('python %s -a acl1 --cidr-block 192.168.2.0/24 '
'-u %s -p %s --config-file %s -s %s -c %s' % (
EXEC, USERNAME, self.password, USER_CONFIG, self.server_name, CREDFILE))
self.assertEqual(command.read(),
'REMOVED ACL: acl: acl1 cidr_block: 192.168.2.0/24\n')
self.assertEqual(self.core_instance.ListACLs(),
{u'acl1': [{'cidr_block': u'192.168.1.0/24'}],
u'any': [{'cidr_block': None}]})
command = os.popen('python %s -a acl1 --cidr-block 192.168.1.0/24 '
'-u %s -p %s --config-file %s -s %s -c %s' % (
EXEC, USERNAME, self.password, USER_CONFIG, self.server_name, CREDFILE))
self.assertEqual(command.read(),
'REMOVED ACL: acl: acl1 cidr_block: 192.168.1.0/24\n')
self.assertEqual(self.core_instance.ListACLs(),
{u'any': [{'cidr_block': None}]})
self.core_instance.MakeACL(u'acl1', u'192.168.1.0/24')
self.core_instance.MakeACL(u'acl1', u'192.168.2.0/24')
command = os.popen('python %s -a acl1 --cidr-block 192.168.2.0/24 '
'-u %s -p %s --config-file %s -s %s -c %s' % (
EXEC, USERNAME, self.password, USER_CONFIG, self.server_name, CREDFILE))
self.assertEqual(command.read(),
'REMOVED ACL: acl: acl1 cidr_block: 192.168.2.0/24\n')
command = os.popen('python %s -a acl1 --cidr-block 192.168.1.0/24 '
'-u %s -p %s --config-file %s -s %s -c %s' % (
EXEC, USERNAME, self.password, USER_CONFIG, self.server_name, CREDFILE))
self.assertEqual(command.read(),
'REMOVED ACL: acl: acl1 cidr_block: 192.168.1.0/24\n')
self.assertEqual(self.core_instance.ListACLs(),
{u'any': [{'cidr_block': None}]})
def testErrors(self):
command = os.popen('python %s -a acl1 --cidr-block 192.168.2.0/24 '
'-u %s -p %s --config-file %s -s %s -c %s' % (
EXEC, USERNAME, self.password, USER_CONFIG, self.server_name, CREDFILE))
self.assertEqual(command.read(),
'CLIENT ERROR: No acl found with acl: acl1 cidr_block: 192.168.2.0/24'
'\n')
command = os.popen('python %s -u %s -p %s --config-file %s '
'--force -s %s -c %s' % (
EXEC, USERNAME, self.password, USER_CONFIG,
self.server_name, CREDFILE))
self.assertEqual(command.read(),
"CLIENT ERROR: The -a/--acl flag is required.\n")
command.close()
command = os.popen('python %s --acl acl1 -u %s -p %s --config-file %s '
'-s %s -c %s' % (
EXEC, USERNAME, self.password, USER_CONFIG,
self.server_name, CREDFILE))
self.assertEqual(command.read(),
"CLIENT ERROR: Must use --force to delete entire ACL.\n")
command.close()
if( __name__ == '__main__' ):
unittest.main()
| bsd-3-clause | -5,292,346,615,663,352,000 | 39.217822 | 80 | 0.64094 | false |
Camiloasc1/AstronomyUNAL | CelestialMechanics/orbits/test/test_ellipse.py | 1 | 4276 | import unittest
from astropy import constants as astroconst
from astropy import units as u
from astropy.time import Time
from CelestialMechanics.kepler import constants
from CelestialMechanics.orbits import ellipse
from CelestialMechanics.orbits.ellipse import delta_t_t0_aeangle
class MyTestCase(unittest.TestCase):
def test_ellipse(self):
r = ellipse.r(1.5236164, 0.0932802, 32.)
self.assertAlmostEqual(1.3996391, r, places=7)
a, e = ellipse.ae(0.275, 1.168)
self.assertAlmostEqual(0.722, a, places=3)
self.assertAlmostEqual(0.618, e, places=2)
sun = astroconst.M_sun
mercury = astroconst.M_sun / constants.Mercury
energy = ellipse.E((0.38709 * u.au).to(u.m), sun, mercury)
self.assertAlmostEqual(-3.817E32, energy.value, delta=1E32)
# 4.14
a = 17.8 * u.au
e = 0.967
q, Q = ellipse.qQ(a, e)
self.assertAlmostEqual(0.031478, ellipse.v_sun(q, a, 0).value, places=5)
self.assertAlmostEqual(0.000528, ellipse.v_sun(Q, a, 0).value, places=5)
self.assertAlmostEqual(54.50, ellipse.v_sun(q, a, 0).to(u.km / u.s).value, places=2)
self.assertAlmostEqual(0.91, ellipse.v_sun(Q, a, 0).to(u.km / u.s).value, places=2)
vq, vQ = ellipse.vqQ_sun(a, e, 0)
self.assertAlmostEqual(0.031478, vq.value, places=2)
self.assertAlmostEqual(0.000528, vQ.value, places=2)
self.assertAlmostEqual(54.50, vq.to(u.km / u.s).value, places=2)
self.assertAlmostEqual(0.91, vQ.to(u.km / u.s).value, places=2)
# 4.15
a = astroconst.R_earth + 560 * u.km
self.assertAlmostEqual(7569.5, ellipse.v(a, a, astroconst.M_earth, 0).value, delta=20)
def test_chapter_5(self):
from CelestialMechanics.mu import mu_sun
# 5.5
t0 = Time('2014-01-03T00:00:00Z', format='isot', scale='utc').jd * u.d + 0.633 * u.d
t1 = Time('2014-04-03T00:00:00Z', format='isot', scale='utc').jd * u.d + 0.9 * u.d
t2 = Time('2014-10-05T00:00:00Z', format='isot', scale='utc').jd * u.d + 0.5 * u.d
a = 1 * u.au
e = 0.01669
r = 1 * u.au
mu = mu_sun(1 / constants.Earth_Moon)
angles = ellipse.angles(a, e, r)
self.assertAlmostEqual(90.9563109612867, angles[0].value)
self.assertAlmostEqual(269.0436890387133, angles[1].value)
delta_t_t0 = delta_t_t0_aeangle(a, e, angles[0], mu) % (1 * u.yr).to(u.d) # module 1 year
self.assertAlmostEqual((t1 - t0).value, delta_t_t0.value, delta=0.1)
delta_t_t0 = delta_t_t0_aeangle(a, e, angles[1], mu) % (1 * u.yr).to(u.d) # module 1 year
self.assertAlmostEqual((t2 - t0).value, delta_t_t0.value, delta=0.1)
# 5.6
a = 17.834144 * u.au
e = 0.967143
angle = 60 * u.deg
mu = mu_sun(0)
delta_t_t0 = delta_t_t0_aeangle(a, e, angle, mu)
self.assertAlmostEqual(23.7573, delta_t_t0.value, places=2)
# 5.7
t0 = Time('2003-10-23T05:57:10Z', format='isot', scale='utc').jd * u.d
t1 = Time('2007-06-20T00:00:00Z', format='isot', scale='utc').jd * u.d
a = 2.56743 * u.au
e = 0.75355
r = 2.325364 * u.au
mu = mu_sun(0)
angles = ellipse.angles(a, e, r)
self.assertAlmostEqual(360 - 226.064389, angles[0].value, places=5)
self.assertAlmostEqual(226.064389, angles[1].value, places=5)
        angle = angles[1]  # r-dot < 0
# inlined ellipse.delta_t_t0_aeangle()
E = ellipse.E_angle(e, angle)
M = ellipse.angle_M_eE(e, E)
from CelestialMechanics.kepler.kepler3 import T_sun
T = T_sun(a, 0) # 1 year (of the minor planet)
delta_t_t0 = ellipse.delta_t_t0_Mn(M, ellipse.n(a, mu)) % T # module 1 year (of the minor planet)
self.assertAlmostEqual(277.187625, E.to(u.deg).value % 360, places=6)
self.assertAlmostEqual(320.023578, M.to(u.deg).value % 360, places=6)
self.assertAlmostEqual(((t1 - t0) % T).value, delta_t_t0.value, places=4)
t0_calculated = t1 - delta_t_t0
# print(Time(t0_calculated, format='jd', scale='utc').isot)
self.assertAlmostEqual(t0.value, t0_calculated.value, places=4)
if __name__ == '__main__':
unittest.main()
| mit | -1,451,791,043,087,483,000 | 40.115385 | 106 | 0.600094 | false |
erangre/Dioptas | dioptas/model/util/PeakShapes.py | 1 | 1278 | # -*- coding: utf-8 -*-
# Dioptas - GUI program for fast processing of 2D X-ray diffraction data
# Principal author: Clemens Prescher ([email protected])
# Copyright (C) 2014-2019 GSECARS, University of Chicago, USA
# Copyright (C) 2015-2018 Institute for Geology and Mineralogy, University of Cologne, Germany
# Copyright (C) 2019 DESY, Hamburg, Germany
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
s2pi = np.sqrt(2 * np.pi)
def gaussian(x, amplitude=1.0, center=0.0, sigma=1.0):
"""1 dimensional gaussian:
gaussian(x, amplitude, center, sigma)
"""
return (amplitude / (s2pi * sigma)) * np.exp(-(1.0 * x - center) ** 2 / (2 * sigma ** 2))
| gpl-3.0 | -9,125,827,370,369,577,000 | 43.068966 | 94 | 0.723005 | false |
scaramallion/pynetdicom3 | setup.py | 1 | 1741 | from setuptools import setup, find_packages
import os
import sys
# Version
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
VERSION_FILE = os.path.join(BASE_DIR, 'pynetdicom', '_version.py')
with open(VERSION_FILE) as fp:
exec(fp.read())
with open("README.rst", "r") as fp:
long_description = fp.read()
setup(
name = "pynetdicom",
packages = find_packages(),
include_package_data = True,
version = __version__,
zip_safe = False,
description = "A Python implementation of the DICOM networking protocol",
long_description = long_description,
long_description_content_type="text/x-rst",
author = "",
author_email = "[email protected]",
url = "https://github.com/pydicom/pynetdicom",
license = "MIT",
keywords = (
"dicom python medicalimaging radiotherapy oncology pydicom imaging"
),
project_urls={
'Documentation' : 'https://pydicom.github.io/pynetdicom/'
},
classifiers = [
"License :: OSI Approved :: MIT License",
"Intended Audience :: Developers",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Science/Research",
#"Development Status :: 4 - Beta",
"Development Status :: 5 - Production/Stable",
"Natural Language :: English",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Medical Science Apps.",
"Topic :: Software Development :: Libraries",
],
install_requires = ["pydicom>=2.0.0"],
)
| mit | 3,179,822,450,583,220,700 | 33.82 | 77 | 0.628374 | false |
qtproject/qt3d | tools/utils/exporters/blender/qt3d_armature_export.py | 2 | 5606 | #############################################################################
##
## Copyright (C) 2017 Klaralvdalens Datakonsult AB (KDAB).
## Contact: https://www.qt.io/licensing/
##
## This file is part of the Qt3D module of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:LGPL$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 3 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL3 included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 3 requirements
## will be met: https://www.gnu.org/licenses/lgpl-3.0.html.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 2.0 or (at your option) the GNU General
## Public license version 3 or any later version approved by the KDE Free
## Qt Foundation. The licenses are as published by the Free Software
## Foundation and appearing in the file LICENSE.GPL2 and LICENSE.GPL3
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-2.0.html and
## https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
# Required Blender information.
bl_info = {
"name": "Qt3D Armature Exporter",
"author": "Sean Harmer <[email protected]>, Robert Brock <[email protected]>",
"version": (0, 2),
"blender": (2, 80, 0),
"location": "File > Export > Qt3D Armature Exporter (.json)",
"description": "Export Armature to json to use with Qt3D",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"category": "Import-Export"
}
import bpy
import os
import struct
import mathutils
import math
import json
from array import array
from bpy import context
from bpy_extras.io_utils import ExportHelper
from bpy.props import (
BoolProperty,
FloatProperty,
StringProperty,
EnumProperty,
)
from collections import defaultdict
def jsonBuilder():
bonesList = []
name = ""
boneParent = ""
ob = bpy.context.object.data
if not hasattr(ob, 'bones'):
return bonesList
for bone in ob.bones:
#check parent exists
if bone.parent:
boneParent = bone.parent.name
else:
boneParent = ""
#add the bones
bonesList.append({"bone": bone.name,
"parent": boneParent,
"matrix": jsonMatrix(bone)
})
return bonesList
def jsonMatrix(bone):
matrix = []
for i in range(0, 4):
matrix.append(str("%.4f, %.4f, %.4f, %.4f" % (bone.matrix_local[i][0],
bone.matrix_local[i][1],
bone.matrix_local[i][2],
bone.matrix_local[i][3])))
return matrix
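# Illustrative note (added for clarity, not part of the exporter logic): the JSON
# produced by Qt3DMeshDataConverter.boneInfoToJson() below has this overall shape,
# with one entry per bone and each matrix row serialized as a comma-separated string:
#
#   {
#     "bones": [
#       {
#         "bone": "Bone",
#         "parent": "",
#         "matrix": ["1.0000, 0.0000, 0.0000, 0.0000",
#                    "0.0000, 1.0000, 0.0000, 0.0000",
#                    "0.0000, 0.0000, 1.0000, 0.0000",
#                    "0.0000, 0.0000, 0.0000, 1.0000"]
#       }
#     ]
#   }
#
# The bone name "Bone" and the identity matrix are placeholder example values.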
class Qt3DMeshDataConverter:
def boneInfoToJson(self):
        # Collect the bone data and serialize it to JSON (single pass over the armature)
print(">>> About to enter Objects")
jsonData = json.dumps({ "bones": jsonBuilder()}, indent=2, sort_keys=True, separators=(',', ': '))
return jsonData
class Qt3DArmatureExporter(bpy.types.Operator, ExportHelper):
"""Qt3D Exporter"""
bl_idname = "export_scene.qt3d_armature_exporter";
bl_label = "Qt3DArmatureExporter";
bl_options = {'PRESET'};
filename_ext = ""
use_filter_folder = True
def __init__(self):
pass
def execute(self, context):
print("In Execute" + bpy.context.scene.name)
self.userpath = self.properties.filepath
# unselect all
bpy.ops.object.select_all(action='DESELECT')
converter = Qt3DMeshDataConverter()
fileContent = converter.boneInfoToJson()
with open(self.userpath + ".json", '+w') as f:
f.write(fileContent)
return {'FINISHED'}
def createBlenderMenu(self, context):
self.layout.operator(Qt3DArmatureExporter.bl_idname, text="Qt3D Armature Export(.json)")
# Register against Blender
def register():
bpy.utils.register_class(Qt3DArmatureExporter)
if bpy.app.version < (2, 80, 0):
bpy.types.INFO_MT_file_export.append(createBlenderMenu)
else:
bpy.types.TOPBAR_MT_file_export.append(createBlenderMenu)
def unregister():
bpy.utils.unregister_class(Qt3DArmatureExporter)
if bpy.app.version < (2, 80, 0):
bpy.types.INFO_MT_file_export.remove(createBlenderMenu)
else:
bpy.types.TOPBAR_MT_file_export.remove(createBlenderMenu)
# Handle running the script from Blender's text editor.
if (__name__ == "__main__"):
register();
    bpy.ops.export_scene.qt3d_armature_exporter();
| lgpl-3.0 | 1,695,850,950,272,810,000 | 32.568862 | 106 | 0.620407 | false |
aroig/metadata-readers | libs/docmeta/utils.py | 1 | 2250 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# docmeta - A python module to extract metadata from document files
# Copyright 2012 Abdó Roig-Maranges <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import sys
import subprocess
class CommandError(Exception):
"""Conversion error"""
def __init__(self, desc, cmdlist, retlist, stderrlist):
Exception.__init__(self, desc)
self.desc = desc
self.cmdlist = cmdlist
self.retlist = retlist
self.stderrlist = stderrlist
print("Command Error !!!")
print(" cmd: %s" % ' | '.join([' '.join(c) for c in self.cmdlist]))
print(" ret: %s" % str(self.retlist))
print(" stderr: %s" % str(self.stderrlist))
def executepipe(cmdlst, outfile=None, checkreturn=True):
N = len(cmdlst)
p = []
for n in range(0,N):
cmd = cmdlst[n]
if n == 0: sin = None
else: sin = plast.stdout
if n < N-1:
sout = subprocess.PIPE
else:
if outfile != None: sout = open(outfile, 'w')
else: sout = subprocess.PIPE
serr = subprocess.PIPE
plast = subprocess.Popen(cmd, stdout=sout, stderr=serr, stdin=sin)
p.append(plast)
ret,err = plast.communicate()
if checkreturn and plast.returncode != 0:
raise CommandError("Command produced errors", cmdlst, plast.returncode, err)
if outfile == None:
if sys.version_info[0] >= 3: return ret.decode('utf-8')
else: return ret
else:
sout.close()
return None
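# Minimal usage sketch for executepipe (illustrative only; 'echo' and 'tr' are
# ordinary Unix commands used here as a hypothetical example):
#
#   out = executepipe([['echo', 'hello'], ['tr', 'a-z', 'A-Z']])
#   # out == 'HELLO\n'; CommandError is raised if the last command in the pipe
#   # exits non-zero and checkreturn is True.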
| gpl-3.0 | 8,381,212,299,770,244,000 | 32.073529 | 84 | 0.618497 | false |
jhamman/xray | xarray/backends/netCDF4_.py | 1 | 13873 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import operator
import numpy as np
from .. import Variable
from ..conventions import pop_to
from ..core import indexing
from ..core.utils import (FrozenOrderedDict, NdimSizeLenMixin,
DunderArrayMixin, close_on_error,
is_remote_uri)
from ..core.pycompat import iteritems, basestring, OrderedDict, PY3
from .common import (WritableCFDataStore, robust_getitem,
DataStorePickleMixin, find_root)
from .netcdf3 import (encode_nc3_attr_value, encode_nc3_variable,
maybe_convert_to_char_array)
# This lookup table maps from dtype.byteorder to a readable endian
# string used by netCDF4.
_endian_lookup = {'=': 'native',
'>': 'big',
'<': 'little',
'|': 'native'}
class BaseNetCDF4Array(NdimSizeLenMixin, DunderArrayMixin):
def __init__(self, variable_name, datastore):
self.datastore = datastore
self.variable_name = variable_name
array = self.get_array()
self.shape = array.shape
dtype = array.dtype
if dtype is str:
# use object dtype because that's the only way in numpy to
# represent variable length strings; it also prevents automatic
# string concatenation via conventions.decode_cf_variable
dtype = np.dtype('O')
self.dtype = dtype
def get_array(self):
self.datastore.assert_open()
return self.datastore.ds.variables[self.variable_name]
class NetCDF4ArrayWrapper(BaseNetCDF4Array):
def __getitem__(self, key):
if self.datastore.is_remote: # pragma: no cover
getitem = functools.partial(robust_getitem, catch=RuntimeError)
else:
getitem = operator.getitem
with self.datastore.ensure_open(autoclose=True):
try:
data = getitem(self.get_array(), key)
except IndexError:
# Catch IndexError in netCDF4 and return a more informative
# error message. This is most often called when an unsorted
# indexer is used before the data is loaded from disk.
msg = ('The indexing operation you are attempting to perform '
'is not valid on netCDF4.Variable object. Try loading '
'your data into memory first by calling .load().')
if not PY3:
import traceback
msg += '\n\nOriginal traceback:\n' + traceback.format_exc()
raise IndexError(msg)
if self.ndim == 0:
# work around for netCDF4-python's broken handling of 0-d
# arrays (slicing them always returns a 1-dimensional array):
# https://github.com/Unidata/netcdf4-python/pull/220
data = np.asscalar(data)
return data
def _nc4_values_and_dtype(var):
if var.dtype.kind == 'U':
# this entire clause should not be necessary with netCDF4>=1.0.9
if len(var) > 0:
var = var.astype('O')
dtype = str
elif var.dtype.kind == 'S':
# use character arrays instead of unicode, because unicode support in
# netCDF4 is still rather buggy
data, dims = maybe_convert_to_char_array(var.data, var.dims)
var = Variable(dims, data, var.attrs, var.encoding)
dtype = var.dtype
elif var.dtype.kind in ['i', 'u', 'f', 'c']:
dtype = var.dtype
else:
raise ValueError('cannot infer dtype for netCDF4 variable')
return var, dtype
def _nc4_group(ds, group, mode):
if group in set([None, '', '/']):
# use the root group
return ds
else:
# make sure it's a string
if not isinstance(group, basestring):
raise ValueError('group must be a string or None')
# support path-like syntax
path = group.strip('/').split('/')
for key in path:
try:
ds = ds.groups[key]
except KeyError as e:
if mode != 'r':
ds = ds.createGroup(key)
else:
# wrap error to provide slightly more helpful message
raise IOError('group not found: %s' % key, e)
return ds
def _ensure_fill_value_valid(data, attributes):
# work around for netCDF4/scipy issue where _FillValue has the wrong type:
# https://github.com/Unidata/netcdf4-python/issues/271
if data.dtype.kind == 'S' and '_FillValue' in attributes:
attributes['_FillValue'] = np.string_(attributes['_FillValue'])
def _force_native_endianness(var):
# possible values for byteorder are:
# = native
# < little-endian
# > big-endian
# | not applicable
# Below we check if the data type is not native or NA
if var.dtype.byteorder not in ['=', '|']:
# if endianness is specified explicitly, convert to the native type
data = var.data.astype(var.dtype.newbyteorder('='))
var = Variable(var.dims, data, var.attrs, var.encoding)
# if endian exists, remove it from the encoding.
var.encoding.pop('endian', None)
# check to see if encoding has a value for endian its 'native'
    if var.encoding.get('endian', 'native') != 'native':
raise NotImplementedError("Attempt to write non-native endian type, "
"this is not supported by the netCDF4 "
"python library.")
return var
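# For reference (illustrative, not executed here): a big-endian dtype such as
# np.dtype('>f8') reports .byteorder == '>', and .newbyteorder('=') converts it
# to the native representation, which is what _force_native_endianness relies on.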
def _extract_nc4_variable_encoding(variable, raise_on_invalid=False,
lsd_okay=True, backend='netCDF4'):
encoding = variable.encoding.copy()
safe_to_drop = set(['source', 'original_shape'])
valid_encodings = set(['zlib', 'complevel', 'fletcher32', 'contiguous',
'chunksizes'])
if lsd_okay:
valid_encodings.add('least_significant_digit')
if (encoding.get('chunksizes') is not None and
(encoding.get('original_shape', variable.shape) !=
variable.shape) and not raise_on_invalid):
del encoding['chunksizes']
for k in safe_to_drop:
if k in encoding:
del encoding[k]
if raise_on_invalid:
invalid = [k for k in encoding if k not in valid_encodings]
if invalid:
raise ValueError('unexpected encoding parameters for %r backend: '
' %r' % (backend, invalid))
else:
for k in list(encoding):
if k not in valid_encodings:
del encoding[k]
return encoding
def _open_netcdf4_group(filename, mode, group=None, **kwargs):
import netCDF4 as nc4
ds = nc4.Dataset(filename, mode=mode, **kwargs)
with close_on_error(ds):
ds = _nc4_group(ds, group, mode)
_disable_mask_and_scale(ds)
return ds
def _disable_mask_and_scale(ds):
for var in ds.variables.values():
# we handle masking and scaling ourselves
var.set_auto_maskandscale(False)
class NetCDF4DataStore(WritableCFDataStore, DataStorePickleMixin):
"""Store for reading and writing data via the Python-NetCDF4 library.
This store supports NetCDF3, NetCDF4 and OpenDAP datasets.
"""
def __init__(self, netcdf4_dataset, mode='r', writer=None, opener=None,
autoclose=False):
if autoclose and opener is None:
raise ValueError('autoclose requires an opener')
_disable_mask_and_scale(netcdf4_dataset)
self.ds = netcdf4_dataset
self._autoclose = autoclose
self._isopen = True
self.format = self.ds.data_model
self._filename = self.ds.filepath()
self.is_remote = is_remote_uri(self._filename)
self._mode = mode = 'a' if mode == 'w' else mode
if opener:
self._opener = functools.partial(opener, mode=self._mode)
else:
self._opener = opener
super(NetCDF4DataStore, self).__init__(writer)
@classmethod
def open(cls, filename, mode='r', format='NETCDF4', group=None,
writer=None, clobber=True, diskless=False, persist=False,
autoclose=False):
if format is None:
format = 'NETCDF4'
opener = functools.partial(_open_netcdf4_group, filename, mode=mode,
group=group, clobber=clobber,
diskless=diskless, persist=persist,
format=format)
ds = opener()
return cls(ds, mode=mode, writer=writer, opener=opener,
autoclose=autoclose)
def open_store_variable(self, name, var):
with self.ensure_open(autoclose=False):
dimensions = var.dimensions
data = indexing.LazilyIndexedArray(NetCDF4ArrayWrapper(name, self))
attributes = OrderedDict((k, var.getncattr(k))
for k in var.ncattrs())
_ensure_fill_value_valid(data, attributes)
# netCDF4 specific encoding; save _FillValue for later
encoding = {}
filters = var.filters()
if filters is not None:
encoding.update(filters)
chunking = var.chunking()
if chunking is not None:
if chunking == 'contiguous':
encoding['contiguous'] = True
encoding['chunksizes'] = None
else:
encoding['contiguous'] = False
encoding['chunksizes'] = tuple(chunking)
# TODO: figure out how to round-trip "endian-ness" without raising
# warnings from netCDF4
# encoding['endian'] = var.endian()
pop_to(attributes, encoding, 'least_significant_digit')
# save source so __repr__ can detect if it's local or not
encoding['source'] = self._filename
encoding['original_shape'] = var.shape
return Variable(dimensions, data, attributes, encoding)
def get_variables(self):
with self.ensure_open(autoclose=False):
dsvars = FrozenOrderedDict((k, self.open_store_variable(k, v))
for k, v in
iteritems(self.ds.variables))
return dsvars
def get_attrs(self):
with self.ensure_open(autoclose=True):
attrs = FrozenOrderedDict((k, self.ds.getncattr(k))
for k in self.ds.ncattrs())
return attrs
def get_dimensions(self):
with self.ensure_open(autoclose=True):
dims = FrozenOrderedDict((k, len(v))
for k, v in iteritems(self.ds.dimensions))
return dims
def get_encoding(self):
with self.ensure_open(autoclose=True):
encoding = {}
encoding['unlimited_dims'] = {
k for k, v in self.ds.dimensions.items() if v.isunlimited()}
return encoding
def set_dimension(self, name, length):
with self.ensure_open(autoclose=False):
self.ds.createDimension(name, size=length)
def set_attribute(self, key, value):
with self.ensure_open(autoclose=False):
if self.format != 'NETCDF4':
value = encode_nc3_attr_value(value)
self.ds.setncattr(key, value)
def set_variables(self, *args, **kwargs):
with self.ensure_open(autoclose=False):
super(NetCDF4DataStore, self).set_variables(*args, **kwargs)
def prepare_variable(self, name, variable, check_encoding=False,
unlimited_dims=None):
attrs = variable.attrs.copy()
variable = _force_native_endianness(variable)
if self.format == 'NETCDF4':
variable, datatype = _nc4_values_and_dtype(variable)
else:
variable = encode_nc3_variable(variable)
datatype = variable.dtype
self.set_necessary_dimensions(variable, unlimited_dims=unlimited_dims)
fill_value = attrs.pop('_FillValue', None)
if fill_value in ['', '\x00']:
# these are equivalent to the default FillValue, but netCDF4
# doesn't like setting fill_value to an empty string
fill_value = None
encoding = _extract_nc4_variable_encoding(
variable, raise_on_invalid=check_encoding)
nc4_var = self.ds.createVariable(
varname=name,
datatype=datatype,
dimensions=variable.dims,
zlib=encoding.get('zlib', False),
complevel=encoding.get('complevel', 4),
shuffle=encoding.get('shuffle', True),
fletcher32=encoding.get('fletcher32', False),
contiguous=encoding.get('contiguous', False),
chunksizes=encoding.get('chunksizes'),
endian='native',
least_significant_digit=encoding.get('least_significant_digit'),
fill_value=fill_value)
nc4_var.set_auto_maskandscale(False)
for k, v in iteritems(attrs):
# set attributes one-by-one since netCDF4<1.0.10 can't handle
# OrderedDict as the input to setncatts
nc4_var.setncattr(k, v)
return nc4_var, variable.data
def sync(self):
with self.ensure_open(autoclose=True):
super(NetCDF4DataStore, self).sync()
self.ds.sync()
def close(self):
if self._isopen:
# netCDF4 only allows closing the root group
ds = find_root(self.ds)
if ds._isopen:
ds.close()
self._isopen = False
| apache-2.0 | -2,989,005,801,748,905,500 | 36.904372 | 79 | 0.579975 | false |
mvtuong/mysite | v1/blog/migrations/0002_auto_20150708_1454.py | 1 | 1092 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='blog',
name='file',
field=models.FileField(upload_to=b'user_upload/files/', blank=True),
),
migrations.AddField(
model_name='blog',
name='image',
field=models.ImageField(upload_to=b'user_upload/images/', blank=True),
),
migrations.AddField(
model_name='tag',
name='description',
field=models.TextField(max_length=500, blank=True),
),
migrations.AddField(
model_name='topic',
name='description',
field=models.TextField(max_length=500, blank=True),
),
migrations.AlterField(
model_name='blog',
name='tag',
field=models.ManyToManyField(to='blog.Tag', blank=True),
),
]
| apache-2.0 | 8,163,413,012,375,880,000 | 27 | 82 | 0.540293 | false |
arnikz/EMF_data_mining | DEA/mzscore.py | 1 | 12015 | #!/usr/bin/env python
#
# This script takes a database (SQLite) obtained from the PIQMIe service and populates
# additional tables/views to facilitate differential protein expression analyses based
# on standardized SILAC ratios.
#
# Note:
# z_score_{raw|norm}_ratio - column with canonical Z-score transformed raw/normalized
# SILAC protein ratios
#
# mz_score_{raw|norm}_ratio - column with modified Z-score transformed SILAC protein ratios
# suitable for heavy-tailed data (Iglewicz and Hoaglin, 1993)
#
#
# Author: Arnold Kuzniar
#
# Version: 1.0
#
import os
import sys
import argparse as argp
import math
import numpy as np
import scipy.stats as st
import sqlite3 as sqlt
import collections as cls
ratio_types = { # lookup to link column values to column names
'RATIO H/L': 'raw_ratio_HL',
'RATIO H/M': 'raw_ratio_HM',
'RATIO M/L': 'raw_ratio_ML',
'RATIO H/L NORMALIZED': 'norm_ratio_HL',
'RATIO H/M NORMALIZED': 'norm_ratio_HM',
'RATIO M/L NORMALIZED': 'norm_ratio_ML'
}
score_types = { # lookup to link user input to table column
'Zr' : 'z_score_raw_ratio',
'Zn' : 'z_score_norm_ratio',
'Mr' : 'm_score_raw_ratio',
'Mn' : 'm_score_norm_ratio'
}
# parse command-line args
parser = argp.ArgumentParser(
description = 'Differential analysis of SILAC protein ratios based on standardized scores.')
parser.add_argument(
'-n',
action = 'store_true',
dest = 'new_tabs',
help = 'populate new db tables with (modified) Z-scores')
parser.add_argument(
'-d',
dest = 'dts',
required = True,
choices = ['VH10', 'U2OS', 'IB10'],
help = 'select one of the data sets or cell lines')
parser.add_argument(
'-s',
required = True,
choices = score_types.keys(),
help = 'select one of the score types for filtering: Z*,M* - Z-score or modified Z-score; *r,*n - score based on raw or normalized SILAC protein ratios')
parser.add_argument(
'-c',
required = True,
dest = 'cutoff',
type = float,
help = 'absolute score cutoff (e.g. 1.65, 1.96 or 2.58)')
parser.add_argument(
'-o',
dest = 'outfile',
help = 'output file')
parser.add_argument(
'dbfile',
help = 'sqlite3 database file')
args = parser.parse_args()
# check user input
dbfile = args.dbfile
outfile = args.outfile
new_tabs = args.new_tabs
dts = args.dts
stype = args.s
cutoff = args.cutoff
score_type = None
if os.path.isfile(dbfile) is False:
parser.error("dbfile '%s' not found" % dbfile)
if stype and cutoff:
score_type = score_types[stype]
else:
parser.error('-s and -c args must be used together')
if outfile is None:
# set the default output filename
outfile = os.path.join(os.path.dirname(dbfile), '%s_mzscore_%s_%.2f.tab' % (dts, stype, cutoff))
if cutoff < 0:
parser.error('the absolute score cutoff must be a positive value')
# print info into STDOUT
print """
dbfile = %s
outfile = %s
dataset = %s
re-score = %s
score type = %s
score cutoff = %.2f
""" % (dbfile, outfile, dts, new_tabs, stype, cutoff)
# sqlite3 user-defined functions (UDFs)
def log(value, base):
try:
return math.log(value) / math.log(base)
except:
return None
def sqrt(value):
try:
return math.sqrt(value)
except:
return None
def pvalue(score): # convert Z- or M-score to two-tailed probability (P-value)
try:
return 2 * st.norm.cdf(-abs(score))
except:
return None
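# For example, pvalue(1.96) is roughly 0.05 and pvalue(2.58) is roughly 0.01,
# i.e. the usual two-tailed significance levels for a standard normal score.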
class Stdev: # sample standard deviation (aggregate function)
def __init__(self):
self.vec = []
def step(self, value):
self.vec.append(value)
def finalize(self):
return np.array(self.vec).std(ddof=1)
class Median: # median (aggregate function)
def __init__(self):
self.arr = []
def step(self, value):
self.arr.append(value)
def finalize(self):
return np.median(np.array(self.arr))
class Mad: # median absolute deviation (aggregate function)
def __init__(self):
self.arr = []
def step(self, value):
self.arr.append(value)
def finalize(self):
median = np.median(np.array(self.arr))
return np.median(np.abs(self.arr - median))
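# Illustrative helper (added as a sketch; not called anywhere in this script):
# the same modified Z-score that the SQL below computes as
# 0.6745 * (log2(ratio) - median) / MAD (Iglewicz and Hoaglin, 1993).
def modified_zscore_example(values):
    arr = np.asarray(values, dtype=float)      # 1-D array of log2-transformed ratios
    med = np.median(arr)                       # robust estimate of location
    mad = np.median(np.abs(arr - med))         # robust estimate of scale (MAD)
    return 0.6745 * (arr - med) / mad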
# SQL statements to populate tables/views
sql_create_tables = """
DROP VIEW IF EXISTS V_PGROUP_RATIO;
CREATE VIEW V_PGROUP_RATIO AS
-- simplifies the selection of SILAC ratios/types
SELECT
A.grp_id,
exp_name,
CAST(CASE %s
END AS TEXT) AS ratio_type,
CAST(quant_value AS NUMERIC) AS ratio_value
FROM
PGROUP_QUANT A, V_PGROUP B
WHERE
A.grp_id = B.grp_id
AND quant_type IN ('%s')
AND quant_value;
DROP TABLE IF EXISTS PGROUP_LOG2RATIO_STAT;
CREATE TABLE PGROUP_LOG2RATIO_STAT AS
-- stores descriptive statistics on SILAC protein ratios for each experiment
SELECT
exp_name,
ratio_type,
CAST(COUNT(ratio_value) AS INT) AS n,
CAST(MIN(LOG(ratio_value, 2)) AS NUMERIC) AS min,
CAST(MAX(LOG(ratio_value, 2)) AS NUMERIC) AS max,
CAST(AVG(LOG(ratio_value, 2)) AS NUMERIC) AS mean,
CAST(MEDIAN(LOG(ratio_value, 2)) AS NUMERIC) AS median,
CAST(STDEV(LOG(ratio_value, 2)) AS NUMERIC) AS sd,
CAST(MAD(LOG(ratio_value, 2)) AS NUMERIC) AS mad
FROM
V_PGROUP_RATIO
GROUP BY
exp_name, ratio_type;
CREATE INDEX idx_PGROUP_LOG2RATIO_STAT_exp_name_ratio_type ON PGROUP_LOG2RATIO_STAT(exp_name, ratio_type);
DROP VIEW IF EXISTS V_PGROUP_LOG2RATIO_STAT;
CREATE VIEW V_PGROUP_LOG2RATIO_STAT AS
-- shows rounded values of the statistics
SELECT
exp_name,
ratio_type,
n,
ROUND(min, 4) AS min,
ROUND(max, 4) AS max,
ROUND(mean, 4) AS mean,
ROUND(median, 4) AS median,
ROUND(sd, 4) AS sd,
ROUND(mad, 4) AS mad
FROM
PGROUP_LOG2RATIO_STAT;
DROP TABLE IF EXISTS PGROUP_MZSCORE;
CREATE TABLE PGROUP_MZSCORE AS
-- stores (modified) Z-score transformed SILAC protein raw/norm ratios
SELECT
grp_id,
A.exp_name AS exp_name,
CAST(A.ratio_type AS TEXT) AS ratio_type,
CAST((LOG(ratio_value, 2) - mean) / sd AS NUMERIC) AS z_score,
CAST(0.6745 * (LOG(ratio_value, 2) - median) / mad AS NUMERIC) AS m_score
FROM
V_PGROUP_RATIO A, PGROUP_LOG2RATIO_STAT B
WHERE
A.exp_name = B.exp_name
AND A.ratio_type = B.ratio_type;
CREATE INDEX idx_PGROUP_MZSCORE_grp_id ON PGROUP_MZSCORE(grp_id);
CREATE INDEX idx_PGROUP_MZSCORE_exp_name_ratio_type ON PGROUP_MZSCORE(exp_name, ratio_type);
""" % (' '.join([ "\n\tWHEN quant_type='%s' THEN '%s'" % (k, v) for (k, v) in ratio_types.iteritems() ]),
"','".join(ratio_types.keys()))
# dynamically construct SQL query to select diff. reg. protein groups
sql_sel_pgrps = """
SELECT
A.grp_id grp_id,
IFNULL(GROUP_CONCAT(DISTINCT gene), '-') genes,
{dts}_L0_M0_H1_{score_type}_HL '{stype}_H1L0', -- Z or M-score ON/OFF (treat1)
{dts}_L1_M1_H0_{score_type}_LH '{stype}_L1H0', -- Z or M-score ON/OFF (treat2)
{dts}_L0_M0_H1_{score_type}_HM '{stype}_H1M0', -- Z or M-score ON/OFF (treat3)
{dts}_L1_M1_H0_{score_type}_MH '{stype}_M1H0', -- Z or M-score ON/OFF (treat4)
{dts}_L0_M0_H1_{score_type}_LM '{stype}_L0M0', -- Z or M-score OFF/OFF (ctrl1)
{dts}_L1_M1_H0_{score_type}_LM '{stype}_L1M1', -- Z or M-score ON/ON (ctrl2)
PVALUE({dts}_L0_M0_H1_{score_type}_HL) 'pval_H1L0', -- P-value ON/OFF (treat1)
PVALUE({dts}_L1_M1_H0_{score_type}_LH) 'pval_L1H0', -- P-value ON/OFF (treat2)
PVALUE({dts}_L0_M0_H1_{score_type}_HM) 'pval_H1M0', -- P-value ON/OFF (treat3)
PVALUE({dts}_L1_M1_H0_{score_type}_MH) 'pval_M1H0', -- P-value ON/OFF (treat4)
PVALUE({dts}_L0_M0_H1_{score_type}_LM) 'pval_L0M0', -- P-value OFF/OFF (ctrl1)
PVALUE({dts}_L1_M1_H0_{score_type}_LM) 'pval_L1M1' -- P-value ON/ON (ctrl2)
FROM
V_PGROUP_MZSCORE A, PROT2GRP B, V_PROTEIN C
WHERE
A.grp_id = B.grp_id
AND B.prot_acc = C.acc
AND (({dts}_L0_M0_H1_{score_type}_HL > {cutoff}
AND {dts}_L0_M0_H1_{score_type}_HM > {cutoff}
AND {dts}_L1_M1_H0_{score_type}_LH > {cutoff}
AND {dts}_L1_M1_H0_{score_type}_MH > {cutoff})
OR ({dts}_L0_M0_H1_{score_type}_LH > {cutoff}
AND {dts}_L0_M0_H1_{score_type}_MH > {cutoff}
AND {dts}_L1_M1_H0_{score_type}_HL > {cutoff}
AND {dts}_L1_M1_H0_{score_type}_HM > {cutoff}))
AND {dts}_L0_M0_H1_{score_type}_ML <= {cutoff}
AND {dts}_L0_M0_H1_{score_type}_LM <= {cutoff}
AND {dts}_L1_M1_H0_{score_type}_ML <= {cutoff}
AND {dts}_L1_M1_H0_{score_type}_LM <= {cutoff}
GROUP BY A.grp_id;
""".format(dts=dts, score_type=score_type, stype=stype, cutoff=cutoff)
# connect to db
with sqlt.connect(args.dbfile) as conn:
conn.row_factory = sqlt.Row # enable column access by name: row['colnm']
conn.create_function('log', 2, log)
conn.create_function('sqrt', 1, sqrt)
conn.create_function('pvalue', 1, pvalue)
conn.create_aggregate('stdev', 1, Stdev)
conn.create_aggregate('median', 1, Median)
conn.create_aggregate('mad', 1, Mad)
cur = conn.cursor()
if new_tabs is True: # populate tables/views only with -n option
cur.executescript(sql_create_tables)
cur.execute('SELECT DISTINCT exp_name FROM EXPERIMENT')
exp_names = [ str(r[0]) for r in cur.fetchall() ]
cur.execute("SELECT DISTINCT ratio_type FROM PGROUP_LOG2RATIO_STAT")
ratio_types = [ str(r[0]) for r in cur.fetchall() ]
n = len(exp_names) * len(ratio_types)
i = 0
comma = ','
# create view for selecting diff. reg. proteins
sql_create_view = """
DROP VIEW IF EXISTS V_PGROUP_MZSCORE;
CREATE VIEW V_PGROUP_MZSCORE AS
SELECT
grp_id,
"""
for e in exp_names:
for r in ratio_types:
i += 1
rr = r[:-2] + r[-2:][::-1] # add inverse ratio (e.g., {raw|norm}_ratio_HL for *_ratio_LH)
if i == n: comma = ''
sql_create_view += "\tROUND(CAST(GROUP_CONCAT(CASE WHEN exp_name = '{exp}' AND ratio_type = '{ratio}' THEN z_score ELSE NULL END) AS NUMERIC), 4) AS '{exp}_z_score_{ratio}',\n".format(exp=e, ratio=r)
sql_create_view += "\tROUND(CAST(GROUP_CONCAT(CASE WHEN exp_name = '{exp}' AND ratio_type = '{ratio}' THEN -1 * z_score ELSE NULL END) AS NUMERIC), 4) AS '{exp}_z_score_{iratio}',\n".format(exp=e, ratio=r, iratio=rr)
sql_create_view += "\tROUND(CAST(GROUP_CONCAT(CASE WHEN exp_name = '{exp}' AND ratio_type = '{ratio}' THEN m_score ELSE NULL END) AS NUMERIC), 4) AS '{exp}_m_score_{ratio}',\n".format(exp=e, ratio=r)
sql_create_view += "\tROUND(CAST(GROUP_CONCAT(CASE WHEN exp_name = '{exp}' AND ratio_type = '{ratio}' THEN -1 * m_score ELSE NULL END) AS NUMERIC), 4) AS '{exp}_m_score_{iratio}'{comma}\n".format(exp=e, ratio=r, iratio=rr, comma=comma)
sql_create_view += "FROM PGROUP_MZSCORE GROUP BY grp_id"
cur.executescript(sql_create_view)
# write results onto tab-delim file
if dts is not None:
sep = '\t' # column separator
n_pgrps = 0 # count diff. reg. protein groups
with open(outfile, 'w+') as fout:
try:
for drow in [ cls.OrderedDict(xi) for xi in cur.execute(sql_sel_pgrps) ]:
# first output column names
if n_pgrps == 0:
header = sep.join(drow.keys()) + os.linesep
fout.write(header)
# output remaining rows with column values (grp_id, Z-/M-scores and P-values)
row = drow.values()
grp_id = str(drow['grp_id'])
genes = str(drow['genes'])
scores = [ str(round(float(x), 4)) for x in row[2:] ]
srow = grp_id + sep + genes + sep + sep.join(scores) + os.linesep
fout.write(srow)
n_pgrps += 1
except sqlt.OperationalError as e:
sys.stderr.write('Error: Selected data set not found: %s\n' % e)
sys.exit(1)
# remove empty outfile
if os.path.getsize(outfile) == 0:
            print 'Nothing to write to the output file.'
os.remove(outfile)
else:
print 'Ndiff =', n_pgrps
| gpl-3.0 | 7,526,957,426,108,315,000 | 33.426934 | 247 | 0.627549 | false |
mupi/escolamupi | timtec/settings.py | 1 | 18426 | # -*- coding: utf-8 -*-
# Django settings for timtec project.
from django.utils.translation import ugettext_lazy as _
import os
SETTINGS_DIR = os.path.dirname(os.path.realpath(__file__))
PROJECT_ROOT = os.path.dirname(SETTINGS_DIR)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
SITE_ID = 1
SITE_HOME = '/'
SITE_NAME = 'Escola Mupi'
SITE_DOMAIN = 'escolamupi.com.br'
SITE_URL = 'http://escolamupi.com.br'
ADMINS = (
('mupi', '[email protected]'),
)
MANAGERS = (ADMINS[0],)
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_HOST = 'localhost'
DEFAULT_FROM_EMAIL = '[email protected]'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'timtec.sqlite', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
METRON_SETTINGS = {
"google": {
1: "set-your-google-analytics-key-here",
},
}
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/my-courses'
AUTHENTICATION_BACKENDS = ("django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend")
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['localhost']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Sao_Paulo'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'mupi'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
LANGUAGES = (
('mupi', _('Mupi translation')),
)
LOCALE_PATHS = (
os.path.join(PROJECT_ROOT, 'locale'),
)
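# Note: 'mupi' above is a project-specific locale. With the standard Django layout
# (an assumption about this project's tree), its message catalog would live under
# <PROJECT_ROOT>/locale/mupi/LC_MESSAGES/django.po, compiled to django.mo.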
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
#MEDIA_ROOT = '~/tmp/escola/media'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
#STATIC_ROOT = '~/tmp/escola/static'
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static2')
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, "static"),
os.path.join(PROJECT_ROOT, 'bower_components'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'pipeline.finders.FileSystemFinder',
# 'pipeline.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
'pipeline.finders.CachedFileFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
PIPELINE_JS_COMPRESSOR = 'timtec.ngminyuglify.NgminYuglifyCompressor'
PIPELINE_COMPILERS = (
'pipeline.compilers.less.LessCompiler',
)
PIPELINE_CSS = {
'common': {
'source_filenames': (
'fonts/teuton/stylesheet.css',
'fonts/questrial/stylesheet.css',
'font-awesome/css/font-awesome.css',
'codemirror/lib/codemirror.css',
'codemirror/addon/hint/show-hint.css',
'codemirror/theme/monokai.css',
'css/codemirrorconf.css',
),
'output_filename': 'css/common.css',
'extra_context': {
'media': 'screen,projection,print',
},
},
'public': {
'source_filenames': (
'css/mupi.less',
),
'output_filename': 'css/public.css',
'extra_context': {
'media': 'screen,projection,print',
},
},
'admin': {
'source_filenames': (
'css/mupi-admin.less',
),
'output_filename': 'css/admin.css',
'extra_context': {
'media': 'screen,projection,print',
},
'variant': 'datauri',
},
}
PIPELINE_JS = {
'all': {
'source_filenames': (
'modernizr/modernizr.js',
'jquery/dist/jquery.js',
'jquery-ui/ui/jquery-ui.js',
'jquery-ui/ui/jquery.ui.sortable.js',
'bootstrap/dist/js/bootstrap.js',
'angular/angular.js',
'angular-animate/angular-animate.js',
'angular-cookies/angular-cookies.js',
'angular-resource/angular-resource.js',
'angular-route/angular-route.js',
'angular-sanitize/angular-sanitize.js',
'angular-bootstrap/ui-bootstrap-tpls.js',
'angular-gettext/dist/angular-gettext.js',
'js/consolelogfallback.js',
'js/django.js',
'js/contact_form.js',
'js/helpers.js',
'js/angular-youtube.js',
'js/truncate.js',
'js/layout.js',
'markdown/lib/markdown.js',
'mupi/js/plans-boxes.js',
'mupi/js/courses-tab.js',
),
'output_filename': 'js/all.js',
},
'markdown': {
'source_filenames': (
'js/vendor/pagedown/Markdown.Converter.js',
'js/vendor/pagedown/Markdown.Editor.js',
'js/vendor/pagedown/Markdown.Sanitizer.js',
'js/markdown/app.js',
'js/markdown/filters.js',
),
'output_filename': 'js/markdown.js',
},
'messages': {
'source_filenames': (
'js/messages/app.js',
'js/messages/controllers.js',
'js/messages/services.js',
'checklist-model/checklist-model.js',
'js/markdown/app.js',
'js/markdown/filters.js',
),
'output_filename': 'js/messages.js',
},
'codemirror': {
'source_filenames': (
'codemirror/lib/codemirror.js',
'codemirror/addon/fold/xml-fold.js',
'codemirror/addon/hint/show-hint.js',
'codemirror/addon/hint/xml-hint.js',
'codemirror/addon/hint/html-hint.js',
'codemirror/addon/hint/css-hint.js',
'codemirror/addon/hint/javascript-hint.js',
'codemirror/addon/edit/matchbrackets.js',
'codemirror/addon/edit/closebrackets.js',
'codemirror/addon/edit/matchtags.js',
'codemirror/mode/xml/xml.js',
'codemirror/mode/css/css.js',
'codemirror/mode/javascript/javascript.js',
'codemirror/mode/htmlmixed/htmlmixed.js',
'codemirror/mode/clike/clike.js',
'codemirror/mode/php/php.js',
# 'js/codemirrorconf.js',
'js/vendor/angular-ui-codemirror/ui-codemirror.js',
),
'output_filename': 'js/codemirrorcomp.js',
},
'markdown_editor': {
'source_filenames': (
'js/vendor/pagedown/Markdown.Converter.js',
'js/vendor/pagedown/Markdown.Editor.js',
'js/vendor/pagedown/Markdown.Sanitizer.js',
),
'output_filename': 'js/markdown_editor.js',
},
'lesson': {
'source_filenames': (
'js/activities/app.js',
'js/activities/controllers.js',
'js/activities/directives.js',
'js/activities/services.js',
'js/lesson/app.js',
'js/lesson/controllers.js',
'js/lesson/services.js',
'js/directives/markdowneditor.js',
'js/directives/codemirror.js',
'js/markdown/app.js',
'js/markdown/filters.js',
),
'output_filename': 'js/lesson.js',
},
'course_material': {
'source_filenames': (
'js/course_material/app.js',
'js/course_material/controllers.js',
'js/course_material/directives.js',
'js/course_material/filters.js',
'js/course_material/services.js',
'dropzone/downloads/dropzone.js',
),
'output_filename': 'js/course_material.js',
},
'forum': {
'source_filenames': (
'js/forum/app.js',
'js/forum/controllers.js',
'js/forum/directives.js',
'js/forum/filters.js',
'js/forum/services.js',
'js/truncate.js',
),
'output_filename': 'js/forum.js',
},
'notes': {
'source_filenames': (
'js/notes/app.js',
'js/notes/controllers.js',
'js/notes/services.js',
),
'output_filename': 'js/notes.js',
},
'admin_course_header': {
'source_filenames': (
'js/admin-header/app.js',
'js/admin-header/controllers.js',
'js/factories/timtec-models.js',
),
'output_filename': 'js/admin_course_header.js',
},
'reports': {
'source_filenames': (
'js/reports/app.js',
'js/reports/controllers.js',
'js/reports/services.js',
),
'output_filename': 'js/reports.js',
},
'core': {
'source_filenames': (
'js/core/app.js',
'js/core/controllers.js',
'js/core/services.js',
'angular-tweet-filter/index.js',
),
'output_filename': 'js/core.js',
},
}
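# Templates reference the bundles defined above by their keys. Depending on the
# installed django-pipeline version this looks roughly like one of the following
# (an assumption, not verified against this project's templates):
#   {% load compressed %} ... {% compressed_css 'public' %} {% compressed_js 'all' %}
# or, on newer pipeline releases:
#   {% load pipeline %} ... {% stylesheet 'public' %} {% javascript 'all' %}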
MOMMY_CUSTOM_FIELDS_GEN = {
'jsonfield.JSONField': lambda: '{}',
}
REST_FRAMEWORK = {
# Use hyperlinked styles by default.
# Only used if the `serializer_class` attribute is not set on a view.
'DEFAULT_MODEL_SERIALIZER_CLASS':
'rest_framework.serializers.HyperlinkedModelSerializer',
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissions'
],
'DEFAULT_FILTER_BACKENDS': [
'rest_framework.filters.DjangoFilterBackend'
],
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
)
}
APPEND_SLASH = True
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'e%6a01vfbue28$xxssu!9r_)usqjh817((mr+7vv3ek&@#p0!$'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, 'administration/templates/mupi'),
os.path.join(PROJECT_ROOT, 'core/templates/mupi'),
os.path.join(PROJECT_ROOT, 'accounts/templates/mupi'),
os.path.join(PROJECT_ROOT, 'forum/templates/mupi'),
os.path.join(PROJECT_ROOT, 'notes/templates/mupi'),
os.path.join(PROJECT_ROOT, 'payments/templates/mupi'),
#os.path.join(PROJECT_ROOT, 'administration/templates'),
#os.path.join(PROJECT_ROOT, 'core/templates'
)
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP
TEMPLATE_CONTEXT_PROCESSORS = TCP + (
'django.core.context_processors.request',
'allauth.account.context_processors.account',
'allauth.socialaccount.context_processors.socialaccount',
'core.context_processors.contact_form',
'core.context_processors.site_settings',
'timtec.locale_context_processor.locale',
'django.core.context_processors.media',
'payments.context_processors.test_user_paid',
)
# Django Suit configuration example
SUIT_CONFIG = {
# header
'ADMIN_NAME': 'Timtec Admin',
'HEADER_DATE_FORMAT': 'l, j. F Y',
'HEADER_TIME_FORMAT': 'H:i',
# forms
# 'SHOW_REQUIRED_ASTERISK': True, # Default True
# 'CONFIRM_UNSAVED_CHANGES': True, # Default True
# menu
'SEARCH_URL': '/admin/accounts/timtecuser/',
# 'MENU_ICONS': {
# 'sites': 'icon-leaf',
# 'auth': 'icon-lock',
# },
# 'MENU_OPEN_FIRST_CHILD': True, # Default True
# 'MENU_EXCLUDE': ('auth.group',),
# 'MENU': (
# 'sites',
# {'app': 'auth', 'icon':'icon-lock', 'models': ('user', 'group')},
# {'label': 'Settings', 'icon':'icon-cog', 'models': ('accounts.TimtecUser', 'auth.group')},
# # {'label': 'Support', 'icon':'icon-question-sign', 'url': '/support/'},
# ),
# misc
# 'LIST_PER_PAGE': 15
}
AUTH_USER_MODEL = 'accounts.TimtecUser'
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
# 'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'timtec.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'timtec.wsgi.application'
SOUTH_MIGRATION_MODULES = {
'easy_thumbnails': 'easy_thumbnails.south_migrations',
}
INSTALLED_APPS = (
'django_extensions',
'south',
'pipeline',
'suit',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.flatpages',
'django.contrib.admin',
'filer',
'mptt',
'easy_thumbnails',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
'rest_framework',
'rosetta',
'autoslug',
# TIM Tec
'core',
'accounts',
'activities',
'administration',
'forum',
'course_material',
'notes',
'reports',
'payments',
# django-metron
'metron',
# allauth
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.facebook',
'django_markdown',
'paypal.standard.ipn',
# raven has to be the last one
'raven.contrib.django.raven_compat',
)
PAYPAL_TEST = True
PAYPAL_RECEIVER_EMAIL = "[email protected]"
PAYPAL_SUBSCRIPTION_SANDBOX_IMAGE = "https://www.paypalobjects.com/pt_BR/BR/i/btn/btn_subscribeCC_LG.gif"
PAYPAL_SUBSCRIPTION_IMAGE = "https://www.paypalobjects.com/pt_BR/BR/i/btn/btn_subscribeCC_LG.gif"
PAYPAL_IMAGE = "https://www.paypalobjects.com/pt_BR/BR/i/btn/btn_subscribeCC_LG.gif"
PAYPAL_SANDBOX_IMAGE = "http://escolamupi.com.br/static/img/logo.5d174d245c09.png"
#PAYPAL_DONATION_SANDBOX_IMAGE = "https://www.paypalobjects.com/pt_BR/BR/i/btn/btn_subscribeCC_LG.gif"
#PAYPAL_DONATION_IMAGE = "https://www.paypalobjects.com/pt_BR/BR/i/btn/btn_subscribeCC_LG.gif"
# PAYPAL_RECEIVER_EMAIL = "[email protected]"
SOCIALACCOUNT_PROVIDERS = {
'facebook': {
'SCOPE': ['email', 'publish_stream'],
'AUTH_PARAMS': {'auth_type': 'reauthenticate'},
'METHOD': 'oauth2',
}
}
if DEBUG:
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
INSTALLED_APPS += (
'debug_toolbar',
)
INTERNAL_IPS = ('127.0.0.1', )
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
}
# django-registration flag
# ACCOUNT_ACTIVATION_DAYS = 7
ACCOUNT_EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL = "/accounts/payment"
REGISTRATION_DEFAULT_GROUP_NAME = 'students'
ACCOUNT_ADAPTER = "accounts.adapter.TimtecAdapter"
ACCOUNT_UNIQUE_EMAIL = True
ACCOUNT_AUTHENTICATION_METHOD = "username_email"
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 7
ACCOUNT_EMAIL_REQUIRED = True
#ACCOUNT_EMAIL_VERIFICATION =
ACCOUNT_EMAIL_SUBJECT_PREFIX = "[Escola Mupi] "
ACCOUNT_SIGNUP_FORM_CLASS = 'accounts.forms.SignupForm'
SOCIALACCOUNT_EMAIL_VERIFICATION = False
TWITTER_CONSUMER_KEY = ''
TWITTER_CONSUMER_SECRET = ''
TWITTER_ACESS_TOKEN = ''
TWITTER_ACESS_TOKEN_SECRET = ''
TWITTER_USER = ''
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
try:
from .settings_local import *
except ImportError:
pass
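# A minimal settings_local.py sketch that the import above would pick up for
# per-deployment overrides (all values below are hypothetical examples):
#
#   DEBUG = False
#   ALLOWED_HOSTS = ['escolamupi.com.br']
#   EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
#   DATABASES = {'default': {'ENGINE': 'django.db.backends.postgresql_psycopg2',
#                            'NAME': 'timtec', 'USER': 'timtec',
#                            'PASSWORD': '...', 'HOST': 'localhost', 'PORT': ''}}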
| agpl-3.0 | -4,608,623,161,978,571,000 | 31.045217 | 127 | 0.622381 | false |
brando56894/Dungeon-Quest | player.py | 1 | 8551 | #!/usr/bin/python2
#
#~~Player Functions~~
from superRandom import superRandint, superChoice
from time import sleep
import actions
import monsters
class CreatePlayer(object):
def __init__(self, name):
self.health = 125
self.xp = 0 #TODO: use gained XP to gain levels
self.potions = 0
self.gold = 0
self.weapons = ["dagger"]
self.name = name
self.steps = 0
self.damage_dealt = 12 #not needed
self.current_weapon = "dagger"
self.dragon_attack = False
self.basilisk_attack = False
self.has_sword = False
self.has_pistol = False
self.has_rifle = False
self.run_away = 0
self.has_key = False
self.turns = 0
def __repr__(self):
return ("\nName: %s\nHealth: %d\nXP: %d\nPotions: "
"%d\nGold: %d\nWeapons: %s\nSteps: %d\nCurr"
"ent Weapon: %s\nDragon Attack: %s\nBasili"
"skAttack: %s\nHas Sword: %s\nHas Pistol: "
"%s\nHas Rifle: %s\nTimes Run Away: %d\nHa"
"s Key: %s\nTurns: %s" % (self.name,self.health,self.xp,
self.potions,self.gold,self.weapons,
self.steps,self.current_weapon,
self.dragon_attack,self.basilisk_attack,
self.has_sword,self.has_pistol,self.has_rifle,
self.run_away,self.has_key,self.turns)
)
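	# Rough usage sketch (the surrounding game loop in actions.py normally drives
	# these calls; 'Conan' is just an example name):
	#   hero = CreatePlayer('Conan')
	#   hero.find_gold()
	#   hero.set_current_weapon()
	#   print hero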
def find_gold(self):
amount = superRandint(1,25)
self.gold += amount
print "\nYou found %d gold coins, which brings you to a total of %d coins!" % (amount, self.gold)
sleep(2)
return self
def find_gold_debug(self,amount):
self.gold += amount
print "\nYou found %d gold coins, which brings you to a total of %d coins!" % (amount, self.gold)
sleep(2)
return self
def find_potions(self):
self.potions += 1
print "\nYou found a health potion! You now have %d potions in your inventory." % self.potions
sleep(2)
return self
	def find_weapon(self):
		#TODO: add more weapons
		weapons = ["sword","pistol","rifle"]
		found = superChoice(weapons)
		print "\nYou found a %s!" % found
		if found == "sword":
			self.has_sword = True
		elif found == "pistol":
			self.has_pistol = True
		else:
			self.has_rifle = True
		#record the find so the weapons list stays in sync everywhere
		if found not in self.weapons:
			self.weapons.append(found)
		return self
def buy_potions(self):
print "\nGold: %d" % self.gold
print "Each potion costs 20 gold pieces and restores 25 HP."
amount = raw_input("\nHow many would you like to purchase? ")
cost = int(amount) * 20
if self.gold >= int(cost):
self.gold = self.gold - int(cost)
self.potions += int(amount)
print "\n%d potions have been added to your inventory." % int(amount)
sleep(2)
return self
else:
print "\nSorry you don't have enough gold for %d potions!" % int(amount)
sleep(2)
actions.visit_shop(self)
def use_potion(self):
if self.potions > 0 and self.potions < 2:
self.potions -= 1
self.health += 25
print "\nYour health is now at %d" % self.health
elif self.potions > 1:
print "\nYou currently have %d potions" % self.potions
amount = int(raw_input("\nHow many? "))
raise_health = amount * 25
self.health += raise_health
self.potions -= amount
print "\nYour health is now at %d" % self.health
else:
print "\nSorry you don't have any more potions!"
sleep(2)
return self
def list_inventory(self):
actions.clearscreen()
print ("\nName: %s\n"
"Exp. Points: %d\n"
"Potions Held: %d\n"
"Gold: %d pieces\n"
"Current Weapon: %s" %(self.name, self.xp,
self.potions, self.gold, self.current_weapon)
)
if self.has_pistol is True and "pistol" not in self.weapons:
self.weapons.append("pistol")
elif self.has_rifle is True and "rifle" not in self.weapons:
self.weapons.append("rifle")
elif self.has_sword is True and "sword" not in self.weapons:
self.weapons.append("sword")
print "Weapons: %s" % ", ".join(str(weapon) for weapon in self.weapons)
sleep(4)
def low_health(self):
if self.health <= 60 and self.potions > 0:
print "\n*****DANGER*****\n"
choice = raw_input("\nYour health is currently at %d, a"
"nd you currently have %d potions in your inven"
"tory. \nWould you like to use one? " % (self.health,self.potions)
)
		choice = choice.lower()
if choice == 'y' or choice == 'yes':
self.use_potion()
return self
else:
print "\nOk tough guy."
sleep(2)
return self
def set_health(self, newHealth):
self.health = newHealth
print "\nHealth set to %d" % self.health
sleep(2)
return self
def take_damage(self, damage):
self.health -= damage
print "\nYour health is now at %d" % self.health
if self.health < 0:
print "\nYou were slain! Maybe you should carry more health potions with you next time!\n"
exit(0)
sleep(2)
return self
def deal_damage(self,Monster):
if self.current_weapon == "sword":
damage_dealt = superRandint(13,30)
elif self.current_weapon == "pistol":
damage_dealt = superRandint(31,60)
elif self.current_weapon == "rifle":
damage_dealt = superRandint(61,120)
else:
damage_dealt = superRandint(1,12)
Monster.take_damage(damage_dealt,self)
def gain_xp(self,monster_name):
if monster_name == "Dragon":
gained = superRandint(40,150)
elif monster_name == "Gremlin":
gained = superRandint(1,35)
elif monster_name == "Demon":
gained = superRandint(15,50)
elif monster_name == "Zombie":
gained = superRandint(16,75)
else:
gained = superRandint(1,30)
self.xp += gained
print "\nYou gained %d XP!" % gained
return self
def buy_weapon(self):
print "\nS)word: 25 Gold"
print "P)istol: 60 Gold"
print "R)ifle: 120 Gold"
choice = raw_input("\nWhich one would you like to purchase? ").lower()
if choice == 's'and self.gold >= 25:
self.gold -= 25
self.has_sword = True
print "\nA sword has been added to your inventory."
sleep(2)
elif choice == 'p' and self.gold >= 60:
self.gold -= 60
self.has_pistol = True
print "\nA pistol has been added to your inventory."
sleep(2)
elif choice == 'r' and self.gold >= 120:
self.gold -= 120
self.has_rifle = True
print "\nA rifle has been added to your inventory."
sleep(2)
else:
print "\nSorry you don't have enough gold for that purchase."
sleep(2)
actions.visit_shop(self)
return (self)
def set_current_weapon(self):
print "\nCurrent Weapon: " + self.current_weapon
		#the available weapons list is kept up to date by find_weapon(),
		#so it reflects anything picked up during play
print "Available Weapons: %s" % ", ".join(str(weapon) for weapon in self.weapons)
choice = raw_input("\nUse weapon: ").lower()
if choice == "sword" and self.has_sword is True:
self.current_weapon = "sword"
elif choice == "pistol" and self.has_pistol is True:
self.current_weapon = "pistol"
elif choice == "rifle" and self.has_rifle is True:
self.current_weapon = "rifle"
elif choice == "dagger":
self.current_weapon = "dagger"
else:
print "\nSorry you don't currently have that weapon in your inventory."
print "\nCurrent weapon has been changed to: %s" % self.current_weapon
sleep(2)
return self
| gpl-3.0 | 426,067,084,616,806,100 | 36.017316 | 105 | 0.535376 | false |
adelina-t/compute-hyperv | hyperv/nova/serialproxy.py | 1 | 3962 | # Copyright 2015 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet import patcher
import functools
import socket
from nova.i18n import _
from hyperv.nova import constants
from hyperv.nova import vmutils
threading = patcher.original('threading')
def handle_socket_errors(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except socket.error:
self._client_connected.clear()
return wrapper
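# Rough usage sketch for the proxy defined below (the queue objects are
# hypothetical here; in practice they are wired up by the serial console handler
# that instantiates this class):
#   client_connected = threading.Event()
#   proxy = SerialProxy('instance-1', '127.0.0.1', 10000,
#                       input_queue, output_queue, client_connected)
#   proxy.start()   # standard threading.Thread start; run() listens and proxies
#   ...
#   proxy.stop()    # closes the listening socket and any client connection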
class SerialProxy(threading.Thread):
def __init__(self, instance_name, addr, port, input_queue,
output_queue, client_connected):
super(SerialProxy, self).__init__()
self.setDaemon(True)
self._instance_name = instance_name
self._addr = addr
self._port = port
self._conn = None
self._input_queue = input_queue
self._output_queue = output_queue
self._client_connected = client_connected
self._stopped = threading.Event()
def _setup_socket(self):
try:
self._sock = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
self._sock.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR,
1)
self._sock.bind((self._addr, self._port))
self._sock.listen(1)
except socket.error as err:
self._sock.close()
msg = (_('Failed to initialize serial proxy on'
'%(addr)s:%(port)s, handling connections '
'to instance %(instance_name)s. Error: %(error)s') %
{'addr': self._addr, 'port': self._port,
'instance_name': self._instance_name,
'error': err})
raise vmutils.HyperVException(msg)
def stop(self):
self._stopped.set()
self._client_connected.clear()
if self._conn:
self._conn.shutdown(socket.SHUT_RDWR)
self._conn.close()
self._sock.close()
def run(self):
self._setup_socket()
while not self._stopped.isSet():
self._accept_conn()
@handle_socket_errors
def _accept_conn(self):
self._conn, client_addr = self._sock.accept()
self._client_connected.set()
workers = []
for job in [self._get_data, self._send_data]:
worker = threading.Thread(target=job)
worker.setDaemon(True)
worker.start()
workers.append(worker)
for worker in workers:
worker_running = (worker.is_alive() and
worker is not threading.current_thread())
if worker_running:
worker.join()
self._conn.close()
self._conn = None
@handle_socket_errors
def _get_data(self):
while self._client_connected.isSet():
data = self._conn.recv(constants.SERIAL_CONSOLE_BUFFER_SIZE)
if not data:
self._client_connected.clear()
return
self._input_queue.put(data)
@handle_socket_errors
def _send_data(self):
while self._client_connected.isSet():
data = self._output_queue.get_burst()
if data:
self._conn.sendall(data)
| apache-2.0 | -2,187,697,925,085,981,000 | 31.47541 | 78 | 0.572438 | false |
Auzzy/pyinq | pyinq/tests/test_results.py | 1 | 9565 | """
Copyright (c) 2012-2013, Austin Noto-Moniz ([email protected])
Permission to use, copy, modify, and/or distribute this software for any purpose
with or without fee is hereby granted, provided that the above copyright notice
and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
THIS SOFTWARE.
"""
from pyinq.results import *
##### TEST ASSERT RESULTS #####
LINENO = 12
CALL = "assert_true(True)"
FAIL = "FAIL"
TRACE = "TRACE"
EXPECTED = IOError
def test_Result_true():
check_Result(True)
def test_Result_false():
check_Result(False)
def test_AssertResult_true():
check_AssertResult(True)
def test_AssertResult_false():
check_AssertResult(False)
def test_AssertEqualsResult_true():
check_AssertEqualsResult(True,4,4)
def test_AssertEqualsResult_false():
check_AssertEqualsResult(False,4,5)
def test_AssertInResult_true():
check_AssertInResult(True,4,[1,2,4,8,16,32,64])
def test_AssertInResult_false():
check_AssertInResult(False,4,[1,1,2,3,5,8,13])
def test_AssertInstanceResult_true():
check_AssertInstanceResult(True,IOError,Exception)
def test_AssertInstanceResult_false():
check_AssertInstanceResult(False,IOError,WindowsError)
def test_AssertRaisesResult_true():
check_AssertRaisesResult(True,TRACE)
def test_AssertRaisesResult_false():
check_AssertRaisesResult(False,"")
def test_ExpectedErrorResult_true():
check_ExpectedErrorResult(True,LINENO)
def test_ExpectedErrorResult_false():
check_ExpectedErrorResult(False,None)
def test_FailResult():
result = FailResult(LINENO,FAIL)
assert result.lineno == LINENO
assert result.mess == FAIL
assert result.result == False
def test_AssertError():
result = AssertError(TRACE)
assert result.trace == TRACE
assert result.result is None
##### TEST RESULTS #####
NAME = "FOO"
def test_TestResult():
test_result = TestResult(NAME)
assert test_result.name == NAME
assert not test_result.before
assert not test_result.after
def test_TestResult_true():
test_result = TestResult(NAME)
test_result.extend(make_AssertResult_list(True,True,True))
assert test_result.get_status() == True
def test_TestResult_false():
test_result = TestResult(NAME)
test_result.extend(make_AssertResult_list(True,True,False))
assert test_result.get_status() == False
def test_TestClassResult():
cls_result = TestClassResult(NAME)
assert cls_result.name == NAME
assert not cls_result.before
assert not cls_result.after
def test_TestClassResult_true():
cls_result = TestClassResult(NAME)
cls_result.extend(make_TestResult_list(True,True,True))
assert cls_result.get_status() == True
def test_TestClassResult_false():
cls_result = TestClassResult(NAME)
cls_result.extend(make_TestResult_list(True,True,False))
assert cls_result.get_status() == False
def test_TestModuleResult():
mod_result = TestModuleResult(NAME)
assert mod_result.name == NAME
assert not mod_result.before
assert not mod_result.after
def test_TestModuleResult_true():
mod_result = TestModuleResult(NAME)
mod_result.extend(make_TestClassResult_list(True,True,True))
assert mod_result.get_status() == True
def test_TestModuleResult_false():
mod_result = TestModuleResult(NAME)
mod_result.extend(make_TestClassResult_list(True,True,False))
assert mod_result.get_status() == False
def test_TestSuiteResult():
suite_result = TestSuiteResult(NAME)
assert suite_result.name == NAME
assert not suite_result.before
assert not suite_result.after
def test_TestSuiteResult_true():
suite_result = TestSuiteResult(NAME)
suite_result.extend(make_TestModuleResult_list(True,True,True))
assert suite_result.get_status() == True
def test_TestSuiteResult_false():
suite_result = TestModuleResult(NAME)
suite_result.extend(make_TestModuleResult_list(True,True,False))
assert suite_result.get_status() == False
##### TEST ERRORS #####
def construct_call_str(name, args):
arg_str = ",".join([str(arg) for arg in args])
return "{name}({arg_str})".format(name=name,arg_str=arg_str)
def check_PyInqError(func_name, arg_dict, error_cls, result_cls, check_func):
call = construct_call_str(func_name,arg_dict.values())
error = error_cls(LINENO,call,**arg_dict)
result = error.result()
assert error.lineno == LINENO
assert error.call == call
for arg_name in arg_dict:
assert getattr(error,arg_name) == arg_dict[arg_name]
assert type(result) is result_cls
check_func(state=False,lineno=LINENO,call=call,result=result,**arg_dict)
def test_PyInqAssertError():
arg_dict = {}
check_PyInqError("assert_true",arg_dict,PyInqAssertError,AssertResult,check_AssertResult)
def test_PyInqAssertEqualsError():
arg_dict = {"actual":4, "expected":42}
check_PyInqError("assert_equal",arg_dict,PyInqAssertEqualsError,AssertEqualsResult,check_AssertEqualsResult)
def test_PyInqAssertInError():
arg_dict = {"item":4, "collection":[1,1,2,3,5,8,13,21]}
check_PyInqError("assert_in",arg_dict,PyInqAssertInError,AssertInResult,check_AssertInResult)
def test_PyInqAssertInstanceError():
arg_dict = {"obj":IOError, "cls":IndexError}
check_PyInqError("assert_is_instance",arg_dict,PyInqAssertInstanceError,AssertInstanceResult,check_AssertInstanceResult)
def test_PyInqAssertRaisesError():
arg_dict = {"expected":IOError, "trace":""}
check_PyInqError("assert_raises",arg_dict,PyInqAssertRaisesError,AssertRaisesResult,check_AssertRaisesResult)
def test_PyInqFailError():
arg_dict = {"mess":"This is a failure message."}
error = PyInqFailError(LINENO,**arg_dict)
result = error.result()
assert error.lineno == LINENO
assert error.mess == arg_dict["mess"]
assert type(result) is FailResult
assert result.lineno == LINENO
assert result.mess == arg_dict["mess"]
assert result.result == False
##### TEST HELPERS #####
def check_Result(state, result=None):
if not result:
result = Result(state)
assert result.result == state
def check_AssertResult(state, lineno=LINENO, call=CALL, result=None):
if not result:
result = AssertResult(lineno,call,state)
assert result.lineno == lineno
assert result.call == call
assert result.result == state
def check_AssertEqualsResult(state, actual, expected, lineno=LINENO, call=CALL, result=None):
if not result:
result = AssertEqualsResult(lineno,call,state,actual,expected)
assert result.lineno == lineno
assert result.call == call
assert result.result == state
assert result.actual == actual
assert result.expected == expected
def check_AssertInResult(state, item, collection, lineno=LINENO, call=CALL, result=None):
if not result:
result = AssertInResult(lineno,call,state,item,collection)
assert result.lineno == lineno
assert result.call == call
assert result.result == state
assert result.item == item
assert result.collection == collection
def check_AssertInstanceResult(state, obj, cls, lineno=LINENO, call=CALL, result=None):
if not result:
result = AssertInstanceResult(lineno,call,state,obj,cls)
assert result.lineno == lineno
assert result.call == call
assert result.result == state
assert result.obj_name == obj.__class__.__name__
assert result.class_name == cls.__name__
def check_AssertRaisesResult(state, trace, lineno=LINENO, call=CALL, expected=EXPECTED, result=None):
if not result:
result = AssertRaisesResult(lineno,call,state,trace,expected)
assert result.lineno == lineno
assert result.call == call
assert result.result == state
assert remove_whitespace(result.trace) == remove_whitespace(trace)
assert result.expected == expected.__name__
def check_ExpectedErrorResult(state, lineno, expected=EXPECTED, result=None):
if not result:
result = ExpectedErrorResult(state,expected,lineno)
assert result.expected == expected.__name__
assert result.lineno == lineno
assert result.call is None
assert result.result == state
def make_AssertResult_list(*state_list):
return [AssertResult(LINENO,CALL,state) for state in state_list]
def make_TestResult_list(*state_list):
result_list = []
for state in state_list:
result = TestResult(NAME)
result.extend(make_AssertResult_list(state))
result_list.append(result)
return result_list
def make_TestClassResult_list(*state_list):
result_list = []
for state in state_list:
result = TestClassResult(NAME)
result.extend(make_TestResult_list(state))
result_list.append(result)
return result_list
def make_TestModuleResult_list(*state_list):
result_list = []
for state in state_list:
result = TestModuleResult(NAME)
result.extend(make_TestClassResult_list(state))
result_list.append(result)
return result_list
##### UTIL #####
def remove_whitespace(string):
return ''.join([line.strip() for line in string.splitlines()])
| isc | 8,524,622,938,886,892,000 | 30.883333 | 124 | 0.716675 | false |
neogi/machine-learning | clustering_and_retrieval/gaussian_mixture_model/em-gmm.py | 1 | 11332 | # Imports
import sframe
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
import copy
from PIL import Image
from io import BytesIO
import matplotlib.mlab as mlab
import colorsys
def generate_MoG_data(num_data, means, covariances, weights):
data = []
for i in range(num_data):
# Use np.random.choice and weights to pick a cluster id greater than or equal to 0 and less than num_clusters.
k = np.random.choice(len(weights), 1, p=weights)[0]
# Use np.random.multivariate_normal to create data from this cluster
x = np.random.multivariate_normal(means[k], covariances[k])
data.append(x)
return data
def log_sum_exp(Z):
""" Compute log(\sum_i exp(Z_i)) for some array Z."""
return np.max(Z) + np.log(np.sum(np.exp(Z - np.max(Z))))
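# Why the max-subtraction trick matters (illustrative): for Z = np.array([1000., 1000.])
# a naive np.log(np.sum(np.exp(Z))) overflows to inf, while the form above returns
# 1000 + np.log(2) because np.exp(Z - np.max(Z)) stays within [0, 1].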
def loglikelihood(data, weights, means, covs):
""" Compute the loglikelihood of the data for a Gaussian mixture model with the given parameters. """
num_clusters = len(means)
num_dim = len(data[0])
ll = 0
for d in data:
Z = np.zeros(num_clusters)
for k in range(num_clusters):
# Compute (x-mu)^T * Sigma^{-1} * (x-mu)
delta = np.array(d) - means[k]
exponent_term = np.dot(delta.T, np.dot(np.linalg.inv(covs[k]), delta))
# Compute loglikelihood contribution for this data point and this cluster
Z[k] += np.log(weights[k])
Z[k] -= 1/2. * (num_dim * np.log(2*np.pi) + np.log(np.linalg.det(covs[k])) + exponent_term)
# Increment loglikelihood contribution of this data point across all clusters
ll += log_sum_exp(Z)
return ll
def EM(data, init_means, init_covariances, init_weights, maxiter=1000, thresh=1e-4):
# Make copies of initial parameters, which we will update during each iteration
means = copy.deepcopy(init_means)
covariances = copy.deepcopy(init_covariances)
weights = copy.deepcopy(init_weights)
# Infer dimensions of dataset and the number of clusters
num_data = len(data)
num_dim = len(data[0])
num_clusters = len(means)
# Initialize some useful variables
resp = np.zeros((num_data, num_clusters))
ll = loglikelihood(data, weights, means, covariances)
ll_trace = [ll]
for i in range(maxiter):
if i % 5 == 0:
print("Iteration %s" % i)
# E-step: compute responsibilities
# Update resp matrix so that resp[j, k] is the responsibility of cluster k for data point j.
# Hint: To compute likelihood of seeing data point j given cluster k, use multivariate_normal.pdf.
for j in range(num_data):
for k in range(num_clusters):
# YOUR CODE HERE
resp[j, k] = weights[k] * multivariate_normal.pdf(x=data[j],
mean=means[k],
cov=covariances[k])
row_sums = resp.sum(axis=1)[:, np.newaxis]
resp = resp / row_sums # normalize over all possible cluster assignments
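        # At this point resp[j, k] holds the posterior responsibility
        #   r_jk = pi_k * N(x_j | mu_k, Sigma_k) / sum_l pi_l * N(x_j | mu_l, Sigma_l)
        # so each row of resp sums to 1 across the K clusters.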
# M-step
# Compute the total responsibility assigned to each cluster, which will be useful when
# implementing M-steps below. In the lectures this is called N^{soft}
counts = np.sum(resp, axis=0)
for k in range(num_clusters):
# Update the weight for cluster k using the M-step update rule for the cluster weight, \hat{\pi}_k.
# YOUR CODE HERE
Nsoft_k = counts[k]
weights[k] = float(Nsoft_k)/float(num_data)
# Update means for cluster k using the M-step update rule for the mean variables.
# This will assign the variable means[k] to be our estimate for \hat{\mu}_k.
weighted_sum = 0
for j in range(num_data):
# YOUR CODE HERE
weighted_sum += resp[j, k] * data[j]
# YOUR CODE HERE
means[k] = weighted_sum/Nsoft_k
# Update covariances for cluster k using the M-step update rule for covariance variables.
# This will assign the variable covariances[k] to be the estimate for \hat{Sigma}_k.
weighted_sum = np.zeros((num_dim, num_dim))
for j in range(num_data):
# YOUR CODE HERE (Hint: Use np.outer on the data[j] and this cluster's mean)
weighted_sum += resp[j, k] * np.outer(data[j] - means[k], data[j] - means[k])
# YOUR CODE HERE
covariances[k] = weighted_sum/Nsoft_k
# Compute the loglikelihood at this iteration
# YOUR CODE HERE
ll_latest = loglikelihood(data, weights, means, covariances)
ll_trace.append(ll_latest)
# Check for convergence in log-likelihood and store
if (ll_latest - ll) < thresh and ll_latest > -np.inf:
break
ll = ll_latest
if i % 5 != 0:
print("Iteration %s" % i)
out = {'weights': weights, 'means': means, 'covs': covariances, 'loglik': ll_trace, 'resp': resp}
return out
def plot_contours(data, means, covs, title):
plt.figure()
plt.plot([x[0] for x in data], [y[1] for y in data],'ko') # data
delta = 0.025
k = len(means)
x = np.arange(-2.0, 7.0, delta)
y = np.arange(-2.0, 7.0, delta)
X, Y = np.meshgrid(x, y)
col = ['green', 'red', 'indigo']
for i in range(k):
mean = means[i]
cov = covs[i]
sigmax = np.sqrt(cov[0][0])
sigmay = np.sqrt(cov[1][1])
sigmaxy = cov[0][1]/(sigmax*sigmay)
Z = mlab.bivariate_normal(X, Y, sigmax, sigmay, mean[0], mean[1], sigmaxy)
plt.contour(X, Y, Z, colors = col[i])
plt.title(title)
plt.rcParams.update({'font.size':16})
plt.tight_layout()
def plot_responsibilities_in_RB(img, resp, title):
N, K = resp.shape
HSV_tuples = [(x*1.0/K, 0.5, 0.9) for x in range(K)]
RGB_tuples = map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)
R = img['red']
B = img['blue']
resp_by_img_int = [[resp[n][k] for k in range(K)] for n in range(N)]
cols = [tuple(np.dot(resp_by_img_int[n], np.array(RGB_tuples))) for n in range(N)]
plt.figure()
for n in range(len(R)):
plt.plot(R[n], B[n], 'o', c=cols[n])
plt.title(title)
plt.xlabel('R value')
plt.ylabel('B value')
plt.rcParams.update({'font.size':16})
plt.tight_layout()
def get_top_images(assignments, cluster, k=5):
# YOUR CODE HERE
images_in_cluster = assignments[assignments['assignments']==cluster]
print images_in_cluster
top_images = images_in_cluster.topk('probs', k)
return top_images['image']
def save_images(images, prefix):
for i, image in enumerate(images):
Image.open(BytesIO(image._image_data)).save(prefix % i)
# Model parameters
init_means = [
[5, 0], # mean of cluster 1
[1, 1], # mean of cluster 2
[0, 5] # mean of cluster 3
]
init_covariances = [
[[.5, 0.], [0, .5]], # covariance of cluster 1
[[.92, .38], [.38, .91]], # covariance of cluster 2
[[.5, 0.], [0, .5]] # covariance of cluster 3
]
init_weights = [1/4., 1/2., 1/4.] # weights of each cluster
# Generate data
np.random.seed(4)
data = generate_MoG_data(100, init_means, init_covariances, init_weights)
# Plot clusters
plt.figure()
d = np.vstack(data)
plt.plot(d[:,0], d[:,1],'ko')
plt.rcParams.update({'font.size':16})
plt.tight_layout()
# Test EM algorithm
np.random.seed(4)
# Initialization of parameters
chosen = np.random.choice(len(data), 3, replace=False)
initial_means = [data[x] for x in chosen]
initial_covs = [np.cov(data, rowvar=0)] * 3
initial_weights = [1/3.] * 3
# Run EM
results = EM(data, initial_means, initial_covs, initial_weights)
# Parameters after initialization
plot_contours(data, initial_means, initial_covs, 'Initial clusters')
# Parameters after 12 iterations
results = EM(data, initial_means, initial_covs, initial_weights, maxiter=12)
plot_contours(data, results['means'], results['covs'], 'Clusters after 12 iterations')
# Parameters after running EM to convergence
results = EM(data, initial_means, initial_covs, initial_weights)
plot_contours(data, results['means'], results['covs'], 'Final clusters')
# Log-likelihood plot
loglikelihoods = results['loglik']
plt.plot(range(len(loglikelihoods)), loglikelihoods, linewidth=4)
plt.xlabel('Iteration')
plt.ylabel('Log-likelihood')
plt.rcParams.update({'font.size':16})
plt.tight_layout()
# Load image data
images = sframe.SFrame('../data/Week04/images.sf/')
images['rgb'] = images.pack_columns(['red', 'green', 'blue'])['X4']
# Run EM on image data
np.random.seed(1)
# Initalize parameters
init_means = [images['rgb'][x] for x in np.random.choice(len(images), 4, replace=False)]
cov = np.diag([images['red'].var(), images['green'].var(), images['blue'].var()])
init_covariances = [cov, cov, cov, cov]
init_weights = [1/4., 1/4., 1/4., 1/4.]
# Convert rgb data to numpy arrays
img_data = [np.array(i) for i in images['rgb']]
# Run our EM algorithm on the image data using the above initializations.
# This should converge in about 125 iterations
out = EM(img_data, init_means, init_covariances, init_weights)
# Log-likelihood plot
ll = out['loglik']
plt.plot(range(len(ll)),ll,linewidth=4)
plt.xlabel('Iteration')
plt.ylabel('Log-likelihood')
plt.rcParams.update({'font.size':16})
plt.tight_layout()
plt.figure()
plt.plot(range(10,len(ll)),ll[10:],linewidth=4)
plt.xlabel('Iteration')
plt.ylabel('Log-likelihood')
plt.rcParams.update({'font.size':16})
plt.tight_layout()
# Visualize evolution of responsibility
N, K = out['resp'].shape
random_resp = np.random.dirichlet(np.ones(K), N)
plot_responsibilities_in_RB(images, random_resp, 'Random responsibilities')
out = EM(img_data, init_means, init_covariances, init_weights, maxiter=1)
plot_responsibilities_in_RB(images, out['resp'], 'After 1 iteration')
out = EM(img_data, init_means, init_covariances, init_weights, maxiter=20)
plot_responsibilities_in_RB(images, out['resp'], 'After 20 iterations')
# Interpreting clusters
weights = out['weights']
means = out['means']
covariances = out['covs']
rgb = images['rgb']
N = len(images) # number of images
K = len(means) # number of clusters
assignments = [0]*N
probs = [0]*N
for i in range(N):
# Compute the score of data point i under each Gaussian component:
p = np.zeros(K)
for k in range(K):
p[k] = weights[k]*multivariate_normal.pdf(rgb[i], mean=means[k], cov=covariances[k])
# Compute assignments of each data point to a given cluster based on the above scores:
assignments[i] = np.argmax(p)
# For data point i, store the corresponding score under this cluster assignment:
probs[i] = np.max(p)
assignments = sframe.SFrame({'assignments':assignments, 'probs':probs, 'image': images['image']})
for idx in range(4):
get_top_images(assignments, idx)
for component_id in range(4):
print 'Component {0:d}'.format(component_id)
images = get_top_images(assignments, component_id)
save_images(images, 'component_{0:d}_%d.jpg'.format(component_id))
print '\n'
| gpl-3.0 | 2,065,051,852,382,707,500 | 33.867692 | 119 | 0.623544 | false |
xorpaul/check_mk | modules/automation.py | 1 | 45601 | #!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 [email protected] |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# ails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
class MKAutomationError(Exception):
def __init__(self, reason):
self.reason = reason
def __str__(self):
return self.reason
def do_automation(cmd, args):
try:
if cmd == "get-configuration":
read_config_files(with_conf_d=False)
result = automation_get_configuration()
elif cmd == "get-check-information":
result = automation_get_check_information()
elif cmd == "get-check-manpage":
result = automation_get_check_manpage(args)
elif cmd == "get-check-catalog":
result = automation_get_check_catalog(args)
elif cmd == "notification-get-bulks":
result = automation_get_bulks(args)
else:
read_config_files()
if cmd == "try-inventory":
result = automation_try_discovery(args)
elif cmd == "inventory":
result = automation_discovery(args)
elif cmd == "analyse-service":
result = automation_analyse_service(args)
elif cmd == "active-check":
result = automation_active_check(args)
elif cmd == "get-autochecks":
result = automation_get_autochecks(args)
elif cmd == "set-autochecks":
result = automation_set_autochecks(args)
elif cmd == "reload":
result = automation_restart("reload")
elif cmd == "restart":
result = automation_restart("restart")
elif cmd == "scan-parents":
result = automation_scan_parents(args)
elif cmd == "diag-host":
result = automation_diag_host(args)
elif cmd == "delete-host":
result = automation_delete_host(args)
elif cmd == "rename-host":
result = automation_rename_host(args)
elif cmd == "create-snapshot":
result = automation_create_snapshot(args)
elif cmd == "notification-replay":
result = automation_notification_replay(args)
elif cmd == "notification-analyse":
result = automation_notification_analyse(args)
elif cmd == "update-dns-cache":
result = automation_update_dns_cache()
elif cmd == "bake-agents":
result = automation_bake_agents()
else:
raise MKAutomationError("Automation command '%s' is not implemented." % cmd)
except MKAutomationError, e:
sys.stderr.write("%s\n" % e)
if opt_debug:
raise
output_profile()
sys.exit(1)
except Exception, e:
if opt_debug:
raise
else:
sys.stderr.write("%s\n" % e)
output_profile()
sys.exit(2)
if opt_debug:
import pprint
sys.stdout.write(pprint.pformat(result)+"\n")
else:
sys.stdout.write("%r\n" % (result,))
output_profile()
sys.exit(0)
# Does inventory for *one* host. Possible values for how:
# "new" - find only new services (like -I)
# "remove" - remove exceeding services
# "fixall" - find new, remove exceeding
# "refresh" - drop all services and reinventorize
def automation_discovery(args):
# Error sensivity
if args[0] == "@raiseerrors":
args = args[1:]
on_error = "raise"
os.dup2(os.open("/dev/null", os.O_WRONLY), 2)
else:
on_error = "ignore"
# perform full SNMP scan on SNMP devices?
if args[0] == "@scan":
do_snmp_scan = True
args = args[1:]
else:
do_snmp_scan = False
# use cache files if present?
if args[0] == "@cache":
args = args[1:]
use_caches = True
else:
use_caches = False
if len(args) < 2:
raise MKAutomationError("Need two arguments: new|remove|fixall|refresh HOSTNAME")
how = args[0]
hostnames = args[1:]
counts = {}
failed_hosts = {}
for hostname in hostnames:
counts.setdefault(hostname, [0, 0, 0, 0]) # added, removed, kept, total
try:
# in "refresh" mode we first need to remove all previously discovered
# checks of the host, so that get_host_services() does show us the
# new discovered check parameters.
if how == "refresh":
counts[hostname][1] += remove_autochecks_of(hostname) # this is cluster-aware!
# Compute current state of new and existing checks
services = get_host_services(hostname, use_caches=use_caches,
do_snmp_scan=do_snmp_scan, on_error=on_error)
# Create new list of checks
new_items = {}
for (check_type, item), (check_source, paramstring) in services.items():
if check_source in ("custom", "legacy", "active", "manual"):
continue # this is not an autocheck or ignored and currently not checked
# Note discovered checks that are shadowed by manual checks will vanish
# that way.
if check_source in ("new"):
if how in ("new", "fixall", "refresh"):
counts[hostname][0] += 1 # added
counts[hostname][3] += 1 # total
new_items[(check_type, item)] = paramstring
elif check_source in ("old", "ignored"):
# keep currently existing valid services in any case
new_items[(check_type, item)] = paramstring
counts[hostname][2] += 1 # kept
counts[hostname][3] += 1 # total
elif check_source in ("obsolete", "vanished"):
# keep item, if we are currently only looking for new services
# otherwise fix it: remove ignored and non-longer existing services
if how not in ("fixall", "remove"):
new_items[(check_type, item)] = paramstring
counts[hostname][2] += 1 # kept
counts[hostname][3] += 1 # total
else:
counts[hostname][1] += 1 # removed
# Silently keep clustered services
elif check_source.startswith("clustered_"):
new_items[(check_type, item)] = paramstring
else:
raise MKGeneralException("Unknown check source '%s'" % check_source)
set_autochecks_of(hostname, new_items)
except Exception, e:
if opt_debug:
raise
failed_hosts[hostname] = str(e)
return counts, failed_hosts
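# The discovery automation above is normally driven by WATO, but it can also be
# exercised by hand, roughly like this (host name is just an example):
#   check_mk --automation inventory @scan fixall myhost01
# The optional @raiseerrors/@scan/@cache markers are consumed before the mode
# and host name arguments, as parsed at the top of automation_discovery().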
def automation_try_discovery(args):
use_caches = False
do_snmp_scan = False
if args[0] == '@noscan':
args = args[1:]
do_snmp_scan = False
use_caches = True
elif args[0] == '@scan':
args = args[1:]
do_snmp_scan = True
use_caches = False
if args[0] == '@raiseerrors':
on_error = "raise"
args = args[1:]
else:
on_error = "ignore"
# TODO: Remove this unlucky option opt_use_cachefile. At least do not
# handle this option so deep in the code. It should only be handled
# by top-level functions.
global opt_use_cachefile, check_max_cachefile_age
opt_use_cachefile = use_caches
if use_caches:
check_max_cachefile_age = inventory_max_cachefile_age
hostname = args[0]
table = get_check_preview(hostname, use_caches=use_caches,
do_snmp_scan=do_snmp_scan, on_error=on_error)
return table
# Set the new list of autochecks. This list is specified by a
# table of (checktype, item). No parameters are specified. Those
# are either (1) kept from existing autochecks or (2) computed
# from a new inventory. Note: we must never convert check parameters
# from python source code to actual values.
def automation_set_autochecks(args):
hostname = args[0]
new_items = eval(sys.stdin.read())
set_autochecks_of(hostname, new_items)
def set_autochecks_of(hostname, new_items):
    # A cluster does not have an autochecks file.
    # All of its services are located in the nodes instead.
    # So we cycle through all nodes, remove all clustered services
    # and add the ones we've got from stdin.
if is_cluster(hostname):
for node in nodes_of(hostname):
new_autochecks = []
existing = parse_autochecks_file(node)
for check_type, item, paramstring in existing:
descr = service_description(check_type, item)
if hostname != host_of_clustered_service(node, descr):
new_autochecks.append((check_type, item, paramstring))
for (check_type, item), paramstring in new_items.items():
new_autochecks.append((check_type, item, paramstring))
# write new autochecks file for that host
automation_write_autochecks_file(node, new_autochecks)
else:
existing = parse_autochecks_file(hostname)
# write new autochecks file, but take paramstrings from existing ones
# for those checks which are kept
new_autochecks = []
for ct, item, paramstring in existing:
if (ct, item) in new_items:
new_autochecks.append((ct, item, paramstring))
del new_items[(ct, item)]
for (ct, item), paramstring in new_items.items():
new_autochecks.append((ct, item, paramstring))
# write new autochecks file for that host
automation_write_autochecks_file(hostname, new_autochecks)
def automation_write_autochecks_file(hostname, table):
if not os.path.exists(autochecksdir):
os.makedirs(autochecksdir)
path = "%s/%s.mk" % (autochecksdir, hostname)
f = file(path, "w")
f.write("[\n")
for check_type, item, paramstring in table:
f.write(" (%r, %r, %s),\n" % (check_type, item, paramstring))
f.write("]\n")
if inventory_check_autotrigger and inventory_check_interval:
schedule_inventory_check(hostname)
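# The file written above ends up as <autochecksdir>/<hostname>.mk and looks
# roughly like this (check names and parameters are illustrative only):
# [
#   ('df', '/', {}),
#   ('cpu.loads', None, cpuload_default_levels),
# ]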
def automation_get_autochecks(args):
hostname = args[0]
result = []
for ct, item, paramstring in parse_autochecks_file(hostname):
result.append((ct, item, eval(paramstring), paramstring))
return result
def schedule_inventory_check(hostname):
try:
import socket
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(livestatus_unix_socket)
now = int(time.time())
if 'cmk-inventory' in use_new_descriptions_for:
command = "SCHEDULE_FORCED_SVC_CHECK;%s;Check_MK Discovery;%d" % (hostname, now)
else:
# FIXME: Remove this old name handling one day
command = "SCHEDULE_FORCED_SVC_CHECK;%s;Check_MK inventory;%d" % (hostname, now)
s.send("COMMAND [%d] %s\n" % (now, command))
except Exception, e:
if opt_debug:
raise
# Determine the type of the check, and how the parameters are being
# constructed
def automation_analyse_service(args):
global g_hostname
hostname = args[0]
servicedesc = args[1]
g_hostname = hostname # To be sure for all subfunctions
# We just consider types of checks that are managed via WATO.
# We have the following possible types of services:
# 1. manual checks (static_checks) (currently overriding inventorized checks)
# 2. inventorized check
# 3. classical checks
# 4. active checks
# Compute effective check table, in order to remove SNMP duplicates
check_table = get_check_table(hostname, remove_duplicates = True)
# 1. Manual checks
for nr, (checkgroup, entries) in enumerate(static_checks.items()):
for entry in entries:
entry, rule_options = get_rule_options(entry)
if rule_options.get("disabled"):
continue
# Parameters are optional
if len(entry[0]) == 2:
checktype, item = entry[0]
params = None
else:
checktype, item, params = entry[0]
if len(entry) == 3:
taglist, hostlist = entry[1:3]
else:
hostlist = entry[1]
taglist = []
if hosttags_match_taglist(tags_of_host(hostname), taglist) and \
in_extraconf_hostlist(hostlist, hostname):
descr = service_description(checktype, item)
if descr == servicedesc:
return {
"origin" : "static",
"checkgroup" : checkgroup,
"checktype" : checktype,
"item" : item,
"rule_nr" : nr,
"parameters" : params,
}
# 2. Load all autochecks of the host in question and try to find
# our service there
try:
path = "%s/%s.mk" % (autochecksdir, hostname)
for entry in eval(file(path).read()):
if len(entry) == 4: # old format
hn, ct, item, params = entry
else:
ct, item, params = entry # new format without host name
hn = hostname
if (ct, item) not in check_table:
continue # this is a removed duplicate or clustered service
descr = service_description(ct, item)
if hn == hostname and descr == servicedesc:
dlv = check_info[ct].get("default_levels_variable")
if dlv:
fs = factory_settings.get(dlv, None)
else:
fs = None
return {
"origin" : "auto",
"checktype" : ct,
"checkgroup" : check_info[ct].get("group"),
"item" : item,
"inv_parameters" : params,
"factory_settings" : fs,
"parameters" : compute_check_parameters(hostname, ct, item, params),
}
except:
if opt_debug:
raise
# 3. Classical checks
custchecks = host_extra_conf(hostname, custom_checks)
for nr, entry in enumerate(custchecks):
desc = entry["service_description"]
if desc == servicedesc:
result = {
"origin" : "classic",
"rule_nr" : nr,
}
if "command_line" in entry: # Only active checks have a command line
result["command_line"] = entry["command_line"]
return result
# 4. Active checks
for acttype, rules in active_checks.items():
entries = host_extra_conf(hostname, rules)
if entries:
act_info = active_check_info[acttype]
for params in entries:
description = act_info["service_description"](params)
if description == servicedesc:
return {
"origin" : "active",
"checktype" : acttype,
"parameters" : params,
}
return {} # not found
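# For illustration (assumed values), an inventorized ("auto") service would
# come back roughly as:
#   { "origin": "auto", "checktype": "df", "checkgroup": "filesystem",
#     "item": "/", "inv_parameters": {}, "factory_settings": {...},
#     "parameters": {...} }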
# TODO: What about clusters???
# TODO: Does the automatic shadowing of SNMP checks work (with dual monitoring)?
def automation_delete_host(args):
hostname = args[0]
for path in [
"%s/%s" % (precompiled_hostchecks_dir, hostname),
"%s/%s.py" % (precompiled_hostchecks_dir, hostname),
"%s/%s.mk" % (autochecksdir, hostname),
"%s/%s" % (logwatch_dir, hostname),
"%s/%s" % (counters_directory, hostname),
"%s/%s" % (tcp_cache_dir, hostname),
"%s/%s.*" % (tcp_cache_dir, hostname)]:
os.system("rm -rf '%s'" % path)
def automation_restart(job = "restart", use_rushd = True):
    # Make sure Nagios does not inherit any open file descriptors.
    # This really happens, e.g. if check_mk is called by WATO via
    # Apache: Nagios inherits the open file where Apache is listening
    # for incoming HTTP connections. Really.
if monitoring_core == "nagios":
objects_file = nagios_objects_file
for fd in range(3, 256):
try:
os.close(fd)
except:
pass
else:
objects_file = var_dir + "/core/config"
if job == "restart":
job = "reload" # force reload for CMC
# os.closerange(3, 256) --> not available in older Python versions
class null_file:
def write(self, stuff):
pass
def flush(self):
pass
# Deactivate stdout by introducing fake file without filedescriptor
old_stdout = sys.stdout
sys.stdout = null_file()
try:
backup_path = None
if not lock_objects_file():
            raise MKAutomationError("Cannot activate changes. "
                                    "Another activation process is currently in progress")
if os.path.exists(objects_file):
backup_path = objects_file + ".save"
os.rename(objects_file, backup_path)
else:
backup_path = None
try:
if monitoring_core == "nagios":
create_nagios_config(file(objects_file, "w"))
else:
do_create_cmc_config(opt_cmc_relfilename, use_rushd = use_rushd)
if "do_bake_agents" in globals() and bake_agents_on_restart:
do_bake_agents()
except Exception, e:
if backup_path:
os.rename(backup_path, objects_file)
if opt_debug:
raise
raise MKAutomationError("Error creating configuration: %s" % e)
if do_check_nagiosconfig():
if backup_path:
os.remove(backup_path)
if monitoring_core == "cmc":
do_pack_config()
else:
do_precompile_hostchecks()
do_core_action(job)
else:
if backup_path:
os.rename(backup_path, objects_file)
else:
os.remove(objects_file)
raise MKAutomationError("Configuration for monitoring core is invalid. Rolling back.")
except Exception, e:
if backup_path and os.path.exists(backup_path):
os.remove(backup_path)
if opt_debug:
raise
raise MKAutomationError(str(e))
sys.stdout = old_stdout
def automation_get_configuration():
# We read the list of variable names from stdin since
# that could be too much for the command line
variable_names = eval(sys.stdin.read())
result = {}
for varname in variable_names:
if varname in globals():
if not hasattr(globals()[varname], '__call__'):
result[varname] = globals()[varname]
return result
def automation_get_check_catalog(args):
def path_prefix_matches(p, op):
if op and not p:
return False
elif not op:
return True
else:
return p[0] == op[0] and path_prefix_matches(p[1:], op[1:])
read_manpage_catalog()
tree = {}
if len(args) > 0:
only_path = tuple(args)
else:
only_path = ()
for path, entries in g_manpage_catalog.items():
if not path_prefix_matches(path, only_path):
continue
subtree = tree
for component in path[:-1]:
subtree = subtree.setdefault(component, {})
subtree[path[-1]] = map(strip_manpage_entry, entries)
for p in only_path:
tree = tree[p]
return tree, manpage_catalog_titles
def strip_manpage_entry(entry):
return dict([ (k,v) for (k,v) in entry.items() if k in [
"name", "agents", "title"
]])
def automation_get_check_information():
manuals = all_manuals()
checks = {}
for check_type, check in check_info.items():
manfile = manuals.get(check_type)
if manfile:
title = file(manfile).readline().strip().split(":", 1)[1].strip()
else:
title = check_type
checks[check_type] = { "title" : title }
if check["group"]:
checks[check_type]["group"] = check["group"]
checks[check_type]["service_description"] = check.get("service_description","%s")
checks[check_type]["snmp"] = check_uses_snmp(check_type)
return checks
def automation_get_check_manpage(args):
if len(args) != 1:
raise MKAutomationError("Need exactly one argument.")
check_type = args[0]
manpage = load_manpage(args[0])
    # Add some information from check_info. Note: active checks do not
# have an entry in check_info
if check_type in check_info:
manpage["type"] = "check_mk"
info = check_info[check_type]
for key in [ "snmp_info", "has_perfdata", "service_description" ]:
if key in info:
manpage[key] = info[key]
if "." in check_type:
section = check_type.split(".")[0]
if section in check_info and "snmp_info" in check_info[section]:
manpage["snmp_info"] = check_info[section]["snmp_info"]
if "group" in info:
manpage["group"] = info["group"]
# Assume active check
elif check_type.startswith("check_"):
manpage["type"] = "active"
return manpage
def automation_scan_parents(args):
settings = {
"timeout" : int(args[0]),
"probes" : int(args[1]),
"max_ttl" : int(args[2]),
"ping_probes" : int(args[3]),
}
hostnames = args[4:]
traceroute_prog = find_bin_in_path('traceroute')
if not traceroute_prog:
raise MKAutomationError("Cannot find binary <tt>traceroute</tt> in search path.")
try:
gateways = scan_parents_of(hostnames, silent=True, settings=settings)
return gateways
except Exception, e:
raise MKAutomationError(str(e))
def automation_diag_host(args):
import subprocess
hostname, test, ipaddress, snmp_community = args[:4]
agent_port, snmp_timeout, snmp_retries = map(int, args[4:7])
cmd = args[7]
if not ipaddress:
try:
ipaddress = lookup_ipaddress(hostname)
except:
raise MKGeneralException("Cannot resolve hostname %s into IP address" % hostname)
try:
if test == 'ping':
p = subprocess.Popen('ping -A -i 0.2 -c 2 -W 5 %s 2>&1' % ipaddress, shell = True, stdout = subprocess.PIPE)
response = p.stdout.read()
return (p.wait(), response)
elif test == 'agent':
if not cmd:
cmd = get_datasource_program(hostname, ipaddress)
if cmd:
return 0, get_agent_info_program(cmd)
else:
return 0, get_agent_info_tcp(hostname, ipaddress, agent_port or None)
elif test == 'traceroute':
traceroute_prog = find_bin_in_path('traceroute')
if not traceroute_prog:
return 1, "Cannot find binary <tt>traceroute</tt>."
else:
p = subprocess.Popen('traceroute -n %s 2>&1' % ipaddress, shell = True, stdout = subprocess.PIPE)
response = p.stdout.read()
return (p.wait(), response)
elif test.startswith('snmp'):
if snmp_community:
explicit_snmp_communities[hostname] = snmp_community
# override timing settings if provided
if snmp_timeout or snmp_retries:
timing = {}
if snmp_timeout:
timing['timeout'] = snmp_timeout
if snmp_retries:
timing['retries'] = snmp_retries
snmp_timing.insert(0, (timing, [], [hostname]))
# SNMP versions
global bulkwalk_hosts, snmpv2c_hosts
if test == 'snmpv2':
bulkwalk_hosts = [hostname]
elif test == 'snmpv2_nobulk':
bulkwalk_hosts = []
snmpv2c_hosts = [hostname]
elif test == 'snmpv1':
bulkwalk_hosts = []
snmpv2c_hosts = []
else:
return 1, "SNMP command not implemented"
data = get_snmp_table(hostname, ipaddress, None, ('.1.3.6.1.2.1.1', ['1.0', '4.0', '5.0', '6.0']))
if data:
return 0, 'sysDescr:\t%s\nsysContact:\t%s\nsysName:\t%s\nsysLocation:\t%s\n' % tuple(data[0])
else:
return 1, 'Got empty SNMP response'
else:
return 1, "Command not implemented"
except Exception, e:
if opt_debug:
raise
return 1, str(e)
# WATO calls this automation when a host has been renamed. We need to change
# several file and directory names.
# HIRN: Take the new format into account here as well! On the other hand,
# nothing bad should really happen if the hostname is *not* in the file.
def automation_rename_host(args):
oldname = args[0]
newname = args[1]
actions = []
    # Autochecks: simply read and write out the file again. We do
    # not store a host name here anymore - but old versions did.
    # By rewriting we get rid of the host name.
acpath = autochecksdir + "/" + oldname + ".mk"
if os.path.exists(acpath):
old_autochecks = parse_autochecks_file(oldname)
out = file(autochecksdir + "/" + newname + ".mk", "w")
out.write("[\n")
for ct, item, paramstring in old_autochecks:
out.write(" (%r, %r, %s),\n" % (ct, item, paramstring))
out.write("]\n")
out.close()
os.remove(acpath) # Remove old file
actions.append("autochecks")
    # At this point WATO has already changed its configuration. All further
    # data might be changed by the still-running core, so we need to stop
    # it now.
core_was_running = core_is_running()
if core_was_running:
do_core_action("stop", quiet=True)
# Rename temporary files of the host
for d in [ "cache", "counters" ]:
if rename_host_file(tmp_dir + "/" + d + "/", oldname, newname):
actions.append(d)
if rename_host_dir(tmp_dir + "/piggyback/", oldname, newname):
actions.append("piggyback-load")
# Rename piggy files *created* by the host
piggybase = tmp_dir + "/piggyback/"
if os.path.exists(piggybase):
for piggydir in os.listdir(piggybase):
if rename_host_file(piggybase + piggydir, oldname, newname):
actions.append("piggyback-pig")
# Logwatch
if rename_host_dir(logwatch_dir, oldname, newname):
actions.append("logwatch")
# SNMP walks
if rename_host_file(snmpwalks_dir, oldname, newname):
actions.append("snmpwalk")
# OMD-Stuff. Note: The question really is whether this should be
# included in Check_MK. The point is - however - that all these
# actions need to take place while the core is stopped.
if omd_root:
actions += omd_rename_host(oldname, newname)
# Start monitoring again. In case of CMC we need to ignore
# any configuration created by the CMC Rushahead daemon
if core_was_running:
global ignore_ip_lookup_failures
ignore_ip_lookup_failures = True # force config generation to succeed. The core *must* start.
automation_restart("start", use_rushd = False)
if monitoring_core == "cmc":
try:
os.remove(var_dir + "/core/config.rush")
os.remove(var_dir + "/core/config.rush.id")
except:
pass
if failed_ip_lookups:
actions.append("ipfail")
return actions
def rename_host_dir(basedir, oldname, newname):
import shutil
if os.path.exists(basedir + "/" + oldname):
if os.path.exists(basedir + "/" + newname):
shutil.rmtree(basedir + "/" + newname)
os.rename(basedir + "/" + oldname, basedir + "/" + newname)
return 1
return 0
def rename_host_file(basedir, oldname, newname):
if os.path.exists(basedir + "/" + oldname):
if os.path.exists(basedir + "/" + newname):
os.remove(basedir + "/" + newname)
os.rename(basedir + "/" + oldname, basedir + "/" + newname)
return 1
return 0
# This function could be moved out of Check_MK.
def omd_rename_host(oldname, newname):
oldregex = oldname.replace(".", "[.]")
newregex = newname.replace(".", "[.]")
actions = []
# Temporarily stop processing of performance data
npcd_running = os.path.exists(omd_root + "/tmp/pnp4nagios/run/npcd.pid")
if npcd_running:
os.system("omd stop npcd >/dev/null 2>&1 </dev/null")
rrdcache_running = os.path.exists(omd_root + "/tmp/run/rrdcached.sock")
if rrdcache_running:
os.system("omd stop rrdcached >/dev/null 2>&1 </dev/null")
# Fix pathnames in XML files
dirpath = omd_root + "/var/pnp4nagios/perfdata/" + oldname
os.system("sed -i 's@/perfdata/%s/@/perfdata/%s/@' %s/*.xml 2>/dev/null" % (oldname, newname, dirpath))
# RRD files
if rename_host_dir(rrd_path, oldname, newname):
actions.append("rrd")
# entries of rrdcached journal
dirpath = omd_root + "/var/rrdcached/"
if not os.system("sed -i 's@/perfdata/%s/@/perfdata/%s/@' "
"%s/var/rrdcached/rrd.journal.* 2>/dev/null" % ( oldregex, newregex, omd_root)):
actions.append("rrdcached")
# Spoolfiles of NPCD
if not os.system("sed -i 's/HOSTNAME::%s /HOSTNAME::%s /' "
"%s/var/pnp4nagios/perfdata.dump %s/var/pnp4nagios/spool/perfdata.* 2>/dev/null" % (
oldregex, newregex, omd_root, omd_root)):
actions.append("pnpspool")
if rrdcache_running:
os.system("omd start rrdcached >/dev/null 2>&1 </dev/null")
if npcd_running:
os.system("omd start npcd >/dev/null 2>&1 </dev/null")
# Logfiles and history files of CMC and Nagios. Problem
# here: the exact place of the hostname varies between the
# various log entry lines
sed_commands = r'''
s/(INITIAL|CURRENT) (HOST|SERVICE) STATE: %(old)s;/\1 \2 STATE: %(new)s;/
s/(HOST|SERVICE) (DOWNTIME |FLAPPING |)ALERT: %(old)s;/\1 \2ALERT: %(new)s;/
s/PASSIVE (HOST|SERVICE) CHECK: %(old)s;/PASSIVE \1 CHECK: %(new)s;/
s/(HOST|SERVICE) NOTIFICATION: ([^;]+);%(old)s;/\1 NOTIFICATION: \2;%(new)s;/
''' % { "old" : oldregex, "new" : newregex }
patterns = [
"var/check_mk/core/history",
"var/check_mk/core/archive/*",
"var/nagios/nagios.log",
"var/nagios/archive/*",
]
one_matched = False
for pattern in patterns:
command = "sed -ri --file=/dev/fd/0 %s/%s >/dev/null 2>&1" % (omd_root, pattern)
p = os.popen(command, "w")
p.write(sed_commands)
if not p.close():
one_matched = True
if one_matched:
actions.append("history")
# State retention (important for Downtimes, Acknowledgements, etc.)
if monitoring_core == "nagios":
if not os.system("sed -ri 's/^host_name=%s$/host_name=%s/' %s/var/nagios/retention.dat" % (
oldregex, newregex, omd_root)):
actions.append("retention")
else: # CMC
# Create a file "renamed_hosts" with the information about the
# renaming of the hosts. The core will honor this file when it
# reads the status file with the saved state.
file(var_dir + "/core/renamed_hosts", "w").write("%s\n%s\n" % (oldname, newname))
actions.append("retention")
# NagVis maps
if not os.system("sed -i 's/^[[:space:]]*host_name=%s[[:space:]]*$/host_name=%s/' "
"%s/etc/nagvis/maps/*.cfg 2>/dev/null" % (
oldregex, newregex, omd_root)):
actions.append("nagvis")
return actions
def automation_create_snapshot(args):
try:
import tarfile, time, cStringIO, shutil, subprocess, thread, traceback, threading
from hashlib import sha256
the_data = sys.stdin.read()
data = eval(the_data)
snapshot_name = data["snapshot_name"]
snapshot_dir = var_dir + "/wato/snapshots"
work_dir = snapshot_dir + "/workdir/%s" % snapshot_name
if not os.path.exists(work_dir):
os.makedirs(work_dir)
# Open / initialize files
filename_target = "%s/%s" % (snapshot_dir, snapshot_name)
filename_work = "%s/%s.work" % (work_dir, snapshot_name)
filename_status = "%s/%s.status" % (work_dir, snapshot_name)
filename_pid = "%s/%s.pid" % (work_dir, snapshot_name)
filename_subtar = ""
current_domain = ""
file(filename_target, "w").close()
file(filename_status, "w").close()
def wipe_directory(path):
for entry in os.listdir(path):
if entry not in [ '.', '..' ]:
p = path + "/" + entry
if os.path.isdir(p):
shutil.rmtree(p)
else:
os.remove(p)
lock_status_file = threading.Lock()
def update_status_file(domain = None, infotext = None):
lock_status_file.acquire()
if os.path.exists(filename_status):
if domain:
statusinfo[domain] = infotext
statusfile = file(filename_status, "w")
statusfile.write("comment:%s\n" % data.get("comment"," ").encode("utf-8"))
status_list = list(statusinfo.items())
status_list.sort()
for status in status_list:
statusfile.write("%s.tar.gz:%s\n" % status)
lock_status_file.release()
# Set initial status info
statusinfo = {}
for name in data.get("domains", {}).keys():
statusinfo[name] = "TODO:0"
update_status_file()
# Now fork into our own process to have an asynchronous backup creation
try:
pid = os.fork()
if pid > 0:
# Exit parent process
return
# Decouple from parent environment
os.chdir("/")
os.umask(0)
os.setsid()
# Close all fd except stdin,out,err
for fd in range(3, 256):
try:
os.close(fd)
except OSError:
pass
sys.stdout.flush()
sys.stderr.flush()
si = os.open("/dev/null", os.O_RDONLY)
so = os.open("/dev/null", os.O_WRONLY)
os.dup2(si, 0)
os.dup2(so, 1)
os.dup2(so, 2)
os.close(si)
os.close(so)
except OSError, e:
raise MKAutomationError(str(e))
# Save pid of working process.
file(filename_pid, "w").write("%d" % os.getpid())
def cleanup():
wipe_directory(work_dir)
os.rmdir(work_dir)
def check_should_abort():
if not os.path.exists(filename_target):
cleanup()
sys.exit(0)
def get_basic_tarinfo(name):
tarinfo = tarfile.TarInfo(name)
tarinfo.mtime = time.time()
tarinfo.uid = 0
tarinfo.gid = 0
tarinfo.mode = 0644
tarinfo.type = tarfile.REGTYPE
return tarinfo
def update_subtar_size(seconds):
while current_domain != None:
try:
if current_domain:
if os.path.exists(path_subtar):
update_status_file(current_domain, "Processing:%d" % os.stat(path_subtar).st_size)
except:
pass
time.sleep(seconds)
def snapshot_secret():
path = default_config_dir + '/snapshot.secret'
try:
return file(path).read()
except IOError:
# create a secret during first use
try:
s = os.urandom(256)
except NotImplementedError:
                    # sha256 cannot hash a float; hash the string form instead
                    s = sha256(str(time.time())).hexdigest()
file(path, 'w').write(s)
return s
#
# Initialize the snapshot tar file and populate with initial information
#
tar_in_progress = tarfile.open(filename_work, "w")
# Add comment to tar file
if data.get("comment"):
tarinfo = get_basic_tarinfo("comment")
tarinfo.size = len(data.get("comment").encode("utf-8"))
tar_in_progress.addfile(tarinfo, cStringIO.StringIO(data.get("comment").encode("utf-8")))
if data.get("created_by"):
tarinfo = get_basic_tarinfo("created_by")
tarinfo.size = len(data.get("created_by"))
tar_in_progress.addfile(tarinfo, cStringIO.StringIO(data.get("created_by")))
# Add snapshot type
snapshot_type = data.get("type")
tarinfo = get_basic_tarinfo("type")
tarinfo.size = len(snapshot_type)
tar_in_progress.addfile(tarinfo, cStringIO.StringIO(snapshot_type))
# Close tar in progress, all other files are included via command line tar
tar_in_progress.close()
#
# Process domains (sorted)
#
subtar_update_thread = thread.start_new_thread(update_subtar_size, (1,))
domains = map(lambda x: x, data.get("domains").items())
domains.sort()
subtar_info = {}
for name, info in domains:
current_domain = name # Set name for update size thread
prefix = info.get("prefix","")
exclude_options = ""
for entry in info.get("exclude", []):
exclude_options += "--exclude=%s " % entry
check_should_abort()
filename_subtar = "%s.tar.gz" % name
path_subtar = "%s/%s" % (work_dir, filename_subtar)
if info.get("backup_command"):
command = info.get("backup_command") % {
"prefix" : prefix,
"path_subtar" : path_subtar,
"work_dir" : work_dir
}
else:
paths = map(lambda x: x[1] == "" and "." or x[1], info.get("paths", []))
command = "tar czf %s --ignore-failed-read --force-local %s -C %s %s" % \
(path_subtar, exclude_options, prefix, " ".join(paths))
proc = subprocess.Popen(command, shell=True, stdin=None, close_fds=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=prefix)
stdout, stderr = proc.communicate()
exit_code = proc.wait()
# Allow exit codes 0 and 1 (files changed during backup)
if exit_code not in [0, 1]:
raise MKAutomationError("Error while creating backup of %s (Exit Code %d) - %s.\n%s" %
(current_domain, exit_code, stderr, command))
subtar_size = os.stat(path_subtar).st_size
subtar_hash = sha256(file(path_subtar).read()).hexdigest()
subtar_signed = sha256(subtar_hash + snapshot_secret()).hexdigest()
subtar_info[filename_subtar] = (subtar_hash, subtar_signed)
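            # Presumably the "checksums" member written at the end lets a later
            # restore verify each subtar: recompute sha256(subtar) as well as
            # sha256(hash + secret) and compare them against these two values.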
# Append tar.gz subtar to snapshot
command = "tar --append --file=%s %s ; rm %s" % \
(filename_work, filename_subtar, filename_subtar)
proc = subprocess.Popen(command, shell=True, cwd = work_dir)
proc.communicate()
exit_code = proc.wait()
if exit_code != 0:
raise MKAutomationError("Error on adding backup domain %s to tarfile" % current_domain)
current_domain = ""
update_status_file(name, "Finished:%d" % subtar_size)
# Now add the info file which contains hashes and signed hashes for
# each of the subtars
info = ''.join([ '%s %s %s\n' % (k, v[0], v[1]) for k, v in subtar_info.items() ]) + '\n'
tar_in_progress = tarfile.open(filename_work, "a")
tarinfo = get_basic_tarinfo("checksums")
tarinfo.size = len(info)
tar_in_progress.addfile(tarinfo, cStringIO.StringIO(info))
tar_in_progress.close()
current_domain = None
shutil.move(filename_work, filename_target)
cleanup()
except Exception, e:
cleanup()
raise MKAutomationError(str(e))
def automation_notification_replay(args):
nr = args[0]
return notification_replay_backlog(int(nr))
def automation_notification_analyse(args):
nr = args[0]
return notification_analyse_backlog(int(nr))
def automation_get_bulks(args):
only_ripe = args[0] == "1"
return find_bulks(only_ripe)
def automation_active_check(args):
hostname, plugin, item = args
actchecks = []
needed_commands = []
if plugin == "custom":
custchecks = host_extra_conf(hostname, custom_checks)
for entry in custchecks:
if entry["service_description"] == item:
command_line = replace_core_macros(hostname, entry.get("command_line", ""))
if command_line:
command_line = autodetect_plugin(command_line)
return execute_check_plugin(command_line)
else:
return -1, "Passive check - cannot be executed"
else:
rules = active_checks.get(plugin)
if rules:
entries = host_extra_conf(hostname, rules)
if entries:
act_info = active_check_info[plugin]
for params in entries:
description = act_info["service_description"](params).replace('$HOSTNAME$', hostname)
if description == item:
args = act_info["argument_function"](params)
command_line = replace_core_macros(hostname, act_info["command_line"].replace("$ARG1$", args))
return execute_check_plugin(command_line)
def load_resource_file(macros):
try:
for line in file(omd_root + "/etc/nagios/resource.cfg"):
line = line.strip()
if not line or line[0] == '#':
continue
varname, value = line.split('=', 1)
macros[varname] = value
except:
if opt_debug:
raise
# Simulate replacing some of the more important macros of hosts. We
# cannot use dynamic macros, of course. Note: this will not work
# without OMD, since we do not know the value of $USER1$ and $USER2$
# here. We could read the Nagios resource.cfg file, but we do not
# know for sure the place of that either.
def replace_core_macros(hostname, commandline):
macros = {
"$HOSTNAME$" : hostname,
"$HOSTADDRESS$" : lookup_ipaddress(hostname),
}
load_resource_file(macros)
for varname, value in macros.items():
commandline = commandline.replace(varname, value)
return commandline
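# Illustrative call (host name and address are made up):
#   replace_core_macros("srv1", "check_icmp -H $HOSTADDRESS$")
#   -> "check_icmp -H 10.0.0.1"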
def execute_check_plugin(commandline):
try:
p = os.popen(commandline + " 2>&1")
output = p.read().strip()
ret = p.close()
if not ret:
status = 0
else:
if ret & 0xff == 0:
status = ret / 256
else:
status = 3
if status < 0 or status > 3:
status = 3
output = output.split("|",1)[0] # Drop performance data
return status, output
except Exception, e:
if opt_debug:
raise
return 3, "UNKNOWN - Cannot execute command: %s" % e
def automation_update_dns_cache():
return do_update_dns_cache()
def automation_bake_agents():
if "do_bake_agents" in globals():
return do_bake_agents()
| gpl-2.0 | -6,314,698,494,620,364,000 | 35.774194 | 120 | 0.551162 | false |
nilmtk/nilmtk | nilmtk/dataset_converters/greend/convert_greend.py | 1 | 6684 | from os import listdir, getcwd
from os.path import join, isdir, isfile, dirname, abspath
import pandas as pd
import numpy as np
import datetime
import time
from nilmtk.datastore import Key
from nilmtk.measurement import LEVEL_NAMES
from nilm_metadata import convert_yaml_to_hdf5
import warnings
import numpy as np
from io import StringIO
from multiprocessing import Pool
from nilmtk.utils import get_module_directory
def _get_blocks(filename):
'''
Return a list of dataframes from a GREEND CSV file
GREEND files can be interpreted as multiple CSV blocks concatenated into
a single file per date. Since the columns of the individual blocks can
vary in a single file, they need to be read separately.
There are some issues we need to handle in the converter:
- the headers from the multiple blocks
- corrupted data (lines with null chars, broken lines)
- more fields than specified in header
'''
block_data = None
dfs = []
previous_header = None
print(filename)
# Use float64 for timestamps and float32 for the rest of the columns
dtypes = {}
dtypes['timestamp'] = np.float64
def _process_block():
if block_data is None:
return
block_data.seek(0)
try:
# ignore extra fields for some files
error_bad_lines = not (
('building5' in filename and 'dataset_2014-02-04.csv' in filename)
)
df = pd.read_csv(block_data, index_col='timestamp', dtype=dtypes, error_bad_lines=error_bad_lines)
except: #(pd.errors.ParserError, ValueError, TypeError):
print("ERROR", filename)
raise
df.index = pd.to_datetime(df.index, unit='s')
df = df.tz_localize("UTC").tz_convert("CET").sort_index()
dfs.append(df)
block_data.close()
special_check = (
('dataset_2014-01-28.csv' in filename and 'building5' in filename) or
('dataset_2014-09-02.csv' in filename and 'building6' in filename)
)
with open(filename, 'r') as f:
for line in f:
            # At least one file has a bunch of nulls present, so let's clean the data
line = line.strip('\0')
if 'time' in line:
# Found a new block
if not line.startswith('time'):
# Some lines are corrupted, e.g. 1415605814.541311,0.0,NULL,NUtimestamp,000D6F00029C2918...
line = line[line.find('time'):]
if previous_header == line.strip():
# Same exact header, we can treat it as the same block
# print('Skipping split')
continue
# Using a defaultdict for the dtypes didn't work with read_csv,
# so we fill a normal dict when we find the columns
cols = line.strip().split(',')[1:]
for col in cols:
dtypes[col] = np.float32
# print('Found new block')
_process_block()
block_data = StringIO()
previous_header = line.strip()
if special_check:
if ('0.072.172091508705606' in line or
'1409660828.0753369,NULL,NUL' == line):
continue
block_data.write(line)
# Process the remaining block
_process_block()
return (filename, dfs)
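# Minimal usage sketch (the path below is hypothetical):
#   fname, frames = _get_blocks('/data/GREEND/building0/dataset_2014-01-01.csv')
#   day_df = pd.concat(frames, sort=False).sort_index()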
def _get_houses(greend_path):
house_list = listdir(greend_path)
return [h for h in house_list if isdir(join(greend_path,h))]
def convert_greend(greend_path, hdf_filename, use_mp=True):
"""
Parameters
----------
greend_path : str
The root path of the greend dataset.
hdf_filename : str
The destination HDF5 filename (including path and suffix).
use_mp : bool
Defaults to True. Use multiprocessing to load the files for
each building.
"""
store = pd.HDFStore(hdf_filename, 'w', complevel=5, complib='zlib')
houses = sorted(_get_houses(greend_path))
print('Houses found:', houses)
if use_mp:
pool = Pool()
h = 1 # nilmtk counts buildings from 1 not from 0 as we do, so everything is shifted by 1
for house in houses:
print('Loading', house)
abs_house = join(greend_path, house)
dates = [d for d in listdir(abs_house) if d.startswith('dataset')]
target_filenames = [join(abs_house, date) for date in dates]
if use_mp:
house_data = pool.map(_get_blocks, target_filenames)
# Ensure the blocks are sorted by date and make a plain list
house_data_dfs = []
for date, data in sorted(house_data, key=lambda x: x[0]):
house_data_dfs.extend(data)
else:
house_data_dfs = []
for fn in target_filenames:
house_data_dfs.extend(_get_blocks(fn)[1])
overall_df = pd.concat(house_data_dfs, sort=False).sort_index()
dups_in_index = overall_df.index.duplicated(keep='first')
if dups_in_index.any():
print("Found duplicated values in index, dropping them.")
overall_df = overall_df[~dups_in_index]
m = 1
for column in overall_df.columns:
print("meter {}: {}".format(m, column))
key = Key(building=h, meter=m)
print("Putting into store...")
df = overall_df[column].to_frame() #.dropna(axis=0)
# if drop_duplicates:
# print("Dropping duplicated values in data...")
# df = df.drop_duplicates()
df.columns = pd.MultiIndex.from_tuples([('power', 'active')])
df.columns.set_names(LEVEL_NAMES, inplace=True)
store.put(str(key), df, format = 'table')
m += 1
# print('Flushing store...')
# store.flush()
h += 1
store.close()
# retrieve the dataset metadata in the metadata subfolder
metadata_dir = join(get_module_directory(), 'dataset_converters', 'greend', 'metadata')
convert_yaml_to_hdf5(metadata_dir, hdf_filename)
#is only called when this file is the main file... only test purpose
if __name__ == '__main__':
t1 = time.time()
convert_greend('GREEND_0-2_300615',
'GREEND_0-2_300615.h5')
dt = time.time() - t1
print()
print()
print('Time passed: {}:{}'.format(int(dt/60), int(dt%60)))
| apache-2.0 | 5,599,120,602,864,636,000 | 34.553191 | 111 | 0.565679 | false |
CIGIHub/greyjay | greyjay/content_notes/apps.py | 1 | 1139 | from django.apps import AppConfig
class EndNotesAppConfig(AppConfig):
name = 'greyjay.content_notes'
label = 'content_notes'
verbose_name = "Wagtail end notes"
def ready(self):
from greyjay.articles.models import ArticlePage
from wagtail.wagtailadmin.edit_handlers import (
MultiFieldPanel,
FieldPanel,
InlinePanel,
ObjectList
)
notes_panel = [
MultiFieldPanel(
[
FieldPanel('endnotes_heading'),
FieldPanel('endnote_identifier_style'),
InlinePanel('endnote_links', label="End Notes"),
],
heading="End Notes Section"
),
MultiFieldPanel(
[
FieldPanel('citations_heading'),
InlinePanel('citation_links', label="Citations"),
],
heading="Citations Section"
),
]
ArticlePage.edit_handler.children.insert(
-1,
ObjectList(notes_panel, heading="Notes")
)
| mit | 8,242,842,528,254,381,000 | 28.205128 | 69 | 0.507463 | false |
rew4332/tensorflow | tensorflow/python/ops/array_ops.py | 1 | 94317 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Casting
TensorFlow provides several operations that you can use to cast tensor data
types in your graph.
@@string_to_number
@@to_double
@@to_float
@@to_bfloat16
@@to_int32
@@to_int64
@@cast
@@saturate_cast
## Shapes and Shaping
TensorFlow provides several operations that you can use to determine the shape
of a tensor and change the shape of a tensor.
@@shape
@@size
@@rank
@@reshape
@@squeeze
@@expand_dims
@@meshgrid
## Slicing and Joining
TensorFlow provides several operations to slice or extract parts of a tensor,
or join multiple tensors together.
@@slice
@@strided_slice
@@split
@@tile
@@pad
@@concat
@@pack
@@unpack
@@reverse_sequence
@@reverse
@@transpose
@@extract_image_patches
@@space_to_batch
@@batch_to_space
@@space_to_depth
@@depth_to_space
@@gather
@@gather_nd
@@dynamic_partition
@@dynamic_stitch
@@boolean_mask
@@one_hot
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
# 'Constant' gets imported in the module 'array_ops'.
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import logging_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_array_ops import *
# pylint: enable=wildcard-import
# Used for slicing to specify a new 1 size dimension
newaxis = None
# We override the 'slice' for the "slice" op, so we keep python's
# existing 'slice' for later use in this module.
_baseslice = slice
# Aliases for some automatically-generated names.
listdiff = gen_array_ops.list_diff
def shape(input, name=None):
"""Returns the shape of a tensor.
This operation returns a 1-D integer tensor representing the shape of `input`.
For example:
```python
# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
shape(t) ==> [2, 2, 3]
```
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
"""
return shape_internal(input, name, optimize=True)
def shape_internal(input, name=None, optimize=True):
"""Returns the shape of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the shape as a constant when possible.
Returns:
A `Tensor` of type `int32`.
"""
with ops.name_scope(name, "Shape", [input]) as name:
if isinstance(input, ops.SparseTensor):
return gen_math_ops.cast(input.shape, dtypes.int32)
else:
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
# Static shape inference can be incorrect when loops are involved: disable
# shape optimization in this case to avoid generating invalid constants.
optimize &= input_tensor.graph._get_control_flow_context() is None
if optimize and input_shape.is_fully_defined():
return constant(input_shape.as_list(), dtypes.int32, name=name)
return gen_array_ops.shape(input, name=name)
def size(input, name=None):
"""Returns the size of a tensor.
This operation returns an integer representing the number of elements in
`input`.
For example:
```python
  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
size(t) ==> 12
```
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
"""
return size_internal(input, name, optimize=True)
def size_internal(input, name=None, optimize=True):
"""Returns the size of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the size as a constant when possible.
Returns:
A `Tensor` of type `int32`.
"""
with ops.name_scope(name, "Size", [input]) as name:
if isinstance(input, ops.SparseTensor):
return gen_math_ops._prod(gen_math_ops.cast(input.shape, dtypes.int32), 0,
name=name)
else:
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
# Static shape inference can be incorrect when loops are involved: disable
# shape optimization in this case to avoid generating invalid constants.
optimize &= input_tensor.graph._get_control_flow_context() is None
if optimize and input_shape.is_fully_defined():
return constant(input_shape.num_elements(), dtypes.int32, name=name)
return gen_array_ops.size(input, name=name)
def rank(input, name=None):
"""Returns the rank of a tensor.
This operation returns an integer representing the rank of `input`.
For example:
```python
# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
# shape of tensor 't' is [2, 2, 3]
rank(t) ==> 3
```
**Note**: The rank of a tensor is not the same as the rank of a matrix. The
rank of a tensor is the number of indices required to uniquely select each
element of the tensor. Rank is also known as "order", "degree", or "ndims."
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
"""
return rank_internal(input, name, optimize=True)
def rank_internal(input, name=None, optimize=True):
"""Returns the rank of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the rank as a constant when possible.
Returns:
A `Tensor` of type `int32`.
"""
with ops.name_scope(name, "Rank", [input]) as name:
if isinstance(input, ops.SparseTensor):
return gen_array_ops.size(input.shape, name=name)
else:
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
# Static shape inference can be incorrect when loops are involved: disable
# shape optimization in this case to avoid generating invalid constants.
optimize &= input_tensor.graph._get_control_flow_context() is None
if optimize and input_shape.ndims is not None:
return constant(input_shape.ndims, dtypes.int32, name=name)
return gen_array_ops.rank(input, name=name)
# DEPRECATED use init_ops.zeros_initializer
# TODO(irving) Move it to init_ops.py
def zeros_initializer(shape, dtype=dtypes.float32):
"""An adaptor for zeros() to match the Initializer spec."""
return zeros(shape, dtype)
def _SliceHelper(tensor, slice_spec):
"""Overload for Tensor.__getitem__.
This operation extracts the specified region from the tensor.
The notation is similar to numpy with the restriction that
currently only support basic indexing. That means that
using a tensor as input is not currently allowed
Args:
tensor: An ops.Tensor object.
slice_spec: The arguments to Tensor.__getitem__.
Returns:
The appropriate slice of "tensor", based on "slice_spec".
Raises:
ValueError: If a slice range is negative size.
TypeError: If the slice indices aren't int, slice, or Ellipsis.
"""
if not isinstance(slice_spec, (list, tuple)):
slice_spec = [slice_spec]
begin, end, strides = [], [], []
index = 0
new_axis_mask, shrink_axis_mask = 0, 0
begin_mask, end_mask = 0, 0
ellipsis_mask = 0
for s in slice_spec:
if isinstance(s, _baseslice):
strides.append(s.step if s.step is not None else 1)
# python doesn't always use None when constructing ranges
# for example a[:] gives slice(None,sys.maxsize,None)
# whereas a[::1] gives slice(None,None,None)
if s.start is not None and s.start is not sys.maxsize:
begin.append(s.start)
else:
begin.append(0)
begin_mask |= (1 << index)
if s.stop is not None and s.stop != sys.maxsize:
end.append(s.stop)
else:
end.append(0)
end_mask |= (1 << index)
elif s is Ellipsis:
begin.append(0)
end.append(0)
strides.append(1)
ellipsis_mask |= (1 << index)
elif s is newaxis:
begin.append(0)
end.append(0)
strides.append(1)
new_axis_mask |= (1 << index)
else:
try:
s = int(s)
except TypeError:
raise TypeError("Bad slice index %s of type %s" % (s, type(s)))
begin.append(s)
end.append(s + 1)
strides.append(1)
shrink_axis_mask |= (1 << index)
index += 1
  # pack often involves no tensors, so we must use name_scope to make sure
  # the op is created in the correct graph
with ops.name_scope(None, "strided_slice",
[tensor] + begin + end + strides) as name:
begin_pack, end_pack, strides_pack = pack(begin), pack(end), pack(strides)
return strided_slice(tensor,
begin_pack,
end_pack,
strides_pack,
begin_mask=begin_mask,
end_mask=end_mask,
shrink_axis_mask=shrink_axis_mask,
new_axis_mask=new_axis_mask,
ellipsis_mask=ellipsis_mask,
name=name)
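# Illustrative trace (assuming a rank-3 tensor `t`): t[1:, ..., newaxis] builds
# begin=[1, 0, 0], end=[0, 0, 0], strides=[1, 1, 1] with end_mask=0b001,
# ellipsis_mask=0b010 and new_axis_mask=0b100 before calling strided_slice.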
# pylint: disable=undefined-variable,protected-access
def slice(input_, begin, size, name=None):
"""Extracts a slice from a tensor.
This operation extracts a slice of size `size` from a tensor `input` starting
at the location specified by `begin`. The slice `size` is represented as a
tensor shape, where `size[i]` is the number of elements of the 'i'th dimension
of `input` that you want to slice. The starting location (`begin`) for the
slice is represented as an offset in each dimension of `input`. In other
words, `begin[i]` is the offset into the 'i'th dimension of `input` that you
want to slice from.
`begin` is zero-based; `size` is one-based. If `size[i]` is -1,
all remaining elements in dimension i are included in the
slice. In other words, this is equivalent to setting:
`size[i] = input.dim_size(i) - begin[i]`
This operation requires that:
`0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n]`
For example:
```
# 'input' is [[[1, 1, 1], [2, 2, 2]],
# [[3, 3, 3], [4, 4, 4]],
# [[5, 5, 5], [6, 6, 6]]]
tf.slice(input, [1, 0, 0], [1, 1, 3]) ==> [[[3, 3, 3]]]
tf.slice(input, [1, 0, 0], [1, 2, 3]) ==> [[[3, 3, 3],
[4, 4, 4]]]
tf.slice(input, [1, 0, 0], [2, 1, 3]) ==> [[[3, 3, 3]],
[[5, 5, 5]]]
```
Args:
input_: A `Tensor`.
begin: An `int32` or `int64` `Tensor`.
size: An `int32` or `int64` `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` the same type as `input`.
"""
return gen_array_ops._slice(input_, begin, size, name=name)
# pylint: disable=invalid-name
def strided_slice(input_,
begin,
end,
strides,
begin_mask=0,
end_mask=0,
ellipsis_mask=0,
new_axis_mask=0,
shrink_axis_mask=0,
name=None):
"""Extracts a strided slice from a tensor.
To a first order, this operation extracts a slice of size `end - begin`
from a tensor `input`
starting at the location specified by `begin`. The slice continues by adding
`stride` to the `begin` index until all dimensions are not less than `end`.
Note that components of stride can be negative, which causes a reverse
slice.
This operation can be thought of an encoding of a numpy style sliced
range. Given a python slice input[<spec0>, <spec1>, ..., <specn>]
this function will be called as follows.
`begin`, `end`, and `strides` will be all length n. n is in general
not the same dimensionality as `input`.
For the ith spec,
`begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask`,
  and `shrink_axis_mask` will have the ith bit corresponding to
the ith spec.
If the ith bit of `begin_mask` is non-zero, `begin[i]` is ignored and
the fullest possible range in that dimension is used instead.
`end_mask` works analogously, except with the end range.
`foo[5:,:,:3]` on a 7x8x9 tensor is equivalent to `foo[5:7,0:8,0:3]`.
`foo[::-1]` reverses a tensor with shape 8.
  If the ith bit of `ellipsis_mask` is non-zero, as many unspecified dimensions
as needed will be inserted between other dimensions. Only one
non-zero bit is allowed in `ellipsis_mask`.
For example `foo[3:5,...,4:5]` on a shape 10x3x3x10 tensor is
equivalent to `foo[3:5,:,:,4:5]` and
`foo[3:5,...]` is equivalent to `foo[3:5,:,:,:]`.
  If the ith bit of `new_axis_mask` is one, then `begin`,
  `end`, and `stride` are ignored and a new length 1 dimension is
added at this point in the output tensor.
For example `foo[3:5,4]` on a 10x8 tensor produces a shape 2 tensor
whereas `foo[3:5,4:5]` produces a shape 2x1 tensor with shrink_mask
being 1<<1 == 2.
  If the ith bit of `shrink_axis_mask` is one, then `begin[i]`,
`end[i]`, and `stride[i]` are used to do a slice in the appropriate
dimension, but the output tensor will be reduced in dimensionality
by one. This is only valid if the ith entry of slice[i]==1.
  NOTE: `begin` and `end` are zero-indexed.
`strides` entries must be non-zero.
```
# 'input' is [[[1, 1, 1], [2, 2, 2]],
# [[3, 3, 3], [4, 4, 4]],
# [[5, 5, 5], [6, 6, 6]]]
  tf.strided_slice(input, [1, 0, 0], [2, 1, 3], [1, 1, 1]) ==> [[[3, 3, 3]]]
  tf.strided_slice(input, [1, 0, 0], [2, 2, 3], [1, 1, 1]) ==> [[[3, 3, 3],
                                                                 [4, 4, 4]]]
  tf.strided_slice(input, [1, 1, 0], [2, -1, 3], [1, -1, 1]) ==> [[[4, 4, 4],
                                                                   [3, 3, 3]]]
```
Args:
input_: A `Tensor`.
begin: An `int32` or `int64` `Tensor`.
end: An `int32` or `int64` `Tensor`.
strides: An `int32` or `int64` `Tensor`.
begin_mask: An `int32` mask.
end_mask: An `int32` mask.
ellipsis_mask: An `int32` mask.
new_axis_mask: An `int32` mask.
shrink_axis_mask: An `int32` mask.
name: A name for the operation (optional).
Returns:
A `Tensor` the same type as `input`.
"""
return gen_array_ops.strided_slice(input_,
begin,
end,
strides,
name=name,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
ops.Tensor._override_operator("__getitem__", _SliceHelper)
def pack(values, axis=0, name="pack"):
"""Packs a list of rank-`R` tensors into one rank-`(R+1)` tensor.
Packs the list of tensors in `values` into a tensor with rank one higher than
each tensor in `values`, by packing them along the `axis` dimension.
Given a list of length `N` of tensors of shape `(A, B, C)`;
if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
Etc.
For example:
```prettyprint
# 'x' is [1, 4]
# 'y' is [2, 5]
# 'z' is [3, 6]
pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.
pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
```
This is the opposite of unpack. The numpy equivalent is
tf.pack([x, y, z]) = np.asarray([x, y, z])
Args:
values: A list of `Tensor` objects with the same shape and type.
axis: An `int`. The axis to pack along. Defaults to the first dimension.
Supports negative indexes.
name: A name for this operation (optional).
Returns:
output: A packed `Tensor` with the same type as `values`.
Raises:
ValueError: If `axis` is out of the range [-(R+1), R+1).
"""
if axis == 0:
try:
# If the input is a constant list, it can be converted to a constant op
return ops.convert_to_tensor(values, name=name)
except (TypeError, ValueError):
pass # Input list contains non-constant tensors
value_shape = ops.convert_to_tensor(values[0], name=name).get_shape()
if value_shape.ndims is not None:
expanded_num_dims = value_shape.ndims + 1
if axis < -expanded_num_dims or axis >= expanded_num_dims:
raise ValueError("axis = %d not in [%d, %d)" %
(axis, -expanded_num_dims, expanded_num_dims))
return gen_array_ops._pack(values, axis=axis, name=name)
# pylint: disable=invalid-name
def _autopacking_helper(list_or_tuple, dtype, name):
"""Converts the given list or tuple to a tensor by packing.
Args:
list_or_tuple: A (possibly nested) list or tuple containing a tensor.
dtype: The element type of the returned tensor.
name: A name for the returned tensor.
Returns:
A `tf.Tensor` with value equivalent to `list_or_tuple`.
"""
must_pack = False
converted_elems = []
with ops.name_scope(name) as scope:
for i, elem in enumerate(list_or_tuple):
if ops.is_dense_tensor_like(elem):
if dtype is not None and elem.dtype.base_dtype != dtype:
raise TypeError(
"Cannot convert a list containing a tensor of dtype "
"%s to %s (Tensor is: %r)" % (elem.dtype, dtype, elem))
converted_elems.append(elem)
must_pack = True
elif isinstance(elem, (list, tuple)):
converted_elem = _autopacking_helper(elem, dtype, str(i))
if ops.is_dense_tensor_like(converted_elem):
must_pack = True
converted_elems.append(converted_elem)
else:
converted_elems.append(elem)
if must_pack:
elems_as_tensors = []
for i, elem in enumerate(converted_elems):
if ops.is_dense_tensor_like(elem):
elems_as_tensors.append(elem)
else:
# NOTE(mrry): This is inefficient, but it enables us to
# handle the case where the list arguments are other
# convertible-to-tensor types, such as numpy arrays.
elems_as_tensors.append(
constant_op.constant(elem, dtype=dtype, name=str(i)))
return gen_array_ops._pack(elems_as_tensors, name=scope)
else:
return converted_elems
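# Sketch of the behaviour (assumed values): given [tf.constant(1), 2] and
# dtype int32, the helper packs everything into a single rank-1 tensor; a
# plain [1, 2] with no tensor-like elements is returned unchanged as a list.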
def _get_dtype_from_nested_lists(list_or_tuple):
"""Returns the dtype of any tensor-like object in `list_or_tuple`, if found.
Args:
list_or_tuple: A list or tuple representing an object that can be
converted to a `tf.Tensor`.
Returns:
The dtype of any tensor-like object in `list_or_tuple`, or `None` if no
such object exists.
"""
for elem in list_or_tuple:
if ops.is_dense_tensor_like(elem):
return elem.dtype.base_dtype
elif isinstance(elem, (list, tuple)):
maybe_dtype = _get_dtype_from_nested_lists(elem)
if maybe_dtype is not None:
return maybe_dtype
return None
def _autopacking_conversion_function(v, dtype=None, name=None, as_ref=False):
"""Tensor conversion function that automatically packs arguments."""
if as_ref:
return NotImplemented
inferred_dtype = _get_dtype_from_nested_lists(v)
if inferred_dtype is None:
# We did not find any tensor-like objects in the nested lists, so defer to
# other conversion functions.
return NotImplemented
if dtype is not None and dtype != inferred_dtype:
return NotImplemented
return _autopacking_helper(v, inferred_dtype, name or "packed")
# pylint: enable=invalid-name
# NOTE: Register this conversion function to run *before* one that
# assumes every element is a value.
ops.register_tensor_conversion_function(
(list, tuple), _autopacking_conversion_function, 99)
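# Net effect (illustrative): after this registration, convert_to_tensor on a
# nested list such as [[x, y], [z, w]] with Tensor elements will pack the
# list into one Tensor instead of deferring to the constant conversion path.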
def unpack(value, num=None, axis=0, name="unpack"):
"""Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors.
Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
If `num` is not specified (the default), it is inferred from `value`'s shape.
If `value.shape[axis]` is not known, `ValueError` is raised.
For example, given a tensor of shape `(A, B, C, D)`;
If `axis == 0` then the i'th tensor in `output` is the slice
`value[i, :, :, :]` and each tensor in `output` will have shape `(B, C, D)`.
(Note that the dimension unpacked along is gone, unlike `split`).
If `axis == 1` then the i'th tensor in `output` is the slice
`value[:, i, :, :]` and each tensor in `output` will have shape `(A, C, D)`.
Etc.
This is the opposite of pack. The numpy equivalent is
tf.unpack(x, n) = list(x)
Args:
value: A rank `R > 0` `Tensor` to be unpacked.
num: An `int`. The length of the dimension `axis`. Automatically inferred
if `None` (the default).
axis: An `int`. The axis to unpack along. Defaults to the first
dimension. Supports negative indexes.
name: A name for the operation (optional).
Returns:
The list of `Tensor` objects unpacked from `value`.
Raises:
ValueError: If `num` is unspecified and cannot be inferred.
ValueError: If `axis` is out of the range [-R, R).
"""
if num is None:
value = ops.convert_to_tensor(value)
value_shape = value.get_shape()
if value_shape.ndims is not None:
if axis < -value_shape.ndims or axis >= value_shape.ndims:
raise ValueError("axis = %d not in [%d, %d)" %
(axis, -value_shape.ndims, value_shape.ndims))
num = value_shape[axis].value
if num is None:
raise ValueError("Cannot infer num from shape %s" % value_shape)
return gen_array_ops._unpack(value, num=num, axis=axis, name=name)
def concat(concat_dim, values, name="concat"):
"""Concatenates tensors along one dimension.
Concatenates the list of tensors `values` along dimension `concat_dim`. If
`values[i].shape = [D0, D1, ... Dconcat_dim(i), ...Dn]`, the concatenated
result has shape
[D0, D1, ... Rconcat_dim, ...Dn]
where
Rconcat_dim = sum(Dconcat_dim(i))
That is, the data from the input tensors is joined along the `concat_dim`
dimension.
The number of dimensions of the input tensors must match, and all dimensions
except `concat_dim` must be equal.
For example:
```python
t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
tf.concat(0, [t1, t2]) ==> [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tf.concat(1, [t1, t2]) ==> [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]
# tensor t3 with shape [2, 3]
# tensor t4 with shape [2, 3]
tf.shape(tf.concat(0, [t3, t4])) ==> [4, 3]
tf.shape(tf.concat(1, [t3, t4])) ==> [2, 6]
```
Note: If you are concatenating along a new axis consider using pack.
E.g.
```python
tf.concat(axis, [tf.expand_dims(t, axis) for t in tensors])
```
can be rewritten as
```python
tf.pack(tensors, axis=axis)
```
Args:
concat_dim: 0-D `int32` `Tensor`. Dimension along which to concatenate.
values: A list of `Tensor` objects or a single `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` resulting from concatenation of the input tensors.
"""
if not isinstance(values, (list, tuple)):
values = [values]
# TODO(mrry): Change to return values?
if len(values) == 1: # Degenerate case of one tensor.
# Make a throwaway call to convert_to_tensor to make sure
# that concat_dim is of the correct type, and make sure that
# the returned tensor is a scalar.
# TODO(keveman): Implement a standalone type and shape checker.
with ops.name_scope(name) as scope:
ops.convert_to_tensor(concat_dim,
name="concat_dim",
dtype=dtypes.int32).get_shape(
).assert_is_compatible_with(tensor_shape.scalar())
return identity(values[0], name=scope)
return gen_array_ops._concat(concat_dim=concat_dim,
values=values,
name=name)
@ops.RegisterShape("Pack")
def _PackShape(op):
input_shape = op.inputs[0].get_shape()
if input_shape.ndims is None:
return [tensor_shape.unknown_shape()]
for inp in op.inputs[1:]:
input_shape = input_shape.merge_with(inp.get_shape())
input_shape = input_shape.as_list()
input_shape.insert(op.get_attr("axis"), len(op.inputs))
return [tensor_shape.TensorShape(input_shape)]
@ops.RegisterShape("Unpack")
def _UnpackShape(op):
input_shape = op.inputs[0].get_shape()
if input_shape.ndims is None:
return [tensor_shape.unknown_shape()] * op.get_attr("num")
input_shape = input_shape.as_list()
del input_shape[op.get_attr("axis")]
return [tensor_shape.TensorShape(input_shape)] * op.get_attr("num")
@ops.RegisterShape("Concat")
def _ConcatShape(op):
concat_dim = tensor_util.constant_value(op.inputs[0])
if concat_dim is None:
# Return an unknown shape with the same rank as the inputs, or an
# unknown rank if no input's rank is known.
rank = None
for value in op.inputs[1:]:
if rank is not None:
value.get_shape().assert_has_rank(rank)
else:
rank = value.get_shape().ndims
if rank == 0:
raise ValueError("Can't concatenate scalars (use tf.pack instead)")
return [tensor_shape.unknown_shape(ndims=rank)]
else:
# Merge all the non-concat dims, and sum the concat dim to make an
# output shape.
concat_dim = int(concat_dim)
if concat_dim < 0:
raise ValueError("Expected concat_dim >= 0, but got %d" % concat_dim)
output_shape = op.inputs[1].get_shape()
for value in op.inputs[2:]:
value_shape = value.get_shape()
if value_shape.ndims is not None and concat_dim >= value_shape.ndims:
raise ValueError("Expected concat_dim in range [0, %d), but got %d" %
(value_shape.ndims, concat_dim))
before = output_shape[:concat_dim].merge_with(value_shape[:concat_dim])
at = output_shape[concat_dim] + value_shape[concat_dim]
after = output_shape[
concat_dim + 1:].merge_with(value_shape[concat_dim + 1:])
output_shape = before.concatenate(at).concatenate(after)
return [output_shape]
@ops.RegisterShape("ConcatOffset")
def _ConcatOffsetShape(op):
return [x.get_shape() for x in op.inputs[1:]]
def boolean_mask(tensor, mask, name="boolean_mask"):
"""Apply boolean mask to tensor. Numpy equivalent is `tensor[mask]`.
```python
# 1-D example
tensor = [0, 1, 2, 3]
mask = [True, False, True, False]
boolean_mask(tensor, mask) ==> [0, 2]
```
In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match
the first K dimensions of `tensor`'s shape. We then have:
`boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`
where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).
Args:
tensor: N-D tensor.
mask: K-D boolean tensor, K <= N and K must be known statically.
name: A name for this operation (optional).
Returns:
Tensor populated by entries in `tensor` corresponding to `True` values in
`mask`.
Raises:
ValueError: If shapes do not conform.
Examples:
```python
# 2-D example
tensor = [[1, 2], [3, 4], [5, 6]]
mask = [True, False, True]
boolean_mask(tensor, mask) ==> [[1, 2], [5, 6]]
```
"""
def _apply_mask_1d(reshaped_tensor, mask):
"""Mask tensor along dimension 0 with a 1-D mask."""
indices = squeeze(where(mask), squeeze_dims=[1])
return gather(reshaped_tensor, indices)
with ops.name_scope(name, values=[tensor, mask]):
tensor = ops.convert_to_tensor(tensor, name="tensor")
mask = ops.convert_to_tensor(mask, name="mask")
shape_mask = mask.get_shape()
ndims_mask = shape_mask.ndims
shape_tensor = tensor.get_shape()
if ndims_mask == 0:
raise ValueError("mask cannot be scalar.")
if ndims_mask is None:
raise ValueError(
"mask dimensions must be specified, even if some dimensions are None"
". E.g. shape=[None] is ok, but shape=None is not.")
shape_tensor[:ndims_mask].assert_is_compatible_with(shape_mask)
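    # Flatten the leading ndims_mask dimensions of `tensor` into a single
    # dimension so the boolean mask can be applied with a 1-D gather; the
    # trailing dimensions are preserved unchanged.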
tensor = reshape(tensor, concat(0, [[-1], shape(tensor)[ndims_mask:]]))
first_dim = shape_tensor[:ndims_mask].num_elements()
tensor.set_shape(
tensor_shape.as_shape([first_dim])
.concatenate(shape_tensor[ndims_mask:]))
mask = reshape(mask, [-1])
return _apply_mask_1d(tensor, mask)
def sparse_mask(a, mask_indices, name=None):
"""Masks elements of `IndexedSlices`.
Given an `IndexedSlices` instance `a`, returns another `IndexedSlices` that
contains a subset of the slices of `a`. Only the slices at indices not
specified in `mask_indices` are returned.
This is useful when you need to extract a subset of slices in an
`IndexedSlices` object.
For example:
```python
# `a` contains slices at indices [12, 26, 37, 45] from a large tensor
# with shape [1000, 10]
a.indices => [12, 26, 37, 45]
tf.shape(a.values) => [4, 10]
# `b` will be the subset of `a` slices at its second and third indices, so
# we want to mask its first and last indices (which are at absolute
# indices 12, 45)
b = tf.sparse_mask(a, [12, 45])
b.indices => [26, 37]
tf.shape(b.values) => [2, 10]
```
Args:
    a: An `IndexedSlices` instance.
    mask_indices: Indices of elements to mask.
    name: A name for the operation (optional).
Returns:
The masked `IndexedSlices` instance.
"""
with ops.name_scope(name, "sparse_mask", [a, mask_indices]) as name:
indices = a.indices
out_indices, to_gather = listdiff(indices, mask_indices)
out_values = gather(a.values, to_gather, name=name)
return ops.IndexedSlices(out_values, out_indices, a.dense_shape)
def split(split_dim, num_split, value, name="split"):
"""Splits a tensor into `num_split` tensors along one dimension.
Splits `value` along dimension `split_dim` into `num_split` smaller tensors.
Requires that `num_split` evenly divide `value.shape[split_dim]`.
For example:
```python
# 'value' is a tensor with shape [5, 30]
# Split 'value' into 3 tensors along dimension 1
split0, split1, split2 = tf.split(1, 3, value)
tf.shape(split0) ==> [5, 10]
```
Note: If you are splitting along an axis by the length of that axis, consider
using unpack, e.g.
```python
num_items = t.get_shape()[axis].value
[tf.squeeze(s, [axis]) for s in tf.split(axis, num_items, t)]
```
can be rewritten as
```python
tf.unpack(t, axis=axis)
```
Args:
split_dim: A 0-D `int32` `Tensor`. The dimension along which to split.
Must be in the range `[0, rank(value))`.
num_split: A Python integer. The number of ways to split.
value: The `Tensor` to split.
name: A name for the operation (optional).
Returns:
`num_split` `Tensor` objects resulting from splitting `value`.
"""
return gen_array_ops._split(split_dim=split_dim,
num_split=num_split,
value=value,
name=name)
@ops.RegisterShape("Reverse")
def _ReverseShape(op):
input_shape = op.inputs[0].get_shape()
dims_shape = op.inputs[1].get_shape().with_rank(1)
if dims_shape[0].value is not None:
input_shape = input_shape.with_rank(dims_shape[0])
if input_shape.ndims is not None and input_shape.ndims > 8:
raise ValueError(
"tf.reverse() does not work on tensors with more than 8 dimensions")
return [input_shape]
def transpose(a, perm=None, name="transpose"):
"""Transposes `a`. Permutes the dimensions according to `perm`.
The returned tensor's dimension i will correspond to the input dimension
`perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
the rank of the input tensor. Hence by default, this operation performs a
regular matrix transpose on 2-D input Tensors.
For example:
```python
# 'x' is [[1 2 3]
# [4 5 6]]
tf.transpose(x) ==> [[1 4]
[2 5]
[3 6]]
# Equivalently
tf.transpose(x, perm=[1, 0]) ==> [[1 4]
[2 5]
[3 6]]
# 'perm' is more useful for n-dimensional tensors, for n > 2
# 'x' is [[[1 2 3]
# [4 5 6]]
# [[7 8 9]
# [10 11 12]]]
# Take the transpose of the matrices in dimension-0
tf.transpose(x, perm=[0, 2, 1]) ==> [[[1 4]
[2 5]
[3 6]]
[[7 10]
[8 11]
[9 12]]]
```
Args:
a: A `Tensor`.
perm: A permutation of the dimensions of `a`.
name: A name for the operation (optional).
Returns:
A transposed `Tensor`.
"""
with ops.name_scope(name, "transpose", [a]) as name:
if perm is None:
rank = gen_array_ops.rank(a)
perm = (rank - 1) - gen_math_ops._range(0, rank, 1)
ret = gen_array_ops.transpose(a, perm, name=name)
# NOTE(mrry): Setting the shape explicitly because
# reverse is not handled by the shape function.
input_shape = ret.op.inputs[0].get_shape().dims
if input_shape is not None:
ret.set_shape(input_shape[::-1])
else:
ret = gen_array_ops.transpose(a, perm, name=name)
return ret
# pylint: disable=invalid-name
def batch_matrix_transpose(a, name="batch_matrix_transpose"):
"""Transposes last two dimensions of batch matrix `a`.
For example:
```python
# Matrix with no batch dimension.
# 'x' is [[1 2 3]
# [4 5 6]]
  tf.batch_matrix_transpose(x) ==> [[1 4]
[2 5]
[3 6]]
# Matrix with two batch dimensions.
# x.shape is [1, 2, 3, 4]
# tf.batch_matrix_transpose(x) is shape [1, 2, 4, 3]
```
Args:
a: A `Tensor` with `rank >= 2`.
name: A name for the operation (optional).
Returns:
A transposed batch matrix `Tensor`.
Raises:
ValueError: If `a` is determined statically to have `rank < 2`.
"""
with ops.name_scope(name, values=[a]):
a = ops.convert_to_tensor(a, name="a")
# If we know the number of dimensions (statically), we can do two things:
# 1. Check that `a` is a (batch) matrix.
# 2. Use a python list for perm. This preserves static shape information
# and avoids extra computations.
a_shape = a.get_shape()
ndims = a_shape.ndims
if ndims is not None:
if ndims < 2:
raise ValueError(
"Argument 'a' should be a (batch) matrix, with rank >= 2. Found: "
"%s" % a_shape)
perm = list(range(ndims - 2)) + [ndims - 1] + [ndims - 2]
else:
a_rank = rank(a)
perm = concat(
0, (gen_math_ops._range(0, a_rank - 2, 1), [a_rank - 1, a_rank - 2]))
return transpose(a, perm=perm)
# pylint: enable=invalid-name
def zeros(shape, dtype=dtypes.float32, name=None):
"""Creates a tensor with all elements set to zero.
This operation returns a tensor of type `dtype` with shape `shape` and
all elements set to zero.
For example:
```python
tf.zeros([3, 4], int32) ==> [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
```
Args:
shape: Either a list of integers, or a 1-D `Tensor` of type `int32`.
dtype: The type of an element in the resulting `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to zero.
"""
with ops.name_scope(name, "zeros", [shape]) as name:
try:
shape = tensor_shape.as_shape(shape)
output = constant(0, shape=shape, dtype=dtype, name=name)
except (TypeError, ValueError):
shape = ops.convert_to_tensor(shape, dtype=dtypes.int32, name="shape")
output = fill(shape, constant(0, dtype=dtype), name=name)
assert output.dtype.base_dtype == dtypes.as_dtype(dtype).base_dtype
return output
def zeros_like(tensor, dtype=None, name=None, optimize=True):
"""Creates a tensor with all elements set to zero.
Given a single tensor (`tensor`), this operation returns a tensor of the
same type and shape as `tensor` with all elements set to zero. Optionally,
you can use `dtype` to specify a new type for the returned tensor.
For example:
```python
# 'tensor' is [[1, 2, 3], [4, 5, 6]]
tf.zeros_like(tensor) ==> [[0, 0, 0], [0, 0, 0]]
```
Args:
tensor: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
`int8`, `int16`, `int32`, `int64`, `uint8`, `complex64`, or `complex128`.
name: A name for the operation (optional).
optimize: if true, attempt to statically determine the shape of 'tensor'
and encode it as a constant.
Returns:
A `Tensor` with all elements set to zero.
"""
with ops.name_scope(name, "zeros_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
if dtype is not None and tensor.dtype != dtype:
ret = zeros(shape_internal(tensor, optimize=optimize), dtype, name=name)
ret.set_shape(tensor.get_shape())
return ret
else:
return gen_array_ops._zeros_like(tensor, name=name)
def ones_like(tensor, dtype=None, name=None, optimize=True):
"""Creates a tensor with all elements set to 1.
Given a single tensor (`tensor`), this operation returns a tensor of the same
type and shape as `tensor` with all elements set to 1. Optionally, you can
specify a new type (`dtype`) for the returned tensor.
For example:
```python
# 'tensor' is [[1, 2, 3], [4, 5, 6]]
tf.ones_like(tensor) ==> [[1, 1, 1], [1, 1, 1]]
```
Args:
tensor: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
`int8`, `int16`, `int32`, `int64`, `uint8`, `complex64`, or `complex128`.
name: A name for the operation (optional).
optimize: if true, attempt to statically determine the shape of 'tensor'
and encode it as a constant.
Returns:
A `Tensor` with all elements set to 1.
"""
with ops.name_scope(name, "ones_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
ones_shape = shape_internal(tensor, optimize=optimize)
if dtype is None:
dtype = tensor.dtype
ret = ones(ones_shape, dtype=dtype, name=name)
ret.set_shape(tensor.get_shape())
return ret
def ones(shape, dtype=dtypes.float32, name=None):
"""Creates a tensor with all elements set to 1.
This operation returns a tensor of type `dtype` with shape `shape` and all
elements set to 1.
For example:
```python
tf.ones([2, 3], int32) ==> [[1, 1, 1], [1, 1, 1]]
```
Args:
shape: Either a list of integers, or a 1-D `Tensor` of type `int32`.
dtype: The type of an element in the resulting `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to 1.
"""
with ops.name_scope(name, "ones", [shape]) as name:
try:
shape = tensor_shape.as_shape(shape)
output = constant(1, shape=shape, dtype=dtype, name=name)
except (TypeError, ValueError):
shape = ops.convert_to_tensor(shape, dtype=dtypes.int32, name="shape")
output = fill(shape, constant(1, dtype=dtype), name=name)
assert output.dtype.base_dtype == dtypes.as_dtype(dtype).base_dtype
return output
def placeholder(dtype, shape=None, name=None):
"""Inserts a placeholder for a tensor that will be always fed.
**Important**: This tensor will produce an error if evaluated. Its value must
be fed using the `feed_dict` optional argument to `Session.run()`,
`Tensor.eval()`, or `Operation.run()`.
For example:
```python
x = tf.placeholder(tf.float32, shape=(1024, 1024))
y = tf.matmul(x, x)
with tf.Session() as sess:
print(sess.run(y)) # ERROR: will fail because x was not fed.
rand_array = np.random.rand(1024, 1024)
print(sess.run(y, feed_dict={x: rand_array})) # Will succeed.
```
Args:
dtype: The type of elements in the tensor to be fed.
shape: The shape of the tensor to be fed (optional). If the shape is not
specified, you can feed a tensor of any shape.
name: A name for the operation (optional).
Returns:
A `Tensor` that may be used as a handle for feeding a value, but not
evaluated directly.
"""
shape = tensor_shape.as_shape(shape)
if shape.is_fully_defined():
dim_list = shape.as_list()
else:
dim_list = []
ret = gen_array_ops._placeholder(
dtype=dtype,
shape=dim_list,
name=name)
ret.set_shape(shape)
return ret
def sparse_placeholder(dtype, shape=None, name=None):
"""Inserts a placeholder for a sparse tensor that will be always fed.
**Important**: This sparse tensor will produce an error if evaluated.
Its value must be fed using the `feed_dict` optional argument to
`Session.run()`, `Tensor.eval()`, or `Operation.run()`.
For example:
```python
x = tf.sparse_placeholder(tf.float32)
y = tf.sparse_reduce_sum(x)
with tf.Session() as sess:
print(sess.run(y)) # ERROR: will fail because x was not fed.
indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)
values = np.array([1.0, 2.0], dtype=np.float32)
shape = np.array([7, 9, 2], dtype=np.int64)
print(sess.run(y, feed_dict={
x: tf.SparseTensorValue(indices, values, shape)})) # Will succeed.
print(sess.run(y, feed_dict={
x: (indices, values, shape)})) # Will succeed.
sp = tf.SparseTensor(indices=indices, values=values, shape=shape)
sp_value = sp.eval(session)
print(sess.run(y, feed_dict={x: sp_value})) # Will succeed.
```
Args:
dtype: The type of `values` elements in the tensor to be fed.
shape: The shape of the tensor to be fed (optional). If the shape is not
specified, you can feed a sparse tensor of any shape.
name: A name for prefixing the operations (optional).
Returns:
A `SparseTensor` that may be used as a handle for feeding a value, but not
evaluated directly.
"""
if shape is None:
shape = placeholder(
dtypes.int64, name=(name + "/shape") if name is not None else None)
else:
shape = ops.convert_to_tensor(
shape, name=(name + "/shape") if name is not None else None)
return ops.SparseTensor(
values=placeholder(
dtype, name=(name + "/values") if name is not None else None),
indices=placeholder(
dtypes.int64,
name=(name + "/indices") if name is not None else None),
shape=shape
)
def pad(tensor, paddings, mode="CONSTANT", name=None): # pylint: disable=invalid-name
"""Pads a tensor.
This operation pads a `tensor` according to the `paddings` you specify.
`paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of
`tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how
many values to add before the contents of `tensor` in that dimension, and
`paddings[D, 1]` indicates how many values to add after the contents of
`tensor` in that dimension. If `mode` is "REFLECT" then both `paddings[D, 0]`
and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If
`mode` is "SYMMETRIC" then both `paddings[D, 0]` and `paddings[D, 1]` must be
no greater than `tensor.dim_size(D)`.
The padded size of each dimension D of the output is:
`paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`
For example:
```python
# 't' is [[1, 2, 3], [4, 5, 6]].
# 'paddings' is [[1, 1,], [2, 2]].
# rank of 't' is 2.
pad(t, paddings, "CONSTANT") ==> [[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 2, 3, 0, 0],
[0, 0, 4, 5, 6, 0, 0],
[0, 0, 0, 0, 0, 0, 0]]
pad(t, paddings, "REFLECT") ==> [[6, 5, 4, 5, 6, 5, 4],
[3, 2, 1, 2, 3, 2, 1],
[6, 5, 4, 5, 6, 5, 4],
[3, 2, 1, 2, 3, 2, 1]]
pad(t, paddings, "SYMMETRIC") ==> [[2, 1, 1, 2, 3, 3, 2],
[2, 1, 1, 2, 3, 3, 2],
[5, 4, 4, 5, 6, 6, 5],
[5, 4, 4, 5, 6, 6, 5]]
```
Args:
tensor: A `Tensor`.
paddings: A `Tensor` of type `int32`.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC".
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `tensor`.
Raises:
ValueError: When mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
"""
if mode == "CONSTANT":
return gen_array_ops._pad(tensor, paddings, name=name)
if mode == "REFLECT":
return gen_array_ops._mirror_pad(tensor,
paddings,
mode="REFLECT",
name=name)
if mode == "SYMMETRIC":
return gen_array_ops._mirror_pad(tensor,
paddings,
mode="SYMMETRIC",
name=name)
raise ValueError("Unknown padding mode: %s" % mode)
def meshgrid(*args, **kwargs):
"""Broadcasts parameters for evaluation on an N-D grid.
Given N one-dimensional coordinate arrays `*args`, returns a list `outputs`
of N-D coordinate arrays for evaluating expressions on an N-D grid.
Notes:
`meshgrid` supports cartesian ('xy') and matrix ('ij') indexing conventions.
When the `indexing` argument is set to 'xy' (the default), the broadcasting
instructions for the first two dimensions are swapped.
Examples:
Calling `X, Y = meshgrid(x, y)` with the tensors
```prettyprint
x = [1, 2, 3]
y = [4, 5, 6]
```
results in
```prettyprint
X = [[1, 1, 1],
[2, 2, 2],
[3, 3, 3]]
Y = [[4, 5, 6],
[4, 5, 6],
[4, 5, 6]]
```
Args:
*args: `Tensor`s with rank 1
indexing: Either 'xy' or 'ij' (optional, default: 'xy')
name: A name for the operation (optional).
Returns:
outputs: A list of N `Tensor`s with rank N
"""
indexing = kwargs.pop("indexing", "xy")
name = kwargs.pop("name", "meshgrid")
if len(kwargs) > 0:
key = list(kwargs.keys())[0]
raise TypeError("'{}' is an invalid keyword argument "
"for this function".format(key))
if indexing not in ("xy", "ij"):
raise ValueError("indexing parameter must be either 'xy' or 'ij'")
with ops.name_scope(name, "meshgrid", args) as name:
num_inputs = len(args)
ones = (1,) * num_inputs
asserts = [logging_ops.Assert(
gen_math_ops.equal(rank(x), 1),
["Input %d needs to have rank 1: " % i, rank(x)],
) for i, x in enumerate(args)]
# Prepare reshape by inserting dimensions with size 1 where needed
shapes = [ones[:i] + (-1,) + ones[i + 1:] for i in range(num_inputs)]
# Create parameters for broadcasting each tensor to the full size
sizes = [size(x) for x in args]
bcast = [sizes[:i] + [1] + sizes[i + 1:] for i in range(num_inputs)]
# By default, the numpy version swaps the instructions
# for the first and second dimension
if indexing == "xy" and num_inputs > 1:
shapes[0], shapes[1] = shapes[1], shapes[0]
bcast[0], bcast[1] = bcast[1], bcast[0]
results = []
with ops.control_dependencies(asserts):
for a, r, e in zip(args, shapes, bcast):
results.append(tile(reshape(a, r), e))
return results
@ops.RegisterShape("Placeholder")
def _PlaceholderShape(op):
given_shape = tensor_util.TensorShapeProtoToList(op.get_attr("shape"))
if given_shape:
return [tensor_shape.TensorShape(given_shape)]
else:
return [tensor_shape.unknown_shape()]
@ops.RegisterShape("CheckNumerics")
@ops.RegisterShape("Identity")
@ops.RegisterShape("RefIdentity")
@ops.RegisterShape("StopGradient")
@ops.RegisterShape("BatchMatrixBandPart")
@ops.RegisterShape("QuantizeAndDequantize")
def _UnchangedShape(op):
return [op.inputs[0].get_shape()]
@ops.RegisterShape("Rank")
@ops.RegisterShape("Size")
def _ScalarShape(unused_op):
return [tensor_shape.scalar()]
@ops.RegisterShape("Slice")
def _SliceShape(op):
"""Shape function for array_ops.slice."""
input_shape = op.inputs[0].get_shape()
begin_shape = op.inputs[1].get_shape().with_rank(1)
sizes_shape = op.inputs[2].get_shape().with_rank(1)
ndims = begin_shape.merge_with(sizes_shape)[0].value
if ndims is not None:
input_shape.assert_has_rank(ndims)
# NOTE(mrry): Use `constant_value_as_shape()` to handle
# partially-known values.
begin_value = tensor_util.constant_value_as_shape(
op.inputs[1]).with_rank(ndims)
# NOTE(mrry): We can't use `constant_value_as_shape()` for `sizes`
# because it might contain -1, which can't be represented as a
# `TensorShape`.
sizes_value = tensor_util.constant_value(op.inputs[2])
if sizes_value is not None:
returned_dims = []
for i, (slice_size, begin_dim) in enumerate(zip(sizes_value.ravel(),
begin_value.dims)):
if slice_size != -1:
returned_dims.append(slice_size)
else:
returned_dims.append(input_shape[i] - begin_dim)
return [tensor_shape.TensorShape(returned_dims)]
else:
if input_shape.ndims is not None:
return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
elif ndims is not None:
return [tensor_shape.unknown_shape(ndims=ndims)]
else:
return [tensor_shape.unknown_shape()]
NEW_AXIS = -1
SHRINK_AXIS = -2
# PEP-8 naming
# pylint: disable=invalid-name
def _compute_size_of_strided_dim(shrink, spec, size):
"""Computes the size of a single strided slice dimension."""
  unknown = None  # Sentinel: the dimension size cannot be determined.
  use_full_range = None  # Sentinel: no bound was given, so use the full valid range.
# if this is a shrink axis (i.e. a non-range index)
# it either will produce an error or return 1
if shrink:
return 1
if size is unknown or size.value is unknown:
return unknown
size = size.value
stride = spec.step
if stride is not unknown:
if stride == 0:
return unknown
stride = spec.step
valid_range = [0, size] if stride > 0 else [-1, size - 1]
# PEP-8 naming
# pylint: disable=invalid-name
def canonical(x, c):
if x is use_full_range:
return valid_range[c] if stride > 0 else valid_range[(c + 1) & 1]
else:
x_fwd = size + x if x < 0 else x # make negative indices positive
return max(valid_range[0], min(valid_range[1], x_fwd))
begin = canonical(spec.start, 0)
end = canonical(spec.stop, 1)
interval_length = end - begin
if interval_length == 0 or ((interval_length < 0) != (stride < 0)):
return 0
else:
remainder = 1 if interval_length % stride != 0 else 0
return interval_length // stride + remainder
else:
return unknown # unknown because stride is unknown
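# Worked example (added note, not part of the original module): for a dimension
# of size 10 and spec = slice(1, 7, 2), canonical() clamps begin to 1 and end
# to 7, the interval length is 6, and the resulting dimension size is
# 6 // 2 = 3 (the elements at indices 1, 3 and 5).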
@ops.RegisterShape("StridedSliceGrad")
def _StridedSliceGradShape(op):
"""Shape function for gradient of array_ops.slice."""
return [tensor_util.constant_value(op.inputs[0])]
@ops.RegisterShape("StridedSlice")
def _StridedSliceShape(op):
"""Shape function for array_ops.slice."""
input_shape = op.inputs[0].get_shape()
if input_shape.ndims is None:
return [tensor_shape.unknown_shape()]
ndims = len(input_shape)
begin_shape = op.inputs[1].get_shape().with_rank(1)
end_shape = op.inputs[2].get_shape().with_rank(1)
strides_shape = op.inputs[3].get_shape().with_rank(1)
# get constant values if available
begin_value = tensor_util.constant_value(op.inputs[1])
end_value = tensor_util.constant_value(op.inputs[2])
strides_value = tensor_util.constant_value(op.inputs[3])
sparse_dims = begin_shape.merge_with(end_shape).merge_with(strides_shape)[
0].value
if (sparse_dims is None or begin_value is None or end_value is None or
strides_value is None):
return [tensor_shape.unknown_shape()]
begin_mask = op.get_attr("begin_mask")
end_mask = op.get_attr("end_mask")
ellipsis_mask = op.get_attr("ellipsis_mask")
new_axis_mask = op.get_attr("new_axis_mask")
shrink_axis_mask = op.get_attr("shrink_axis_mask")
# find the ellipsis
ellipsis_index = -1
# look for ellipses
num_add_axis_after_ellipsis = 0
for i in range(sparse_dims):
if ellipsis_index != -1 and ((1 << i) & new_axis_mask) != 0:
num_add_axis_after_ellipsis += 1
if (1 << i) & ellipsis_mask:
if ellipsis_index != -1:
raise ValueError("Multiple ellipses not allowed")
ellipsis_index = i
# insert a virtual ellipsis if not seen
if ellipsis_index == -1:
ellipsis_mask |= (1 << sparse_dims)
sparse_dims += 1
# build the dense specification
dense_dims = ndims # not accounting for newaxis and shrink
final_shape_gather = []
full_index = 0
dense_shrink_axis = 0
dense_specs = []
for dim in range(sparse_dims):
bit = 1 << dim
if bit & ellipsis_mask:
next_index = min(dense_dims -
(sparse_dims - dim) + 1 + num_add_axis_after_ellipsis,
dense_dims)
while full_index < next_index:
dense_specs.append(_baseslice(None, None, 1))
final_shape_gather.append(full_index)
full_index += 1
elif bit & new_axis_mask:
final_shape_gather.append(NEW_AXIS)
else:
dense_specs.append(_baseslice(
None if (begin_mask & bit) else begin_value[dim], None if (
end_mask & bit) else end_value[dim], strides_value[dim]))
if shrink_axis_mask & bit:
dense_shrink_axis |= (1 << full_index)
final_shape_gather.append(SHRINK_AXIS)
else:
final_shape_gather.append(full_index)
full_index += 1
  # Compute each dimension's contribution to the "processing" shape
final_dims = []
for dim in range(dense_dims):
shrink = (dense_shrink_axis & (1 << dim)) != 0
final_dims.append(
_compute_size_of_strided_dim(shrink, dense_specs[dim], input_shape.dims[
dim]))
# Gather the final shape from the processing shape
final_shape = []
for index in final_shape_gather:
if index == NEW_AXIS:
final_shape.append(1)
elif index == SHRINK_AXIS:
pass
else:
final_shape.append(final_dims[index])
return [tensor_shape.TensorShape(final_shape)]
@ops.RegisterShape("Gather")
def _GatherShape(op):
"""Shape function for array_ops.gather."""
params_shape = op.inputs[0].get_shape()
indices_shape = op.inputs[1].get_shape()
return [indices_shape.concatenate(params_shape[1:])]
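# Illustrative sketch (added note, not part of the original module): gathering
# with params of shape [10, 5] and indices of shape [2, 3] yields an output of
# shape [2, 3, 5] -- the indices shape followed by params' trailing dims.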
@ops.RegisterShape("GatherNd")
def _GatherNdShape(op):
"""Shape function for array_ops.gather_nd."""
params_shape = op.inputs[0].get_shape()
indices_shape = op.inputs[1].get_shape().with_rank_at_least(1)
indices_rank = indices_shape.ndims
indices_lookup_rank = (
None if indices_rank is None else indices_shape[-1].value)
if params_shape.ndims is None or indices_lookup_rank is None:
return [tensor_shape.unknown_shape()]
else:
if indices_lookup_rank > params_shape.ndims:
raise ValueError(
"indices.shape[-1] must be <= params.rank, but saw indices shape: %s "
" and params shape: %s" % (indices_shape, params_shape))
indices_lookup_shape = indices_shape[:-1]
params_slices_shape = params_shape[indices_lookup_rank:]
return [indices_lookup_shape.concatenate(params_slices_shape)]
@ops.RegisterShape("Unique")
def _UniqueShape(op):
"""Shape function for array_ops.Unique."""
# The output is a vector with data-dependent length.
input_shape = op.inputs[0].get_shape()
input_shape.assert_has_rank(1)
return [tensor_shape.vector(None), input_shape]
@ops.RegisterShape("UniqueWithCounts")
def _UniqueWithCountsShape(op):
"""Shape function for array_ops.Unique."""
# The output is a vector with data-dependent length.
input_shape = op.inputs[0].get_shape()
input_shape.assert_has_rank(1)
return [tensor_shape.vector(None), input_shape, tensor_shape.vector(None)]
@ops.RegisterShape("BatchMatrixDiag")
def _BatchMatrixDiagShape(op):
"""Shape function for array_ops.batch_matrix_diag."""
diag_shape = op.inputs[0].get_shape().with_rank_at_least(1)
return [diag_shape.concatenate(diag_shape[-1])]
@ops.RegisterShape("BatchMatrixSetDiag")
def _BatchMatrixSetDiagShape(op):
"""Shape function for array_ops.batch_matrix_set_diag."""
input_shape = op.inputs[0].get_shape().with_rank_at_least(2)
diag_shape = op.inputs[1].get_shape().with_rank_at_least(1)
output_shape = diag_shape.concatenate(diag_shape[-1])
output_shape = output_shape.merge_with(input_shape)
return [output_shape]
@ops.RegisterShape("BatchMatrixDiagPart")
def _BatchMatrixDiagPartShape(op):
"""Shape function for array_ops.batch_matrix_diag_part."""
input_shape = op.inputs[0].get_shape().with_rank_at_least(2)
# Last two dims must match
input_shape[-1].assert_is_compatible_with(input_shape[-2])
return [input_shape[:-1]]
@ops.RegisterShape("Diag")
def _DiagShape(op):
"""Shape function for array_ops.diag.
This op has one input (of rank k <= 3), and one output (of rank 2k),
where the shape of the output is the concatenation of the input
shape with itself.
Args:
op: A Diag Operation.
Returns:
A single-element list containing the shape of the output.
"""
input_shape = op.inputs[0].get_shape().with_rank_at_most(3)
return [input_shape.concatenate(input_shape)]
@ops.RegisterShape("DiagPart")
def _DiagPartShape(op):
"""Shape function for array_ops.diag_part.
This op has one input (of rank k = 2, 4, or 6), and one output (of rank k/2),
where the shape of the output is the diagonal of the input shape.
Args:
op: A DiagPart Operation.
Returns:
A single-element list containing the shape of the output.
Raises:
    ValueError: If the input has odd rank, has rank greater than 6, or if the
      first and second halves of the shape are incompatible.
"""
input_shape = op.inputs[0].get_shape().with_rank_at_most(6)
rank = input_shape.ndims
if rank is None:
return [tensor_shape.unknown_shape()]
if rank % 2:
raise ValueError("Input must be even rank, got rank = " + str(rank) + ".")
mid = rank // 2
return [input_shape[:mid].merge_with(input_shape[mid:])]
@ops.RegisterShape("ExpandDims")
def _ExpandDimsShape(op):
"""Determine shape for expand op's output tensor.
Args:
op: Operation for which to determine shape.
op.inputs[0] is the input tensor.
op.inputs[1] is the dimension in which to expand.
Returns:
Shape of op's output tensor.
Raises:
ValueError: If dim is outside of [-rank - 1, rank], where rank is the number
of dimensions in the input tensor.
"""
input_shape = op.inputs[0].get_shape()
if input_shape.dims is None:
return [tensor_shape.unknown_shape()]
dim = tensor_util.constant_value(op.inputs[1])
input_ndims = input_shape.ndims
if dim < -input_ndims - 1 or dim > input_ndims:
raise ValueError(
"dim %d not in [%d, %d]." % (dim, -input_ndims, input_ndims))
if dim < 0:
dim += (input_ndims + 1)
result_shape = list(input_shape.dims)
result_shape.insert(dim, 1)
return [tensor_shape.TensorShape(result_shape)]
@ops.RegisterShape("Squeeze")
def _SqueezeShape(op):
"""Determine shape for squeeze op's output tensor.
Args:
op: Operation for which to determine shape.
Returns:
Shape of op's output tensor.
Raises:
ValueError: if squeeze_dims includes a dimension outside of [-rank, rank),
where rank is the number of dimensions in the input tensor. Or, if
squeeze_dims includes a dimension for which input shape has a value
not equal to 1.
"""
input_shape = op.inputs[0].get_shape()
if input_shape.dims is None:
return [tensor_shape.unknown_shape()]
squeeze_dims = op.get_attr("squeeze_dims") or []
wrapped_squeeze_dims = []
input_ndims = input_shape.ndims
for i, squeeze_dim in enumerate(squeeze_dims):
if squeeze_dim < -input_ndims or squeeze_dim >= input_ndims:
raise ValueError(
"squeeze_dims[%d]=%d not in [%d, %d)." % (
i, squeeze_dim, -input_ndims, input_ndims))
if squeeze_dim < 0:
squeeze_dim += input_ndims
wrapped_squeeze_dims.append(squeeze_dim)
result_shape = []
for i, dim in enumerate([d.value for d in input_shape.dims]):
is_explicit_match = i in wrapped_squeeze_dims
if dim is None:
if is_explicit_match:
# Assume that the squeezed dimension will be 1 at runtime.
continue
if not wrapped_squeeze_dims:
# If squeezing all 1 dimensions and we see a None, give up.
return [tensor_shape.unknown_shape()]
elif dim == 1:
if is_explicit_match or not wrapped_squeeze_dims:
continue
elif is_explicit_match:
raise ValueError(
"Can not squeeze dim[%d], expected a dimension of 1, got %d." % (
i, dim))
result_shape.append(dim)
return [tensor_shape.TensorShape(result_shape)]
@ops.RegisterShape("Bitcast")
def _BitcastShape(op):
"""Shape function for Bitcast op."""
input_shape = op.inputs[0].get_shape()
if input_shape == tensor_shape.unknown_shape():
return [tensor_shape.unknown_shape()]
input_type = op.inputs[0].dtype
size_of_input = input_type.size
output = dtypes.as_dtype(op.get_attr("type"))
size_of_output = output.size
if size_of_input == size_of_output:
return [input_shape]
else:
if size_of_output > size_of_input:
new_shape = input_shape.with_rank_at_least(1).as_list()
last_val = new_shape[-1]
if last_val is None or last_val == (size_of_output // size_of_input):
new_shape = new_shape[:-1]
else:
raise ValueError(
"Cannot bitcast due to shape. %d is not evenly divisible by %d." %
            (new_shape[-1], size_of_output // size_of_input))
else:
new_shape = input_shape
new_shape = new_shape.concatenate([size_of_input // size_of_output])
return [tensor_shape.TensorShape(new_shape)]
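# Illustrative sketch (added note, not part of the original module): bitcasting
# a [2, 3] float32 tensor (4-byte elements) to uint8 appends a new inner dim of
# size 4, giving [2, 3, 4]; bitcasting that back to float32 requires the last
# dim to be 4 and drops it, giving [2, 3] again.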
@ops.RegisterShape("Reshape")
def _ReshapeShape(op):
"""Shape function for Reshape op."""
input_shape = op.inputs[0].get_shape()
if input_shape.ndims is not None:
num_elements = tensor_shape.Dimension(1)
for dim in input_shape.dims:
num_elements *= dim
else:
num_elements = tensor_shape.Dimension(None)
new_shape = tensor_util.constant_value_as_shape(op.inputs[1])
if new_shape.ndims is None:
# We have no information about the shape of the output.
return [new_shape]
if None not in new_shape.as_list():
# The new shape is fully defined.
if (num_elements.value is not None
and num_elements.value != np.prod(new_shape)):
raise ValueError(
"Cannot reshape a tensor with %d elements to shape %s (%d elements)"
% (num_elements.value, new_shape, np.prod(new_shape)))
elif num_elements.value is not None:
# We know the number of elements, so we can calculate the missing
# dimension in the new_shape.
known_elements = 1
unknown_indices = []
for i, dim in enumerate(new_shape):
if dim.value is None:
unknown_indices.append(i)
else:
known_elements *= dim.value
if known_elements != 0:
if num_elements % known_elements != 0:
raise ValueError("input has %s elements, which isn't divisible by %d" %
(num_elements, known_elements))
if len(unknown_indices) == 1:
unknown_index = unknown_indices[0]
new_shape = new_shape.merge_with(
new_shape[:unknown_index].concatenate(
[num_elements // known_elements]).concatenate(
new_shape[unknown_index+1:]))
return [new_shape]
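# Illustrative sketch (added note, not part of the original module): reshaping
# a tensor with 12 elements to shape [3, -1] leaves one unknown dimension;
# known_elements is 3, so the missing dimension is inferred as 12 // 3 = 4 and
# the resulting static shape is [3, 4].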
@ops.RegisterShape("BroadcastGradientArgs")
def _BroadcastGradientArgsShape(op):
"""Shape function for the BroadcastGradientArgs op."""
# TODO(mrry): Implement constant_value for BroadcastGradientArgs?
op.inputs[0].get_shape().assert_has_rank(1)
op.inputs[1].get_shape().assert_has_rank(1)
return [tensor_shape.vector(None), tensor_shape.vector(None)]
@ops.RegisterShape("Fill")
def _FillShape(op):
"""Shape function for the Fill op.
This op takes a vector of dimensions and a scalar, and produces a
tensor with the given dimensions.
Args:
op: A Fill Operation.
Returns:
A single-element list containing the shape of the output.
Raises:
ValueError: If the shapes or arguments are known to be invalid.
"""
op.inputs[0].get_shape().assert_has_rank(1)
op.inputs[1].get_shape().assert_has_rank(0)
fill_dims = tensor_util.constant_value(op.inputs[0])
if fill_dims is not None and any(d < 0 for d in fill_dims):
raise ValueError("Fill dimensions must be >= 0")
return [tensor_util.constant_value_as_shape(op.inputs[0])]
@ops.RegisterShape("InvertPermutation")
def _InvertPermutationShape(op):
"""Shape function for the InvertPermutation op."""
return [op.inputs[0].get_shape().with_rank(1)]
@ops.RegisterShape("ListDiff")
def _ListDiffShape(op):
"""Shape function for the ListDiff op."""
op.inputs[0].get_shape().assert_has_rank(1)
op.inputs[1].get_shape().assert_has_rank(1)
# TODO(mrry): Indicate that the length falls within an interval?
return [tensor_shape.vector(None)] * 2
@ops.RegisterShape("Pad")
@ops.RegisterShape("MirrorPad")
def _PadShape(op):
"""Shape function for the Pad op.
This op has two inputs:
* input: A rank-N tensor.
* paddings: An N-by-2 matrix, in which the i^th row contains the
number of padding elements to add before and after `input` in the
i^th dimension.
It has one output, which has the same rank as input, and additional
elements according to the values in paddings.
Args:
op: A Pad Operation.
Returns:
A single-element list containing the shape of the output.
Raises:
ValueError: If the input shapes are incompatible.
"""
paddings_shape = op.inputs[1].get_shape().with_rank(2)
input_shape = op.inputs[0].get_shape()
input_shape = input_shape.with_rank(paddings_shape[0].value)
paddings_shape = paddings_shape.merge_with(
tensor_shape.matrix(input_shape.ndims, 2))
paddings = tensor_util.constant_value(op.inputs[1])
if paddings is None:
return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
else:
output_dims = []
for i, dim in enumerate(input_shape.dims):
if paddings[i, 0] < 0 or paddings[i, 1] < 0:
raise ValueError("paddings must be non-negative")
output_dims.append(dim + paddings[i, 0] + paddings[i, 1])
return [tensor_shape.TensorShape(output_dims)]
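# Illustrative sketch (added note, not part of the original module): padding an
# input of shape [2, 3] with paddings [[1, 1], [2, 2]] produces output dims
# 1 + 2 + 1 = 4 and 2 + 3 + 2 = 7, i.e. a static shape of [4, 7].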
@ops.RegisterShape("MirrorPadGrad")
def _MirrorPadGradShape(op):
"""Shape function for the MirrorPadGrad op."""
paddings_shape = op.inputs[1].get_shape().with_rank(2)
input_shape = op.inputs[0].get_shape().with_rank(paddings_shape[0].value)
paddings_shape = paddings_shape.merge_with(tensor_shape.matrix(
input_shape.ndims, 2))
paddings = tensor_util.constant_value(op.inputs[1])
if paddings is None:
return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
output_dims = []
for i, dim in enumerate(input_shape.dims):
if paddings[i, 0] < 0 or paddings[i, 1] < 0:
raise ValueError("Paddings must be non-negative.")
if dim < paddings[i, 0] + paddings[i, 1]:
raise ValueError("Output dimension is negative.")
output_dims.append(dim - paddings[i, 0] - paddings[i, 1])
return [tensor_shape.TensorShape(output_dims)]
@ops.RegisterShape("ReverseSequence")
def _ReverseSequenceShape(op):
"""Shape function for the ReverseSequence op.
This op has two inputs:
* input: A rank-N tensor with size B in the 0th dimension.
* seq_lens: A vector of length B.
It has one output, with the same size as input.
Args:
op: A ReverseSequence Operation.
Returns:
A single-element list containing the shape of the output.
Raises:
ValueError: If the input shapes are incompatible or seq_dim == batch_dim.
"""
input_shape = op.inputs[0].get_shape()
seq_lens_shape = op.inputs[1].get_shape().with_rank(1)
if input_shape.ndims is None:
return [None]
seq_dim = op.get_attr("seq_dim")
batch_dim = op.get_attr("batch_dim")
if input_shape.ndims is not None:
if batch_dim >= input_shape.ndims:
raise ValueError("batch_dim must be < input.dims() (%d vs %d)" %
(batch_dim, input_shape.ndims))
if seq_dim >= input_shape.ndims:
raise ValueError("seq_dim must be < input.dims() (%d vs %d)" %
(seq_dim, input_shape.ndims))
batch_size = input_shape[batch_dim].merge_with(seq_lens_shape[0])
input_shape = tensor_shape.TensorShape([
value if ix != batch_dim else batch_size
for ix, value in enumerate(input_shape)])
return [input_shape]
@ops.RegisterShape("Shape")
@ops.RegisterShape("ShapeN")
def _ShapeNShape(op):
"""Shape function for the Shape/ShapeN op."""
return [tensor_shape.vector(x.get_shape().ndims) for x in op.inputs]
@ops.RegisterShape("Transpose")
def _TransposeShape(op):
"""Shape function for the Transpose op.
This op takes two inputs:
* input: a rank-N tensor of arbitrary shape.
* shuffle: a length-N vector.
Its output is the rank-N tensor computed by permuting the dimensions
of input according to shuffle.
Args:
op: A Transpose op.
Returns:
A single-element list containing the shape of the output.
Raises:
ValueError: If the shapes of input and shuffle are incompatible.
IndexError: If shuffle contains an index that is >= the rank of input.
"""
input_shape = op.inputs[0].get_shape()
transpose_shape = op.inputs[1].get_shape().merge_with(tensor_shape.vector(
input_shape.ndims))
transpose_vec = tensor_util.constant_value(op.inputs[1])
if transpose_vec is None:
return [tensor_shape.unknown_shape(ndims=transpose_shape[0].value)]
else:
return [tensor_shape.TensorShape([input_shape[i]
for i in transpose_vec.tolist()])]
@ops.RegisterShape("Split")
def _SplitShape(op):
"""Shape function for the Split op."""
split_dim = tensor_util.constant_value(op.inputs[0])
num_split = len(op.outputs)
input_shape = op.inputs[1].get_shape()
if split_dim is None:
return [tensor_shape.unknown_shape(ndims=input_shape.ndims)] * num_split
else:
split_dim = int(split_dim)
input_shape = input_shape.with_rank_at_least(split_dim + 1)
if not (input_shape[split_dim] % num_split).is_compatible_with(0):
raise ValueError(
"Number of ways to split should evenly divide the split "
"dimension but got split_dim %d (size = %d) and num_split %d" %
(split_dim, input_shape[split_dim].value, num_split))
prefix = input_shape[:split_dim]
size_in_split_dim = input_shape[split_dim] // num_split
suffix = input_shape[split_dim + 1:]
output_shape = prefix.concatenate(size_in_split_dim).concatenate(suffix)
return [output_shape] * num_split
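# Illustrative sketch (added note, not part of the original module): splitting
# an input of shape [5, 30] with split_dim=1 and num_split=3 yields three
# outputs, each of static shape [5, 10].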
@ops.RegisterShape("Tile")
def _TileShape(op):
"""Shape function for the Tile op.
This op has two inputs:
* input: A rank-N tensor.
* multiples: A length-N vector, in which the i^th element contains
the factor by which `input` will be tiled in the i^th dimension.
It has one output, which has the same rank as input, and additional
  elements according to the values in multiples.
Args:
op: A Tile Operation.
Returns:
A single-element list containing the shape of the output.
"""
multiples_shape = op.inputs[1].get_shape().with_rank(1)
input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0].value)
# NOTE(mrry): Represent `multiples` as a `TensorShape` because (i)
# it is a vector of non-negative integers, and (ii) doing so allows
# us to handle partially-known multiples.
multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(
input_shape.ndims)
if multiples.ndims is None:
return [tensor_shape.unknown_shape()]
else:
output_dims = []
for dim, multiple in zip(input_shape.dims, multiples.dims):
output_dims.append(dim * multiple)
return [tensor_shape.TensorShape(output_dims)]
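# Illustrative sketch (added note, not part of the original module): tiling an
# input of shape [2, 3] with multiples [3, 2] multiplies each dim, giving an
# output of static shape [6, 6].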
@ops.RegisterShape("TileGrad")
def _TileGradShape(op):
"""Shape function for the TileGrad op."""
multiples_shape = op.inputs[1].get_shape().with_rank(1)
input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])
# NOTE(mrry): Represent `multiples` as a `TensorShape` because (i)
# it is a vector of non-negative integers, and (ii) doing so allows
# us to handle partially-known multiples.
multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(
input_shape.ndims)
if multiples.ndims is None:
return [tensor_shape.unknown_shape()]
else:
output_dims = []
for dim, multiple in zip(input_shape.dims, multiples.dims):
output_dims.append(dim // multiple)
return [tensor_shape.TensorShape(output_dims)]
@ops.RegisterShape("Where")
def _WhereShape(op):
"""Shape function for the Where op."""
input_shape = op.inputs[0].get_shape()
return [tensor_shape.matrix(None, input_shape.ndims)]
@ops.RegisterShape("ZerosLike")
def _ZerosLikeShape(op):
"""Shape function for the ZerosLike op."""
return [op.inputs[0].get_shape()]
def edit_distance(hypothesis, truth, normalize=True, name="edit_distance"):
"""Computes the Levenshtein distance between sequences.
This operation takes variable-length sequences (`hypothesis` and `truth`),
each provided as a `SparseTensor`, and computes the Levenshtein distance.
You can normalize the edit distance by length of `truth` by setting
`normalize` to true.
For example, given the following input:
```python
# 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:
# (0,0) = ["a"]
# (1,0) = ["b"]
hypothesis = tf.SparseTensor(
[[0, 0, 0],
[1, 0, 0]],
["a", "b"]
(2, 1, 1))
# 'truth' is a tensor of shape `[2, 2]` with variable-length values:
# (0,0) = []
# (0,1) = ["a"]
# (1,0) = ["b", "c"]
# (1,1) = ["a"]
truth = tf.SparseTensor(
[[0, 1, 0],
[1, 0, 0],
[1, 0, 1],
       [1, 1, 0]],
["a", "b", "c", "a"],
(2, 2, 2))
normalize = True
```
This operation would return the following:
```python
# 'output' is a tensor of shape `[2, 2]` with edit distances normalized
# by 'truth' lengths.
output ==> [[inf, 1.0], # (0,0): no truth, (0,1): no hypothesis
[0.5, 1.0]] # (1,0): addition, (1,1): no hypothesis
```
Args:
hypothesis: A `SparseTensor` containing hypothesis sequences.
truth: A `SparseTensor` containing truth sequences.
normalize: A `bool`. If `True`, normalizes the Levenshtein distance by
length of `truth.`
name: A name for the operation (optional).
Returns:
A dense `Tensor` with rank `R - 1`, where R is the rank of the
`SparseTensor` inputs `hypothesis` and `truth`.
Raises:
TypeError: If either `hypothesis` or `truth` are not a `SparseTensor`.
"""
if not isinstance(hypothesis, ops.SparseTensor):
raise TypeError("Hypothesis must be a SparseTensor")
if not isinstance(truth, ops.SparseTensor):
raise TypeError("Truth must be a SparseTensor")
return gen_array_ops._edit_distance(hypothesis.indices,
hypothesis.values,
hypothesis.shape,
truth.indices,
truth.values,
truth.shape,
normalize=normalize,
name=name)
@ops.RegisterShape("EditDistance")
def _EditDistanceShape(op):
"""Shape function for the EditDistance op."""
hypothesis_shape = tensor_util.constant_value(op.inputs[2])
truth_shape = tensor_util.constant_value(op.inputs[5])
if hypothesis_shape is not None and truth_shape is not None:
if len(hypothesis_shape) != len(truth_shape):
raise ValueError(
"Inconsistent ranks in hypothesis and truth. Saw shapes: %s and %s" %
(str(hypothesis_shape), str(truth_shape)))
return [tensor_shape.TensorShape(
[max(h, t) for h, t in zip(hypothesis_shape[:-1], truth_shape[:-1])])]
return [tensor_shape.unknown_shape()]
# The remaining ops do not change the shape of their inputs.
@ops.RegisterShape("Quantize")
@ops.RegisterShape("Dequantize")
def _QuantizeDequantizeShape(op):
unused_min_range = op.inputs[1].get_shape().merge_with(tensor_shape.scalar())
unused_max_range = op.inputs[2].get_shape().merge_with(tensor_shape.scalar())
return common_shapes.unchanged_shape(op)
@ops.RegisterShape("ExtractImagePatches")
def _ExtractImagePatchesShape(op):
"""Shape function for the ExtractImagePatches op.
Args:
op: An ExtractImagePatches op.
Raises:
ValueError: If the strides or padding are invalid.
Returns:
The shape of the op output.
"""
images_shape = op.inputs[0].get_shape().with_rank(4)
batch = images_shape[0]
in_rows = images_shape[1]
in_cols = images_shape[2]
in_depth = images_shape[3]
ksize_b, ksize_r, ksize_c, ksize_d = op.get_attr("ksizes")
if ksize_b != 1 or ksize_d != 1:
raise ValueError("Current implementation does not yet support "
"ksizes in the batch and depth dimensions.")
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not yet support "
"strides in the batch and depth dimensions.")
rate_b, rate_r, rate_c, rate_d = op.get_attr("rates")
if rate_b != 1 or rate_d != 1:
raise ValueError("Current implementation does not yet support "
"rates in the batch and depth dimensions.")
# Effective patch size, taking into account filter upsampling by rates.
ksize_r_eff = ksize_r + (ksize_r - 1) * (rate_r - 1)
ksize_c_eff = ksize_c + (ksize_c - 1) * (rate_c - 1)
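  # For example, a 3x3 kernel with rate 2 covers an effective 5x5 window,
  # since 3 + (3 - 1) * (2 - 1) = 5.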
padding = op.get_attr("padding")
out_rows, out_cols = common_shapes.get2d_conv_output_size(in_rows, in_cols,
ksize_r_eff,
ksize_c_eff,
stride_r, stride_c,
padding)
out_depth = None if in_depth is None else ksize_r * ksize_c * int(in_depth)
output_shape = [batch, out_rows, out_cols, out_depth]
return [tensor_shape.TensorShape(output_shape)]
@ops.RegisterShape("SpaceToBatch")
def _SpaceToBatchShape(op):
"""Shape function for the SpaceToBatch op.
  The output shape is determined by the following inputs/attributes:
* input: A rank-4 tensor with shape [B, H, W, D]
* paddings: A 2-by-2 matrix, specified as follows:
paddings = [[pad_top, pad_bottom], [pad_left, pad_right]],
implying effective padded spatial dimensions:
Hp = pad_top + H + pad_bottom
Wp = pad_left + W + pad_right
Both Hp and Wp must be multiples of block_size.
* block_size: an int.
Its output is also a rank-4 tensor with shape:
[B*block_size*block_size, Hp/block_size, Wp/block_size, D]
Args:
op: A SpaceToBatch op.
Returns:
A single-element list containing the shape of the output.
Raises:
ValueError: If the shapes of inputs are not as expected.
IndexError: If block_size does not divide Wp or Hp.
"""
# Check that the input tensor is 4-D.
try:
input_shape = op.inputs[0].get_shape().with_rank(4)
except ValueError:
raise ValueError(
"tf.space_to_batch() requires 4-D input tensor.")
# Check that the paddings tensor is a matrix with shape [2, 2].
try:
paddings_shape = op.inputs[1].get_shape().with_rank(2)
except ValueError:
raise ValueError(
"tf.space_to_batch() requires 2-D paddings tensor.")
if paddings_shape[0] != 2 or paddings_shape[1] != 2:
raise ValueError(
"tf.space_to_batch() requires input paddings with shape [2, 2].")
block_size = op.get_attr("block_size")
if block_size <= 1:
raise ValueError("Attribute block_size has to be > 1.")
paddings = tensor_util.constant_value(op.inputs[1])
if paddings is not None:
if (paddings[0, 0] < 0 or paddings[0, 1] < 0 or
paddings[1, 0] < 0 or paddings[1, 1] < 0):
raise ValueError("paddings cannot be negative.")
input_height = input_shape[1] + paddings[0, 0] + paddings[0, 1]
input_width = input_shape[2] + paddings[1, 0] + paddings[1, 1]
if input_height % block_size > 0 or input_width % block_size > 0:
raise IndexError("block_size needs to divide both width and height.")
else:
input_height = tensor_shape.Dimension(None)
input_width = tensor_shape.Dimension(None)
batch = input_shape[0] * block_size * block_size
height = input_height // block_size
width = input_width // block_size
depth = input_shape[3]
return [tensor_shape.TensorShape([batch, height, width, depth])]
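# Illustrative sketch (added note, not part of the original module): an input
# of shape [1, 4, 4, 1] with zero paddings and block_size 2 yields batch
# 1 * 2 * 2 = 4 and spatial dims 4 / 2 = 2, i.e. an output shape [4, 2, 2, 1].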
@ops.RegisterShape("BatchToSpace")
def _BatchToSpaceShape(op):
"""Shape function for the BatchToSpace op.
  The output shape is determined by the following inputs/attributes:
* input: A rank-4 tensor with shape
[B*block_size*block_size, Hp/block_size, Wp/block_size, D]
Note that the batch size of the input tensor must be divisible by
`block_size * block_size`.
* crops: A 2-by-2 matrix, specified as follows:
crops = [[crop_top, crop_bottom], [crop_left, crop_right]].
* block_size: an int.
Its output is also a rank-4 tensor with shape [B, H, W, D], where:
H = Hp - crop_top - crop_bottom
W = Wp - crop_left - crop_right
Args:
op: A BatchToSpace op.
Returns:
A single-element list containing the shape of the output.
Raises:
ValueError: If the shapes of the inputs are not as expected.
IndexError: If block_size*block_size does not divide the input batch size.
"""
# Check that the input tensor is 4-D.
try:
input_shape = op.inputs[0].get_shape().with_rank(4)
except ValueError:
raise ValueError("tf.batch_to_space() requires 4-D input tensor.")
# Check that the crops tensor is a matrix with shape [2, 2].
try:
crops_shape = op.inputs[1].get_shape().with_rank(2)
except ValueError:
raise ValueError(
"tf.space_to_batch() requires 2-D crops tensor.")
if crops_shape[0] != 2 or crops_shape[1] != 2:
raise ValueError(
"tf.space_to_batch() requires input crops with shape [2, 2].")
crops = tensor_util.constant_value(op.inputs[1])
if (crops is not None and
(crops[0, 0] < 0 or crops[0, 1] < 0 or
crops[1, 0] < 0 or crops[1, 1] < 0)):
raise ValueError("crops cannot be negative.")
block_size = op.get_attr("block_size")
if block_size <= 1:
raise ValueError("Attribute block_size has to be > 1.")
input_batch = input_shape[0]
if input_batch % (block_size * block_size) > 0:
raise IndexError("input batch must be divisible by block_size*block_size.")
batch = input_batch // (block_size * block_size)
if crops is not None:
height = input_shape[1] * block_size - crops[0, 0] - crops[0, 1]
width = input_shape[2] * block_size - crops[1, 0] - crops[1, 1]
if height <= 0 or width <= 0:
raise ValueError("Output height or width is not positive.")
else:
height = tensor_shape.Dimension(None)
width = tensor_shape.Dimension(None)
depth = input_shape[3]
return [tensor_shape.TensorShape([batch, height, width, depth])]
@ops.RegisterShape("SpaceToDepth")
def _SpaceToDepthShape(op):
"""Shape function for the SpaceToDepth op.
This op takes two inputs:
  * input: a tensor of shape [B, H, W, D]
  * block_size: an int.
  Its output is a tensor of the same rank, with the dimensions changed to
  [B, H/block_size, W/block_size, D*block_size*block_size].
Args:
op: A SpaceToDepth op.
Returns:
A single-element list containing the shape of the output.
Raises:
ValueError: If the shapes of input are not as expected.
IndexError: If block_size does not divide W or H.
"""
# Check that the input tensor is of 4 dimensions.
try:
input_shape = op.inputs[0].get_shape().with_rank(4)
except ValueError:
raise ValueError(
"tf.space_to_depth() requires tensors with exactly 4 dimensions.")
block_size = op.get_attr("block_size")
if block_size <= 1:
raise ValueError("Attribute block_size has to be > 1.")
input_height = input_shape[1]
input_width = input_shape[2]
if (input_width % block_size > 0) or (input_height % block_size > 0):
raise IndexError(
"block_size needs to divide both width and height.")
width = input_width // block_size
height = input_height // block_size
new_depth = input_shape[3] * block_size * block_size
return [tensor_shape.TensorShape(
[input_shape[0], height, width, new_depth])]
@ops.RegisterShape("DepthToSpace")
def _DepthToSpaceShape(op):
"""Shape function for the DepthToSpace op.
This op takes two inputs:
  * input: a tensor of shape [B, H, W, D]
  * block_size: an int.
  Its output is a tensor of the same rank, with the dimensions changed to
  [B, H*block_size, W*block_size, D/(block_size*block_size)].
Args:
op: A DepthToSpace op.
Returns:
A single-element list containing the shape of the output.
Raises:
ValueError: If the shapes of input are not as expected.
IndexError: If block_size*block_size does not divide D.
"""
# Check that the input tensor is of 4 dimensions.
try:
input_shape = op.inputs[0].get_shape().with_rank(4)
except ValueError:
raise ValueError(
"tf.depth_to_space() requires tensors with exactly 4 dimensions.")
block_size = op.get_attr("block_size")
if block_size <= 1:
raise ValueError("Attribute block_size has to be > 1.")
input_height = input_shape[1]
input_width = input_shape[2]
input_depth = input_shape[3]
width = input_width * block_size
height = input_height * block_size
if input_depth % (block_size * block_size) > 0:
raise IndexError(
"block_size*block_size needs to divide the input depth.")
new_depth = input_depth // (block_size * block_size)
return [tensor_shape.TensorShape(
[input_shape[0], height, width, new_depth])]
def one_hot(indices, depth, on_value=None, off_value=None,
axis=None, dtype=None, name=None):
"""Returns a one-hot tensor.
The locations represented by indices in `indices` take value `on_value`,
while all other locations take value `off_value`.
`on_value` and `off_value` must have matching data types. If `dtype` is also
provided, they must be the same data type as specified by `dtype`.
  If `on_value` is not provided, it will default to the value `1` with type
  `dtype`.
  If `off_value` is not provided, it will default to the value `0` with type
  `dtype`.
  If the input `indices` is rank `N`, the output will have rank `N+1`. The
  new axis is created at dimension `axis` (default: the new axis is appended
  at the end).
  If `indices` is a scalar, the output shape will be a vector of length `depth`.
If `indices` is a vector of length `features`, the output shape will be:
```
features x depth if axis == -1
depth x features if axis == 0
```
If `indices` is a matrix (batch) with shape `[batch, features]`, the output
shape will be:
```
batch x features x depth if axis == -1
batch x depth x features if axis == 1
depth x batch x features if axis == 0
```
If `dtype` is not provided, it will attempt to assume the data type of
`on_value` or `off_value`, if one or both are passed in. If none of
`on_value`, `off_value`, or `dtype` are provided, `dtype` will default to the
  value `tf.float32`.
  Note: If a non-numeric data type output is desired (tf.string, tf.bool, etc.),
  both `on_value` and `off_value` _must_ be provided to `one_hot`.
Examples
=========
Suppose that
```
indices = [0, 2, -1, 1]
depth = 3
on_value = 5.0
off_value = 0.0
axis = -1
```
Then output is `[4 x 3]`:
```
output =
[5.0 0.0 0.0] // one_hot(0)
[0.0 0.0 5.0] // one_hot(2)
[0.0 0.0 0.0] // one_hot(-1)
[0.0 5.0 0.0] // one_hot(1)
```
Suppose that
```
indices = [[0, 2], [1, -1]]
depth = 3
on_value = 1.0
off_value = 0.0
axis = -1
```
Then output is `[2 x 2 x 3]`:
```
output =
[
[1.0, 0.0, 0.0] // one_hot(0)
[0.0, 0.0, 1.0] // one_hot(2)
][
[0.0, 1.0, 0.0] // one_hot(1)
[0.0, 0.0, 0.0] // one_hot(-1)
]
```
Using default values for `on_value` and `off_value`:
```
indices = [0, 1, 2]
depth = 3
```
The output will be
```
output =
[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]]
```
Args:
indices: A `Tensor` of indices.
depth: A scalar defining the depth of the one hot dimension.
on_value: A scalar defining the value to fill in output when `indices[j]
= i`. (default: 1)
off_value: A scalar defining the value to fill in output when `indices[j]
!= i`. (default: 0)
axis: The axis to fill (default: -1, a new inner-most axis).
    dtype: The data type of the output tensor.
    name: A name for the operation (optional).
Returns:
output: The one-hot tensor.
Raises:
    TypeError: If the dtype of either `on_value` or `off_value` doesn't match `dtype`
    TypeError: If the dtypes of `on_value` and `off_value` don't match one another
"""
with ops.name_scope(name, "one_hot", [indices, depth, on_value, off_value,
axis, dtype]) as name:
on_exists = on_value is not None
off_exists = off_value is not None
on_dtype = ops.convert_to_tensor(on_value).dtype.base_dtype if on_exists \
else None
off_dtype = ops.convert_to_tensor(off_value).dtype.base_dtype if off_exists\
else None
if on_exists or off_exists:
if dtype is not None:
# Ensure provided on_value and/or off_value match dtype
if (on_exists and on_dtype != dtype):
raise TypeError("dtype {0} of on_value does not match " \
"dtype parameter {1}".format(on_dtype, dtype))
if (off_exists and off_dtype != dtype):
raise TypeError("dtype {0} of off_value does not match " \
"dtype parameter {1}".format(off_dtype, dtype))
else:
# dtype not provided: automatically assign it
dtype = on_dtype if on_exists else off_dtype
elif dtype is None:
# None of on_value, off_value, or dtype provided. Default dtype to float32
dtype = dtypes.float32
if not on_exists:
# on_value not provided: assign to value 1 of type dtype
on_value = ops.convert_to_tensor(1, dtype, name="on_value")
on_dtype = dtype
if not off_exists:
# off_value not provided: assign to value 0 of type dtype
off_value = ops.convert_to_tensor(0, dtype, name="off_value")
off_dtype = dtype
if on_dtype != off_dtype:
raise TypeError("dtype {0} of on_value does not match " \
"dtype {1} of off_value".format(on_dtype, off_dtype))
return gen_array_ops._one_hot(indices, depth, on_value, off_value, axis,
name)
@ops.RegisterShape("OneHot")
def _OneHotShape(op):
"""Shape function for the OneHot op.
It closely follows the code in the .cc implementation.
Args:
op: A OneHot Operation.
Returns:
A single-element list containing the shape of the output.
Raises:
ValueError: if axis < -1.
"""
indices_shape = op.inputs[0].get_shape()
indices_dims = indices_shape.ndims
depth = tensor_util.constant_value(op.inputs[1])
axis = op.get_attr("axis")
if axis < -1:
raise ValueError("axis must be >= -1")
new_shape = None
if indices_dims is not None:
new_shape = indices_shape.as_list()
new_shape.insert(axis % (indices_dims + 1), depth)
return [tensor_shape.TensorShape(new_shape)]
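# Illustrative sketch (added note, not part of the original module): with
# indices of shape [2, 3], depth 4 and axis -1, the depth is inserted at
# position -1 % 3 = 2, giving an output shape of [2, 3, 4].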
@ops.RegisterShape("PlaceholderWithDefault")
def _PlaceholderWithDefaultShape(op):
"""Shape function for the PlaceholderWithDefault op.
This op acts as an identity when it is not fed (passing through a
default value), but allows the user to feed it with tensors of a
possibly less precise shape than its default value.
Args:
op: A PlaceholderWithDefault `Operation`.
Returns:
A single-element list containing the shape of the output.
"""
input_shape = op.inputs[0].get_shape()
output_shape = tensor_shape.TensorShape(op.get_attr("shape"))
# NOTE(mrry): We don't merge these shapes, because `output_shape`
# may be *less* precise than `input_shape`.
input_shape.assert_is_compatible_with(output_shape)
return [output_shape]
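# Sketch: if the default value has shape [2, 2] while the "shape" attr is
# [None, 2], the compatibility assert above passes and the looser [None, 2]
# is reported, so the placeholder may be fed tensors with any first dimension.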
| apache-2.0 | 7,509,335,684,078,603,000 | 32.012601 | 86 | 0.633067 | false |
kubeflow/pipelines | samples/core/parallel_join/parallel_join.py | 1 | 1802 | #!/usr/bin/env python3
# Copyright 2019 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp
from kfp import dsl
def gcs_download_op(url):
return dsl.ContainerOp(
name='GCS - Download',
image='google/cloud-sdk:279.0.0',
command=['sh', '-c'],
arguments=['gsutil cat $0 | tee $1', url, '/tmp/results.txt'],
file_outputs={
'data': '/tmp/results.txt',
}
)
def echo2_op(text1, text2):
return dsl.ContainerOp(
name='echo',
image='library/bash:4.4.23',
command=['sh', '-c'],
arguments=['echo "Text 1: $0"; echo "Text 2: $1"', text1, text2]
)
@dsl.pipeline(
name='parallel-pipeline',
description='Download two messages in parallel and prints the concatenated result.'
)
def download_and_join(
url1='gs://ml-pipeline/sample-data/shakespeare/shakespeare1.txt',
url2='gs://ml-pipeline/sample-data/shakespeare/shakespeare2.txt'
):
"""A three-step pipeline with first two running in parallel."""
download1_task = gcs_download_op(url1)
download2_task = gcs_download_op(url2)
echo_task = echo2_op(download1_task.output, download2_task.output)
if __name__ == '__main__':
kfp.compiler.Compiler().compile(download_and_join, __file__ + '.yaml')
| apache-2.0 | 2,698,934,252,417,545,000 | 30.614035 | 85 | 0.667037 | false |
SynapticNulship/Anibots | sim_py/anibots_breve_pushtest.py | 1 | 4900 | # Anibots (anigraf robots) physical/visual sim
#
# Copyright (c) 2007-2012 Samuel H. Kenyon. <[email protected]>
# http://synapticnulship.com
# This is open source, made available under the MIT License (see the
# accompanying file LICENSE).
#
# This python script connects my anibots C++ program (with the help of SWIG)
# to the Breve simulation environment. It also defines low-level physical
# actions triggered by higher-level anigraf actions.
#
# For final project 9.343J Fall 2006
# Experiment 0: The block task
#
from anibots import *
import breve
class AnibotPhysicsSim( breve.PhysicalControl ):
def __init__( self ):
breve.Control.__init__( self )
self.bots = breve.objectList()
self.actionDuration = 45
self.iterCount=self.actionDuration
self.videoLog = breve.Movie()
self.block = None
#configure the anibots
self.env = None
self.numBots = 1
self.iterations = 20
self.kDepth = 3
self.takeTurns = True
self.anibotConfig = AnibotConfig("final-x-g.dat", "final-x-pedge.dat","pweights-alt.dat")
self.anibotConfig.proxify = False
# bool proxyWeightsProportional;
# float proxyWeightsProportion;
#bool randomizeEdges
self.anibotConfig.randomize = False
#self.anibotConfig.quant = 11
#self.anibotConfig.quantDiff = 1
#anibotConfig.quantIrregular;
#self.anibotConfig.randMin = 0
#self.anibotConfig.randMax = 10
# bool singleTops;
AnibotPhysicsSim.init( self )
def init( self ):
print '''Setting up Anibot environment'''
# start the anibots environment (mental simulation)
self.env = AgentEnv("test_anibots_exp0.py",self.kDepth,self.takeTurns)
self.env.NewAnibot(self.numBots,self.anibotConfig)
self.env.InitLoner(0)
#self.env.InitTransaction(0,1)
print '''Setting up Physics Sim.'''
#start the visual/physical environment in Breve
self.setDisplayText( "Anibots Sim", -1.0, 0.8, 1 )
self.setRandomSeedFromDevRandom()
self.enableFastPhysics()
self.setFastPhysicsIterations( 15 )
#self.setGravity( breve.vector(0.0,-3.0,0.0) )
self.enableLighting()
self.enableSmoothDrawing()
self.moveLight( breve.vector( 20, 30, 20 ) )
floor = breve.createInstances( breve.Floor, 1 )
floor.catchShadows()
#floor.setE( 1.000000 )
floor.setMu(0.0)
#floor.showAxis()
self.cloudTexture = breve.createInstances( breve.Image, 1 ).load( 'images/clouds.png' )
self.enableShadowVolumes()
self.enableReflections()
self.setBackgroundColor( breve.vector( 0.400000, 0.600000, 0.900000 ) )
self.setBackgroundTextureImage( self.cloudTexture )
#self.offsetCamera( breve.vector( 3, 13, -13 ) )
self.pointCamera( breve.vector( 0, 0, 0 ), breve.vector( 20, 20, 60 ) )
#the virtual bodies
self.bots = breve.createInstances( breve.AnibotBody, 1 )
self.bots.move( breve.vector( 0.0, self.bots.radius, 14 ) )
self.env.Next()
#the block
self.block = breve.createInstances( breve.Mobile, 1 )
shape = breve.createInstances( breve.Cube, 1 ).initWith( breve.vector(15,3,4) )
shape.setMass(0.5)
self.block.setShape(shape)
self.block.setColor(breve.vector( 1.0, 0.5 ,0.0 ))
self.block.move( breve.vector( 0.0, 1.5 ,0.0 ) )
self.block.setMu(0.0)
#self.block.setE(0.1)
self.block.enablePhysics()
print self.block.getMass()
#self.block.setForce( breve.vector( 500.0, 500.0 , 500.0 ) )
#self.block.setVelocity( breve.vector( 0, 0, -10 ) )
#self.watch( self.bots[0] )
self.watch( self.block )
self.videoLog.record("anibots-pushtest.mpg")
def iterate( self ):
s2 = "block dist: %.2f" % (-self.block.getLocation()[2])
self.setDisplayText(s2, -1.0, 0.5, 6)
breve.Control.iterate( self )
breve.AnibotPhysicsSim = AnibotPhysicsSim
class AnibotBody( breve.Mobile ):
def __init__( self ):
breve.Mobile.__init__( self )
self.radius = 1.5
AnibotBody.init( self )
def init( self ):
shape = breve.createInstances( breve.Sphere, 1 ).initWith( self.radius )
shape.setDensity(100)
self.setShape( shape )
#self.setShape( breve.createInstances( breve.Cube, 1 ).initWith( breve.vector(self.radius,self.radius,self.radius) ))
self.setColor( breve.randomExpression( breve.vector( 1.000000, 1.000000, 1.000000 ) ) )
#self.move( breve.vector( breve.randomExpression(8.0)-4.0, self.radius, breve.randomExpression(20.0) + 8.0 ) )
self.move( breve.vector( 0.0, self.radius, 14.0 ) )
print self.getMass()
self.enablePhysics()
#self.setVelocity( breve.vector( 0.0, 0.0, -2.0 ) )
#self.setForce( breve.vector( 0.0, 0.0, -100.0 ) )
def moveX( self, x ):
if self.getLocation()[0] != x:
z = self.getLocation()[2]
self.move( breve.vector( x, self.radius, z+2 ) )
def iterate( self ):
#print self.getVelocity()
self.setVelocity( breve.vector( 0.0, 0.0, -2.0 ) )
breve.AnibotBody = AnibotBody
# Create an instance of our controller object to initialize the simulation
AnibotPhysicsSim()
| mit | -8,022,276,696,269,012,000 | 32.108108 | 119 | 0.696531 | false |
CSD-Public/stonix | src/tests/framework/unit_tests/zzzTestFrameworkCommandHelper.py | 1 | 4182 | #!/usr/bin/env python3
###############################################################################
# #
# Copyright 2019. Triad National Security, LLC. All rights reserved. #
# This program was produced under U.S. Government contract 89233218CNA000001 #
# for Los Alamos National Laboratory (LANL), which is operated by Triad #
# National Security, LLC for the U.S. Department of Energy/National Nuclear #
# Security Administration. #
# #
# All rights in the program are reserved by Triad National Security, LLC, and #
# the U.S. Department of Energy/National Nuclear Security Administration. The #
# Government is granted for itself and others acting on its behalf a #
# nonexclusive, paid-up, irrevocable worldwide license in this material to #
# reproduce, prepare derivative works, distribute copies to the public, #
# perform publicly and display publicly, and to permit others to do so. #
# #
###############################################################################
'''
Created on 11/27/2012
Perform tests on different parts of the functionality for framework CommandHelper
@author: ekkehard
@change: roy - adding sys.path.append for both test framework and individual
test runs.
@change: Breen Malmberg - 04/11/2018 - added class doc string; removed
testinvalidcommand test since it was just essentially testing whether
python threw a typeerror exception when given an argument that was the wrong type
(it wasn't testing our framework - it was testing python itself)
@todo: fill out all remaining empty method doc strings
@note: If you're going to write assertRaises tests, make sure the exceptions
are not caught somewhere else in the call chain and re-thrown as tracebacks
there, before they can propagate back to the assertRaises() method call here.
'''
import unittest
import sys
sys.path.append("../../../..")
from src.tests.lib.logdispatcher_lite import LogPriority
from src.tests.lib.logdispatcher_lite import LogDispatcher
from src.stonix_resources.environment import Environment
from src.stonix_resources.CommandHelper import CommandHelper
class zzzTestFrameworkCommandHelper(unittest.TestCase):
'''Perform tests on different parts of the functionality for framework CommandHelper
:param unittest: TestCase: unittest TestCase class inheritance object reference
@author: ekkehard
@change: Breen Malmberg - 04/11/2018 - removed assertion tests -
you can't test for exception assertions in code that is wrapped by try
except because the try except intercepts the exception and throws it
and it never gets back to the assertraises call (see tf ticket for documentation)
'''
def setUp(self):
''' '''
self.enviro = Environment()
self.enviro.setdebugmode(True)
self.logger = LogDispatcher(self.enviro)
self.commandhelper = CommandHelper(self.logger)
def tearDown(self):
''' '''
pass
def testExecuteValidCommand(self):
''' '''
self.assertTrue(self.commandhelper.executeCommand("ls -l /"),
"Execute Valid Command string Failed!")
self.assertTrue(self.commandhelper.executeCommand(["ls", "-l", "/"]),
"Execute Valid Command List Failed!")
def testSetLogPriority(self):
''' '''
self.assertTrue(self.commandhelper.setLogPriority(LogPriority.INFO),
"Execute setLogPriority(0) Command string Failed!")
self.assertTrue(self.commandhelper.executeCommand(["ls", "-l", "/"]),
"Execute commandhelper.executeCommand(['ls','-l','/'])"
+ " Command List Failed!")
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| gpl-2.0 | 561,659,646,466,533,760 | 42.113402 | 97 | 0.609995 | false |
openstack/networking-odl | networking_odl/journal/journal.py | 1 | 10979 | # Copyright (c) 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from datetime import datetime
import threading
import time
from neutron_lib.callbacks import registry
from neutron_lib import context as nl_context
from neutron_lib.db import api as db_api
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_db import exception
from oslo_log import log as logging
from requests import exceptions
from networking_odl.common import client
from networking_odl.common import constants as odl_const
from networking_odl.common import filters
from networking_odl.common import utils
from networking_odl.db import db
from networking_odl.journal import dependency_validations
LOG = logging.getLogger(__name__)
MAKE_URL = {}
LOG_ENTRY_TEMPLATE = ("%(log_type)s (Entry ID: %(entry_id)s) - %(op)s "
"%(obj_type)s %(obj_id)s (Time stamp: %(timestamp)s)")
LOG_RECORDED = 'Recorded'
LOG_PROCESSING = 'Processing'
LOG_COMPLETED = 'Completed'
LOG_ERROR_PROCESSING = 'Error while processing'
def call_thread_on_end(func):
def new_func(obj, *args, **kwargs):
return_value = func(obj, *args, **kwargs)
obj.journal.set_sync_event()
return return_value
return new_func
def _enrich_port(plugin_context, ml2_context, object_type, operation, data):
"""Enrich the port with additional information needed by ODL"""
# NOTE(yamahata): work around of ODL neutron northbound
# It passes security groups in port as list of dict for historical reasons.
# keep its format for compatibility.
# TODO(yamahata): drop this format conversion.
if data[odl_const.ODL_SGS]:
groups = [{'id': id_} for id_ in data['security_groups']]
else:
groups = []
new_data = copy.deepcopy(data)
new_data[odl_const.ODL_SGS] = groups
# NOTE(yamahata): work around for port creation for router
# tenant_id=''(empty string) is passed when port is created
# by l3 plugin internally for router.
# On the other hand, ODL doesn't accept empty string for tenant_id.
# In that case, deduce tenant_id from network_id for now.
# Right fix: modify Neutron so that don't allow empty string
# for tenant_id even for port for internal use.
# TODO(yamahata): eliminate this work around when neutron side
# is fixed
# assert port['tenant_id'] != ''
if ('tenant_id' not in new_data or new_data['tenant_id'] == ''):
if ml2_context:
network = ml2_context._network_context._network
else:
plugin = directory.get_plugin()
network = plugin.get_network(plugin_context,
new_data['network_id'])
new_data['tenant_id'] = network['tenant_id']
return new_data
def _log_entry(log_type, entry, log_level=logging.INFO, **kwargs):
delta = datetime.now() - datetime.min
timestamp = delta.total_seconds()
log_dict = {'log_type': log_type, 'op': entry.operation,
'obj_type': entry.object_type, 'obj_id': entry.object_uuid,
'entry_id': entry.seqnum, 'timestamp': timestamp}
LOG.log(log_level, LOG_ENTRY_TEMPLATE, log_dict, **kwargs)
def record(plugin_context, object_type, object_uuid, operation, data,
ml2_context=None):
if (object_type == odl_const.ODL_PORT and
operation in (odl_const.ODL_CREATE, odl_const.ODL_UPDATE)):
data = _enrich_port(
plugin_context, ml2_context, object_type, operation, data)
# Calculate depending_on on other journal entries
depending_on = dependency_validations.calculate(
plugin_context, operation, object_type, object_uuid, data)
# NOTE(mpeterson): Between the moment that a dependency is calculated and
# the new entry is recorded in the journal, an operation can occur that
# would make the dependency irrelevant. In that case we request a retry.
# For more details, read the commit message that introduced this comment.
try:
entry = db.create_pending_row(
plugin_context, object_type, object_uuid, operation, data,
depending_on=depending_on)
except exception.DBReferenceError as e:
raise exception.RetryRequest(e)
_log_entry(LOG_RECORDED, entry)
LOG.debug('Entry with ID %(entry_id)s depends on these entries: '
'%(depending_on)s',
{'entry_id': entry.seqnum,
'depending_on': [d.seqnum for d in depending_on]})
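# Sketch of a typical caller (resource name and payload are assumed for
# illustration only):
#   record(context, odl_const.ODL_NETWORK, network['id'],
#          odl_const.ODL_CREATE, network)
# A DBReferenceError raised while inserting the row is surfaced as a
# RetryRequest so the plugin retries the whole operation.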
@db_api.retry_if_session_inactive()
@db_api.CONTEXT_WRITER.savepoint
def entry_complete(context, entry):
if cfg.CONF.ml2_odl.completed_rows_retention == 0:
db.delete_row(context, entry)
else:
db.update_db_row_state(context, entry, odl_const.COMPLETED)
db.delete_dependency(context, entry)
@db_api.retry_if_session_inactive()
@db_api.CONTEXT_WRITER.savepoint
def entry_reset(context, entry):
db.update_db_row_state(context, entry, odl_const.PENDING)
@db_api.retry_if_session_inactive()
@db_api.CONTEXT_WRITER.savepoint
def entry_update_state_by_retry_count(context, entry, retry_count):
db.update_pending_db_row_retry(context, entry, retry_count)
def _make_url(row):
url_object = utils.make_url_object(row.object_type)
urlpath = ''
if row.operation == odl_const.ODL_CREATE:
urlpath = url_object
else:
urlpath = url_object + '/' + row.object_uuid
return urlpath
def register_url_builder(object_type, method):
MAKE_URL[object_type] = method
def _build_url(row):
return MAKE_URL.get(row.object_type, _make_url)(row)
class OpenDaylightJournalThread(object):
"""Thread worker for the OpenDaylight Journal Database."""
# make those parameters configurable?
_RETRY_SLEEP_MIN = 0.1
_RETRY_SLEEP_MAX = 60
def __init__(self, start_thread=True):
self.client = client.OpenDaylightRestClient.create_client()
self._max_retry_count = cfg.CONF.ml2_odl.retry_count
self._sleep_time = self._RETRY_SLEEP_MIN
self.event = threading.Event()
self._odl_sync_thread = self._create_odl_sync_thread()
self._odl_sync_thread_stop = threading.Event()
if start_thread:
self.start()
def _create_odl_sync_thread(self):
return threading.Thread(name='sync', target=self.run_sync_thread)
def start(self):
# Start the sync thread
LOG.debug("Starting a new sync thread")
if self._odl_sync_thread_stop.is_set():
self._odl_sync_thread_stop.clear()
self._odl_sync_thread = self._create_odl_sync_thread()
if not self._odl_sync_thread.is_alive():
self._odl_sync_thread.start()
def stop(self, timeout=None):
"""Allows to stop the sync thread.
Args:
timeout (float): Time in seconds to wait for joining or None for
no timeout.
"""
# Stop the sync thread
LOG.debug("Stopping the sync thread")
if self._odl_sync_thread.is_alive():
self._odl_sync_thread_stop.set()
# Process the journal one last time before stopping.
self.set_sync_event()
self._odl_sync_thread.join(timeout)
def set_sync_event(self):
self.event.set()
@staticmethod
def _json_data(row):
data = copy.deepcopy(row.data)
filters.filter_for_odl(row.object_type, row.operation, data)
if row.operation == odl_const.ODL_CREATE:
method = 'post'
to_send = {row.object_type: data}
elif row.operation == odl_const.ODL_UPDATE:
method = 'put'
to_send = {row.object_type: data}
elif row.operation == odl_const.ODL_DELETE:
method = 'delete'
to_send = None
return method, _build_url(row), to_send
def run_sync_thread(self):
while not self._odl_sync_thread_stop.is_set():
try:
self.event.wait()
self.event.clear()
self.sync_pending_entries()
except Exception:
# Catch exceptions to protect the thread while running
LOG.exception("Error on run_sync_thread")
def sync_pending_entries(self):
LOG.debug("Start processing journal entries")
context = nl_context.get_admin_context()
entry = db.get_oldest_pending_db_row_with_lock(context)
if entry is None:
LOG.debug("No journal entries to process")
return
while entry is not None:
stop_processing = self._sync_entry(context, entry)
if stop_processing:
break
entry = db.get_oldest_pending_db_row_with_lock(context)
LOG.debug("Finished processing journal entries")
def _retry_sleep(self):
# When something happened in the connection to ODL, don't busy loop
# because it's likely to hit same issue.
# Wait for a while for recovery
time.sleep(self._sleep_time)
self._sleep_time = min(self._sleep_time * 2, self._RETRY_SLEEP_MAX)
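# Sketch of the resulting backoff with the class defaults above:
# 0.1s, 0.2s, 0.4s, 0.8s, ... doubling on every failed attempt until it is
# capped at 60s; _retry_reset() below returns it to 0.1s after a success.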
def _retry_reset(self):
self._sleep_time = self._RETRY_SLEEP_MIN
def _sync_entry(self, context, entry):
_log_entry(LOG_PROCESSING, entry)
method, urlpath, to_send = self._json_data(entry)
# TODO(mkolesni): This logic is weirdly written, need to refactor it.
try:
self.client.sendjson(method, urlpath, to_send)
registry.notify(entry.object_type, odl_const.BEFORE_COMPLETE,
self, context=context, operation=entry.operation,
row=entry)
entry_complete(context, entry)
self._retry_reset()
_log_entry(LOG_COMPLETED, entry)
except exceptions.ConnectionError:
# Don't raise the retry count, just log an error & break
entry_reset(context, entry)
LOG.error("Cannot connect to the OpenDaylight Controller,"
" will not process additional entries")
self._retry_sleep()
return True
except Exception:
_log_entry(LOG_ERROR_PROCESSING, entry,
log_level=logging.ERROR, exc_info=True)
entry_update_state_by_retry_count(
context, entry, self._max_retry_count)
return False
| apache-2.0 | 8,686,914,835,769,035,000 | 35.596667 | 79 | 0.637034 | false |
tgbugs/pyontutils | ilxutils/ilxutils/nltklib.py | 1 | 6030 | '''
Run nltk.download(['wordnet', 'stopwords', 'punkt']) if these corpora are not already downloaded.
Add words to WordNet if you want a larger reference vocabulary to compare against.
'''
from nltk import word_tokenize, pos_tag
from nltk.corpus import wordnet as wn
from nltk.corpus import stopwords
from fuzzywuzzy import fuzz, process
stop_words = stopwords.words('english')
states = {
'ak': 'alaska',
'al': 'alabama',
'ar': 'arkansas',
'as': 'american samoa',
'az': 'arizona',
'ca': 'california',
'co': 'colorado',
'ct': 'connecticut',
'dc': 'district of columbia',
'de': 'delaware',
'fl': 'florida',
'ga': 'georgia',
'gu': 'guam',
'hi': 'hawaii',
'ia': 'iowa',
'id': 'idaho',
'il': 'illinois',
'in': 'indiana',
'ks': 'kansas',
'ky': 'kentucky',
'la': 'louisiana',
'ma': 'massachusetts',
'md': 'maryland',
'me': 'maine',
'mi': 'michigan',
'mn': 'minnesota',
'mo': 'missouri',
'mp': 'northern mariana islands',
'ms': 'mississippi',
'mt': 'montana',
'na': 'national',
'nc': 'north carolina',
'nd': 'north dakota',
'ne': 'nebraska',
'nh': 'new hampshire',
'nj': 'new jersey',
'nm': 'new mexico',
'nv': 'nevada',
'ny': 'new york',
'oh': 'ohio',
'ok': 'oklahoma',
'or': 'oregon',
'pa': 'pennsylvania',
'pr': 'puerto rico',
'ri': 'rhode island',
'sc': 'south carolina',
'sd': 'south dakota',
'tn': 'tennessee',
'tx': 'texas',
'ut': 'utah',
'va': 'virginia',
'vi': 'virgin islands',
'vt': 'vermont',
'wa': 'washington',
'wi': 'wisconsin',
'wv': 'west virginia',
'wy': 'wyoming'
}
def penn_to_wn(tag):
""" Convert between a Penn Treebank tag to a simplified Wordnet tag """
if tag.startswith('N'):
return 'n'
if tag.startswith('V'):
return 'v'
if tag.startswith('J'):
return 'a'
if tag.startswith('R'):
return 'r'
return None
def tagged_to_synset(word, tag):
wn_tag = penn_to_wn(tag)
# wn_tag is None if no definition is found
if wn_tag is None:
return word
# try:
# most probable english word
return wn.synsets(word, wn_tag)[0]
# except:
# return word
def fix_state_abbrev(tokens):
token = [
states[token] if states.get(token) else token
for token in tokens
]
return token
def clean_tokens(tokens, ignore_integers=False):
punctuations = ['(',')',';',':','[',']',',','.','/']
keywords = [
word for word in tokens
if not word in stop_words and not word in punctuations
]
keywords = fix_state_abbrev(keywords)
if ignore_integers:
keywords = [word for word in keywords if not is_possible_integer(word)]
return keywords
def clean(word):
word = str(word).lower().strip()
punctuations = ['(',')',';',':','[',']',',','.','/']
for punctuation in punctuations:
word = word.replace(punctuation, '')
return word
def is_possible_integer(word):
try:
int(word)
return True
except:
return False
def sentence_similarity(sentence1, sentence2, ignore_integers=False):
""" compute the sentence similarity using Wordnet """
# Tokenize and tag
sentence1 = ' '.join([clean(word) for word in sentence1.split()])
sentence2 = ' '.join([clean(word) for word in sentence2.split()])
tokens1 = word_tokenize(sentence1)
tokens2 = word_tokenize(sentence2)
tokens1 = clean_tokens(tokens1, ignore_integers)
tokens2 = clean_tokens(tokens2, ignore_integers)
# tag
sentence1 = pos_tag(tokens1)
sentence2 = pos_tag(tokens2)
# Get the synsets for the tagged words
synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]
synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]
print(synsets1)
print(synsets2)
# Filter out the Nones
synsets1 = [ss for ss in synsets1 if ss]
synsets2 = [ss for ss in synsets2 if ss]
score, count = 0.0, 0.0
# For each word in the first sentence
for synset1 in synsets1:
# Get the similarity value of the most similar word in the other sentence
best_score=[
wn.path_similarity(synset1, synset2)
if not isinstance(synset1, str) and not isinstance(synset2, str)
# just in case there are scientific words wordnet does not have
else fuzz.ratio(str(synset1), str(synset2)) / 100
for synset2 in synsets2
]
best_score=[s if s else 0 for s in best_score]
# print(synsets1, synsets2)
# Check that the similarity could have been computed
if best_score:
score += max(best_score)
count += 1
# Average the values
if count > 0:
score /= count
else:
score = 0
return score
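# Usage sketch (illustrative only; actual scores depend on the installed
# WordNet data): sentence_similarity("dogs are animals", "a dog is an animal")
# returns a value between 0 and 1, where higher means the tagged synsets of
# the two sentences lie closer together in the WordNet hierarchy.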
def get_tokenized_sentence(sentence):
# Tokenize and tag
sentence = pos_tag(word_tokenize(sentence))
# Get the synsets for the tagged words
synsets = []
for tagged_word in sentence:
synset = tagged_to_synset(*tagged_word)
if synset:
synsets.append(synset)
else:
synsets.append(tagged_word[0])
return synsets # str(sorted(synsets))
def main():
sentences = [
"life is good in pa 92092",
"life is good in pa",
"life is good within pa 92092/2",
"life is good pa 92092/2",
"life is good in pa 92092/2",
"testing for difference"
]
focus_sentence = "life is good in pennsylvania"
for sentence in sentences:
# print ("Similarity(\"%s\", \"%s\") = %s" % (focus_sentence, sentence, sentence_similarity(focus_sentence, sentence)))
print ("Similarity(\"%s\", \"%s\") = %s" % (focus_sentence, sentence, sentence_similarity(focus_sentence, sentence, ignore_integers=True)))
# print(sentence_similarity(focus_sentence, sentences[2], ignore_integers=True))
if __name__ == '__main__':
main()
| mit | -8,744,749,470,656,482,000 | 27.443396 | 147 | 0.587231 | false |
Vayel/WAMPLab | transaction/v1/locator.py | 1 | 1589 | from twisted.internet.defer import inlineCallbacks
from autobahn import wamp
from autobahn.twisted.wamp import ApplicationSession
from autobahn.twisted.wamp import ApplicationRunner
class Locator(ApplicationSession):
@inlineCallbacks
def onJoin(self, details):
yield self.register(self)
@wamp.register(u'locator.move')
@inlineCallbacks
def move(self):
"""Try to mark the current position and to go to the next one. Fail if
the current position is not markable or if the next position does not
exist.
"""
print('Move!')
pos = yield self.call('data.get_position')
direction = yield self.call('data.get_direction')
next_pos = pos + direction
markable = yield self.call('data.is_markable', pos)
next_existing = yield self.call('data.is_pos', next_pos)
if markable and next_existing:
self.call('data.mark_pos', pos)
self.call('data.set_position', next_pos)
elif not markable:
self.publish(
'error',
'The pos {} is not markable.'.format(pos)
)
else:
self.publish(
'error',
'The pos {} does not exist.'.format(next_pos)
)
# Always done
self.call('data.sth')
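# Caller sketch (assumed companion component, for illustration): another
# session on the same realm could drive this with
#   yield self.call('locator.move')
# and subscribe to the 'error' topic to be notified when a move is refused.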
if __name__ == "__main__":
print('Starting Locator component...')
ApplicationRunner(url='ws://localhost:8080/ws', realm='realm1').run(Locator)
| gpl-2.0 | -6,945,406,505,201,642,000 | 28.981132 | 80 | 0.563247 | false |
iiitv/algos | breadth_first_traversal/breadth_first_traversal.py | 1 | 2410 | """
Breadth-first-traversal is an algorithm for traversing a tree or
graph data structure. Starting at the tree root (or some arbitrary node of a
graph, sometimes referred to as a 'search key'[1]) and explores the neighbor
nodes at that level first, before moving to the next level.
"""
from collections import deque
def breadth_first_traversal(graph, source):
""" Performs a breadth-first traversal on a graph
Args:
graph (list of list of int): Adjacency list representation of graph
source (int): Index of source vertex to begin search from
Returns:
list of dicts describing each vertex in the searched graph
-> [{distance: _, predecessor: _ }]
"""
vertex_info = []
for i in range(len(graph)):
vertex_info.append({"distance": None, "predecessor": None})
vertex_info[source]["distance"] = 0
search_queue = deque()
search_queue.append(source)
while search_queue:
u = search_queue.popleft()
for v in graph[u]:
if vertex_info[v]["distance"] is None:
vertex_info[v]["distance"] = vertex_info[u]["distance"] + 1
vertex_info[v]["predecessor"] = u
search_queue.append(v)
return vertex_info
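# Worked example (sketch): for graph = [[1, 2], [0], [0]] and source = 0,
# vertex 0 gets {"distance": 0, "predecessor": None} and vertices 1 and 2
# both get {"distance": 1, "predecessor": 0}; unreachable vertices keep
# distance None, as exercised for vertex 7 in main() below.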
def main():
graph_adj_list = [
[1],
[0, 4, 5],
[3, 4, 5],
[2, 6],
[1, 2],
[1, 2, 6],
[3, 5],
[]
]
vertex_info = breadth_first_traversal(graph_adj_list, 3)
for i in range(len(graph_adj_list)):
print("vertex %s : distance = %s, predecessor = %s" %
(i, vertex_info[i]["distance"], vertex_info[i]["predecessor"]))
assert(vertex_info[0] == {
"distance": 4,
"predecessor": 1
})
assert(vertex_info[1] == {
"distance": 3,
"predecessor": 4
})
assert(vertex_info[2] == {
"distance": 1,
"predecessor": 3
})
assert(vertex_info[3] == {
"distance": 0,
"predecessor": None
})
assert(vertex_info[4] == {
"distance": 2,
"predecessor": 2
})
assert(vertex_info[5] == {
"distance": 2,
"predecessor": 2
})
assert(vertex_info[6] == {
"distance": 1,
"predecessor": 3
})
assert(vertex_info[7] == {
"distance": None,
"predecessor": None
})
if __name__ == '__main__':
main()
| mit | -3,733,688,494,333,147,000 | 25.195652 | 77 | 0.544813 | false |
Kortemme-Lab/protein_feature_analysis | ProteinFeatureAnalyzer/features/data_loading.py | 1 | 3308 | import os
import io
import Bio.PDB as PDB
from . import topology
from . import secondary_structures
def structure_from_pdb_file(file_path, name=''):
'''Read the structure stored in a PDB file.'''
parser = PDB.PDBParser()
return parser.get_structure(name, file_path)
def structure_from_pdb_string(pdb_string, name=''):
'''Read the structure stored in a PDB string.'''
parser = PDB.PDBParser()
pdb_sf = io.StringIO(pdb_string)
return parser.get_structure(name, pdb_sf)
def load_data_from_cath_pmls(input_path, output_path, job_list, dssp_path):
'''Load data from structures in the input path.
The input data should be stored in .pml files of superposed homologous
superfamilies from the CATH database.
'''
superfamilies = []
for f in job_list:
if f.endswith('.pml'):
# Make a scratch directory
scratch_path = os.path.join(output_path, f[0:-4])
if not os.path.exists(scratch_path):
os.mkdir(scratch_path)
# Load data from one file
load_from_one_cath_pml_file(os.path.join(input_path, f), scratch_path, superfamilies, dssp_path)
return superfamilies
def load_from_one_cath_pml_file(pml_file, scratch_path, superfamilies, dssp_path):
'''Load data from a .pml file of superposed
homologous superfamilies from the CATH database.
'''
superfamilies.append([])
candidate_proteins = []
with open(pml_file, 'r') as f:
while True:
line = f.readline()
if not line: break
# Read one structure
if line.strip().startswith('cmd.read_pdbstr'):
pdb_lines = [line.strip()[19:].strip('\\')]
pdb_id = ''
while True:
line = f.readline()
if line.strip().startswith('"""'):
pdb_id = line.strip()[5:12]
break
pdb_line = line.strip().strip('\\')
if len(pdb_line) > 17:
pdb_line = pdb_line[0:16] + ' ' + pdb_line[17:] # Remove all altLoc flags
pdb_lines.append(pdb_line)
# Make a pdb file of the structure for DSSP analysis
structure = structure_from_pdb_string('\n'.join(pdb_lines), pdb_id)
# Store structures without chain breaks
if len(topology.find_structure_chain_breaks(structure)) == 0:
structure_path = os.path.join(scratch_path, pdb_id + '.pdb')
io = PDB.PDBIO()
io.set_structure(structure)
io.save(structure_path)
candidate_proteins.append({'structure' : structure, 'path' : structure_path})
for p in candidate_proteins:
try:
find_secondary_structures(p, dssp_path)
except:
continue
superfamilies[-1].append(p) # Add a protein to a superfamily if there's no exception
def find_secondary_structures(protein_dict, dssp_path):
'''Find secondary structures of a protein.
Arguments:
- protein_dict - a dictionary to store information about a protein
'''
protein_dict['dssp_dict'], protein_dict['dssp_key_map'] = \
secondary_structures.make_dssp_dict(protein_dict['path'], dssp_path)
protein_dict['ss_list'], protein_dict['sheet_list'] = \
secondary_structures.pack_dssp_dict_into_ss_list(protein_dict['structure'][0],
protein_dict['dssp_dict'], protein_dict['dssp_key_map'])
| mit | -2,651,815,429,011,390,500 | 29.62963 | 102 | 0.637848 | false |
jralls/gramps | gramps/plugins/webreport/person.py | 1 | 75620 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007 Johan Gonqvist <[email protected]>
# Copyright (C) 2007-2009 Gary Burton <[email protected]>
# Copyright (C) 2007-2009 Stephane Charette <[email protected]>
# Copyright (C) 2008-2009 Brian G. Matherly
# Copyright (C) 2008 Jason M. Simanek <[email protected]>
# Copyright (C) 2008-2011 Rob G. Healey <[email protected]>
# Copyright (C) 2010 Doug Blank <[email protected]>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2010-2017 Serge Noiraud
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2013 Benny Malengier
# Copyright (C) 2016 Allen Crider
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Narrative Web Page generator.
Classe:
PersonPage - Person index page and individual `Person pages
"""
#------------------------------------------------
# python modules
#------------------------------------------------
from collections import defaultdict
from operator import itemgetter
from decimal import Decimal, getcontext
import logging
#------------------------------------------------
# Gramps module
#------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
from gramps.gen.lib import (ChildRefType, Date, Name, Person, EventRoleType)
from gramps.gen.lib.date import Today
from gramps.gen.plug.report import Bibliography
from gramps.gen.plug.report import utils
from gramps.gen.utils.alive import probably_alive
from gramps.gen.constfunc import win
from gramps.gen.display.name import displayer as _nd
from gramps.gen.utils.db import get_birth_or_fallback, get_death_or_fallback
from gramps.plugins.lib.libhtml import Html
from gramps.gen.utils.place import conv_lat_lon
#------------------------------------------------
# specific narrative web import
#------------------------------------------------
from gramps.plugins.webreport.basepage import BasePage
from gramps.plugins.webreport.common import (get_first_letters, _KEYPERSON,
alphabet_navigation, sort_people,
_NAME_STYLE_FIRST, first_letter,
get_index_letter, add_birthdate,
primary_difference, FULLCLEAR,
_find_birth_date, _find_death_date,
MARKER_PATH, OSM_MARKERS,
GOOGLE_MAPS, MARKERS, html_escape,
DROPMASTERS, FAMILYLINKS)
_ = glocale.translation.sgettext
LOG = logging.getLogger(".NarrativeWeb")
getcontext().prec = 8
_WIDTH = 160
_HEIGHT = 64
_VGAP = 10
_HGAP = 30
_SHADOW = 5
_XOFFSET = 5
#################################################
#
# creates the Individual List Page and IndividualPages
#
#################################################
class PersonPages(BasePage):
"""
This class is responsible for displaying information about the 'Person'
database objects. It displays this information under the 'Individuals'
tab. It is told by the 'add_instances' call which 'Person's to display,
and remembers the list of persons. A single call to 'display_pages'
displays both the Individual List (Index) page and all the Individual
pages.
The base class 'BasePage' is initialised once for each page that is
displayed.
"""
def __init__(self, report):
"""
@param: report -- The instance of the main report class for this report
"""
BasePage.__init__(self, report, title="")
self.ind_dict = defaultdict(set)
self.mapservice = None
self.sort_name = None
self.googleopts = None
self.googlemapkey = None
self.birthorder = None
self.person = None
self.familymappages = None
self.rel_class = None
self.placemappages = None
self.name = None
def display_pages(self, title):
"""
Generate and output the pages under the Individuals tab, namely the
individual index and the individual pages.
@param: title -- Is the title of the web page
"""
LOG.debug("obj_dict[Person]")
for item in self.report.obj_dict[Person].items():
LOG.debug(" %s", str(item))
with self.r_user.progress(_("Narrated Web Site Report"),
_('Creating individual pages'),
len(self.report.obj_dict[Person]) + 1
) as step:
self.individuallistpage(self.report, title,
self.report.obj_dict[Person].keys())
for person_handle in sorted(self.report.obj_dict[Person]):
step()
person = self.r_db.get_person_from_handle(person_handle)
self.individualpage(self.report, title, person)
#################################################
#
# creates the Individual List Page
#
#################################################
def individuallistpage(self, report, title, ppl_handle_list):
"""
Creates an individual page
@param: report -- The instance of the main report class
for this report
@param: title -- Is the title of the web page
@param: ppl_handle_list -- The list of people for whom we need
to create a page.
"""
BasePage.__init__(self, report, title)
prev_letter = " "
# plugin variables for this module
showbirth = report.options['showbirth']
showdeath = report.options['showdeath']
showpartner = report.options['showpartner']
showparents = report.options['showparents']
output_file, sio = self.report.create_file("individuals")
indlistpage, head, body = self.write_header(self._("Individuals"))
date = 0
# begin Individuals division
with Html("div", class_="content", id="Individuals") as individuallist:
body += individuallist
# Individual List page message
msg = self._("This page contains an index of all the individuals "
"in the database, sorted by their last names. "
"Selecting the person’s "
"name will take you to that "
"person’s individual page.")
individuallist += Html("p", msg, id="description")
# add alphabet navigation
index_list = get_first_letters(self.r_db, ppl_handle_list,
_KEYPERSON, rlocale=self.rlocale)
alpha_nav = alphabet_navigation(index_list, self.rlocale)
if alpha_nav is not None:
individuallist += alpha_nav
# begin table and table head
with Html("table",
class_="infolist primobjlist IndividualList") as table:
individuallist += table
thead = Html("thead")
table += thead
trow = Html("tr")
thead += trow
# show surname and first name
trow += Html("th", self._("Surname"), class_="ColumnSurname",
inline=True)
trow += Html("th", self._("Given Name"), class_="ColumnName",
inline=True)
if showbirth:
trow += Html("th", self._("Birth"), class_="ColumnDate",
inline=True)
if showdeath:
trow += Html("th", self._("Death"), class_="ColumnDate",
inline=True)
if showpartner:
trow += Html("th", self._("Partner"),
class_="ColumnPartner",
inline=True)
if showparents:
trow += Html("th", self._("Parents"),
class_="ColumnParents",
inline=True)
tbody = Html("tbody")
table += tbody
ppl_handle_list = sort_people(self.r_db, ppl_handle_list,
self.rlocale)
first = True
for (surname, handle_list) in ppl_handle_list:
if surname and not surname.isspace():
letter = get_index_letter(first_letter(surname), index_list,
self.rlocale)
else:
letter = ' '
surname = self._("<absent>")
first_surname = True
for person_handle in sorted(handle_list,
key=self.sort_on_name_and_grampsid):
person = self.r_db.get_person_from_handle(person_handle)
if person.get_change_time() > date:
date = person.get_change_time()
# surname column
trow = Html("tr")
tbody += trow
tcell = Html("td", class_="ColumnSurname", inline=True)
trow += tcell
if first or primary_difference(letter, prev_letter,
self.rlocale):
first = False
first_surname = False
prev_letter = letter
trow.attr = 'class = "BeginSurname"'
ttle = self._("Surnames %(surname)s beginning "
"with letter %(letter)s" %
{'surname' : surname,
'letter' : letter})
tcell += Html(
"a", html_escape(surname), name=letter,
id_=letter,
title=ttle)
elif first_surname:
first_surname = False
tcell += Html("a", html_escape(surname),
title=self._("Surnames") + " " + surname)
else:
tcell += " "
# firstname column
link = self.new_person_link(person_handle, person=person,
name_style=_NAME_STYLE_FIRST)
trow += Html("td", link, class_="ColumnName")
# birth column
if showbirth:
tcell = Html("td", class_="ColumnBirth", inline=True)
trow += tcell
birth_date = _find_birth_date(self.r_db, person)
if birth_date is not None:
if birth_date.fallback:
tcell += Html('em',
self.rlocale.get_date(birth_date),
inline=True)
else:
tcell += self.rlocale.get_date(birth_date)
else:
tcell += " "
# death column
if showdeath:
tcell = Html("td", class_="ColumnDeath", inline=True)
trow += tcell
death_date = _find_death_date(self.r_db, person)
if death_date is not None:
if death_date.fallback:
tcell += Html('em',
self.rlocale.get_date(death_date),
inline=True)
else:
tcell += self.rlocale.get_date(death_date)
else:
tcell += " "
# partner column
if showpartner:
family_list = person.get_family_handle_list()
first_family = True
#partner_name = None
tcell = () # pylint: disable=R0204
if family_list:
for family_handle in family_list:
family = self.r_db.get_family_from_handle(
family_handle)
partner_handle = utils.find_spouse(
person, family)
if partner_handle:
if not first_family:
# have to do this to get the comma on
# the same line as the link
if isinstance(tcell[-1], Html):
# tcell is an instance of Html (or
# of a subclass thereof)
tcell[-1].inside += ","
else:
tcell = tcell[:-1] + (
# TODO for Arabic, translate?
(tcell[-1] + ", "),)
# Have to manipulate as tuples so that
# subsequent people are not nested
# within the first link
tcell += (
self.new_person_link(partner_handle),)
first_family = False
else:
tcell = " "
trow += Html("td", class_="ColumnPartner") + tcell
# parents column
if showparents:
parent_hdl_list = person.get_parent_family_handle_list()
if parent_hdl_list:
parent_handle = parent_hdl_list[0]
family = self.r_db.get_family_from_handle(
parent_handle)
father_handle = family.get_father_handle()
mother_handle = family.get_mother_handle()
if father_handle:
father = self.r_db.get_person_from_handle(
father_handle)
else:
father = None
if mother_handle:
mother = self.r_db.get_person_from_handle(
mother_handle)
else:
mother = None
if father:
father_name = self.get_name(father)
if mother:
mother_name = self.get_name(mother)
samerow = False
if mother and father:
tcell = (Html("span", father_name,
class_="father fatherNmother",
inline=True),
Html("span", mother_name,
class_="mother", inline=True))
elif mother:
tcell = Html("span", mother_name,
class_="mother", inline=True)
elif father:
tcell = Html("span", father_name,
class_="father", inline=True)
else:
tcell = " "
samerow = True
else:
tcell = " "
samerow = True
trow += Html("td", class_="ColumnParents",
inline=samerow) + tcell
# create clear line for proper styling
# create footer section
footer = self.write_footer(date)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(indlistpage, output_file, sio, date)
#################################################
#
# creates an Individual Page
#
#################################################
gender_map = {
Person.MALE : _('male'),
Person.FEMALE : _('female'),
Person.UNKNOWN : _('unknown'),
}
def individualpage(self, report, title, person):
"""
Creates an individual page
@param: report -- The instance of the main report class for this report
@param: title -- Is the title of the web page
@param: person -- The person to use for this page.
"""
BasePage.__init__(self, report, title, person.get_gramps_id())
place_lat_long = []
self.person = person
self.bibli = Bibliography()
self.sort_name = self.get_name(person)
self.name = self.get_name(person)
date = self.person.get_change_time()
# to be used in the Family Map Pages...
self.familymappages = self.report.options['familymappages']
self.placemappages = self.report.options['placemappages']
self.mapservice = self.report.options['mapservice']
self.googleopts = self.report.options['googleopts']
self.googlemapkey = self.report.options['googlemapkey']
# decide if we will sort the birth order of siblings...
self.birthorder = self.report.options['birthorder']
# get the Relationship Calculator so that we can determine
# bio, half, step- siblings for use in display_ind_parents() ...
self.rel_class = self.report.rel_class
output_file, sio = self.report.create_file(person.get_handle(), "ppl")
self.uplink = True
indivdetpage, head, body = self.write_header(self.sort_name)
# attach the ancestortree style sheet if ancestor
# graph is being created?
if self.report.options["ancestortree"]:
if self.usecms:
fname = "/".join([self.target_uri, "css", "ancestortree.css"])
else:
fname = "/".join(["css", "ancestortree.css"])
url = self.report.build_url_fname(fname, None, self.uplink)
head += Html("link", href=url, type="text/css", media="screen",
rel="stylesheet")
# begin individualdetail division
with Html("div", class_="content",
id='IndividualDetail') as individualdetail:
body += individualdetail
# display a person's general data
thumbnail, name, summary = self.display_ind_general()
if thumbnail is not None:
individualdetail += thumbnail
individualdetail += (name, summary)
# display a person's events
sect2 = self.display_ind_events(place_lat_long)
if sect2 is not None:
individualdetail += sect2
# display relationship to the center person
sect3 = self.display_ind_center_person()
if sect3 is not None:
individualdetail += sect3
# display parents
sect4 = self.display_ind_parents()
if sect4 is not None:
individualdetail += sect4
# display relationships
relationships = self.display_relationships(self.person,
place_lat_long)
if relationships is not None:
individualdetail += relationships
# display LDS ordinance
sect5 = self.display_lds_ordinance(self.person)
if sect5 is not None:
individualdetail += sect5
# display address(es) and show sources
sect6 = self.display_addr_list(self.person.get_address_list(), True)
if sect6 is not None:
individualdetail += sect6
photo_list = self.person.get_media_list()
media_list = photo_list[:]
# if Family Pages are not being created, then include the Family
# Media objects? There is no reason to add these objects to the
# Individual Pages...
if not self.inc_families:
for handle in self.person.get_family_handle_list():
family = self.r_db.get_family_from_handle(handle)
if family:
media_list += family.get_media_list()
for evt_ref in family.get_event_ref_list():
event = self.r_db.get_event_from_handle(evt_ref.ref)
media_list += event.get_media_list()
# if the Event Pages are not being created, then include the Event
# Media objects? There is no reason to add these objects to the
# Individual Pages...
if not self.inc_events:
for evt_ref in self.person.get_primary_event_ref_list():
event = self.r_db.get_event_from_handle(evt_ref.ref)
if event:
media_list += event.get_media_list()
# display additional images as gallery
sect7 = self.disp_add_img_as_gallery(media_list, person)
if sect7 is not None:
individualdetail += sect7
# display Narrative Notes
notelist = person.get_note_list()
sect8 = self.display_note_list(notelist)
if sect8 is not None:
individualdetail += sect8
# display attributes
attrlist = person.get_attribute_list()
if attrlist:
attrsection, attrtable = self.display_attribute_header()
self.display_attr_list(attrlist, attrtable)
individualdetail += attrsection
# display web links
sect10 = self.display_url_list(self.person.get_url_list())
if sect10 is not None:
individualdetail += sect10
# display associations
assocs = person.get_person_ref_list()
if assocs:
individualdetail += self.display_ind_associations(assocs)
# for use in family map pages...
if len(place_lat_long) > 0:
if self.report.options["familymappages"]:
# save output_file, string_io and cur_fname
# before creating a new page
sof = output_file
sstring_io = sio
sfname = self.report.cur_fname
individualdetail += self.__display_family_map(
person, place_lat_long)
# restore output_file, string_io and cur_fname
# after creating a new page
output_file = sof
sio = sstring_io
self.report.cur_fname = sfname
# display pedigree
sect13 = self.display_ind_pedigree()
if sect13 is not None:
individualdetail += sect13
# display ancestor tree
if report.options['ancestortree']:
sect14 = self.display_tree()
if sect14 is not None:
individualdetail += sect14
# display source references
sect14 = self.display_ind_sources(person)
if sect14 is not None:
individualdetail += sect14
# add clearline for proper styling
# create footer section
footer = self.write_footer(date)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(indivdetpage, output_file, sio, date)
def __create_family_map(self, person, place_lat_long):
"""
creates individual family map page
@param: person -- person from database
@param: place_lat_long -- for use in Family Map Pages
"""
if not place_lat_long:
return
output_file, sio = self.report.create_file(person.get_handle(), "maps")
self.uplink = True
familymappage, head, body = self.write_header(self._("Family Map"))
minx, maxx = Decimal("0.00000001"), Decimal("0.00000001")
miny, maxy = Decimal("0.00000001"), Decimal("0.00000001")
xwidth, yheight = [], []
midx_, midy_, spanx, spany = [None]*4
number_markers = len(place_lat_long)
if number_markers > 1:
for (latitude, longitude, placetitle, handle,
date, etype) in place_lat_long:
xwidth.append(latitude)
yheight.append(longitude)
xwidth.sort()
yheight.sort()
minx = xwidth[0] if xwidth[0] else minx
maxx = xwidth[-1] if xwidth[-1] else maxx
minx, maxx = Decimal(minx), Decimal(maxx)
midx_ = str(Decimal((minx + maxx) /2))
miny = yheight[0] if yheight[0] else miny
maxy = yheight[-1] if yheight[-1] else maxy
miny, maxy = Decimal(miny), Decimal(maxy)
midy_ = str(Decimal((miny + maxy) /2))
midx_, midy_ = conv_lat_lon(midx_, midy_, "D.D8")
# get the integer span of latitude and longitude
spanx = int(maxx - minx)
spany = int(maxy - miny)
# set zoom level based on span of Longitude?
tinyset = [value for value in (-3, -2, -1, 0, 1, 2, 3)]
smallset = [value for value in (-4, -5, -6, -7, 4, 5, 6, 7)]
middleset = [value for value in (-8, -9, -10, -11, 8, 9, 10, 11)]
largeset = [value for value in (-11, -12, -13, -14, -15, -16,
-17, 11, 12, 13, 14, 15, 16, 17)]
if spany in tinyset or spany in smallset:
zoomlevel = 6
elif spany in middleset:
zoomlevel = 5
elif spany in largeset:
zoomlevel = 4
else:
zoomlevel = 3
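# Worked example (sketch): a longitude span of 5 degrees falls in smallset,
# giving zoomlevel 6; a span of 10 falls in middleset (zoomlevel 5); a span
# of 15 falls in largeset (zoomlevel 4); anything wider defaults to 3.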
# 0 = latitude, 1 = longitude, 2 = place title,
# 3 = handle, and 4 = date, 5 = event type...
# being sorted by date, latitude, and longitude...
place_lat_long = sorted(place_lat_long, key=itemgetter(4, 0, 1))
# for all plugins
# if family_detail_page
# if active
# call_(report, up, head)
# add narrative-maps style sheet
if self.usecms:
fname = "/".join([self.target_uri, "css", "narrative-maps.css"])
else:
fname = "/".join(["css", "narrative-maps.css"])
url = self.report.build_url_fname(fname, None, self.uplink)
head += Html("link", href=url, type="text/css", media="screen",
rel="stylesheet")
# add MapService specific javascript code
if self.mapservice == "Google":
src_js = GOOGLE_MAPS + "api/js?sensor=false"
if self.googlemapkey:
src_js += "&key=" + self.googlemapkey
head += Html("script", type="text/javascript",
src=src_js, inline=True)
else:
url = self.secure_mode
url += ("maxcdn.bootstrapcdn.com/bootstrap/3.3.7/"
"css/bootstrap.min.css")
head += Html("link", href=url, type="text/javascript",
rel="stylesheet")
src_js = self.secure_mode
src_js += "ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js"
head += Html("script", type="text/javascript",
src=src_js, inline=True)
src_js = self.secure_mode
src_js += "openlayers.org/en/v3.17.1/build/ol.js"
head += Html("script", type="text/javascript",
src=src_js, inline=True)
url = self.secure_mode
url += "openlayers.org/en/v3.17.1/css/ol.css"
head += Html("link", href=url, type="text/javascript",
rel="stylesheet")
src_js = self.secure_mode
src_js += ("maxcdn.bootstrapcdn.com/bootstrap/3.3.7/"
"js/bootstrap.min.js")
head += Html("script", type="text/javascript",
src=src_js, inline=True)
if number_markers > 0:
tracelife = "["
seq_ = 1
for index in range(0, (number_markers - 1)):
(latitude, longitude, placetitle, handle, date,
etype) = place_lat_long[index]
# are we using Google?
if self.mapservice == "Google":
# are we creating Family Links?
if self.googleopts == "FamilyLinks":
tracelife += """
new google.maps.LatLng(%s, %s),""" % (latitude, longitude)
# are we creating Drop Markers or Markers?
elif self.googleopts in ["Drop", "Markers"]:
tracelife += """
['%s', %s, %s, %d],""" % (placetitle.replace("'", "\\'"), latitude,
longitude, seq_)
# are we using OpenStreetMap?
else:
tracelife += """
[%f, %f, \'%s\'],""" % (float(longitude), float(latitude),
placetitle.replace("'", "\\'"))
seq_ += 1
# FIXME: The last element in the place_lat_long list is treated
# specially, and the code above is apparently repeated so as to
# avoid a comma at the end, and get the right closing. This is very
# ugly.
(latitude, longitude, placetitle, handle, date,
etype) = place_lat_long[-1]
# are we using Google?
if self.mapservice == "Google":
# are we creating Family Links?
if self.googleopts == "FamilyLinks":
tracelife += """
new google.maps.LatLng(%s, %s)
];""" % (latitude, longitude)
# are we creating Drop Markers or Markers?
elif self.googleopts in ["Drop", "Markers"]:
tracelife += """
['%s', %s, %s, %d]
];""" % (placetitle.replace("'", "\\'"), latitude, longitude, seq_)
# are we using OpenStreetMap?
elif self.mapservice == "OpenStreetMap":
tracelife += """
[%f, %f, \'%s\']
];""" % (float(longitude), float(latitude), placetitle.replace("'", "\\'"))
# begin MapDetail division...
with Html("div", class_="content", id="FamilyMapDetail") as mapdetail:
body += mapdetail
# add page title
mapdetail += Html("h3",
html_escape(self._("Tracking %s")
% self.get_name(person)),
inline=True)
# page description
msg = self._("This map page represents that person "
"and any descendants with all of their event/ places. "
"If you place your mouse over "
"the marker it will display the place name. "
"The markers and the Reference "
"list are sorted in date order (if any?). "
"Clicking on a place’s "
"name in the Reference section will take you "
"to that place’s page.")
mapdetail += Html("p", msg, id="description")
# this is the style element where the Map is held in the CSS...
with Html("div", id="map_canvas") as canvas:
mapdetail += canvas
# begin javascript inline code...
with Html("script", deter="deter",
style='width =100%; height =100%;',
type="text/javascript", indent=False) as jsc:
head += jsc
# Link to Gramps marker
fname = "/".join(['images', 'marker.png'])
marker_path = self.report.build_url_image("marker.png",
"images",
self.uplink)
jsc += MARKER_PATH % marker_path
# are we using Google?
if self.mapservice == "Google":
# are we creating Family Links?
if self.googleopts == "FamilyLinks":
if midy_ is None:
jsc += FAMILYLINKS % (tracelife, latitude,
longitude, int(10))
else:
jsc += FAMILYLINKS % (tracelife, midx_, midy_,
zoomlevel)
# are we creating Drop Markers?
elif self.googleopts == "Drop":
if midy_ is None:
jsc += DROPMASTERS % (tracelife, latitude,
longitude, int(10))
else:
jsc += DROPMASTERS % (tracelife, midx_, midy_,
zoomlevel)
# we are creating Markers only...
else:
if midy_ is None:
jsc += MARKERS % (tracelife, latitude,
longitude, int(10))
else:
jsc += MARKERS % (tracelife, midx_, midy_,
zoomlevel)
# we are using OpenStreetMap...
else:
if midy_ is None:
jsc += OSM_MARKERS % (tracelife,
longitude,
latitude, 10)
else:
jsc += OSM_MARKERS % (tracelife, midy_, midx_,
zoomlevel)
# if Google and Drop Markers are selected,
# then add "Drop Markers" button?
if self.mapservice == "Google" and self.googleopts == "Drop":
mapdetail += Html("button", _("Drop Markers"),
id="drop", onclick="drop()", inline=True)
# add div for popups.
with Html("div", id="popup", inline=True) as popup:
mapdetail += popup
# begin place reference section and its table...
with Html("div", class_="subsection", id="references") as section:
mapdetail += section
section += Html("h4", self._("References"), inline=True)
with Html("table", class_="infolist") as table:
section += table
thead = Html("thead")
table += thead
trow = Html("tr")
thead += trow
trow.extend(
Html("th", label, class_=colclass, inline=True)
for (label, colclass) in [
(_("Date"), "ColumnDate"),
(_("Place Title"), "ColumnPlace"),
(_("Event Type"), "ColumnType")
]
)
tbody = Html("tbody")
table += tbody
for (latitude, longitude, placetitle, handle, date,
etype) in place_lat_long:
trow = Html("tr")
tbody += trow
trow.extend(
Html("td", data, class_=colclass, inline=True)
for data, colclass in [
(date, "ColumnDate"),
(self.place_link(handle, placetitle,
uplink=True),
"ColumnPlace"),
(str(etype), "ColumnType")
]
)
# add body id for this page...
body.attr = 'id ="FamilyMap" onload ="initialize()"'
# add clearline for proper styling
# add footer section
footer = self.write_footer(None)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(familymappage, output_file, sio, 0)
def __display_family_map(self, person, place_lat_long):
"""
Create the family map link
@param: person -- The person to set in the box
@param: place_lat_long -- The center of the box
"""
# create family map page
self.__create_family_map(person, place_lat_long)
# begin family map division plus section title
with Html("div", class_="subsection", id="familymap") as familymap:
familymap += Html("h4", self._("Family Map"), inline=True)
# add family map link
person_handle = person.get_handle()
url = self.report.build_url_fname_html(person_handle, "maps", True)
familymap += self.family_map_link(person_handle, url)
# return family map link to its caller
return familymap
def draw_box(self, center, col, person):
"""
Draw the box around the AncestorTree Individual name box...
@param: center -- The center of the box
@param: col -- The generation number
@param: person -- The person to set in the box
"""
top = center - _HEIGHT/2
xoff = _XOFFSET+col*(_WIDTH+_HGAP)
sex = person.gender
if sex == Person.MALE:
divclass = "male"
elif sex == Person.FEMALE:
divclass = "female"
else:
divclass = "unknown"
boxbg = Html("div", class_="boxbg %s AncCol%s" % (divclass, col),
style="top: %dpx; left: %dpx;" % (top, xoff+1)
)
person_name = self.get_name(person)
# This does not use [new_]person_link because the requirements are
# unique
result = self.report.obj_dict.get(Person).get(person.handle)
if result is None or result[0] == "":
# The person is not included in the webreport or there is no link
# to them
boxbg += Html("span", person_name, class_="unlinked", inline=True)
else:
thumbnail_url = None
if self.create_media and col < 5:
photolist = person.get_media_list()
if photolist:
photo_handle = photolist[0].get_reference_handle()
photo = self.r_db.get_media_from_handle(photo_handle)
mime_type = photo.get_mime_type()
if mime_type:
region = self.media_ref_region_to_object(photo_handle,
person)
if region:
# make a thumbnail of this region
newpath = self.copy_thumbnail(
photo_handle, photo, region)
# TODO. Check if build_url_fname can be used.
newpath = "/".join(['..']*3 + [newpath])
if win():
newpath = newpath.replace('\\', "/")
thumbnail_url = newpath
else:
(photo_url,
thumbnail_url) = self.report.prepare_copy_media(
photo)
thumbnail_url = "/".join(['..']*3 + [thumbnail_url])
if win():
thumbnail_url = thumbnail_url.replace('\\', "/")
url = self.report.build_url_fname_html(person.handle, "ppl", True)
birth = death = ""
bd_event = get_birth_or_fallback(self.r_db, person)
if bd_event:
birth = self.rlocale.get_date(bd_event.get_date_object())
dd_event = get_death_or_fallback(self.r_db, person)
if dd_event:
death = self.rlocale.get_date(dd_event.get_date_object())
if death == "":
death = "..."
value = person_name + "<br/>*", birth, "<br/>+", death
if thumbnail_url is None:
boxbg += Html("a", href=url, class_="noThumb") + value
else:
thumb = Html("span", class_="thumbnail") + (
Html("img", src=thumbnail_url, alt="Image: " + person_name))
boxbg += Html("a", href=url) + thumb + value
shadow = Html(
"div", class_="shadow", inline=True,
style="top: %dpx; left: %dpx;" % (top + _SHADOW, xoff + _SHADOW))
return [boxbg, shadow]
def extend_line(self, coord_y0, coord_x0):
"""
        Draw an extended line
@param: coord_y0 -- The starting point
@param: coord_x0 -- The end of the line
"""
style = "top: %dpx; left: %dpx; width: %dpx"
ext_bv = Html("div", class_="bvline", inline=True,
style=style % (coord_y0, coord_x0, _HGAP/2)
)
ext_gv = Html("div", class_="gvline", inline=True,
style=style % (coord_y0+_SHADOW,
coord_x0, _HGAP/2+_SHADOW)
)
return [ext_bv, ext_gv]
def connect_line(self, coord_y0, coord_y1, col):
"""
        We need to draw a line between two points
@param: coord_y0 -- The starting point
@param: coord_y1 -- The end of the line
@param: col -- The generation number
"""
coord_y = min(coord_y0, coord_y1)
stylew = "top: %dpx; left: %dpx; width: %dpx;"
styleh = "top: %dpx; left: %dpx; height: %dpx;"
coord_x0 = _XOFFSET + col * _WIDTH + (col-1)*_HGAP + _HGAP/2
cnct_bv = Html("div", class_="bvline", inline=True,
style=stylew % (coord_y1, coord_x0, _HGAP/2))
cnct_gv = Html("div", class_="gvline", inline=True,
style=stylew % (coord_y1+_SHADOW,
coord_x0+_SHADOW,
_HGAP/2+_SHADOW))
cnct_bh = Html("div", class_="bhline", inline=True,
style=styleh % (coord_y, coord_x0,
abs(coord_y0-coord_y1)))
cnct_gh = Html("div", class_="gvline", inline=True,
style=styleh % (coord_y+_SHADOW,
coord_x0+_SHADOW,
abs(coord_y0-coord_y1)))
return [cnct_bv, cnct_gv, cnct_bh, cnct_gh]
def draw_connected_box(self, center1, center2, col, handle):
"""
Draws the connected box for Ancestor Tree on the Individual Page
@param: center1 -- The first box to connect
@param: center2 -- The destination box to draw
@param: col -- The generation number
@param: handle -- The handle of the person to set in the new box
"""
box = []
if not handle:
return box
person = self.r_db.get_person_from_handle(handle)
box = self.draw_box(center2, col, person)
box += self.connect_line(center1, center2, col)
return box
def display_tree(self):
"""
Display the Ancestor Tree
"""
tree = []
if not self.person.get_main_parents_family_handle():
return None
generations = self.report.options['graphgens']
max_in_col = 1 << (generations-1)
max_size = _HEIGHT*max_in_col + _VGAP*(max_in_col+1)
center = int(max_size/2)
with Html("div", id="tree", class_="subsection") as tree:
tree += Html("h4", self._('Ancestors'), inline=True)
with Html("div", id="treeContainer",
style="width:%dpx; height:%dpx;" % (
_XOFFSET+(generations)*_WIDTH+(generations-1)*_HGAP,
max_size)
) as container:
tree += container
container += self.draw_tree(1, generations, max_size,
0, center, self.person.handle)
return tree
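    # A rough worked example of the sizing above, assuming generations=4:
    # max_in_col = 1 << 3 = 8 boxes in the last column, so the tree container is
    # _HEIGHT*8 + _VGAP*9 pixels tall and _XOFFSET + 4*_WIDTH + 3*_HGAP pixels
    # wide, with the root person drawn at half the container height.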
def draw_tree(self, gen_nr, maxgen, max_size, old_center,
new_center, person_handle):
"""
Draws the Ancestor Tree
@param: gen_nr -- The generation number to draw
@param: maxgen -- The maximum number of generations to draw
@param: max_size -- The maximum size of the drawing area
@param: old_center -- The position of the old box
@param: new_center -- The position of the new box
@param: person_handle -- The handle of the person to draw
"""
tree = []
if gen_nr > maxgen:
return tree
gen_offset = int(max_size / pow(2, gen_nr+1))
if person_handle:
person = self.r_db.get_person_from_handle(person_handle)
else:
person = None
if not person:
return tree
if gen_nr == 1:
tree = self.draw_box(new_center, 0, person)
else:
tree = self.draw_connected_box(old_center, new_center,
gen_nr-1, person_handle)
if gen_nr == maxgen:
return tree
family_handle = person.get_main_parents_family_handle()
if family_handle:
line_offset = _XOFFSET + gen_nr*_WIDTH + (gen_nr-1)*_HGAP
tree += self.extend_line(new_center, line_offset)
family = self.r_db.get_family_from_handle(family_handle)
f_center = new_center-gen_offset
f_handle = family.get_father_handle()
tree += self.draw_tree(gen_nr+1, maxgen, max_size,
new_center, f_center, f_handle)
m_center = new_center+gen_offset
m_handle = family.get_mother_handle()
tree += self.draw_tree(gen_nr+1, maxgen, max_size,
new_center, m_center, m_handle)
return tree
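    # A short sketch of the recursion above: at generation gen_nr the parents are
    # placed at new_center +/- max_size / 2**(gen_nr+1), so (assuming a
    # hypothetical max_size of 800) the root sits at 400, the parents at 200 and
    # 600, and the grandparents at 100/300 and 500/700; each new box is tied back
    # to its child through draw_connected_box() / connect_line().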
def display_ind_associations(self, assoclist):
"""
Display an individual's associations
@param: assoclist -- The list of persons for association
"""
# begin Associations division
with Html("div", class_="subsection", id="Associations") as section:
section += Html("h4", self._('Associations'), inline=True)
with Html("table", class_="infolist assoclist") as table:
section += table
thead = Html("thead")
table += thead
trow = Html("tr")
thead += trow
assoc_row = [
(self._("Person"), 'Person'),
(self._('Relationship'), 'Relationship'),
(self._("Notes"), 'Notes'),
(self._("Sources"), 'Sources'),
]
trow.extend(
Html("th", label, class_="Column" + colclass, inline=True)
for (label, colclass) in assoc_row)
tbody = Html("tbody")
table += tbody
for person_ref in assoclist:
trow = Html("tr")
tbody += trow
person_lnk = self.new_person_link(person_ref.ref,
uplink=True)
index = 0
for data in [
person_lnk,
person_ref.get_relation(),
self.dump_notes(person_ref.get_note_list()),
self.get_citation_links(
person_ref.get_citation_list()),
]:
# get colclass from assoc_row
colclass = assoc_row[index][1]
trow += Html("td", data, class_="Column" + colclass,
inline=True)
index += 1
# return section to its callers
return section
def display_ind_pedigree(self):
"""
Display an individual's pedigree
"""
birthorder = self.report.options["birthorder"]
# Define helper functions
def children_ped(ol_html):
"""
Create a children list
@param: ol_html -- The html element to complete
"""
if family:
childlist = family.get_child_ref_list()
childlist = [child_ref.ref for child_ref in childlist]
children = add_birthdate(self.r_db, childlist, self.rlocale)
if birthorder:
children = sorted(children)
for birthdate, birth, death, handle in children:
if handle == self.person.get_handle():
child_ped(ol_html)
elif handle:
child = self.r_db.get_person_from_handle(handle)
if child:
ol_html += Html("li") + self.pedigree_person(child)
else:
child_ped(ol_html)
return ol_html
def child_ped(ol_html):
"""
Create a child element list
@param: ol_html -- The html element to complete
"""
with Html("li", self.name, class_="thisperson") as pedfam:
family = self.pedigree_family()
if family:
pedfam += Html("ol", class_="spouselist") + family
return ol_html + pedfam
# End of helper functions
parent_handle_list = self.person.get_parent_family_handle_list()
if parent_handle_list:
parent_handle = parent_handle_list[0]
family = self.r_db.get_family_from_handle(parent_handle)
father_handle = family.get_father_handle()
mother_handle = family.get_mother_handle()
if mother_handle:
mother = self.r_db.get_person_from_handle(mother_handle)
else:
mother = None
if father_handle:
father = self.r_db.get_person_from_handle(father_handle)
else:
father = None
else:
family = None
father = None
mother = None
with Html("div", id="pedigree", class_="subsection") as ped:
ped += Html("h4", self._('Pedigree'), inline=True)
with Html("ol", class_="pedigreegen") as pedol:
ped += pedol
if father and mother:
pedfa = Html("li") + self.pedigree_person(father)
pedol += pedfa
with Html("ol") as pedma:
pedfa += pedma
pedma += (Html("li", class_="spouse") +
self.pedigree_person(mother) +
children_ped(Html("ol"))
)
elif father:
pedol += (Html("li") + self.pedigree_person(father) +
children_ped(Html("ol"))
)
elif mother:
pedol += (Html("li") + self.pedigree_person(mother) +
children_ped(Html("ol"))
)
else:
pedol += (Html("li") + children_ped(Html("ol")))
return ped
def display_ind_general(self):
"""
display an individual's general information...
"""
self.page_title = self.sort_name
thumbnail = self.disp_first_img_as_thumbnail(
self.person.get_media_list(), self.person)
section_title = Html("h3", html_escape(self.page_title),
inline=True) + (
Html('sup') + (
Html('small') +
self.get_citation_links(
self.person.get_citation_list())))
# begin summaryarea division
with Html("div", id='summaryarea') as summaryarea:
# begin general details table
with Html("table", class_="infolist") as table:
summaryarea += table
primary_name = self.person.get_primary_name()
all_names = [primary_name] + self.person.get_alternate_names()
# if the callname or the nickname is the same as the 'first
# name' (given name), then they are not displayed.
first_name = primary_name.get_first_name()
# Names [and their sources]
for name in all_names:
pname = html_escape(_nd.display_name(name))
pname += self.get_citation_links(name.get_citation_list())
                # if we have just a first name, then the name is preceded
# by ", " which doesn't exactly look very nice printed on
# the web page
if pname[:2] == ', ': # TODO for Arabic, translate this?
pname = pname[2:]
if name != primary_name:
datetext = self.rlocale.get_date(name.date)
if datetext:
pname = datetext + ': ' + pname
type_ = self._(name.get_type().xml_str())
trow = Html("tr") + (
Html("td", type_, class_="ColumnAttribute",
inline=True)
)
tcell = Html("td", pname, class_="ColumnValue")
# display any notes associated with this name
notelist = name.get_note_list()
if len(notelist):
unordered = Html("ul")
for notehandle in notelist:
note = self.r_db.get_note_from_handle(notehandle)
if note:
note_text = self.get_note_format(note, True)
# attach note
unordered += note_text
tcell += unordered
trow += tcell
table += trow
# display the callname associated with this name.
call_name = name.get_call_name()
if call_name and call_name != first_name:
trow = Html("tr") + (
Html("td", _("Call Name"), class_="ColumnAttribute",
inline=True),
Html("td", call_name, class_="ColumnValue",
inline=True)
)
table += trow
# display the nickname associated with this name. Note that
# this no longer displays the Nickname attribute (if
# present), because the nickname attribute is deprecated in
# favour of the nick_name property of the name structure
# (see http://gramps.1791082.n4.nabble.com/Where-is-
# nickname-stored-tp4469779p4484272.html), and also because
# the attribute is (normally) displayed lower down the
                # Narrative Web report.
nick_name = name.get_nick_name()
if nick_name and nick_name != first_name:
trow = Html("tr") + (
Html("td", self._("Nick Name"),
class_="ColumnAttribute",
inline=True),
Html("td", nick_name, class_="ColumnValue",
inline=True)
)
table += trow
# Gramps ID
person_gid = self.person.get_gramps_id()
if not self.noid and person_gid:
trow = Html("tr") + (
Html("td", self._("Gramps ID"),
class_="ColumnAttribute",
inline=True),
Html("td", person_gid, class_="ColumnValue",
inline=True)
)
table += trow
# Gender
gender = self._(self.gender_map[self.person.gender])
trow = Html("tr") + (
Html("td", self._("Gender"), class_="ColumnAttribute",
inline=True),
Html("td", gender, class_="ColumnValue", inline=True)
)
table += trow
# Age At Death???
birth_date = Date.EMPTY
birth_ref = self.person.get_birth_ref()
if birth_ref:
birth = self.r_db.get_event_from_handle(birth_ref.ref)
if birth:
birth_date = birth.get_date_object()
if birth_date and birth_date is not Date.EMPTY:
alive = probably_alive(self.person, self.r_db, Today())
death_date = _find_death_date(self.r_db, self.person)
if not alive and death_date is not None:
nyears = death_date - birth_date
nyears = nyears.format(precision=3,
dlocale=self.rlocale)
trow = Html("tr") + (
Html("td", self._("Age at Death"),
class_="ColumnAttribute", inline=True),
Html("td", nyears,
class_="ColumnValue", inline=True)
)
table += trow
# return all three pieces to its caller
# do NOT combine before returning
return thumbnail, section_title, summaryarea
def display_ind_events(self, place_lat_long):
"""
will create the events table
@param: place_lat_long -- For use in Family Map Pages. This will be None
if called from Family pages, which do not
create a Family Map
"""
event_ref_list = self.person.get_event_ref_list()
if not event_ref_list:
return None
# begin events division and section title
with Html("div", id="events", class_="subsection") as section:
section += Html("h4", self._("Events"), inline=True)
# begin events table
with Html("table", class_="infolist eventlist") as table:
section += table
thead = Html("thead")
table += thead
# attach event header row
thead += self.event_header_row()
tbody = Html("tbody")
table += tbody
for evt_ref in event_ref_list:
event = self.r_db.get_event_from_handle(evt_ref.ref)
if event:
# display event row
tbody += self.display_event_row(event, evt_ref,
place_lat_long,
True, True,
EventRoleType.PRIMARY)
return section
def display_parent(self, handle, title, rel):
"""
This will display a parent ...
@param: handle -- The person handle
@param: title -- Is the title of the web page
@param: rel -- The relation
"""
tcell1 = Html("td", title, class_="ColumnAttribute", inline=True)
tcell2 = Html("td", class_="ColumnValue", close=False, inline=True)
tcell2 += self.new_person_link(handle, uplink=True)
if rel and rel != ChildRefType(ChildRefType.BIRTH):
tcell2 += ''.join([' '] *3 + ['(%s)']) % str(rel)
person = self.r_db.get_person_from_handle(handle)
birth = death = ""
if person:
bd_event = get_birth_or_fallback(self.r_db, person)
if bd_event:
birth = self.rlocale.get_date(bd_event.get_date_object())
dd_event = get_death_or_fallback(self.r_db, person)
if dd_event:
death = self.rlocale.get_date(dd_event.get_date_object())
tcell3 = Html("td", birth, class_="ColumnDate",
inline=False, close=False, indent=False)
tcell4 = Html("td", death, class_="ColumnDate",
inline=True, close=False, indent=False)
tcell2 += tcell3
tcell2 += tcell4
# return table columns to its caller
return tcell1, tcell2
def get_reln_in_family(self, ind, family):
"""
        Display the relation of the individual in the family
@param: ind -- The person to use
@param: family -- The family
"""
child_handle = ind.get_handle()
child_ref_list = family.get_child_ref_list()
for child_ref in child_ref_list:
if child_ref.ref == child_handle:
return (child_ref.get_father_relation(),
child_ref.get_mother_relation())
return (None, None)
def display_ind_parent_family(self, birthmother, birthfather, family,
table,
first=False):
"""
Display the individual parent family
@param: birthmother -- The birth mother
@param: birthfather -- The birth father
@param: family -- The family
@param: table -- The html document to complete
@param: first -- Is this the first indiv ?
"""
if not first:
trow = Html("tr") + (Html("td", " ", colspan=3,
inline=True))
table += trow
# get the father
father_handle = family.get_father_handle()
if father_handle:
if father_handle == birthfather:
                # The parent may not be the birth father in this family, because it
# may be a step family. However, it will be odd to display the
# parent as anything other than "Father"
reln = self._("Father")
else:
# Stepfather may not always be quite right (for example, it may
# actually be StepFather-in-law), but it is too expensive to
# calculate out the correct relationship using the Relationship
# Calculator
reln = self._("Stepfather")
trow = Html("tr") + (self.display_parent(father_handle, reln, None))
table += trow
# get the mother
mother_handle = family.get_mother_handle()
if mother_handle:
if mother_handle == birthmother:
reln = self._("Mother")
else:
reln = self._("Stepmother")
trow = Html("tr") + (self.display_parent(mother_handle, reln, None))
table += trow
for child_ref in family.get_child_ref_list():
child_handle = child_ref.ref
child = self.r_db.get_person_from_handle(child_handle)
if child:
if child == self.person:
reln = ""
else:
try:
# We have a try except block here, because the two
# people MUST be siblings for the called Relationship
# routines to work. Depending on your definition of
# sibling, we cannot necessarily guarantee that.
sibling_type = self.rel_class.get_sibling_type(
self.r_db, self.person, child)
reln = self.rel_class.get_sibling_relationship_string(
sibling_type, self.person.gender, child.gender)
                        # We have a problem here: reln is never in the chosen
                        # language but in the default language.
                        # Does get_sibling_relationship_string work?
reln = reln[0].upper() + reln[1:]
except:
reln = self._("Not siblings")
val1 = " "
reln = val1 + reln
# Now output reln, child_link, (frel, mrel)
frel = child_ref.get_father_relation()
mrel = child_ref.get_mother_relation()
if frel != ChildRefType.BIRTH or mrel != ChildRefType.BIRTH:
frelmrel = "(%s, %s)" % (str(frel), str(mrel))
else:
frelmrel = ""
trow = Html("tr") + (
Html("td", reln, class_="ColumnAttribute", inline=True))
tcell = Html("td", val1, class_="ColumnValue", inline=True)
tcell += self.display_child_link(child_handle)
birth = death = ""
bd_event = get_birth_or_fallback(self.r_db, child)
if bd_event:
birth = self.rlocale.get_date(bd_event.get_date_object())
dd_event = get_death_or_fallback(self.r_db, child)
if dd_event:
death = self.rlocale.get_date(dd_event.get_date_object())
tcell2 = Html("td", birth, class_="ColumnDate",
inline=True)
tcell3 = Html("td", death, class_="ColumnDate",
inline=True)
trow += tcell
trow += tcell2
trow += tcell3
tcell = Html("td", frelmrel, class_="ColumnValue",
inline=True)
trow += tcell
table += trow
def display_step_families(self, parent_handle,
family,
all_family_handles,
birthmother, birthfather,
table):
"""
Display step families
@param: parent_handle -- The family parent handle to display
@param: family -- The family
@param: all_family_handles -- All known family handles
@param: birthmother -- The birth mother
@param: birthfather -- The birth father
@param: table -- The html document to complete
"""
if parent_handle:
parent = self.r_db.get_person_from_handle(parent_handle)
for parent_family_handle in parent.get_family_handle_list():
if parent_family_handle not in all_family_handles:
parent_family = self.r_db.get_family_from_handle(
parent_family_handle)
self.display_ind_parent_family(birthmother, birthfather,
parent_family, table)
all_family_handles.append(parent_family_handle)
def display_ind_center_person(self):
"""
Display the person's relationship to the center person
"""
center_person = self.r_db.get_person_from_gramps_id(
self.report.options['pid'])
if center_person is None:
return
relationship = self.rel_class.get_one_relationship(self.r_db,
self.person,
center_person)
if relationship == "": # No relation to display
return
# begin center_person division
section = ""
with Html("div", class_="subsection", id="parents") as section:
message = self._("Relation to the center person")
message += " ("
name_format = self.report.options['name_format']
primary_name = center_person.get_primary_name()
name = Name(primary_name)
name.set_display_as(name_format)
message += _nd.display_name(name)
message += ") : "
message += relationship
section += Html("h4", message, inline=True)
return section
def display_ind_parents(self):
"""
Display a person's parents
"""
parent_list = self.person.get_parent_family_handle_list()
if not parent_list:
return None
# begin parents division
with Html("div", class_="subsection", id="parents") as section:
section += Html("h4", self._("Parents"), inline=True)
# begin parents table
with Html("table", class_="infolist") as table:
section += table
thead = Html("thead")
table += thead
trow = Html("tr")
thead += trow
trow.extend(
Html("th", label, class_=colclass, inline=True)
for (label, colclass) in [
(self._("Relation to main person"), "ColumnAttribute"),
(self._("Name"), "ColumnValue"),
(self._("Birth date"), "ColumnValue"),
(self._("Death date"), "ColumnValue"),
(self._("Relation within this family "
"(if not by birth)"),
"ColumnValue")
]
)
tbody = Html("tbody")
all_family_handles = list(parent_list)
(birthmother, birthfather) = self.rel_class.get_birth_parents(
self.r_db, self.person)
first = True
for family_handle in parent_list:
family = self.r_db.get_family_from_handle(family_handle)
if family:
# Display this family
self.display_ind_parent_family(birthmother,
birthfather,
family, tbody, first)
first = False
if self.report.options['showhalfsiblings']:
# Display all families in which the parents are
# involved. This displays half siblings and step
# siblings
self.display_step_families(
family.get_father_handle(), family,
all_family_handles,
birthmother, birthfather, tbody)
self.display_step_families(
family.get_mother_handle(), family,
all_family_handles,
birthmother, birthfather, tbody)
table += tbody
return section
def pedigree_person(self, person):
"""
will produce a hyperlink for a pedigree person ...
@param: person -- The person
"""
hyper = self.new_person_link(person.handle, person=person, uplink=True)
return hyper
def pedigree_family(self):
"""
Returns a family pedigree
"""
ped = []
for family_handle in self.person.get_family_handle_list():
rel_family = self.r_db.get_family_from_handle(family_handle)
spouse_handle = utils.find_spouse(self.person, rel_family)
if spouse_handle:
spouse = self.r_db.get_person_from_handle(spouse_handle)
pedsp = (Html("li", class_="spouse") +
self.pedigree_person(spouse)
)
else:
pedsp = (Html("li", class_="spouse"))
ped += [pedsp]
childlist = rel_family.get_child_ref_list()
if childlist:
with Html("ol") as childol:
pedsp += [childol]
for child_ref in childlist:
child = self.r_db.get_person_from_handle(child_ref.ref)
if child:
childol += (Html("li") +
self.pedigree_person(child)
)
return ped
| gpl-2.0 | -1,617,659,728,472,627,700 | 41.24581 | 80 | 0.463608 | false |
faddai/newfies-dialer | newfies/user_profile/views.py | 1 | 11084 | #
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2012 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <[email protected]>
#
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import PasswordChangeForm
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.template.context import RequestContext
from django.utils.translation import ugettext_lazy as _
from django.utils import simplejson
from django.db.models import Q
from django.conf import settings
from notification import models as notification
from dialer_campaign.models import common_contact_authorization
from dialer_campaign.views import current_view, notice_count, grid_common_function
from dialer_campaign.function_def import user_dialer_setting_msg, variable_value
from dialer_settings.models import DialerSetting
from user_profile.models import UserProfile
from user_profile.forms import UserChangeDetailForm, \
UserChangeDetailExtendForm, \
CheckPhoneNumberForm,\
UserProfileForm
@login_required
def customer_detail_change(request):
"""User Detail change on Customer UI
**Attributes**:
* ``form`` - UserChangeDetailForm, UserChangeDetailExtendForm, PasswordChangeForm, CheckPhoneNumberForm
* ``template`` - 'frontend/registration/user_detail_change.html'
**Logic Description**:
* User is able to change his/her detail.
"""
user_detail = User.objects.get(username=request.user)
try:
user_detail_extened = UserProfile.objects.get(user=user_detail)
except UserProfile.DoesNotExist:
#create UserProfile
user_detail_extened = UserProfile(user=user_detail)
user_detail_extened.save()
user_detail_form = UserChangeDetailForm(request.user,
instance=user_detail)
user_detail_extened_form = UserChangeDetailExtendForm(request.user,
instance=user_detail_extened)
user_password_form = PasswordChangeForm(user=request.user)
check_phone_no_form = CheckPhoneNumberForm()
try:
user_ds = UserProfile.objects.get(user=request.user)
dialer_set = DialerSetting.objects.get(id=user_ds.dialersetting.id)
except:
dialer_set = ''
user_notification = \
notification.Notice.objects.filter(recipient=request.user)
# Search on sender name
q = (Q(sender=request.user))
if q:
user_notification = user_notification.filter(q)
msg_detail = ''
msg_pass = ''
msg_number = ''
msg_note = ''
error_detail = ''
error_pass = ''
error_number = ''
action = ''
if 'action' in request.GET:
action = request.GET['action']
if request.GET.get('msg_note') == 'true':
msg_note = request.session['msg_note']
# Mark all notification as read
if request.GET.get('notification') == 'mark_read_all':
notification_list = notification.Notice.objects.filter(unseen=1, recipient=request.user)
notification_list.update(unseen=0)
msg_note = _('All notifications are marked as read.')
if request.method == 'POST':
if request.POST['form-type'] == "change-detail":
user_detail_form = UserChangeDetailForm(request.user, request.POST,
instance=user_detail)
user_detail_extened_form = UserChangeDetailExtendForm(request.user,
request.POST,
instance=user_detail_extened)
action = 'tabs-1'
if user_detail_form.is_valid() and user_detail_extened_form.is_valid():
user_detail_form.save()
user_detail_extened_form.save()
msg_detail = _('Detail has been changed.')
else:
error_detail = _('Please correct the errors below.')
elif request.POST['form-type'] == "check-number": # check phone no
action = 'tabs-5'
check_phone_no_form = CheckPhoneNumberForm(data=request.POST)
if check_phone_no_form.is_valid():
if not common_contact_authorization(request.user,
request.POST['phone_number']):
error_number = _('This phone number is not authorized.')
else:
msg_number = _('This phone number is authorized.')
else:
error_number = _('Please correct the errors below.')
else: # "change-password"
user_password_form = PasswordChangeForm(user=request.user,
data=request.POST)
action = 'tabs-2'
if user_password_form.is_valid():
user_password_form.save()
msg_pass = _('Your password has been changed.')
else:
error_pass = _('Please correct the errors below.')
template = 'frontend/registration/user_detail_change.html'
data = {
'module': current_view(request),
'user_detail_form': user_detail_form,
'user_detail_extened_form': user_detail_extened_form,
'user_password_form': user_password_form,
'check_phone_no_form': check_phone_no_form,
'user_notification': user_notification,
'msg_detail': msg_detail,
'msg_pass': msg_pass,
'msg_number': msg_number,
'msg_note': msg_note,
'error_detail': error_detail,
'error_pass': error_pass,
'error_number': error_number,
'notice_count': notice_count(request),
'dialer_set': dialer_set,
'dialer_setting_msg': user_dialer_setting_msg(request.user),
'action': action,
}
return render_to_response(template, data,
context_instance=RequestContext(request))
def call_style(val):
"""Notification icon style"""
unseen_style = \
'style="text-decoration:none;background-image:url(%snewfies/icons/new.png);"' \
% settings.STATIC_URL
seen_style = \
'style="text-decoration:none;background-image:url(%snewfies/icons/tick.png);"' \
% settings.STATIC_URL
if val:
return unseen_style
else:
return seen_style
# Notification
@login_required
def notification_grid(request):
"""notification list in json format for flexigrid
**Model**: notification.Notice
"""
grid_data = grid_common_function(request)
page = int(grid_data['page'])
start_page = int(grid_data['start_page'])
end_page = int(grid_data['end_page'])
sortorder_sign = grid_data['sortorder_sign']
sortname = grid_data['sortname']
user_notification = \
notification.Notice.objects.filter(recipient=request.user)
# Search on sender name
q = (Q(sender=request.user))
if q:
user_notification = user_notification.filter(q)
count = user_notification.count()
user_notification_list = \
user_notification.order_by(sortorder_sign + sortname)[start_page:end_page]
rows = [{'id': row.id,
'cell': ['<input type="checkbox" name="select" class="checkbox"\
value="' + str(row.id) + '" />',
str(row.message),
str(row.notice_type),
str(row.sender),
str(row.added),
str('<a href="../update_notice_status_cust/' + str(row.id) + '/" class="icon" ' \
+ call_style(row.unseen) + '> </a>' ),
]}for row in user_notification_list ]
data = {'rows': rows,
'page': page,
'total': count}
return HttpResponse(simplejson.dumps(data), mimetype='application/json',
content_type="application/json")
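# For reference, the JSON produced above follows the shape flexigrid expects,
# e.g. (values invented):
#   {"rows": [{"id": "42", "cell": ["<input .../>", "message", "type",
#                                   "sender", "date", "<a .../>"]}],
#    "page": 1, "total": 27}
# where "page" echoes the requested page and "total" is the unpaginated count
# used to draw the pager.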
@login_required
def notification_del_read(request, object_id):
"""Delete notification for the logged in user
**Attributes**:
* ``object_id`` - Selected notification object
* ``object_list`` - Selected notification objects
**Logic Description**:
* Delete/Mark as Read the selected notification from the notification list
"""
try:
# When object_id is not 0
notification_obj = notification.Notice.objects.get(pk=object_id)
# Delete/Read notification
if object_id:
if request.POST.get('mark_read') == 'false':
request.session["msg_note"] = _('"%(name)s" is deleted.') \
% {'name': notification_obj.notice_type}
notification_obj.delete()
else:
request.session["msg_note"] = _('"%(name)s" is marked as read.') \
% {'name': notification_obj.notice_type}
notification_obj.update(unseen=0)
return HttpResponseRedirect('/user_detail_change/?action=tabs-3&msg_note=true')
except:
# When object_id is 0 (Multiple records delete/mark as read)
values = request.POST.getlist('select')
values = ", ".join(["%s" % el for el in values])
notification_list = notification.Notice.objects.extra(where=['id IN (%s)' % values])
if request.POST.get('mark_read') == 'false':
request.session["msg_note"] = _('%(count)s notification(s) are deleted.')\
% {'count': notification_list.count()}
notification_list.delete()
else:
request.session["msg_note"] = _('%(count)s notification(s) are marked as read.')\
% {'count': notification_list.count()}
notification_list.update(unseen=0)
return HttpResponseRedirect('/user_detail_change/?action=tabs-3&msg_note=true')
def common_notification_status(request, id):
"""Notification Status (e.g. seen/unseen) need to be change.
It is a common function for admin and customer UI
**Attributes**:
* ``pk`` - primary key of notice record
**Logic Description**:
        * The selected notification's status needs to be changed.
          The changed status can be either seen or unseen.
"""
notice = notification.Notice.objects.get(pk=id)
if notice.unseen == 1:
notice.unseen = 0
else:
notice.unseen = 1
notice.save()
return True
@login_required
def update_notice_status_cust(request, id):
"""Notification Status (e.g. seen/unseen) can be changed from
    the customer interface"""
common_notification_status(request, id)
return HttpResponseRedirect('/user_detail_change/?action=tabs-3')
| mpl-2.0 | 5,991,948,951,880,823,000 | 37.352941 | 111 | 0.603934 | false |
jiasir/pycs | vulpo/pyami/scriptbase.py | 1 | 1430 | import os
import sys
from vulpo.utils import ShellCommand, get_ts
import vulpo
import vulpo.utils
class ScriptBase(object):
def __init__(self, config_file=None):
self.instance_id = vulpo.config.get('Instance', 'instance-id', 'default')
self.name = self.__class__.__name__
self.ts = get_ts()
if config_file:
vulpo.config.read(config_file)
def notify(self, subject, body=''):
vulpo.utils.notify(subject, body)
def mkdir(self, path):
if not os.path.isdir(path):
try:
os.mkdir(path)
except:
vulpo.log.error('Error creating directory: %s' % path)
def umount(self, path):
if os.path.ismount(path):
self.run('umount %s' % path)
def run(self, command, notify=True, exit_on_error=False, cwd=None):
self.last_command = ShellCommand(command, cwd=cwd)
if self.last_command.status != 0:
vulpo.log.error('Error running command: "%s". Output: "%s"' % (command, self.last_command.output))
if notify:
self.notify('Error encountered', \
'Error running the following command:\n\t%s\n\nCommand output:\n\t%s' % \
(command, self.last_command.output))
if exit_on_error:
sys.exit(-1)
return self.last_command.status
def main(self):
pass
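    # A minimal sketch of how this base class is meant to be used; the subclass
    # name, paths and command below are hypothetical:
    #
    #   class BackupScript(ScriptBase):
    #       def main(self):
    #           self.mkdir('/mnt/backup')
    #           status = self.run('rsync -a /data /mnt/backup')
    #           if status == 0:
    #               self.notify('Backup finished', 'rsync completed cleanly')
    #
    # run() returns the command's exit status (and can alert/exit on failure),
    # while notify() hands the subject and body to vulpo.utils.notify().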
| mit | -3,219,314,286,743,809,500 | 31.5 | 110 | 0.565734 | false |
platipy/spyral | examples/collisions.py | 1 | 1514 | try:
import _path
except ImportError:
pass
import spyral
SIZE = (640, 480)
BG_COLOR = (0, 0, 0)
class Square(spyral.Sprite):
def __init__(self, scene, direction, color=(255, 0,0)):
spyral.Sprite.__init__(self, scene)
self.image = spyral.Image(size=(16, 16)).fill(color)
self.direction = direction
self.anchor = 'center'
spyral.event.register("director.update", self.update)
def update(self):
self.x += self.direction * 4
if not self.collide_rect(self.scene.rect):
self.x -= self.direction * 4
self.flip()
def flip(self):
self.direction *= -1
class Game(spyral.Scene):
def __init__(self):
spyral.Scene.__init__(self, SIZE)
self.background = spyral.Image(size=SIZE).fill(BG_COLOR)
self.left_square = Square(self, 1, (0,255,0))
self.left_square.pos = self.rect.midleft
self.right_square = Square(self, -1)
self.right_square.pos = self.rect.midright
spyral.event.register("system.quit", spyral.director.quit)
spyral.event.register("director.update", self.update)
def update(self):
# Collision test
if self.left_square.collide_sprite(self.right_square):
self.right_square.flip()
self.left_square.flip()
if __name__ == "__main__":
spyral.director.init(SIZE) # the director is the manager for your scenes
spyral.director.run(scene=Game()) # This will run your game. It will not return.
| lgpl-2.1 | -6,099,856,198,345,127,000 | 30.541667 | 84 | 0.612285 | false |
DOAJ/doaj | portality/formcontext/formcontext.py | 1 | 82183 | import json
import uuid
from datetime import datetime
from flask import render_template, url_for, request
from flask_login import current_user
import portality.formcontext.forms
from portality.crosswalks.journal_form import JournalFormXWalk
from portality.crosswalks.article_form import ArticleFormXWalk
from portality.crosswalks.application_form import ApplicationFormXWalk
from portality import constants
from portality import models, app_email, util
from portality.bll import DOAJ
from portality.core import app
from portality.formcontext import forms, render, choices, FormContextException
from portality.lcc import lcc_jstree
from portality.ui.messages import Messages
import portality.notifications.application_emails as emails
from portality.forms.application_forms import JAVASCRIPT_FUNCTIONS
ACC_MSG = 'Please note you <span class="red">cannot edit</span> this application as it has been accepted into the DOAJ.'
SCOPE_MSG = 'Please note you <span class="red">cannot edit</span> this application as you don\'t have the necessary ' \
'account permissions to edit applications which are {0}.'
FIELDS_WITH_DESCRIPTION = ["publisher", "society_institution", "platform", "title", "alternative_title"]
URL_FIELDS = ["url", "processing_charges_url", "submission_charges_url", "articles_last_year_url", "digital_archiving_policy_url", "editorial_board_url", "review_process_url", "instructions_authors_url", "oa_statement_url", "license_url", "waiver_policy_url", "download_statistics_url", "copyright_url", "publishing_rights_url", "plagiarism_screening_url", "license_embedded_url", "aims_scope_url"]
class FormContext(object):
def __init__(self, form_data=None, source=None, formulaic_context=None):
# initialise our core properties
self._source = source
self._target = None
self._form_data = form_data
self._form = None
self._renderer = None
self._template = None
self._alert = []
self._info = ''
self._formulaic = formulaic_context
# initialise the renderer (falling back to a default if necessary)
self.make_renderer()
if self.renderer is None:
self.renderer = render.Renderer()
# specify the jinja template that will wrap the renderer
self.set_template()
# now create our form instance, with the form_data (if there is any)
if form_data is not None:
self.data2form()
# if there isn't any form data, then we should create the form properties from source instead
elif source is not None:
self.source2form()
# if there is no source, then a blank form object
else:
self.blank_form()
############################################################
# getters and setters on the main FormContext properties
############################################################
@property
def form(self):
return self._form
@form.setter
def form(self, val):
self._form = val
@property
def source(self):
return self._source
@property
def form_data(self):
return self._form_data
@property
def target(self):
return self._target
@target.setter
def target(self, val):
self._target = val
@property
def renderer(self):
return self._renderer
@renderer.setter
def renderer(self, val):
self._renderer = val
@property
def template(self):
return self._template
@template.setter
def template(self, val):
self._template = val
@property
def alert(self):
return self._alert
def add_alert(self, val):
self._alert.append(val)
@property
def info(self):
return self._info
@info.setter
def info(self, val):
self._info = val
############################################################
# Lifecycle functions that subclasses should implement
############################################################
def make_renderer(self):
"""
        This will be called during init, and must populate the self.renderer property
"""
pass
def set_template(self):
"""
This will be called during init, and must populate the self.template property with the path to the jinja template
"""
pass
def pre_validate(self):
"""
This will be run before validation against the form is run.
Use it to patch the form with any relevant data, such as fields which were disabled
"""
pass
def blank_form(self):
"""
        This will be called during init, and must populate the self.form property with an instance of the form in this
context, based on no originating source or form data
"""
pass
def data2form(self):
"""
This will be called during init, and must convert the form_data into an instance of the form in this context,
and write to self.form
"""
pass
def source2form(self):
"""
This will be called during init, and must convert the source object into an instance of the form in this
context, and write to self.form
"""
pass
def form2target(self):
"""
        Convert the form object into the target system object, and write to self.target
"""
pass
def patch_target(self):
"""
Patch the target with data from the source. This will be run by the finalise method (unless you override it)
"""
pass
def finalise(self, *args, **kwargs):
"""
Finish up with the FormContext. Carry out any final workflow tasks, etc.
"""
self.form2target()
self.patch_target()
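    # A minimal sketch of how a concrete context is expected to fill in the hooks
    # above; the form, crosswalk and template names below are hypothetical:
    #
    #   class MyReview(FormContext):
    #       def make_renderer(self):
    #           self.renderer = render.Renderer()
    #
    #       def set_template(self):
    #           self.template = "formcontext/my_review.html"
    #
    #       def blank_form(self):
    #           self.form = forms.MyReviewForm()
    #
    #       def data2form(self):
    #           self.form = forms.MyReviewForm(formdata=self.form_data)
    #
    #       def source2form(self):
    #           self.form = forms.MyReviewForm(data=MyXWalk.obj2form(self.source))
    #
    #       def form2target(self):
    #           self.target = MyXWalk.form2obj(self.form)
    #
    #       def patch_target(self):
    #           # copy over fields from self.source that the form does not carry
    #           self.target.set_created(self.source.created_date)
    #
    # finalise() then simply runs form2target() followed by patch_target(), so
    # overrides usually call super() before doing any extra workflow.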
############################################################
# Functions which can be called directly, but may be overridden if desired
############################################################
def validate(self):
self.pre_validate()
f = self.form
valid = False
if f is not None:
valid = f.validate()
# if this isn't a valid form, record the fields that have errors
# with the renderer for use later
if not valid:
error_fields = []
for field in self.form:
if field.errors:
error_fields.append(field.short_name)
return valid
@property
def errors(self):
f = self.form
if f is not None:
return f.errors
return False
def render_template(self, **kwargs):
return render_template(self.template, form_context=self, **kwargs)
#def render_field_group(self, field_group_name=None, **kwargs):
# return self.renderer.render_field_group(self, field_group_name, **kwargs)
def fieldset(self, fieldset_name=None):
return self._formulaic.fieldset(fieldset_name)
def fieldsets(self):
return self._formulaic.fieldsets()
def check_field_group_exists(self, field_group_name):
return self.renderer.check_field_group_exists(field_group_name)
@property
def ui_settings(self):
return self._formulaic.ui_settings
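# A minimal sketch of how a view is expected to drive one of these contexts;
# the role and request handling here are illustrative only:
#
#   fc = ApplicationFormFactory.get_form_context(role="admin",
#                                                source=application,
#                                                form_data=request.form)
#   if request.method == "POST" and fc.validate():
#       fc.finalise()                      # build fc.target and run any workflow
#   return fc.render_template(lcc_tree=lcc_jstree)
#
# validate() runs pre_validate() and then WTForms validation, while fc.errors and
# fc.alert carry anything that needs reporting back to the template.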
class PrivateContext(FormContext):
def _expand_descriptions(self, fields):
# add the contents of a few fields to their descriptions since select2 autocomplete
# would otherwise obscure the full values
for field in fields:
if field in self.form.data:
if self.form[field].data:
if not self.form[field].description:
self.form[field].description = '<small>Full contents: ' + self.form[field].data + '</small>'
else:
self.form[field].description += '<br><br><small>Full contents: ' + self.form[field].data + '</small>'
def _expand_url_descriptions(self, fields):
# add the contents of a few fields to their descriptions since select2 autocomplete
# would otherwise obscure the full values
for field in fields:
if field in self.form.data:
if self.form[field].data:
if not self.form[field].description:
self.form[field].description = '<small>Full contents: <a href=' + self.form[field].data + " target='_blank'>" + self.form[field].data + "</a><small>"
else:
self.form[field].description += '<br><br><small>Full contents: <a href=' + self.form[field].data + " target='_blank'>" + self.form[field].data + "</a><small>"
def _carry_fixed_aspects(self):
if self.source is None:
raise FormContextException("Cannot carry data from a non-existent source")
now = datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
# copy over any important fields from the previous version of the object
created_date = self.source.created_date if self.source.created_date else now
self.target.set_created(created_date)
if "id" in self.source.data:
self.target.data['id'] = self.source.data['id']
try:
if self.source.current_application:
self.target.set_current_application(self.source.current_application)
except AttributeError:
# this means that the source doesn't know about current_applications, which is fine
pass
try:
if self.source.current_journal:
self.target.set_current_journal(self.source.current_journal)
except AttributeError:
# this means that the source doesn't know about current_journals, which is fine
pass
try:
if self.source.related_journal:
self.target.set_related_journal(self.source.related_journal)
except AttributeError:
# this means that the source doesn't know about related_journals, which is fine
pass
try:
if self.source.related_applications:
related = self.source.related_applications
for rel in related:
self.target.add_related_application(rel.get("application_id"), rel.get("date_accepted"))
except AttributeError:
# this means that the source doesn't know about related_applications, which is fine
pass
# if the source is a journal, we need to carry the in_doaj flag
if isinstance(self.source, models.Journal):
self.target.set_in_doaj(self.source.is_in_doaj())
@staticmethod
def _subjects2str(subjects):
subject_strings = []
for sub in subjects:
subject_strings.append('{term}'.format(term=sub.get('term')))
return ', '.join(subject_strings)
def _merge_notes_forward(self, allow_delete=False):
if self.source is None:
raise FormContextException("Cannot carry data from a non-existent source")
if self.target is None:
raise FormContextException("Cannot carry data on to a non-existent target - run the xwalk first")
# first off, get the notes (by reference) in the target and the notes from the source
tnotes = self.target.notes
snotes = self.source.notes
# if there are no notes, we might not have the notes by reference, so later will
# need to set them by value
apply_notes_by_value = len(tnotes) == 0
# for each of the target notes we need to get the original dates from the source notes
for n in tnotes:
for sn in snotes:
if n.get("note") == sn.get("note"):
n["date"] = sn.get("date")
# record the positions of any blank notes
i = 0
removes = []
for n in tnotes:
if n.get("note").strip() == "":
removes.append(i)
i += 1
# actually remove all the notes marked for deletion
removes.sort(reverse=True)
for r in removes:
tnotes.pop(r)
# finally, carry forward any notes that aren't already in the target
if not allow_delete:
for sn in snotes:
found = False
for tn in tnotes:
if sn.get("note") == tn.get("note"):
found = True
if not found:
tnotes.append(sn)
if apply_notes_by_value:
self.target.set_notes(tnotes)
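    # A worked example of the merge above, with invented data: if the source holds
    # [{"note": "old", "date": "2019-01-01"}] and the form produced a target with
    # [{"note": "old"}, {"note": ""}], the original date is restored on "old", the
    # blank note is dropped, and any source note missing from the target is copied
    # forward unless allow_delete is True (in which case the deletion stands).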
def _populate_editor_field(self, editor_group_name):
"""Set the editor field choices from a given editor group name"""
if editor_group_name is None:
self.form.editor.choices = [("", "")]
else:
eg = models.EditorGroup.pull_by_key("name", editor_group_name)
if eg is not None:
editors = [eg.editor]
editors += eg.associates
editors = list(set(editors))
self.form.editor.choices = [("", "Choose an editor")] + [(editor, editor) for editor in editors]
else:
self.form.editor.choices = [("", "")]
def _validate_editor_field(self):
""" Validate the choice of editor, which could be out of sync with the group in exceptional circumstances """
editor = self.form.editor.data
if editor is not None and editor != "":
editor_group_name = self.form.editor_group.data
if editor_group_name is not None and editor_group_name != "":
eg = models.EditorGroup.pull_by_key("name", editor_group_name)
if eg is not None:
all_eds = eg.associates + [eg.editor]
if editor in all_eds:
return # success - an editor group was found and our editor was in it
raise FormContextException("Editor '{0}' not found in editor group '{1}'".format(editor, editor_group_name))
else:
raise FormContextException("An editor has been assigned without an editor group")
def _carry_continuations(self):
if self.source is None:
raise FormContextException("Cannot carry data from a non-existent source")
try:
sbj = self.source.bibjson()
tbj = self.target.bibjson()
if sbj.replaces:
tbj.replaces = sbj.replaces
if sbj.is_replaced_by:
tbj.is_replaced_by = sbj.is_replaced_by
if sbj.discontinued_date:
tbj.discontinued_date = sbj.discontinued_date
except AttributeError:
# this means that the source doesn't know about current_applications, which is fine
pass
class ApplicationContext(PrivateContext):
ERROR_MSG_TEMPLATE = \
"""Problem while creating account while turning suggestion into journal.
There should be a {missing_thing} on user {username} but there isn't.
Created the user but not sending the email.
""".replace("\n", ' ')
def _carry_fixed_aspects(self):
super(ApplicationContext, self)._carry_fixed_aspects()
if self.source.suggested_on is not None:
self.target.suggested_on = self.source.suggested_on
def _create_account_on_suggestion_approval(self, suggestion, journal):
o = models.Account.pull(suggestion.owner)
if o:
self.add_alert('Account {username} already exists, so simply associating the journal with it.'.format(username=o.id))
o.add_journal(journal.id)
if not o.has_role('publisher'):
o.add_role('publisher')
o.save()
return o
suggestion_contact = util.listpop(suggestion.contacts())
if not suggestion_contact.get('email'):
msg = self.ERROR_MSG_TEMPLATE.format(username=o.id, missing_thing='journal contact email in the application')
app.logger.error(msg)
self.add_alert(msg)
return o
send_info_to = suggestion_contact.get('email')
o = models.Account.make_account(
suggestion.owner,
name=suggestion_contact.get('name'),
email=send_info_to,
roles=['publisher'],
associated_journal_ids=[journal.id]
)
o.save()
if not o.reset_token:
msg = self.ERROR_MSG_TEMPLATE.format(username=o.id, missing_thing='reset token')
app.logger.error(msg)
self.add_alert(msg)
return o
url_root = request.url_root
if url_root.endswith("/"):
url_root = url_root[:-1]
reset_url = url_root + url_for('account.reset', reset_token=o.reset_token)
forgot_pw_url = url_root + url_for('account.forgot')
password_create_timeout_seconds = int(app.config.get("PASSWORD_CREATE_TIMEOUT", app.config.get('PASSWORD_RESET_TIMEOUT', 86400) * 14))
password_create_timeout_days = password_create_timeout_seconds / (60*60*24)
to = [send_info_to]
fro = app.config.get('SYSTEM_EMAIL_FROM', '[email protected]')
subject = app.config.get("SERVICE_NAME","") + " - account created"
try:
if app.config.get("ENABLE_PUBLISHER_EMAIL", False):
app_email.send_mail(to=to,
fro=fro,
subject=subject,
template_name="email/account_created.txt",
reset_url=reset_url,
username=o.id,
timeout_days=password_create_timeout_days,
forgot_pw_url=forgot_pw_url
)
self.add_alert('Sent email to ' + send_info_to + ' to tell them about the new account.')
else:
self.add_alert('Did not email to ' + send_info_to + ' to tell them about the new account, as publisher emailing is disabled.')
if app.config.get('DEBUG', False):
self.add_alert('Debug mode - url for create is <a href="{url}">{url}</a>'.format(url=reset_url))
except app_email.EmailException:
magic = str(uuid.uuid1())
self.add_alert('Hm, sending the account creation email didn\'t work. Please quote this magic number when reporting the issue: ' + magic + ' . Thank you!')
if app.config.get('DEBUG', False):
self.add_alert('Debug mode - url for create is <a href="{url}">{url}</a>'.format(url=reset_url))
app.logger.exception('Error sending account creation email - ' + magic)
self.add_alert('Account {username} created'.format(username=o.id))
return o
def _send_application_approved_email(self, journal_title, publisher_name, email, journal_contact, update_request=False):
"""Email the publisher when an application is accepted (it's here because it's too troublesome to factor out)"""
url_root = request.url_root
if url_root.endswith("/"):
url_root = url_root[:-1]
to = [email]
fro = app.config.get('SYSTEM_EMAIL_FROM', '[email protected]')
subject = app.config.get("SERVICE_NAME", "") + " - journal accepted"
publisher_name = publisher_name if publisher_name is not None else "Journal Owner"
try:
if app.config.get("ENABLE_PUBLISHER_EMAIL", False):
msg = Messages.SENT_ACCEPTED_APPLICATION_EMAIL.format(email=email)
template = "email/publisher_application_accepted.txt"
if update_request:
msg = Messages.SENT_ACCEPTED_UPDATE_REQUEST_EMAIL.format(email=email)
template = "email/publisher_update_request_accepted.txt"
jn = journal_title #.encode('utf-8', 'replace')
app_email.send_mail(to=to,
fro=fro,
subject=subject,
template_name=template,
journal_title=jn,
publisher_name=publisher_name,
journal_contact=journal_contact,
url_root=url_root
)
self.add_alert(msg)
else:
msg = Messages.NOT_SENT_ACCEPTED_APPLICATION_EMAIL.format(email=email)
if update_request:
msg = Messages.NOT_SENT_ACCEPTED_UPDATE_REQUEST_EMAIL.format(email=email)
self.add_alert(msg)
except Exception as e:
magic = str(uuid.uuid1())
self.add_alert('Hm, sending the journal acceptance information email didn\'t work. Please quote this magic number when reporting the issue: ' + magic + ' . Thank you!')
app.logger.exception('Error sending application approved email failed - ' + magic)
def _send_contact_approved_email(self, journal_title, journal_contact, email, publisher_name, update_request=False):
"""Email the journal contact when an application is accepted """
url_root = request.url_root
if url_root.endswith("/"):
url_root = url_root[:-1]
to = [email]
fro = app.config.get('SYSTEM_EMAIL_FROM', '[email protected]')
subject = app.config.get("SERVICE_NAME", "") + " - journal accepted"
try:
if app.config.get("ENABLE_PUBLISHER_EMAIL", False):
template = "email/contact_application_accepted.txt"
alert = Messages.SENT_JOURNAL_CONTACT_ACCEPTED_APPLICATION_EMAIL.format(email=to[0])
if update_request: # NOTE: right now, the way this is called, update request is always False. Should deprecate and remove this code.
template = "email/contact_update_request_accepted.txt"
alert = Messages.SENT_JOURNAL_CONTACT_ACCEPTED_UPDATE_REQUEST_EMAIL.format(email=to[0])
jn = journal_title #.encode('utf-8', 'replace')
app_email.send_mail(to=to,
fro=fro,
subject=subject,
template_name=template,
journal_title=jn,
journal_contact=journal_contact,
publisher=publisher_name,
url_root=url_root
)
self.add_alert(alert)
else:
alert = Messages.NOT_SENT_JOURNAL_CONTACT_ACCEPTED_APPLICATION_EMAIL.format(email=to[0])
self.add_alert(alert)
except Exception as e:
magic = str(uuid.uuid1())
self.add_alert('Hm, sending the journal contact acceptance information email didn\'t work. Please quote this magic number when reporting the issue: ' + magic + ' . Thank you!')
app.logger.exception('Error sending accepted email to journal contact - ' + magic)
def render_template(self, **kwargs):
diff = None
cj = None
if self.source is not None:
current_journal = self.source.current_journal
if current_journal is not None:
cj = models.Journal.pull(current_journal)
if cj is not None:
jform = JournalFormXWalk.obj2form(cj)
if "notes" in jform:
del jform["notes"]
aform = ApplicationFormXWalk.obj2form(self.source)
if "notes" in aform:
del aform["notes"]
diff = self._form_diff(jform, aform)
return super(ApplicationContext, self).render_template(
form_diff=diff,
current_journal=cj,
js_functions=JAVASCRIPT_FUNCTIONS,
**kwargs)
def _form_diff(self, journal_form, application_form):
diff = []
for k, v in application_form.items():
try:
q = self.form[k].label
except KeyError:
continue
q_num = self.renderer.question_number(k)
if q_num is None or q_num == "":
q_num = 0
else:
q_num = int(q_num)
if k in journal_form and journal_form[k] != v:
diff.append((k, q_num, q.text, journal_form[k], v))
elif k not in journal_form and q_num != 0:
diff.append((k, q_num, q.text, Messages.DIFF_TABLE_NOT_PRESENT, v))
diff = sorted(diff, key=lambda x: x[1])
return diff
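    # Each diff entry above is a 5-tuple, e.g. (with invented values)
    # ("publisher", 12, "Publisher", "Old Press Ltd", "New Press Ltd"), i.e.
    # (field name, question number, question text, journal value, application
    # value), sorted by question number for display alongside the current journal.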
class ApplicationFormFactory(object):
@classmethod
def get_form_context(cls, role=None, source=None, form_data=None):
if role is None:
# return PublicApplication(source=source, form_data=form_data)
return None
elif role == "admin":
return ManEdApplicationReview(source=source, form_data=form_data)
elif role == "editor":
return EditorApplicationReview(source=source, form_data=form_data)
elif role == "associate_editor":
return AssEdApplicationReview(source=source, form_data=form_data)
elif role == "publisher":
return PublisherUpdateRequest(source=source, form_data=form_data)
elif role == "update_request_readonly":
return PublisherUpdateRequestReadOnly(source=source, form_data=form_data)
class JournalFormFactory(object):
@classmethod
def get_form_context(cls, role, source=None, form_data=None):
if role == "admin":
return ManEdJournalReview(source=source, form_data=form_data)
elif role == "editor":
return EditorJournalReview(source=source, form_data=form_data)
elif role == "associate_editor":
return AssEdJournalReview(source=source, form_data=form_data)
elif role == "readonly":
return ReadOnlyJournal(source=source, form_data=form_data)
elif role == "bulk_edit":
return ManEdBulkEdit(source=source, form_data=form_data)
class ManEdApplicationReview(ApplicationContext):
"""
Managing Editor's Application Review form. Should be used in a context where the form warrants full
admin privileges. It will permit conversion of applications to journals, and assignment of owner account
as well as assignment to editorial group.
"""
def make_renderer(self):
self.renderer = render.ManEdApplicationReviewRenderer()
def set_template(self):
self.template = "formcontext/maned_application_review.html"
def blank_form(self):
self.form = forms.ManEdApplicationReviewForm()
self._set_choices()
def data2form(self):
self.form = forms.ManEdApplicationReviewForm(formdata=self.form_data)
self._set_choices()
self._expand_descriptions(FIELDS_WITH_DESCRIPTION)
self._expand_url_descriptions(URL_FIELDS)
def source2form(self):
self.form = forms.ManEdApplicationReviewForm(data=ApplicationFormXWalk.obj2form(self.source))
self._set_choices()
self._expand_descriptions(FIELDS_WITH_DESCRIPTION)
self._expand_url_descriptions(URL_FIELDS)
if self.source.application_status == constants.APPLICATION_STATUS_ACCEPTED:
self.info = ACC_MSG
def pre_validate(self):
# Editor field is populated in JS after page load - check the selected editor is actually in that editor group
self._validate_editor_field()
def form2target(self):
self.target = ApplicationFormXWalk.form2obj(self.form)
def patch_target(self):
if self.source is None:
raise FormContextException("You cannot patch a target from a non-existent source")
self._carry_fixed_aspects()
self._merge_notes_forward(allow_delete=True)
# NOTE: this means you can't unset an owner once it has been set. But you can change it.
if (self.target.owner is None or self.target.owner == "") and (self.source.owner is not None):
self.target.set_owner(self.source.owner)
def finalise(self):
# FIXME: this first one, we ought to deal with outside the form context, but for the time being this
# can be carried over from the old implementation
if self.source is None:
raise FormContextException("You cannot edit a not-existent application")
if self.source.application_status == constants.APPLICATION_STATUS_ACCEPTED:
raise FormContextException("You cannot edit applications which have been accepted into DOAJ.")
# if we are allowed to finalise, kick this up to the superclass
super(ManEdApplicationReview, self).finalise()
# FIXME: may want to factor this out of the suggestionformxwalk
# If we have changed the editors assigned to this application, let them know.
is_editor_group_changed = ApplicationFormXWalk.is_new_editor_group(self.form, self.source)
is_associate_editor_changed = ApplicationFormXWalk.is_new_editor(self.form, self.source)
# record the event in the provenance tracker
models.Provenance.make(current_user, "edit", self.target)
# delayed import of the DOAJ BLL
from portality.bll.doaj import DOAJ
applicationService = DOAJ.applicationService()
# if this application is being accepted, then do the conversion to a journal
if self.target.application_status == constants.APPLICATION_STATUS_ACCEPTED:
# remember whether this was an update request or not
is_update_request = self.target.current_journal is not None
j = applicationService.accept_application(self.target, current_user._get_current_object())
# record the url the journal is available at in the admin area and alert the user
jurl = url_for("doaj.toc", identifier=j.toc_id)
if self.source.current_journal is not None:
self.add_alert('<a href="{url}" target="_blank">Existing journal updated</a>.'.format(url=jurl))
else:
self.add_alert('<a href="{url}" target="_blank">New journal created</a>.'.format(url=jurl))
# create the user account for the owner and send the notification email
try:
owner = self._create_account_on_suggestion_approval(self.target, j)
names = []
for contact in j.contacts():
names.append(contact.get("name"))
journal_contacts = ", ".join(names)
# for all acceptances, send an email to the owner of the journal
self._send_application_approved_email(j.bibjson().title, owner.name, owner.email, journal_contacts, self.source.current_journal is not None)
# in the case of a new application, also send emails to the journal contacts
if not is_update_request:
for contact in j.contacts():
self._send_contact_approved_email(j.bibjson().title, contact.get("name"), contact.get("email"), owner.name, self.source.current_journal is not None)
except app_email.EmailException:
self.add_alert("Problem sending email to suggester - probably address is invalid")
app.logger.exception("Acceptance email to owner failed.")
# if the application was instead rejected, carry out the rejection actions
elif self.source.application_status != constants.APPLICATION_STATUS_REJECTED and self.target.application_status == constants.APPLICATION_STATUS_REJECTED:
# remember whether this was an update request or not
is_update_request = self.target.current_journal is not None
# reject the application
applicationService.reject_application(self.target, current_user._get_current_object())
# if this was an update request, send an email to the owner
if is_update_request:
sent = False
send_report = []
try:
send_report = emails.send_publisher_reject_email(self.target, update_request=is_update_request, send_to_owner=True, send_to_suggester=False)
sent = True
except app_email.EmailException as e:
pass
if sent:
self.add_alert(Messages.SENT_REJECTED_UPDATE_REQUEST_EMAIL.format(user=self.target.owner, email=send_report[0].get("email"), name=send_report[0].get("name")))
else:
self.add_alert(Messages.NOT_SENT_REJECTED_UPDATE_REQUEST_EMAIL.format(user=self.target.owner))
# the application was neither accepted nor rejected, so just save it
else:
self.target.set_last_manual_update()
self.target.save()
# if revisions were requested, email the publisher
if self.source.application_status != constants.APPLICATION_STATUS_REVISIONS_REQUIRED and self.target.application_status == constants.APPLICATION_STATUS_REVISIONS_REQUIRED:
try:
emails.send_publisher_update_request_revisions_required(self.target)
self.add_alert(Messages.SENT_REJECTED_UPDATE_REQUEST_REVISIONS_REQUIRED_EMAIL.format(user=self.target.owner))
except app_email.EmailException as e:
self.add_alert(Messages.NOT_SENT_REJECTED_UPDATE_REQUEST_REVISIONS_REQUIRED_EMAIL.format(user=self.target.owner))
# if we need to email the editor and/or the associate, handle those here
if is_editor_group_changed:
try:
emails.send_editor_group_email(self.target)
except app_email.EmailException:
self.add_alert("Problem sending email to editor - probably address is invalid")
app.logger.exception("Email to associate failed.")
if is_associate_editor_changed:
try:
emails.send_assoc_editor_email(self.target)
except app_email.EmailException:
self.add_alert("Problem sending email to associate editor - probably address is invalid")
app.logger.exception("Email to associate failed.")
# If this is the first time this application has been assigned to an editor, notify the publisher.
old_ed = self.source.editor
if (old_ed is None or old_ed == '') and self.target.editor is not None:
is_update_request = self.target.current_journal is not None
if is_update_request:
alerts = emails.send_publisher_update_request_editor_assigned_email(self.target)
else:
alerts = emails.send_publisher_application_editor_assigned_email(self.target)
for alert in alerts:
self.add_alert(alert)
# Inform editor and associate editor if this application was 'ready' or 'completed', but has been changed to 'in progress'
if (self.source.application_status == constants.APPLICATION_STATUS_READY or self.source.application_status == constants.APPLICATION_STATUS_COMPLETED) and self.target.application_status == constants.APPLICATION_STATUS_IN_PROGRESS:
# First, the editor
try:
emails.send_editor_inprogress_email(self.target)
self.add_alert('An email has been sent to notify the editor of the change in status.')
except AttributeError:
magic = str(uuid.uuid1())
self.add_alert('Couldn\'t find a recipient for this email - check editor groups are correct. Please quote this magic number when reporting the issue: ' + magic + ' . Thank you!')
app.logger.exception('No editor recipient for failed review email - ' + magic)
except app_email.EmailException:
magic = str(uuid.uuid1())
self.add_alert('Sending the failed review email to editor didn\'t work. Please quote this magic number when reporting the issue: ' + magic + ' . Thank you!')
app.logger.exception('Error sending review failed email to editor - ' + magic)
# Then the associate
try:
emails.send_assoc_editor_inprogress_email(self.target)
self.add_alert('An email has been sent to notify the assigned associate editor of the change in status.')
except AttributeError:
magic = str(uuid.uuid1())
self.add_alert('Couldn\'t find a recipient for this email - check an associate editor is assigned. Please quote this magic number when reporting the issue: ' + magic + ' . Thank you!')
app.logger.exception('No associate editor recipient for failed review email - ' + magic)
except app_email.EmailException:
magic = str(uuid.uuid1())
self.add_alert('Sending the failed review email to associate editor didn\'t work. Please quote this magic number when reporting the issue: ' + magic + ' . Thank you!')
app.logger.exception('Error sending review failed email to associate editor - ' + magic)
# email other managing editors if this was newly set to 'ready'
if self.source.application_status != constants.APPLICATION_STATUS_READY and self.target.application_status == constants.APPLICATION_STATUS_READY:
# this template requires who made the change, say it was an Admin
ed_id = 'an administrator'
try:
emails.send_admin_ready_email(self.target, editor_id=ed_id)
self.add_alert('A confirmation email has been sent to the Managing Editors.')
except app_email.EmailException:
magic = str(uuid.uuid1())
self.add_alert('Hm, sending the ready status to managing editors didn\'t work. Please quote this magic number when reporting the issue: ' + magic + ' . Thank you!')
app.logger.exception('Error sending ready status email to managing editors - ' + magic)
def render_template(self, **kwargs):
if self.source is None:
raise FormContextException("You cannot edit a not-existent application")
return super(ManEdApplicationReview, self).render_template(
lcc_jstree=json.dumps(lcc_jstree),
subjectstr=self._subjects2str(self.source.bibjson().subjects()),
**kwargs)
def _set_choices(self):
self.form.application_status.choices = choices.Choices.choices_for_status('admin', self.source.application_status)
# The first time the form is rendered, it needs to populate the editor drop-down from saved group
egn = self.form.editor_group.data
self._populate_editor_field(egn)
class EditorApplicationReview(ApplicationContext):
"""
Editors Application Review form. This should be used in a context where an editor who owns an editorial group
is accessing an application. This prevents re-assignment of Editorial group, but permits assignment of associate
editor. It also permits change in application state, except to "accepted"; therefore this form context cannot
be used to create journals from applications. Deleting notes is not allowed, but adding is.
"""
def make_renderer(self):
self.renderer = render.EditorApplicationReviewRenderer()
self.renderer.set_disabled_fields(["editor_group"])
def set_template(self):
self.template = "formcontext/editor_application_review.html"
def blank_form(self):
self.form = forms.EditorApplicationReviewForm()
self._set_choices()
def data2form(self):
self.form = forms.EditorApplicationReviewForm(formdata=self.form_data)
self._set_choices()
self._expand_descriptions(FIELDS_WITH_DESCRIPTION)
self._expand_url_descriptions(URL_FIELDS)
def source2form(self):
self.form = forms.EditorApplicationReviewForm(data=ApplicationFormXWalk.obj2form(self.source))
self._set_choices()
self._expand_descriptions(FIELDS_WITH_DESCRIPTION)
self._expand_url_descriptions(URL_FIELDS)
editor_choices = list(sum(choices.Choices.application_status('editor'), ())) # flattens the list of tuples
if self.source.application_status not in editor_choices:
self.info = SCOPE_MSG.format(self.source.application_status)
if self.source.application_status == constants.APPLICATION_STATUS_ACCEPTED:
self.info = ACC_MSG # This is after so we can supersede the last message
def pre_validate(self):
self.form.editor_group.data = self.source.editor_group
if "application_status" in self.renderer.disabled_fields:
self.form.application_status.data = constants.APPLICATION_STATUS_ACCEPTED
def form2target(self):
self.target = ApplicationFormXWalk.form2obj(self.form)
def patch_target(self):
if self.source is None:
raise FormContextException("You cannot patch a target from a non-existent source")
self._carry_fixed_aspects()
self._merge_notes_forward()
self.target.set_owner(self.source.owner)
self.target.set_editor_group(self.source.editor_group)
self._carry_continuations()
def finalise(self):
# FIXME: this first one, we ought to deal with outside the form context, but for the time being this
# can be carried over from the old implementation
if self.source is None:
raise FormContextException("You cannot edit a not-existent application")
if self.source.application_status == constants.APPLICATION_STATUS_ACCEPTED:
raise FormContextException("You cannot edit applications which have been accepted into DOAJ.")
# if we are allowed to finalise, kick this up to the superclass
super(EditorApplicationReview, self).finalise()
# Check the status change is valid
choices.Choices.validate_status_change('editor', self.source.application_status, self.target.application_status)
# FIXME: may want to factor this out of the suggestionformxwalk
new_associate_assigned = ApplicationFormXWalk.is_new_editor(self.form, self.source)
# Save the target
self.target.set_last_manual_update()
self.target.save()
# record the event in the provenance tracker
models.Provenance.make(current_user, "edit", self.target)
# if we need to email the associate because they have just been assigned, handle that here.
if new_associate_assigned:
try:
emails.send_assoc_editor_email(self.target)
except app_email.EmailException:
self.add_alert("Problem sending email to associate editor - probably address is invalid")
app.logger.exception('Error sending associate assigned email')
# If this is the first time this application has been assigned to an editor, notify the publisher.
old_ed = self.source.editor
if (old_ed is None or old_ed == '') and self.target.editor is not None:
is_update_request = self.target.current_journal is not None
if is_update_request:
alerts = emails.send_publisher_update_request_editor_assigned_email(self.target)
else:
alerts = emails.send_publisher_application_editor_assigned_email(self.target)
for alert in alerts:
self.add_alert(alert)
# Email the assigned associate if the application was reverted from 'completed' to 'in progress' (failed review)
if self.source.application_status == constants.APPLICATION_STATUS_COMPLETED and self.target.application_status == constants.APPLICATION_STATUS_IN_PROGRESS:
try:
emails.send_assoc_editor_inprogress_email(self.target)
self.add_alert('An email has been sent to notify the assigned associate editor of the change in status.')
except AttributeError as e:
magic = str(uuid.uuid1())
self.add_alert('Couldn\'t find a recipient for this email - check an associate editor is assigned. Please quote this magic number when reporting the issue: ' + magic + ' . Thank you!')
app.logger.exception('No associate editor recipient for failed review email - ' + magic)
except app_email.EmailException:
magic = str(uuid.uuid1())
self.add_alert('Sending the failed review email to associate editor didn\'t work. Please quote this magic number when reporting the issue: ' + magic + ' . Thank you!')
app.logger.exception('Error sending failed review email to associate editor - ' + magic)
# email managing editors if the application was newly set to 'ready'
if self.source.application_status != constants.APPLICATION_STATUS_READY and self.target.application_status == constants.APPLICATION_STATUS_READY:
# Tell the ManEds who has made the status change - the editor in charge of the group
editor_group_name = self.target.editor_group
editor_group_id = models.EditorGroup.group_exists_by_name(name=editor_group_name)
editor_group = models.EditorGroup.pull(editor_group_id)
editor_acc = editor_group.get_editor_account()
# record the event in the provenance tracker
models.Provenance.make(current_user, "status:ready", self.target)
editor_id = editor_acc.id
try:
emails.send_admin_ready_email(self.target, editor_id=editor_id)
self.add_alert('A confirmation email has been sent to the Managing Editors.')
except app_email.EmailException:
magic = str(uuid.uuid1())
self.add_alert('Hm, sending the ready status to managing editors didn\'t work. Please quote this magic number when reporting the issue: ' + magic + ' . Thank you!')
app.logger.exception('Error sending ready status email to managing editors - ' + magic)
def render_template(self, **kwargs):
if self.source is None:
raise FormContextException("You cannot edit a not-existent application")
return super(EditorApplicationReview, self).render_template(
lcc_jstree=json.dumps(lcc_jstree),
subjectstr=self._subjects2str(self.source.bibjson().subjects()),
**kwargs)
def _set_choices(self):
if self.source is None:
raise FormContextException("You cannot set choices for a non-existent source")
if self.form.application_status.data == constants.APPLICATION_STATUS_ACCEPTED:
self.form.application_status.choices = choices.Choices.application_status("accepted")
self.renderer.set_disabled_fields(self.renderer.disabled_fields + ["application_status"])
else:
try:
# Assign the choices to the form
self.form.application_status.choices = choices.Choices.choices_for_status('editor', self.source.application_status)
except ValueError:
# If the current status isn't in the editor's status list, it must be out of bounds. Show it greyed out.
self.form.application_status.choices = choices.Choices.application_status("admin")
self.renderer.set_disabled_fields(self.renderer.disabled_fields + ["application_status"])
# get the editor group from the source because it isn't in the form
egn = self.source.editor_group
self._populate_editor_field(egn)
class AssEdApplicationReview(ApplicationContext):
"""
Associate Editors Application Review form. This is to be used in a context where an associate editor (fewest rights)
needs to access an application for review. This editor cannot change the editorial group or the assigned editor.
They also cannot change the owner of the application. They cannot set an application to "Accepted" so this form can't
be used to create a journal from an application. They cannot delete, only add notes.
"""
def make_renderer(self):
self.renderer = render.AssEdApplicationReviewRenderer()
def set_template(self):
self.template = "formcontext/assed_application_review.html"
def blank_form(self):
self.form = forms.AssEdApplicationReviewForm()
self._set_choices()
def data2form(self):
self.form = forms.AssEdApplicationReviewForm(formdata=self.form_data)
self._set_choices()
self._expand_descriptions(FIELDS_WITH_DESCRIPTION)
self._expand_url_descriptions(URL_FIELDS)
def source2form(self):
self.form = forms.AssEdApplicationReviewForm(data=ApplicationFormXWalk.obj2form(self.source))
self._set_choices()
self._expand_descriptions(FIELDS_WITH_DESCRIPTION)
self._expand_url_descriptions(URL_FIELDS)
associate_editor_choices = list(sum(choices.Choices.application_status(), ())) # flattens the list of tuples
if self.source.application_status not in associate_editor_choices:
self.info = SCOPE_MSG.format(self.source.application_status)
if self.source.application_status == constants.APPLICATION_STATUS_ACCEPTED:
self.info = ACC_MSG # This is after so we can supersede the last message
def pre_validate(self):
if "application_status" in self.renderer.disabled_fields:
self.form.application_status.data = constants.APPLICATION_STATUS_ACCEPTED
def form2target(self):
self.target = ApplicationFormXWalk.form2obj(self.form)
def patch_target(self):
if self.source is None:
raise FormContextException("You cannot patch a target from a non-existent source")
self._carry_fixed_aspects()
self._merge_notes_forward()
self.target.set_owner(self.source.owner)
self.target.set_editor_group(self.source.editor_group)
self.target.set_editor(self.source.editor)
self.target.set_seal(self.source.has_seal())
self._carry_continuations()
def finalise(self):
# FIXME: this first one, we ought to deal with outside the form context, but for the time being this
# can be carried over from the old implementation
if self.source is None:
raise FormContextException("You cannot edit a not-existent application")
if self.source.application_status == constants.APPLICATION_STATUS_ACCEPTED:
raise FormContextException("You cannot edit applications which have been accepted into DOAJ.")
# if we are allowed to finalise, kick this up to the superclass
super(AssEdApplicationReview, self).finalise()
# Check the status change is valid
choices.Choices.validate_status_change('associate', self.source.application_status, self.target.application_status)
# Save the target
self.target.set_last_manual_update()
self.target.save()
# record the event in the provenance tracker
models.Provenance.make(current_user, "edit", self.target)
# inform publisher if this was set to 'in progress' from 'pending'
if self.source.application_status == constants.APPLICATION_STATUS_PENDING and self.target.application_status == constants.APPLICATION_STATUS_IN_PROGRESS:
if app.config.get("ENABLE_PUBLISHER_EMAIL", False):
is_update_request = self.target.current_journal is not None
if is_update_request:
alerts = emails.send_publisher_update_request_inprogress_email(self.target)
else:
alerts = emails.send_publisher_application_inprogress_email(self.target)
for alert in alerts:
self.add_alert(alert)
else:
self.add_alert(Messages.IN_PROGRESS_NOT_SENT_EMAIL_DISABLED)
# inform editor if this was newly set to 'completed'
if self.source.application_status != constants.APPLICATION_STATUS_COMPLETED and self.target.application_status == constants.APPLICATION_STATUS_COMPLETED:
# record the event in the provenance tracker
models.Provenance.make(current_user, "status:completed", self.target)
try:
emails.send_editor_completed_email(self.target)
self.add_alert('A confirmation email has been sent to notify the editor of the change in status.')
except app_email.EmailException:
magic = str(uuid.uuid1())
self.add_alert('Hm, sending the ready status to editor email didn\'t work. Please quote this magic number when reporting the issue: ' + magic + ' . Thank you!')
app.logger.exception('Error sending completed status email to editor - ' + magic)
def render_template(self, **kwargs):
if self.source is None:
raise FormContextException("You cannot edit a not-existent application")
return super(AssEdApplicationReview, self).render_template(
lcc_jstree=json.dumps(lcc_jstree),
subjectstr=self._subjects2str(self.source.bibjson().subjects()),
**kwargs)
def _set_choices(self):
if self.form.application_status.data == constants.APPLICATION_STATUS_ACCEPTED:
self.form.application_status.choices = choices.Choices.application_status("accepted")
self.renderer.set_disabled_fields(self.renderer.disabled_fields + ["application_status"])
else:
try:
# Assign the choices to the form
self.form.application_status.choices = choices.Choices.choices_for_status('associate_editor', self.source.application_status)
except ValueError:
# If the current status isn't in the associate editor's status list, it must be out of bounds. Show it greyed out.
self.form.application_status.choices = choices.Choices.application_status("admin")
self.renderer.set_disabled_fields(self.renderer.disabled_fields + ["application_status"])
class PublisherUpdateRequest(ApplicationContext):
def make_renderer(self):
self.renderer = render.PublisherUpdateRequestRenderer()
def set_template(self):
self.template = "formcontext/publisher_update_request.html"
def blank_form(self):
self.form = forms.PublisherUpdateRequestForm()
def data2form(self):
self.form = forms.PublisherUpdateRequestForm(formdata=self.form_data)
self._expand_descriptions(FIELDS_WITH_DESCRIPTION)
self._expand_url_descriptions(URL_FIELDS)
self._disable_fields()
def source2form(self):
self.form = forms.PublisherUpdateRequestForm(data=ApplicationFormXWalk.obj2form(self.source))
self._expand_descriptions(FIELDS_WITH_DESCRIPTION)
self._expand_url_descriptions(URL_FIELDS)
self._disable_fields()
def pre_validate(self):
if self.source is None:
raise FormContextException("You cannot validate a form from a non-existent source")
# carry forward the disabled fields
bj = self.source.bibjson()
contact = self.source.contact
self.form.title.data = bj.title
self.form.alternative_title.data = bj.alternative_title
pissn = bj.get_one_identifier(bj.P_ISSN)
if pissn == "": pissn = None
self.form.pissn.data = pissn
eissn = bj.get_one_identifier(bj.E_ISSN)
if eissn == "": eissn = None
self.form.eissn.data = eissn
if len(contact) == 0:
# this will cause a validation failure if the form does not provide them
return
# we copy across the contacts if they are necessary. The contact details are conditionally
# disabled, so they /may/ be set
if "contact_name" in self.renderer.disabled_fields:
self.form.contact_name.data = contact.get("name")
if "contact_email" in self.renderer.disabled_fields:
self.form.contact_email.data = contact.get("email")
if "confirm_contact_email" in self.renderer.disabled_fields:
self.form.confirm_contact_email.data = contact.get("email")
def form2target(self):
self.target = ApplicationFormXWalk.form2obj(self.form)
def patch_target(self):
if self.source is None:
raise FormContextException("You cannot patch a target from a non-existent source")
self._carry_subjects_and_seal()
self._carry_fixed_aspects()
self._merge_notes_forward()
self.target.set_owner(self.source.owner)
self.target.set_editor_group(self.source.editor_group)
self.target.set_editor(self.source.editor)
self._carry_continuations()
# set the suggester to the account owner
acc = models.Account.pull(self.target.owner)
if acc is not None:
self.target.set_suggester(acc.name, acc.email)
# we carry this over for completeness, although it will be overwritten in the finalise() method
self.target.set_application_status(self.source.application_status)
def finalise(self, save_target=True, email_alert=True):
# FIXME: this first one, we ought to deal with outside the form context, but for the time being this
# can be carried over from the old implementation
if self.source is None:
raise FormContextException("You cannot edit a not-existent application")
# if we are allowed to finalise, kick this up to the superclass
super(PublisherUpdateRequest, self).finalise()
# set the status to update_request (if not already)
self.target.set_application_status(constants.APPLICATION_STATUS_UPDATE_REQUEST)
# Save the target
self.target.set_last_manual_update()
if save_target:
saved = self.target.save()
if saved is None:
raise FormContextException("Save on application failed")
# obtain the related journal, and attach the current application id to it
journal_id = self.target.current_journal
from portality.bll.doaj import DOAJ
journalService = DOAJ.journalService()
if journal_id is not None:
journal, _ = journalService.journal(journal_id)
if journal is not None:
journal.set_current_application(self.target.id)
if save_target:
saved = journal.save()
if saved is None:
raise FormContextException("Save on journal failed")
else:
self.target.remove_current_journal()
# email the publisher to tell them we received their update request
if email_alert:
try:
self._send_received_email()
except app_email.EmailException as e:
self.add_alert("We were unable to send you an email confirmation - possible problem with your email address")
app.logger.exception('Error sending reapplication received email to publisher')
def render_template(self, **kwargs):
if self.source is None:
raise FormContextException("You cannot edit a not-existent application")
return super(PublisherUpdateRequest, self).render_template(**kwargs)
def _carry_subjects_and_seal(self):
# carry over the subjects
source_subjects = self.source.bibjson().subjects()
self.target.bibjson().set_subjects(source_subjects)
# carry over the seal
self.target.set_seal(self.source.has_seal())
def _disable_fields(self):
if self.source is None:
raise FormContextException("You cannot disable fields on a not-existent application")
disable = ["title", "alternative_title", "pissn", "eissn"] # these are always disabled
# contact fields are only disabled if they already have content in source
contact = self.source.contact
if contact.get("name"):
disable.append("contact_name")
if contact.get("email"):
disable += ["contact_email", "confirm_contact_email"]
self.renderer.set_disabled_fields(disable)
def _send_received_email(self):
acc = models.Account.pull(self.target.owner)
if acc is None:
self.add_alert("Unable to locate account for specified owner")
return
journal_name = self.target.bibjson().title #.encode('utf-8', 'replace')
to = [acc.email]
fro = app.config.get('SYSTEM_EMAIL_FROM', '[email protected]')
subject = app.config.get("SERVICE_NAME","") + " - update request received"
try:
if app.config.get("ENABLE_PUBLISHER_EMAIL", False):
app_email.send_mail(to=to,
fro=fro,
subject=subject,
template_name="email/publisher_update_request_received.txt",
journal_name=journal_name,
username=self.target.owner
)
self.add_alert('A confirmation email has been sent to ' + acc.email + '.')
except app_email.EmailException as e:
magic = str(uuid.uuid1())
self.add_alert('Hm, sending the "update request received" email didn\'t work. Please quote this magic number when reporting the issue: ' + magic + ' . Thank you!')
app.logger.error(magic + "\n" + repr(e))
raise e
class PublisherUpdateRequestReadOnly(PrivateContext):
"""
Read Only Application form for publishers. Nothing can be changed. Useful to show publishers what they
currently have submitted for review
"""
def make_renderer(self):
self.renderer = render.PublisherUpdateRequestReadOnlyRenderer()
def set_template(self):
self.template = "formcontext/readonly_application.html"
def blank_form(self):
self.form = forms.PublisherUpdateRequestForm()
self.renderer.disable_all_fields(False)
# self._set_choices()
def data2form(self):
self.form = forms.PublisherUpdateRequestForm(formdata=self.form_data)
# self._set_choices()
self._expand_descriptions(FIELDS_WITH_DESCRIPTION)
self._expand_url_descriptions(URL_FIELDS)
self.renderer.disable_all_fields(False)
def source2form(self):
self.form = forms.PublisherUpdateRequestForm(data=JournalFormXWalk.obj2form(self.source))
# self._set_choices()
self._expand_descriptions(FIELDS_WITH_DESCRIPTION)
self._expand_url_descriptions(URL_FIELDS)
self.renderer.set_disabled_fields(["digital_archiving_policy"])
# self.renderer.disable_all_fields(True)
def form2target(self):
pass # you can't edit objects using this form
def patch_target(self):
pass # you can't edit objects using this form
def finalise(self):
raise FormContextException("You cannot edit applications using the read-only form")
"""
def render_template(self, **kwargs):
if self.source is None:
raise FormContextException("You cannot view a not-existent journal")
return super(ReadOnlyJournal, self).render_template(
lcc_jstree=json.dumps(lcc_jstree),
subjectstr=self._subjects2str(self.source.bibjson().subjects()),
**kwargs
)
"""
"""
def _set_choices(self):
# no application status (this is a journal) or editorial info (it's not even in the form) to set
pass
"""
### Journal form contexts ###
class ManEdJournalReview(PrivateContext):
"""
Managing Editor's Journal Review form. Should be used in a context where the form warrants full
admin privileges. It will permit doing every action.
"""
def make_renderer(self):
self.renderer = render.ManEdJournalReviewRenderer()
def set_template(self):
self.template = "formcontext/maned_journal_review.html"
def render_template(self, **kwargs):
if self.source is None:
raise FormContextException("You cannot edit a not-existent journal")
return super(ManEdJournalReview, self).render_template(
lcc_jstree=json.dumps(lcc_jstree),
subjectstr=self._subjects2str(self.source.bibjson().subjects()),
**kwargs)
def blank_form(self):
self.form = forms.ManEdJournalReviewForm()  # journal review form, consistent with data2form/source2form
self._set_choices()
def data2form(self):
self.form = forms.ManEdJournalReviewForm(formdata=self.form_data)
self._set_choices()
self._expand_descriptions(FIELDS_WITH_DESCRIPTION)
self._expand_url_descriptions(URL_FIELDS)
def source2form(self):
self.form = forms.ManEdJournalReviewForm(data=JournalFormXWalk.obj2form(self.source))
self._set_choices()
self._expand_descriptions(FIELDS_WITH_DESCRIPTION)
self._expand_url_descriptions(URL_FIELDS)
def pre_validate(self):
# Editor field is populated in JS after page load - check the selected editor is actually in that editor group
self._validate_editor_field()
def form2target(self):
self.target = JournalFormXWalk.form2obj(self.form)
def patch_target(self):
if self.source is None:
raise FormContextException("You cannot patch a target from a non-existent source")
self._carry_fixed_aspects()
# NOTE: this means you can't unset an owner once it has been set. But you can change it.
if (self.target.owner is None or self.target.owner == "") and (self.source.owner is not None):
self.target.set_owner(self.source.owner)
self._merge_notes_forward(allow_delete=True)
def _set_choices(self):
# The first time this is rendered, it needs to populate the editor drop-down from saved group
egn = self.form.editor_group.data
self._populate_editor_field(egn)
def finalise(self):
# FIXME: this first one, we ought to deal with outside the form context, but for the time being this
# can be carried over from the old implementation
if self.source is None:
raise FormContextException("You cannot edit a not-existent journal")
# if we are allowed to finalise, kick this up to the superclass
super(ManEdJournalReview, self).finalise()
# FIXME: may want to factor this out of the suggestionformxwalk
# If we have changed the editors assigned to this application, let them know.
is_editor_group_changed = JournalFormXWalk.is_new_editor_group(self.form, self.source)
is_associate_editor_changed = JournalFormXWalk.is_new_editor(self.form, self.source)
# Save the target
self.target.set_last_manual_update()
self.target.save()
# if we need to email the editor and/or the associate, handle those here
if is_editor_group_changed:
try:
emails.send_editor_group_email(self.target)
except app_email.EmailException:
self.add_alert("Problem sending email to editor - probably address is invalid")
app.logger.exception('Error sending assignment email to editor.')
if is_associate_editor_changed:
try:
emails.send_assoc_editor_email(self.target)
except app_email.EmailException:
self.add_alert("Problem sending email to associate editor - probably address is invalid")
app.logger.exception('Error sending assignment email to associate.')
def validate(self):
# make use of the ability to disable validation, otherwise, let it run
if self.form is not None:
if self.form.make_all_fields_optional.data:
self.pre_validate()
return True
return super(ManEdJournalReview, self).validate()
class ManEdBulkEdit(PrivateContext):
"""
Managing Editor's Journal Bulk Edit form. Should be used in a context where the form warrants full
admin privileges. It exposes the subset of journal fields that can be applied across many records at once.
"""
def make_renderer(self):
self.renderer = render.ManEdJournalBulkEditRenderer()
def set_template(self):
self.template = "formcontext/maned_journal_bulk_edit.html"
def blank_form(self):
self.form = forms.ManEdBulkEditJournalForm()
def data2form(self):
self.form = forms.ManEdBulkEditJournalForm(formdata=self.form_data)
self._expand_descriptions(FIELDS_WITH_DESCRIPTION)
self._expand_url_descriptions(URL_FIELDS)
class EditorJournalReview(PrivateContext):
"""
Editors Journal Review form. This should be used in a context where an editor who owns an editorial group
is accessing a journal. This prevents re-assignment of Editorial group, but permits assignment of associate
editor.
"""
def make_renderer(self):
self.renderer = render.EditorJournalReviewRenderer()
self.renderer.set_disabled_fields(["editor_group"])
def set_template(self):
self.template = "formcontext/editor_journal_review.html"
def render_template(self, **kwargs):
if self.source is None:
raise FormContextException("You cannot edit a not-existent journal")
return super(EditorJournalReview, self).render_template(
lcc_jstree=json.dumps(lcc_jstree),
subjectstr=self._subjects2str(self.source.bibjson().subjects()),
**kwargs)
def blank_form(self):
self.form = forms.EditorJournalReviewForm()
self._set_choices()
def data2form(self):
self.form = forms.EditorJournalReviewForm(formdata=self.form_data)
self._set_choices()
self._expand_descriptions(FIELDS_WITH_DESCRIPTION)
self._expand_url_descriptions(URL_FIELDS)
def source2form(self):
self.form = forms.EditorJournalReviewForm(data=JournalFormXWalk.obj2form(self.source))
self._set_choices()
self._expand_descriptions(FIELDS_WITH_DESCRIPTION)
self._expand_url_descriptions(URL_FIELDS)
def form2target(self):
self.target = JournalFormXWalk.form2obj(self.form)
def patch_target(self):
if self.source is None:
raise FormContextException("You cannot patch a target from a non-existent source")
self._carry_fixed_aspects()
self.target.set_owner(self.source.owner)
self.target.set_editor_group(self.source.editor_group)
self._merge_notes_forward()
self._carry_continuations()
def pre_validate(self):
self.form.editor_group.data = self.source.editor_group
def _set_choices(self):
if self.source is None:
raise FormContextException("You cannot set choices for a non-existent source")
# get the editor group from the source because it isn't in the form
egn = self.source.editor_group
self._populate_editor_field(egn)
def finalise(self):
# FIXME: this first one, we ought to deal with outside the form context, but for the time being this
# can be carried over from the old implementation
if self.source is None:
raise FormContextException("You cannot edit a not-existent journal")
# if we are allowed to finalise, kick this up to the superclass
super(EditorJournalReview, self).finalise()
# FIXME: may want to factor this out of the suggestionformxwalk
email_associate = ApplicationFormXWalk.is_new_editor(self.form, self.source)
# Save the target
self.target.set_last_manual_update()
self.target.save()
# if we need to email the associate, handle that here.
if email_associate:
try:
emails.send_assoc_editor_email(self.target)
except app_email.EmailException:
self.add_alert("Problem sending email to associate editor - probably address is invalid")
app.logger.exception('Error sending assignment email to associate.')
class AssEdJournalReview(PrivateContext):
"""
Associate Editors Journal Review form. This is to be used in a context where an associate editor (fewest rights)
needs to access a journal for review. This editor cannot change the editorial group or the assigned editor.
They also cannot change the owner of the journal. They cannot delete, only add notes.
"""
def make_renderer(self):
self.renderer = render.AssEdJournalReviewRenderer()
def set_template(self):
self.template = "formcontext/assed_journal_review.html"
def blank_form(self):
self.form = forms.AssEdJournalReviewForm()
self._set_choices()
def data2form(self):
self.form = forms.AssEdJournalReviewForm(formdata=self.form_data)
self._set_choices()
self._expand_descriptions(FIELDS_WITH_DESCRIPTION)
self._expand_url_descriptions(URL_FIELDS)
def source2form(self):
self.form = forms.AssEdJournalReviewForm(data=JournalFormXWalk.obj2form(self.source))
self._set_choices()
self._expand_descriptions(FIELDS_WITH_DESCRIPTION)
self._expand_url_descriptions(URL_FIELDS)
def form2target(self):
self.target = JournalFormXWalk.form2obj(self.form)
def patch_target(self):
if self.source is None:
raise FormContextException("You cannot patch a target from a non-existent source")
self._carry_fixed_aspects()
self._merge_notes_forward()
self.target.set_owner(self.source.owner)
self.target.set_editor_group(self.source.editor_group)
self.target.set_editor(self.source.editor)
self._carry_continuations()
def finalise(self):
# FIXME: this first one, we ought to deal with outside the form context, but for the time being this
# can be carried over from the old implementation
if self.source is None:
raise FormContextException("You cannot edit a not-existent journal")
# if we are allowed to finalise, kick this up to the superclass
super(AssEdJournalReview, self).finalise()
# Save the target
self.target.set_last_manual_update()
self.target.save()
def render_template(self, **kwargs):
if self.source is None:
raise FormContextException("You cannot edit a not-existent journal")
return super(AssEdJournalReview, self).render_template(
lcc_jstree=json.dumps(lcc_jstree),
subjectstr=self._subjects2str(self.source.bibjson().subjects()),
**kwargs
)
def _set_choices(self):
# no application status (this is a journal) or editorial info (it's not even in the form) to set
pass
class ReadOnlyJournal(PrivateContext):
"""
Read Only Journal form. Nothing can be changed. Useful for reviewing a journal and an application
(or update request) side by side in 2 browser windows or tabs.
"""
def make_renderer(self):
self.renderer = render.ReadOnlyJournalRenderer()
def set_template(self):
self.template = "formcontext/readonly_journal.html"
def blank_form(self):
self.form = forms.ReadOnlyJournalForm()
self._set_choices()
def data2form(self):
self.form = forms.ReadOnlyJournalForm(formdata=self.form_data)
self._set_choices()
self._expand_descriptions(FIELDS_WITH_DESCRIPTION)
self._expand_url_descriptions(URL_FIELDS)
def source2form(self):
self.form = forms.ReadOnlyJournalForm(data=JournalFormXWalk.obj2form(self.source))
self._set_choices()
self._expand_descriptions(FIELDS_WITH_DESCRIPTION)
self._expand_url_descriptions(URL_FIELDS)
def form2target(self):
pass # you can't edit objects using this form
def patch_target(self):
pass # you can't edit objects using this form
def finalise(self):
raise FormContextException("You cannot edit journals using the read-only form")
def render_template(self, **kwargs):
if self.source is None:
raise FormContextException("You cannot view a not-existent journal")
return super(ReadOnlyJournal, self).render_template(
lcc_jstree=json.dumps(lcc_jstree),
subjectstr=self._subjects2str(self.source.bibjson().subjects()),
**kwargs
)
def _set_choices(self):
# no application status (this is a journal) or editorial info (it's not even in the form) to set
pass
class ArticleFormFactory(object):
@classmethod
def get_from_context(cls, role, source=None, form_data=None, user=None):
if role == "admin":
return AdminMetadataArticleForm(source=source, form_data=form_data, user=user)
if role == "publisher":
return PublisherMetadataForm(source=source, form_data=form_data, user=user)
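# Minimal usage sketch for the article metadata factory above (illustrative only; `request`
# and `current_user` are the usual Flask / Flask-Login objects and are assumptions here, not
# imports made by this module):
#
#     fc = ArticleFormFactory.get_from_context("publisher", form_data=request.form,
#                                              user=current_user)
#     if fc.validate():
#         fc.finalise()        # cross-walks the form to an Article and saves it via the BLL
#         return fc.render_template(validated=True)
#     return fc.render_template()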
class MetadataForm(FormContext):
def __init__(self, source, form_data, user):
self.user = user
self.author_error = False
super(MetadataForm, self).__init__(source=source, form_data=form_data)
def _set_choices(self):
try:
ic = choices.Choices.choices_for_article_issns(user=self.user, article_id=self.source.id)
self.form.pissn.choices = ic
self.form.eissn.choices = ic
except Exception as e:
print (str(e))
# not logged in, and current_user is broken
# probably you are loading the class from the command line
pass
def modify_authors_if_required(self, request_data):
more_authors = request_data.get("more_authors")
remove_author = None
for v in list(request.values.keys()):
if v.startswith("remove_authors"):
remove_author = v.split("-")[1]
# if the user wants more authors, add an extra entry
if more_authors:
return self.render_template(more_authors=True)
# if the user wants to remove an author, do the various back-flips required
if remove_author is not None:
return self.render_template(remove_authors=remove_author)
def _check_for_author_errors(self, **kwargs):
if "more_authors" in kwargs and kwargs["more_authors"] == True:
self.form.authors.append_entry()
if "remove_authors" in kwargs:
keep = []
while len(self.form.authors.entries) > 0:
entry = self.form.authors.pop_entry()
if entry.short_name == "authors-" + kwargs["remove_author"]:
break
else:
keep.append(entry)
while len(keep) > 0:
self.form.authors.append_entry(keep.pop().data)
def _validate_authors(self):
counted = 0
for entry in self.form.authors.entries:
name = entry.data.get("name")
if name is not None and name != "":
counted += 1
return counted >= 1
def blank_form(self):
self.form = portality.formcontext.forms.ArticleForm()
self._set_choices()
def source2form(self):
self.form = portality.formcontext.forms.ArticleForm()
ArticleFormXWalk.obj2form(self.form, article=self.source)
self._set_choices()
def data2form(self):
self.form = portality.formcontext.forms.ArticleForm(formdata=self.form_data)
self._set_choices()
def form2target(self):
self.target = ArticleFormXWalk.form2obj(form=self.form)
def validate(self):
if not self._validate_authors():
self.author_error = True
if not self.form.validate():
return False
return True
def finalise(self, duplicate_check = True):
self.form2target()
if not self.author_error:
article_service = DOAJ.articleService()
article_service.create_article(self.target, self.user, add_journal_info=True,
update_article_id=self.source.id if self.source is not None else None,
duplicate_check = duplicate_check)
article_url = url_for('doaj.article_page', identifier=self.target.id)
msg, how = Messages.ARTICLE_METADATA_SUBMITTED_FLASH
Messages.flash_with_url(msg.format(url=article_url), how)
else:
return
class PublisherMetadataForm(MetadataForm):
def __init__(self, source, form_data, user):
super(PublisherMetadataForm, self).__init__(source=source, form_data=form_data, user=user)
def set_template(self):
self.template = "publisher/metadata.html"
def render_template(self, **kwargs):
self._check_for_author_errors(**kwargs)
if "validated" in kwargs and kwargs["validated"] == True:
self.blank_form()
return render_template(self.template, form=self.form, form_context=self, author_error=self.author_error)
class AdminMetadataArticleForm(MetadataForm):
def __init__(self, source, form_data, user):
super(AdminMetadataArticleForm, self).__init__(source=source, form_data=form_data, user=user)
def set_template(self):
self.template = "admin/article_metadata.html"
def render_template(self, **kwargs):
self._check_for_author_errors(**kwargs)
return render_template(self.template, form=self.form, form_context=self, author_error=self.author_error)
| apache-2.0 | -3,858,372,472,115,063,000 | 43.59197 | 398 | 0.630581 | false |
mushtaqak/edx-platform | openedx/core/djangoapps/credit/api.py | 1 | 28489 | """
Contains the APIs for course credit requirements.
"""
import logging
import uuid
import datetime
import pytz
from django.db import transaction
from util.date_utils import to_timestamp
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from student.models import User
from .exceptions import (
InvalidCreditRequirements,
InvalidCreditCourse,
UserIsNotEligible,
CreditProviderNotConfigured,
RequestAlreadyCompleted,
CreditRequestNotFound,
InvalidCreditStatus,
)
from .models import (
CreditCourse,
CreditProvider,
CreditRequirement,
CreditRequirementStatus,
CreditRequest,
CreditEligibility,
)
from .signature import signature, get_shared_secret_key
log = logging.getLogger(__name__)
def set_credit_requirements(course_key, requirements):
"""
Add requirements to given course.
Args:
course_key(CourseKey): The identifier for course
requirements(list): List of requirements to be added
Example:
>>> set_credit_requirements(
"course-v1-edX-DemoX-1T2015",
[
{
"namespace": "reverification",
"name": "i4x://edX/DemoX/edx-reverification-block/assessment_uuid",
"display_name": "Assessment 1",
"criteria": {},
},
{
"namespace": "proctored_exam",
"name": "i4x://edX/DemoX/proctoring-block/final_uuid",
"display_name": "Final Exam",
"criteria": {},
},
{
"namespace": "grade",
"name": "grade",
"display_name": "Grade",
"criteria": {"min_grade": 0.8},
},
])
Raises:
InvalidCreditRequirements
Returns:
None
"""
invalid_requirements = _validate_requirements(requirements)
if invalid_requirements:
invalid_requirements = ", ".join(invalid_requirements)
raise InvalidCreditRequirements(invalid_requirements)
try:
credit_course = CreditCourse.get_credit_course(course_key=course_key)
except CreditCourse.DoesNotExist:
raise InvalidCreditCourse()
old_requirements = CreditRequirement.get_course_requirements(course_key=course_key)
requirements_to_disable = _get_requirements_to_disable(old_requirements, requirements)
if requirements_to_disable:
CreditRequirement.disable_credit_requirements(requirements_to_disable)
for requirement in requirements:
CreditRequirement.add_or_update_course_requirement(credit_course, requirement)
def get_credit_requirements(course_key, namespace=None):
"""
Get credit eligibility requirements of a given course and namespace.
Args:
course_key(CourseKey): The identifier for course
namespace(str): Namespace of requirements
Example:
>>> get_credit_requirements("course-v1-edX-DemoX-1T2015")
{
requirements =
[
{
"namespace": "reverification",
"name": "i4x://edX/DemoX/edx-reverification-block/assessment_uuid",
"display_name": "Assessment 1",
"criteria": {},
},
{
"namespace": "proctored_exam",
"name": "i4x://edX/DemoX/proctoring-block/final_uuid",
"display_name": "Final Exam",
"criteria": {},
},
{
"namespace": "grade",
"name": "grade",
"display_name": "Grade",
"criteria": {"min_grade": 0.8},
},
]
}
Returns:
Dict of requirements in the given namespace
"""
requirements = CreditRequirement.get_course_requirements(course_key, namespace)
return [
{
"namespace": requirement.namespace,
"name": requirement.name,
"display_name": requirement.display_name,
"criteria": requirement.criteria
}
for requirement in requirements
]
@transaction.commit_on_success
def create_credit_request(course_key, provider_id, username):
"""
Initiate a request for credit from a credit provider.
This will return the parameters that the user's browser will need to POST
to the credit provider. It does NOT calculate the signature.
Only users who are eligible for credit (have satisfied all credit requirements) are allowed to make requests.
A provider can be configured either with *integration enabled* or not.
If automatic integration is disabled, this method will simply return
a URL to the credit provider and method set to "GET", so the student can
visit the URL and request credit directly. No database record will be created
to track these requests.
If automatic integration *is* enabled, then this will also return the parameters
that the user's browser will need to POST to the credit provider.
These parameters will be digitally signed using a secret key shared with the credit provider.
A database record will be created to track the request with a 32-character UUID.
The returned dictionary can be used by the user's browser to send a POST request to the credit provider.
If a pending request already exists, this function should return a request description with the same UUID.
(Other parameters, such as the user's full name may be different than the original request).
If a completed request (either accepted or rejected) already exists, this function will
raise an exception. Users are not allowed to make additional requests once a request
has been completed.
Arguments:
course_key (CourseKey): The identifier for the course.
provider_id (str): The identifier of the credit provider.
user (User): The user initiating the request.
Returns: dict
Raises:
UserIsNotEligible: The user has not satisfied eligibility requirements for credit.
CreditProviderNotConfigured: The credit provider has not been configured for this course.
RequestAlreadyCompleted: The user has already submitted a request and received a response
from the credit provider.
Example Usage:
>>> create_credit_request(course.id, "hogwarts", "ron")
{
"url": "https://credit.example.com/request",
"method": "POST",
"parameters": {
"request_uuid": "557168d0f7664fe59097106c67c3f847",
"timestamp": 1434631630,
"course_org": "HogwartsX",
"course_num": "Potions101",
"course_run": "1T2015",
"final_grade": 0.95,
"user_username": "ron",
"user_email": "[email protected]",
"user_full_name": "Ron Weasley",
"user_mailing_address": "",
"user_country": "US",
"signature": "cRCNjkE4IzY+erIjRwOQCpRILgOvXx4q2qvx141BCqI="
}
}
"""
try:
user_eligibility = CreditEligibility.objects.select_related('course').get(
username=username,
course__course_key=course_key
)
credit_course = user_eligibility.course
credit_provider = credit_course.providers.get(provider_id=provider_id)
except (CreditEligibility.DoesNotExist, CreditProvider.DoesNotExist):
log.warning(u'User tried to initiate a request for credit, but the user is not eligible for credit')
raise UserIsNotEligible
# Check if we've enabled automatic integration with the credit
# provider. If not, we'll show the user a link to a URL
# where the user can request credit directly from the provider.
# Note that we do NOT track these requests in our database,
# since the state would always be "pending" (we never hear back).
if not credit_provider.enable_integration:
return {
"url": credit_provider.provider_url,
"method": "GET",
"parameters": {}
}
else:
# If automatic credit integration is enabled, then try
# to retrieve the shared signature *before* creating the request.
# That way, if there's a misconfiguration, we won't have requests
# in our system that we know weren't sent to the provider.
shared_secret_key = get_shared_secret_key(credit_provider.provider_id)
if shared_secret_key is None:
msg = u'Credit provider with ID "{provider_id}" does not have a secret key configured.'.format(
provider_id=credit_provider.provider_id
)
log.error(msg)
raise CreditProviderNotConfigured(msg)
# Initiate a new request if one has not already been created
credit_request, created = CreditRequest.objects.get_or_create(
course=credit_course,
provider=credit_provider,
username=username,
)
# Check whether we've already gotten a response for a request,
# If so, we're not allowed to issue any further requests.
# Skip checking the status if we know that we just created this record.
if not created and credit_request.status != "pending":
log.warning(
(
u'Cannot initiate credit request because the request with UUID "%s" '
u'exists with status "%s"'
), credit_request.uuid, credit_request.status
)
raise RequestAlreadyCompleted
if created:
credit_request.uuid = uuid.uuid4().hex
# Retrieve user account and profile info
user = User.objects.select_related('profile').get(username=username)
# Retrieve the final grade from the eligibility table
try:
final_grade = CreditRequirementStatus.objects.get(
username=username,
requirement__namespace="grade",
requirement__name="grade",
status="satisfied"
).reason["final_grade"]
except (CreditRequirementStatus.DoesNotExist, TypeError, KeyError):
log.exception(
"Could not retrieve final grade from the credit eligibility table "
"for user %s in course %s.",
user.id, course_key
)
raise UserIsNotEligible
parameters = {
"request_uuid": credit_request.uuid,
"timestamp": to_timestamp(datetime.datetime.now(pytz.UTC)),
"course_org": course_key.org,
"course_num": course_key.course,
"course_run": course_key.run,
"final_grade": final_grade,
"user_username": user.username,
"user_email": user.email,
"user_full_name": user.profile.name,
"user_mailing_address": (
user.profile.mailing_address
if user.profile.mailing_address is not None
else ""
),
"user_country": (
user.profile.country.code
if user.profile.country.code is not None
else ""
),
}
credit_request.parameters = parameters
credit_request.save()
if created:
log.info(u'Created new request for credit with UUID "%s"', credit_request.uuid)
else:
log.info(
u'Updated request for credit with UUID "%s" so the user can re-issue the request',
credit_request.uuid
)
# Sign the parameters using a secret key we share with the credit provider.
parameters["signature"] = signature(parameters, shared_secret_key)
return {
"url": credit_provider.provider_url,
"method": "POST",
"parameters": parameters
}
def update_credit_request_status(request_uuid, provider_id, status):
"""
Update the status of a credit request.
Approve or reject a request for a student to receive credit in a course
from a particular credit provider.
This function does NOT check that the status update is authorized.
The caller needs to handle authentication and authorization (checking the signature
of the message received from the credit provider)
The function is idempotent; if the request has already been updated to the status,
the function does nothing.
Arguments:
request_uuid (str): The unique identifier for the credit request.
provider_id (str): Identifier for the credit provider.
status (str): Either "approved" or "rejected"
Returns: None
Raises:
CreditRequestNotFound: No request exists that is associated with the given provider.
InvalidCreditStatus: The status is not either "approved" or "rejected".
"""
if status not in ["approved", "rejected"]:
raise InvalidCreditStatus
try:
request = CreditRequest.objects.get(uuid=request_uuid, provider__provider_id=provider_id)
old_status = request.status
request.status = status
request.save()
log.info(
u'Updated request with UUID "%s" from status "%s" to "%s" for provider with ID "%s".',
request_uuid, old_status, status, provider_id
)
except CreditRequest.DoesNotExist:
msg = (
u'Credit provider with ID "{provider_id}" attempted to '
u'update request with UUID "{request_uuid}", but no request '
u'with this UUID is associated with the provider.'
).format(provider_id=provider_id, request_uuid=request_uuid)
log.warning(msg)
raise CreditRequestNotFound(msg)
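# A minimal illustrative call (the UUID and provider id are placeholder
# values), e.g. from a provider callback view after the message signature
# has been verified:
#
# >>> update_credit_request_status(
# ...     "557168d0f7664fe59097106c67c3f847", "hogwarts", "approved"
# ... )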
def get_credit_requests_for_user(username):
"""
    Retrieve the status of a user's credit requests.
    Each request status is either "pending", "approved" or "rejected".
Arguments:
username (unicode): The username of the user who initiated the requests.
Returns: list
Example Usage:
        >>> get_credit_requests_for_user("bob")
[
{
"uuid": "557168d0f7664fe59097106c67c3f847",
"timestamp": 1434631630,
"course_key": "course-v1:HogwartsX+Potions101+1T2015",
"provider": {
"id": "HogwartsX",
"display_name": "Hogwarts School of Witchcraft and Wizardry",
},
"status": "pending" # or "approved" or "rejected"
}
]
"""
return CreditRequest.credit_requests_for_user(username)
def get_credit_requirement_status(course_key, username, namespace=None, name=None):
""" Retrieve the user's status for each credit requirement in the course.
    Args:
        course_key (CourseKey): The identifier for the course
        username (str): The identifier of the user
    Keyword Arguments:
        namespace (str): Optional namespace to filter the requirements by
        name (str): Optional name to filter the requirements by
Example:
>>> get_credit_requirement_status("course-v1-edX-DemoX-1T2015", "john")
[
{
"namespace": "reverification",
"name": "i4x://edX/DemoX/edx-reverification-block/assessment_uuid",
"display_name": "In Course Reverification",
"criteria": {},
"status": "failed",
},
{
"namespace": "proctored_exam",
"name": "i4x://edX/DemoX/proctoring-block/final_uuid",
"display_name": "Proctored Mid Term Exam",
"criteria": {},
"status": "satisfied",
},
{
"namespace": "grade",
"name": "i4x://edX/DemoX/proctoring-block/final_uuid",
"display_name": "Minimum Passing Grade",
"criteria": {"min_grade": 0.8},
"status": "failed",
},
]
Returns:
list of requirement statuses
"""
requirements = CreditRequirement.get_course_requirements(course_key, namespace=namespace, name=name)
requirement_statuses = CreditRequirementStatus.get_statuses(requirements, username)
requirement_statuses = dict((o.requirement, o) for o in requirement_statuses)
statuses = []
for requirement in requirements:
requirement_status = requirement_statuses.get(requirement)
statuses.append({
"namespace": requirement.namespace,
"name": requirement.name,
"display_name": requirement.display_name,
"criteria": requirement.criteria,
"status": requirement_status.status if requirement_status else None,
"status_date": requirement_status.modified if requirement_status else None,
})
return statuses
def is_user_eligible_for_credit(username, course_key):
"""Returns a boolean indicating if the user is eligible for credit for
the given course
Args:
username(str): The identifier for user
course_key (CourseKey): The identifier for course
Returns:
True if user is eligible for the course else False
"""
return CreditEligibility.is_user_eligible_for_credit(course_key, username)
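# Illustrative call (the username and course id are placeholder values):
#
# >>> is_user_eligible_for_credit(
# ...     "bob", CourseKey.from_string("course-v1:edX+DemoX+Demo_Course")
# ... )
# False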
def get_credit_requirement(course_key, namespace, name):
"""Returns the requirement of a given course, namespace and name.
Args:
course_key(CourseKey): The identifier for course
namespace(str): Namespace of requirement
name(str): Name of the requirement
Returns: dict
Example:
        >>> get_credit_requirement(
"course-v1-edX-DemoX-1T2015", "proctored_exam", "i4x://edX/DemoX/proctoring-block/final_uuid"
)
{
"course_key": "course-v1-edX-DemoX-1T2015"
"namespace": "reverification",
"name": "i4x://edX/DemoX/edx-reverification-block/assessment_uuid",
"display_name": "reverification"
"criteria": {},
}
"""
requirement = CreditRequirement.get_course_requirement(course_key, namespace, name)
return {
"course_key": requirement.course.course_key,
"namespace": requirement.namespace,
"name": requirement.name,
"display_name": requirement.display_name,
"criteria": requirement.criteria
} if requirement else None
def set_credit_requirement_status(username, course_key, req_namespace, req_name, status="satisfied", reason=None):
"""
Update the user's requirement status.
This will record whether the user satisfied or failed a particular requirement
in a course. If the user has satisfied all requirements, the user will be marked
as eligible for credit in the course.
Args:
username (str): Username of the user
course_key (CourseKey): Identifier for the course associated with the requirement.
req_namespace (str): Namespace of the requirement (e.g. "grade" or "reverification")
req_name (str): Name of the requirement (e.g. "grade" or the location of the ICRV XBlock)
Keyword Arguments:
status (str): Status of the requirement (either "satisfied" or "failed")
reason (dict): Reason of the status
Example:
>>> set_credit_requirement_status(
"staff",
CourseKey.from_string("course-v1-edX-DemoX-1T2015"),
"reverification",
"i4x://edX/DemoX/edx-reverification-block/assessment_uuid",
status="satisfied",
reason={}
)
"""
# Check if we're already eligible for credit.
# If so, short-circuit this process.
if CreditEligibility.is_user_eligible_for_credit(course_key, username):
return
# Retrieve all credit requirements for the course
# We retrieve all of them to avoid making a second query later when
# we need to check whether all requirements have been satisfied.
reqs = CreditRequirement.get_course_requirements(course_key)
# Find the requirement we're trying to set
req_to_update = next((
req for req in reqs
if req.namespace == req_namespace
and req.name == req_name
), None)
# If we can't find the requirement, then the most likely explanation
# is that there was a lag updating the credit requirements after the course
# was published. We *could* attempt to create the requirement here,
# but that could cause serious performance issues if many users attempt to
# lock the row at the same time.
# Instead, we skip updating the requirement and log an error.
if req_to_update is None:
log.error(
(
u'Could not update credit requirement in course "%s" '
u'with namespace "%s" and name "%s" '
u'because the requirement does not exist. '
u'The user "%s" should have had his/her status updated to "%s".'
),
unicode(course_key), req_namespace, req_name, username, status
)
return
# Update the requirement status
CreditRequirementStatus.add_or_update_requirement_status(
username, req_to_update, status=status, reason=reason
)
# If we're marking this requirement as "satisfied", there's a chance
# that the user has met all eligibility requirements.
if status == "satisfied":
CreditEligibility.update_eligibility(reqs, username, course_key)
def _get_requirements_to_disable(old_requirements, new_requirements):
"""
Get the ids of 'CreditRequirement' entries to be disabled that are
deleted from the courseware.
Args:
old_requirements(QuerySet): QuerySet of CreditRequirement
new_requirements(list): List of requirements being added
Returns:
List of ids of CreditRequirement that are not in new_requirements
"""
requirements_to_disable = []
for old_req in old_requirements:
found_flag = False
for req in new_requirements:
# check if an already added requirement is modified
if req["namespace"] == old_req.namespace and req["name"] == old_req.name:
found_flag = True
break
if not found_flag:
requirements_to_disable.append(old_req.id)
return requirements_to_disable
def _validate_requirements(requirements):
"""
Validate the requirements.
Args:
requirements(list): List of requirements
Returns:
List of strings of invalid requirements
"""
invalid_requirements = []
for requirement in requirements:
invalid_params = []
if not requirement.get("namespace"):
invalid_params.append("namespace")
if not requirement.get("name"):
invalid_params.append("name")
if not requirement.get("display_name"):
invalid_params.append("display_name")
if "criteria" not in requirement:
invalid_params.append("criteria")
if invalid_params:
invalid_requirements.append(
u"{requirement} has missing/invalid parameters: {params}".format(
requirement=requirement,
params=invalid_params,
)
)
return invalid_requirements
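# Illustrative input/output (the dict ordering inside the message may differ):
#
# >>> _validate_requirements([{"namespace": "grade", "name": "grade"}])
# [u"{'namespace': 'grade', 'name': 'grade'} has missing/invalid parameters: ['display_name', 'criteria']"]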
def is_credit_course(course_key):
"""API method to check if course is credit or not.
Args:
course_key(CourseKey): The course identifier string or CourseKey object
Returns:
Bool True if the course is marked credit else False
"""
try:
course_key = CourseKey.from_string(unicode(course_key))
except InvalidKeyError:
return False
return CreditCourse.is_credit_course(course_key=course_key)
def get_credit_request_status(username, course_key):
"""Get the credit request status.
    This function returns the status of the user's credit request for the given course.
    It returns the latest request status for any credit provider.
    The valid statuses are 'pending', 'approved' or 'rejected'.
Args:
username(str): The username of user
course_key(CourseKey): The course locator key
Returns:
A dictionary of credit request user has made if any
"""
credit_request = CreditRequest.get_user_request_status(username, course_key)
if credit_request:
credit_status = {
"uuid": credit_request.uuid,
"timestamp": credit_request.modified,
"course_key": credit_request.course.course_key,
"provider": {
"id": credit_request.provider.provider_id,
"display_name": credit_request.provider.display_name
},
"status": credit_request.status
}
else:
credit_status = {}
return credit_status
def _get_duration_and_providers(credit_course):
"""Returns the credit providers and eligibility durations.
    The eligibility_duration is the maximum of the credit durations of
    all the credit providers of the given course.
Args:
credit_course(CreditCourse): The CreditCourse object
Returns:
Tuple of eligibility_duration and credit providers of given course
"""
providers = credit_course.providers.all()
seconds_good_for_display = 0
providers_list = []
for provider in providers:
providers_list.append(
{
"id": provider.provider_id,
"display_name": provider.display_name,
"eligibility_duration": provider.eligibility_duration,
"provider_url": provider.provider_url
}
)
eligibility_duration = int(provider.eligibility_duration) if provider.eligibility_duration else 0
seconds_good_for_display = max(eligibility_duration, seconds_good_for_display)
return seconds_good_for_display, providers_list
def get_credit_eligibility(username):
"""
    Returns all the eligibilities the user has met.
Args:
username(str): The username of user
Example:
        >>> get_credit_eligibility('Aamir')
{
"edX/DemoX/Demo_Course": {
"created_at": "2015-12-21",
"providers": [
"id": 12,
"display_name": "Arizona State University",
"eligibility_duration": 60,
"provider_url": "http://arizona/provideere/link"
],
"seconds_good_for_display": 90
}
}
Returns:
A dict of eligibilities
"""
eligibilities = CreditEligibility.get_user_eligibility(username)
user_credit_requests = get_credit_requests_for_user(username)
request_dict = {}
# Change the list to dict for iteration
for request in user_credit_requests:
request_dict[unicode(request["course_key"])] = request
user_eligibilities = {}
for eligibility in eligibilities:
course_key = eligibility.course.course_key
duration, providers_list = _get_duration_and_providers(eligibility.course)
user_eligibilities[unicode(course_key)] = {
"created_at": eligibility.created,
"seconds_good_for_display": duration,
"providers": providers_list,
}
# Default status is requirements_meet
user_eligibilities[unicode(course_key)]["status"] = "requirements_meet"
# If there is some request user has made for this eligibility then update the status
if unicode(course_key) in request_dict:
user_eligibilities[unicode(course_key)]["status"] = request_dict[unicode(course_key)]["status"]
user_eligibilities[unicode(course_key)]["provider"] = request_dict[unicode(course_key)]["provider"]
return user_eligibilities
def get_purchased_credit_courses(username): # pylint: disable=unused-argument
"""
Returns the purchased credit courses.
Args:
username(str): Username of the student
Returns:
A dict of courses user has purchased from the credit provider after completion
"""
# TODO: How to track the purchased courses. It requires Will's work for credit provider integration
return {}
| agpl-3.0 | 8,880,496,703,872,569,000 | 35.062025 | 114 | 0.614097 | false |
qacafe/cdrouter.py | cdrouter/jobs.py | 1 | 8582 | #
# Copyright (c) 2017-2020 by QA Cafe.
# All Rights Reserved.
#
"""Module for accessing CDRouter Jobs."""
import collections
from marshmallow import Schema, fields, post_load
from .cdr_datetime import DateTime
class Options(object):
"""Model for CDRouter Job Options.
:param tags: (optional) Tags as string list.
:param skip_tests: (optional) Tests to skip as string list.
:param begin_at: (optional) Test name to begin testing at as string.
:param end_at: (optional) Test name to end testing at as string.
:param extra_cli_args: (optional) Extra `cdrouter-cli` arguments as string.
"""
def __init__(self, **kwargs):
self.tags = kwargs.get('tags', None)
self.skip_tests = kwargs.get('skip_tests', None)
self.begin_at = kwargs.get('begin_at', None)
self.end_at = kwargs.get('end_at', None)
self.extra_cli_args = kwargs.get('extra_cli_args', None)
class OptionsSchema(Schema):
tags = fields.List(fields.Str(), missing=None)
skip_tests = fields.List(fields.Str(), missing=None)
begin_at = fields.Str()
end_at = fields.Str()
extra_cli_args = fields.Str()
@post_load
def post_load(self, data):
return Options(**data)
class Job(object):
"""Model for CDRouter Jobs.
:param id: (optional) Job ID as an int.
    :param status: (optional) Job status as a string.
:param options: (optional) :class:`jobs.Options <jobs.Options>` object
:param package_id: (optional) Package ID as an int.
:param package_name: (optional) Package name as string.
:param config_id: (optional) Config ID as an int.
:param config_name: (optional) Config name as string.
:param device_id: (optional) Device ID as an int.
:param device_name: (optional) Device name as string.
:param result_id: (optional) Result ID as an int.
:param user_id: (optional) User ID as an int.
:param created: (optional) Job creation time as `DateTime`.
:param updated: (optional) Job last-updated time as `DateTime`.
    :param automatic: (optional) Bool `True` if the job was scheduled automatically.
    :param run_at: (optional) Job scheduled run-time as `DateTime`.
"""
def __init__(self, **kwargs):
self.id = kwargs.get('id', None)
self.status = kwargs.get('status', None)
self.options = kwargs.get('options', None)
self.package_id = kwargs.get('package_id', None)
self.package_name = kwargs.get('package_name', None)
self.config_id = kwargs.get('config_id', None)
self.config_name = kwargs.get('config_name', None)
self.device_id = kwargs.get('device_id', None)
self.device_name = kwargs.get('device_name', None)
self.result_id = kwargs.get('result_id', None)
self.user_id = kwargs.get('user_id', None)
self.created = kwargs.get('created', None)
self.updated = kwargs.get('updated', None)
self.automatic = kwargs.get('automatic', None)
self.run_at = kwargs.get('run_at', None)
class JobSchema(Schema):
id = fields.Int(as_string=True)
status = fields.Str()
options = fields.Nested(OptionsSchema)
package_id = fields.Int(as_string=True)
package_name = fields.Str()
config_id = fields.Int(as_string=True)
config_name = fields.Str()
device_id = fields.Int(as_string=True)
device_name = fields.Str()
result_id = fields.Int(as_string=True, missing=None)
user_id = fields.Int(as_string=True)
created = DateTime()
updated = DateTime()
automatic = fields.Bool()
run_at = DateTime()
@post_load
def post_load(self, data):
return Job(**data)
class Page(collections.namedtuple('Page', ['data', 'links'])):
"""Named tuple for a page of list response data.
:param data: :class:`jobs.Job <jobs.Job>` list
:param links: :class:`cdrouter.Links <cdrouter.Links>` object
"""
class JobsService(object):
"""Service for accessing CDRouter Jobs."""
RESOURCE = 'jobs'
BASE = RESOURCE + '/'
def __init__(self, service):
self.service = service
self.base = self.BASE
def list(self, filter=None, type=None, sort=None, limit=None, page=None, detailed=None): # pylint: disable=redefined-builtin
"""Get a list of jobs.
:param filter: (optional) Filters to apply as a string list.
:param type: (optional) `union` or `inter` as string.
:param sort: (optional) Sort fields to apply as string list.
:param limit: (optional) Limit returned list length.
:param page: (optional) Page to return.
:param detailed: (optional) Return all fields if Bool `True`.
:return: :class:`jobs.Page <jobs.Page>` object
"""
schema = JobSchema()
resp = self.service.list(self.base, filter, type, sort, limit, page, detailed=detailed)
js, l = self.service.decode(schema, resp, many=True, links=True)
return Page(js, l)
def iter_list(self, *args, **kwargs):
"""Get a list of jobs. Whereas ``list`` fetches a single page of jobs
according to its ``limit`` and ``page`` arguments,
``iter_list`` returns all jobs by internally making successive
calls to ``list``.
:param args: Arguments that ``list`` takes.
:param kwargs: Optional arguments that ``list`` takes.
:return: :class:`jobs.Job <jobs.Job>` list
"""
return self.service.iter_list(self.list, *args, **kwargs)
def get(self, id): # pylint: disable=invalid-name,redefined-builtin
"""Get a job.
:param id: Job ID as an int.
:return: :class:`jobs.Job <jobs.Job>` object
:rtype: jobs.Job
"""
schema = JobSchema()
resp = self.service.get_id(self.base, id)
return self.service.decode(schema, resp)
def edit(self, resource):
"""Edit a job.
:param resource: :class:`jobs.Job <jobs.Job>` object
:return: :class:`jobs.Job <jobs.Job>` object
:rtype: jobs.Job
"""
schema = JobSchema(exclude=('id', 'status', 'options', 'package_name', 'config_name', 'device_name', 'result_id', 'user_id', 'created', 'updated', 'automatic', 'run_at'))
json = self.service.encode(schema, resource)
schema = JobSchema()
        resp = self.service.edit(self.base, resource.id, json)
return self.service.decode(schema, resp)
def launch(self, resource):
"""Launch a new job.
:param resource: :class:`jobs.Job <jobs.Job>` object
:return: :class:`jobs.Job <jobs.Job>` object
:rtype: jobs.Job
"""
schema = JobSchema(exclude=('id', 'status', 'package_name', 'config_name', 'device_name', 'result_id', 'user_id', 'created', 'updated', 'automatic'))
json = self.service.encode(schema, resource)
schema = JobSchema()
resp = self.service.create(self.base, json)
return self.service.decode(schema, resp)
def delete(self, id): # pylint: disable=invalid-name,redefined-builtin
"""Delete a job.
:param id: Job ID as an int.
"""
return self.service.delete_id(self.base, id)
def bulk_launch(self, jobs=None, filter=None, all=False): # pylint: disable=redefined-builtin
"""Bulk launch a set of jobs.
:param jobs: :class:`jobs.Job <jobs.Job>` list
:param filter: (optional) Filters to apply as a string list.
:param all: (optional) Apply to all if bool `True`.
"""
json = None
if jobs is not None:
schema = JobSchema(exclude=('id', 'status', 'package_name', 'config_name', 'device_name', 'result_id', 'user_id', 'created', 'updated', 'automatic'))
jobs_json = self.service.encode(schema, jobs, many=True)
json = {self.RESOURCE: jobs_json}
schema = JobSchema()
resp = self.service.post(self.base,
params={'bulk': 'launch', 'filter': filter, 'all': all}, json=json)
return self.service.decode(schema, resp, many=True)
def bulk_delete(self, ids=None, filter=None, type=None, all=False): # pylint: disable=redefined-builtin
"""Bulk delete a set of jobs.
:param ids: (optional) Int list of job IDs.
:param filter: (optional) String list of filters.
:param type: (optional) `union` or `inter` as string.
:param all: (optional) Apply to all if bool `True`.
"""
return self.service.bulk_delete(self.base, self.RESOURCE, ids=ids, filter=filter, type=type, all=all)
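# A minimal usage sketch (illustrative, not part of this module). It assumes
# the package's top-level client exposes this service as ``c.jobs``; the URL,
# token and package id below are placeholders:
#
# from cdrouter import CDRouter
# from cdrouter.jobs import Job
#
# c = CDRouter('http://localhost', token='deadbeef')
# job = c.jobs.launch(Job(package_id=42))
# for j in c.jobs.iter_list(limit=100):
#     print(j.id, j.status)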
| mit | -3,121,657,317,393,191,000 | 38.916279 | 178 | 0.622233 | false |
jonashaag/django-autocomplete-light | autocomplete_light/widgets.py | 1 | 6373 | """
ChoiceWidget is intended to work as a replacement for django's Select widget,
and MultipleChoiceWidget for django's SelectMultiple.
Constructing a widget needs an Autocomplete class or registered autocomplete
name.
The choice autocomplete widget renders from autocomplete_light/widget.html
template.
"""
from django import forms
from django.forms.util import flatatt
from django.utils import safestring
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
__all__ = ['WidgetBase', 'ChoiceWidget', 'MultipleChoiceWidget', 'TextWidget']
class WidgetBase(object):
"""
Base widget for autocompletes.
Mainly handles passing arguments from Python to HTML data-* attributes,
via widget_js_attributes and autocomplete_js_attributes. Javascript will
parse these data-* attributes.
This widget also renders the widget template.
"""
def __init__(self, autocomplete,
widget_js_attributes=None, autocomplete_js_attributes=None,
extra_context=None):
if isinstance(autocomplete, basestring):
self.autocomplete_name = autocomplete
from autocomplete_light import registry
self.autocomplete = registry[self.autocomplete_name]
else:
self.autocomplete = autocomplete
self.autocomplete_name = autocomplete.__class__.__name__
if extra_context is None:
self.extra_context = {}
else:
self.extra_context = extra_context
if widget_js_attributes is None:
self.widget_js_attributes = {}
else:
self.widget_js_attributes = widget_js_attributes
if autocomplete_js_attributes is None:
self.autocomplete_js_attributes = {}
else:
self.autocomplete_js_attributes = autocomplete_js_attributes
def process_js_attributes(self):
extra_autocomplete_js_attributes = getattr(self.autocomplete,
'autocomplete_js_attributes', {})
self.autocomplete_js_attributes.update(
extra_autocomplete_js_attributes)
extra_widget_js_attributes = getattr(self.autocomplete,
'widget_js_attributes', {})
self.widget_js_attributes.update(
extra_widget_js_attributes)
if 'bootstrap' not in self.widget_js_attributes.keys():
self.widget_js_attributes['bootstrap'] = 'normal'
if 'choice_selector' not in self.autocomplete_js_attributes.keys():
self.autocomplete_js_attributes['choice_selector'] = '[data-value]'
if 'url' not in self.autocomplete_js_attributes.keys():
url = self.autocomplete().get_absolute_url()
self.autocomplete_js_attributes['url'] = url
if 'placeholder' not in self.autocomplete_js_attributes.keys():
self.autocomplete_js_attributes['placeholder'] = _(
'type some text to search in this autocomplete').capitalize()
def render(self, name, value, attrs=None):
final_attrs = self.build_attrs(attrs)
self.html_id = final_attrs.pop('id', name)
if value is not None and not isinstance(value, (list, tuple)):
values = [value]
else:
values = value
autocomplete = self.autocomplete(values=values)
if values and not autocomplete.validate_values():
raise forms.ValidationError('%s cannot validate %s' % (
self.autocomplete_name, values))
self.process_js_attributes()
autocomplete_name = self.autocomplete_name.lower()
context = {
'name': name,
'values': values,
'widget': self,
'extra_attrs': safestring.mark_safe(flatatt(final_attrs)),
'autocomplete': autocomplete,
}
context.update(self.extra_context)
return safestring.mark_safe(render_to_string([
getattr(autocomplete, 'widget_template', ''),
'autocomplete_light/%s/widget.html' % autocomplete_name,
'autocomplete_light/%s/widget.html' % getattr(autocomplete,
'widget_template_name', ''),
'autocomplete_light/widget.html',
], context))
class ChoiceWidget(WidgetBase, forms.Select):
"""
Widget that provides an autocomplete for zero to one choice.
"""
def __init__(self, autocomplete,
widget_js_attributes=None, autocomplete_js_attributes=None,
extra_context=None, *args, **kwargs):
forms.Select.__init__(self, *args, **kwargs)
WidgetBase.__init__(self, autocomplete,
widget_js_attributes, autocomplete_js_attributes, extra_context)
self.widget_js_attributes['max_values'] = 1
class MultipleChoiceWidget(WidgetBase, forms.SelectMultiple):
"""
Widget that provides an autocomplete for zero to n choices.
"""
def __init__(self, autocomplete=None,
widget_js_attributes=None, autocomplete_js_attributes=None,
extra_context=None, *args, **kwargs):
forms.SelectMultiple.__init__(self, *args, **kwargs)
WidgetBase.__init__(self, autocomplete,
widget_js_attributes, autocomplete_js_attributes, extra_context)
class TextWidget(forms.TextInput, WidgetBase):
""" Widget that just adds an autocomplete to fill a text input """
def __init__(self, autocomplete,
widget_js_attributes=None, autocomplete_js_attributes=None,
*args, **kwargs):
forms.TextInput.__init__(self, *args, **kwargs)
WidgetBase.__init__(self, autocomplete,
widget_js_attributes, autocomplete_js_attributes)
def build_attrs(self, extra_attrs=None, **kwargs):
attrs = forms.TextInput.build_attrs(self, extra_attrs, **kwargs)
def update_attrs(source, prefix=''):
for key, value in source.items():
key = u'data-%s%s' % (prefix, key.replace('_', '-'))
attrs[key] = value
self.process_js_attributes()
update_attrs(self.widget_js_attributes)
update_attrs(self.autocomplete_js_attributes, 'autocomplete-')
if 'class' not in attrs.keys():
attrs['class'] = ''
attrs['class'] += ' autocomplete-light-text-widget'
return attrs
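# A minimal usage sketch (illustrative; the ``Person`` model and the
# ``PersonAutocomplete`` registration are assumptions, not part of this
# module):
#
# import autocomplete_light
# from django import forms
#
# class OrderForm(forms.Form):
#     person = forms.ModelChoiceField(
#         Person.objects.all(),
#         widget=autocomplete_light.ChoiceWidget('PersonAutocomplete'))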
| mit | 1,981,258,003,177,358,000 | 34.405556 | 79 | 0.630943 | false |
oliviermartin/pyOCD | pyOCD/flash/__init__.py | 1 | 2136 | """
mbed CMSIS-DAP debugger
Copyright (c) 2006-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from flash_cortex_m import Flash_cortex_m
from flash_klxx import Flash_klxx
from flash_kl28z import Flash_kl28z
from flash_k20d50m import Flash_k20d50m
from flash_k22f import Flash_k22f
from flash_k64f import Flash_k64f
from flash_lpc800 import Flash_lpc800
from flash_lpc11u24 import Flash_lpc11u24
from flash_lpc1768 import Flash_lpc1768
from flash_lpc4330 import Flash_lpc4330
from flash_nrf51 import Flash_nrf51
from flash_nrf52 import Flash_nrf52
from flash_stm32f103rc import Flash_stm32f103rc
from flash_stm32f051 import Flash_stm32f051
from flash_maxwsnenv import Flash_maxwsnenv
from flash_max32600mbed import Flash_max32600mbed
from flash_w7500 import Flash_w7500
from flash_lpc11xx_32 import Flash_lpc11xx_32
FLASH = {
'cortex_m': Flash_cortex_m,
'kinetis': Flash_cortex_m,
'kl02z': Flash_klxx,
'kl05z': Flash_klxx,
'kl25z': Flash_klxx,
'kl26z': Flash_klxx,
'kl28z': Flash_kl28z,
'kl46z': Flash_klxx,
'k20d50m': Flash_k20d50m,
'k22f': Flash_k22f,
'k64f': Flash_k64f,
'lpc800': Flash_lpc800,
'lpc11u24': Flash_lpc11u24,
'lpc1768': Flash_lpc1768,
'lpc4330': Flash_lpc4330,
'nrf51': Flash_nrf51,
'nrf52': Flash_nrf52,
'stm32f103rc': Flash_stm32f103rc,
'stm32f051': Flash_stm32f051,
'maxwsnenv': Flash_maxwsnenv,
'max32600mbed': Flash_max32600mbed,
'w7500': Flash_w7500,
'lpc11xx_32': Flash_lpc11xx_32,
}
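# Illustrative lookup (not part of the original module): a board's target name
# selects the flash algorithm class from this table. ``target`` stands for an
# already-initialised core object and is an assumption here:
#
# flash_cls = FLASH['k64f']
# flash = flash_cls(target)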
| apache-2.0 | 4,962,502,064,367,315,000 | 34.016393 | 73 | 0.698034 | false |
paultcochrane/pyvisi | pyvisi/renderers/vtk/item.py | 1 | 1221 | # Copyright (C) 2004-2008 Paul Cochrane
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
This is base class for items within a scene
"""
# generic imports
from pyvisi.renderers.vtk.common import debugMsg
from pyvisi.item import Item as BaseItem
__revision__ = '$Revision$'
class Item(BaseItem):
"""
This is the base class for items within a scene
"""
def __init__(self):
"""
Initialisation of Item class
"""
debugMsg("Called Item.__init__()")
BaseItem.__init__(self)
# vim: expandtab shiftwidth=4:
| gpl-2.0 | -3,891,547,595,315,248,000 | 29.525 | 77 | 0.70516 | false |
hearsaycorp/normalize | normalize/property/types.py | 1 | 5422 | #
# This file is a part of the normalize python library
#
# normalize is free software: you can redistribute it and/or modify
# it under the terms of the MIT License.
#
# normalize is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
#
# You should have received a copy of the MIT license along with
# normalize. If not, refer to the upstream repository at
# http://github.com/hearsaycorp/normalize
#
"""``normalize.property.types`` provides an assortment of pre-generated
types"""
import six
from past.builtins import basestring
import datetime
import numbers
from sys import maxsize
from . import make_property_type
from ..subtype import subtype
try:
from dateutil.parser import parse as parse_datetime
except ImportError:
formats = {
6: "%y%m%d",
8: "%Y%m%d",
13: "%Y%m%d%H:%M",
14: "%Y%m%d%H:%MZ",
16: "%Y%m%d%H:%M:%S",
17: "%Y%m%d%H:%M:%SZ",
}
def parse_datetime(not_a_datetime):
datetime_stripped = not_a_datetime.replace(
"-", "").replace("T", "").replace(" ", "")
if len(datetime_stripped) in formats:
return datetime.datetime.strptime(
datetime_stripped, formats[len(datetime_stripped)],
)
else:
raise Exception(
"``dateutil`` not installed, so can't parse %r" %
not_a_datetime
)
IntProperty = make_property_type(
"IntProperty", isa=int, trait_name="int",
attrs={
"__doc__": "A property which must be an ``int``",
},
)
LongProperty = make_property_type(
"LongProperty", isa=six.integer_types[-1], trait_name="long",
attrs={
"__doc__": "A property which must be a ``long``",
},
)
IntegerProperty = make_property_type(
"IntegerProperty", isa=numbers.Integral, trait_name="integer",
coerce=lambda x: (
int(x) if abs(float(x)) < maxsize else six.integer_types[-1](x)
),
attrs={
"__doc__": "A property which holds an integer, int or long",
},
)
NumberProperty = make_property_type(
"NumberProperty", isa=numbers.Number, trait_name="number",
coerce=lambda x: coerce_number(x),
attrs={
"__doc__": "A property which holds a number type (eg float, int) "
"with automatic cast from string",
},
)
StringProperty = make_property_type(
"StringProperty", isa=basestring, trait_name="str",
attrs={
"__doc__": "A property which must be a ``basestring`` or "
"``unicode``, and if not, throws a coerce error",
},
)
FloatProperty = make_property_type(
"FloatProperty", isa=float, trait_name="float",
attrs={
"__doc__": "A property which must be a floating point number.",
},
)
UnicodeProperty = make_property_type(
"UnicodeProperty", base_type=StringProperty,
isa=six.text_type, coerce=(lambda s: six.text_type(s)
if isinstance(s, str) else s),
trait_name="unicode",
attrs={
"__doc__": "A property which must be a ``unicode`` or ``str`` "
"(it is upgraded to ``unicode`` if it is passed in as "
"a ``str``)",
},
)
def coerce_datetime(not_a_datetime):
if isinstance(not_a_datetime, date):
tt = not_a_datetime.timetuple()
return datetime.datetime(*(tt[0:6]))
elif isinstance(not_a_datetime, basestring):
return parse_datetime(not_a_datetime)
else:
raise ValueError(
"Cannot coerce %r to a date/datetime" % not_a_datetime
)
def coerce_date(not_a_date):
if isinstance(not_a_date, datetime.datetime) or (
hasattr(not_a_date, "date") and callable(not_a_date.date)
):
return not_a_date.date()
else:
return coerce_datetime(not_a_date).date()
def coerce_number(not_a_number):
if isinstance(not_a_number, basestring):
try:
return six.integer_types[-1](not_a_number)
except ValueError:
return float(not_a_number)
else:
return float(not_a_number)
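# Illustrative behaviour of the coercion helpers above (on Python 2, whole
# numbers come back as ``long``):
#
# >>> coerce_number("10")
# 10
# >>> coerce_number("2.5")
# 2.5
# >>> coerce_date("2014-07-15T09:00:00")
# datetime.date(2014, 7, 15)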
date = subtype(
"date",
of=datetime.date,
where=lambda x: not isinstance(x, datetime.datetime),
)
DateProperty = make_property_type(
"DateProperty",
trait_name="date", isa=date, coerce=coerce_date,
json_out=lambda dt: dt.isoformat(),
attrs={
"__doc__": "A property which must hold a python date; coercion "
"from string is provided via ``dateutil.parse``. "
"As of normalize v1, if a ``datetime.datetime`` "
"instance is assigned to a ``DateProperty``, it will "
"be truncated to a ``datetime.date``.",
},
)
DatetimeProperty = make_property_type(
"DatetimeProperty",
trait_name="datetime", isa=datetime.datetime,
coerce=coerce_datetime,
json_out=lambda dt: dt.isoformat(),
attrs={
"__doc__": "A property which must holds a python datetime. "
"Correct timezone handling is currently TODO and "
"users should not depend on timezone behavior until "
"this message is removed (submit tests and a patch!)",
},
)
__all__ = tuple(k for k in list(globals().keys()) if k.endswith("Property"))
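# A minimal usage sketch (illustrative; assumes ``normalize.Record`` is
# available as described in the package documentation):
#
# from normalize import Record
#
# class Comment(Record):
#     posted = DatetimeProperty()
#     body = UnicodeProperty()
#
# Comment(posted="2014-07-15 11:00", body="hi").posted
# # -> datetime.datetime(2014, 7, 15, 11, 0)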
| mit | -1,562,669,226,994,970,400 | 30.16092 | 76 | 0.601254 | false |
kewljedi/octoprint-pushbullet | setup.py | 1 | 1047 | # coding=utf-8
import setuptools
def package_data_dirs(source, sub_folders):
import os
dirs = []
for d in sub_folders:
for dirname, _, files in os.walk(os.path.join(source, d)):
dirname = os.path.relpath(dirname, source)
for f in files:
dirs.append(os.path.join(dirname, f))
return dirs
def params():
name = "OctoPrint-Pushbullet"
version = "0.0.1"
description = "Adds support to push OctoPrint events to a Pushbullet channel"
long_description = "TODO"
author = "kewljedi"
author_email = "[email protected]"
url = "https://github.com/kewljedi/octoprint-pushbullet"
license = "GPLv3"
packages = ["octoprint_pushbullet"]
package_data = {"octoprint_pushbullet": package_data_dirs('octoprint_pushbullet', ['static', 'templates'])}
include_package_data = True
zip_safe = False
install_requires = open("requirements.txt").read().split("\n")
entry_points = {
"octoprint.plugin": [
"pushbullet = octoprint_pushbullet"
]
}
return locals()
setuptools.setup(**params()) | gpl-3.0 | 8,190,900,412,091,826,000 | 23.952381 | 109 | 0.671442 | false |
mateoqac/unqTip | language/vxgbs/lang/gbs_compiler.py | 1 | 29369 | #
# Copyright (C) 2011, 2012 Pablo Barenbaum <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"Gobstones compiler from source ASTs to virtual machine code."
import lang.gbs_vm
import lang.gbs_builtins
import lang.gbs_type
import lang.gbs_def_helper as def_helper
import common.i18n as i18n
import common.position
import common.utils
class GbsCompileException(common.utils.StaticException):
"Base exception for Gobstones compiler errors."
pass
def parse_literal(tok):
"""Given a token, parse its string value and return the denotated
Gobstones value.
"""
if tok.type == 'symbol':
val = tok.value
else:
val = lang.gbs_builtins.parse_constant(tok.value)
#assert val is not None
if val is None:
val = tok.value
return val
class GbsLabel(object):
"Represents a unique label in the program."
def __repr__(self):
return 'L_%s' % (id(self),)
class GbsCompiler(object):
"Compiler of Gobstones programs."
def __init__(self):
self.code = None
self.temp_counter = None
self.module_handler = None
self._current_def_name = None
self.constructor_of_type = {"Arreglo":"Arreglo"}
def compile_program(self, tree, module_prefix='', explicit_board=None):
"""Given an AST for a full program, compile it to virtual machine
code, returning an instance of lang.gbs_vm.GbsCompiledProgram.
The Main module should be given the empty module prefix ''.
Every other module should be given the module name as a prefix.
"""
if explicit_board is None:
entrypoint_tree = def_helper.find_def(tree.children[2], def_helper.is_entrypoint_def)
self.explicit_board = len(entrypoint_tree.children[2].children) != 0
else:
self.explicit_board = explicit_board
self.module_handler = tree.module_handler
self.compile_imported_modules(tree)
imports = tree.children[1].children
defs = tree.children[2]
self.code = lang.gbs_vm.GbsCompiledProgram(
tree, module_prefix=module_prefix)
self.compile_imports(imports)
self.user_defined_routine_names = list(self.code.external_routines.keys())
self.user_defined_routine_names += def_helper.get_routine_names(defs)
self.compile_defs(defs)
return self.code
def compile_imported_modules(self, tree):
"Recursively compile the imported modules."
for mdl_name, mdl_tree in self.module_handler.parse_trees():
compiler = GbsCompiler()
try:
code = compiler.compile_program(
mdl_tree, module_prefix=mdl_name, explicit_board=self.explicit_board
)
self.constructor_of_type.update(compiler.constructor_of_type)
except common.utils.SourceException as exception:
self.module_handler.reraise(
GbsCompileException,
exception,
i18n.i18n(
'Error compiling module %s'
) % (
mdl_name,
),
common.position.ProgramAreaNear(tree.children[1]))
self.module_handler.set_compiled_code(mdl_name, code)
def compile_imports(self, imports):
"""Add the imported procedures and functions to the local
namespace of routines.
"""
for imp in imports:
mdl_name = imp.children[1].value
rtns = imp.children[2].children
for rtn in rtns:
if (not isinstance(rtn, lang.gbs_constructs.UserType) and
not isinstance(rtn, lang.gbs_constructs.BuiltinFieldGetter)):
mdl_code = self.module_handler.compiled_code_for(mdl_name)
if rtn.name() in mdl_code.routines:
self.code.external_routines[rtn.name()] = (
mdl_code,
mdl_code.routines[rtn.name()]
)
else:
assert rtn.name() in mdl_code.external_routines
val = mdl_code.external_routines[rtn.name()]
self.code.external_routines[rtn.name()] = val
def compile_defs(self, tree):
"Compile a list of definitions."
self.temp_counter = 0
for def_ in tree.children:
if def_helper.is_type_def(def_):
self.gather_type_data(def_)
else:
self.compile_routine_def(def_)
def gather_type_data(self, def_):
_, type_name, type_or_def = def_.children
if type_or_def.children[0] == 'record':
self.constructor_of_type[type_name.value] = type_name.value
else:
body = type_or_def.children[1]
for case in body.children:
_, cname, _ = case.children
self.constructor_of_type[cname.value] = type_name.value
def temp_varname(self):
"Make a temporary variable name."
self.temp_counter += 1
return '_tempvar%i' % (self.temp_counter)
def compile_routine_def(self, tree):
"Compile a single definition."
prfn = def_helper.get_def_keyword(tree)
name = def_helper.get_def_name(tree).value
self._current_def_name = name
params = [param.value for param in def_helper.get_def_params(tree)]
immutable_params = []
if prfn == 'function':
immutable_params = params
elif prfn == 'procedure' and len(params) > 1:
immutable_params = params[1:]
code = lang.gbs_vm.GbsCompiledCode(tree, prfn, name, params, self.explicit_board)
code.add_enter()
for p in immutable_params:
code.push(('setImmutable', p), near=tree)
self.compile_commands(def_helper.get_def_body(tree), code)
if prfn == 'procedure' and self.explicit_board:
code.push(('pushFrom', params[0]), near=tree)
code.add_leave_return()
code.build_label_table()
self.code.routines[name] = code
#### The following methods take a program fragment in form of an AST
#### and a "code" argument, which should be an instance of
#### lang.gbs_vm.GbsCompiledCode.
####
#### The compilation process appends to the compiled code the virtual
#### machine code corresponding to the given program fragment.
#### Commands
def compile_commands(self, tree, code):
"Compile a sequence of commands."
for cmd in tree.children:
self.compile_cmd(cmd, code)
def compile_cmd(self, tree, code):
"Compile a single command."
command = tree.children[0]
dispatch = {
'Skip': self.compile_skip,
'THROW_ERROR': self.compile_boom,
'procCall': self.compile_proc_call,
'assignVarName': self.compile_assign_var_name,
'assignVarTuple1': self.compile_assign_var_tuple1,
'if': self.compile_if,
'case': self.compile_case,
'while': self.compile_while,
'repeat': self.compile_repeat,
'repeatWith': self.compile_repeat_with,
'foreach': self.compile_foreach,
'block': self.compile_block,
'return': self.compile_return,
}
assert command in dispatch
dispatch[command](tree, code)
def compile_type(self, tree, code):
"""Compile a type expression. Just fill a hole in construct() function.
In a future, it could be usefull for runtime type checks. [CHECK]"""
tok = tree.children[1]
type = self.constructor_of_type[tok.value] + "::" + tok.value
code.push(('pushConst', type), near=tree)
def compile_skip(self, tree, code):
"Compile a Skip command."
pass
def compile_boom(self, tree, code):
"Compile a THROW_ERROR command."
code.push(('THROW_ERROR', tree.children[1].value), near=tree)
def compile_proc_call(self, tree, code):
"Compile a procedure call."
procname = tree.children[1].value
args = tree.children[2].children
if self.explicit_board:
inout_var = args[0]
type_annotation = None
if hasattr(tree, 'type_annotation'):
type_annotation = tree.type_annotation
for i, arg in zip(range(len(args)), args):
self.compile_expression(arg, code)
code.push(('call', procname, len(args)), near=tree)
if self.explicit_board:
code.push(('popTo', inout_var.children[1].value), near=tree)
def compile_projectable_var_check(self, tree, code, var):
"Compile a projectable variable check. Varname is pushed to stack."
code.push(('pushConst', var), near=tree)
code.push(('call', '_checkProjectableVar', 1), near=tree)
def compile_assign_var_name(self, tree, code):
"Compile an assignment: <lvalue> := <expr>"
offsets = tree.children[2].children
if len(offsets) > 0:
#calculate assignment reference
var = tree.children[1].children[1].value
self.compile_projectable_var_check(tree, code, var)
code.push(('pushFrom', var), near=tree)
for offset in offsets:
if offset.children[0] == 'index':
self.compile_expression(offset.children[1], code)
else:
code.push(('pushConst', parse_literal(offset.children[1].children[1])), near=tree)
code.push(('call', '_getRef', 2), near=tree)
#compile expression
self.compile_expression(tree.children[3], code)
#Set ref
code.push(('call', '_SetRefValue', 2), near=tree)
else:
#compile expression
self.compile_expression(tree.children[3], code)
#assign varname
full_varname = '.'.join([tok.value for tok in tree.children[1].children[1:]])
code.push(('popTo', full_varname), near=tree)
def compile_assign_var_tuple1(self, tree, code):
"Compile a tuple assignment: (v1, ..., vN) := f(...)"
self.compile_expression(tree.children[2], code)
varnames = [var.value for var in tree.children[1].children]
for var in common.utils.seq_reversed(varnames):
code.push(('popTo', var), near=tree)
def compile_if(self, tree, code):
"Compile a conditional statement."
lelse = GbsLabel()
self.compile_expression(tree.children[1], code) # cond
        code.push(('jumpIfFalse', lelse), near=tree)
self.compile_block(tree.children[2], code) # then
if tree.children[3] is None:
code.push(('label', lelse), near=tree)
else:
lend = GbsLabel()
code.push(('jump', lend), near=tree)
code.push(('label', lelse), near=tree)
self.compile_block(tree.children[3], code) # else
code.push(('label', lend), near=tree)
def compile_case(self, tree, code):
"Compile a case statement."
# case (Value) of
# Lits1 -> {Body1}
# LitsN -> {BodyN}
# _ -> {BodyElse}
#
# Compiles to code corresponding to:
#
# value0 := Value
# if (value0 in Lits1) {Body1}
# elif (value0 in Lits2) {Body2}
# ...
# elif (value0 in LitsN) {BodyN}
# else {BodyElse}
value = tree.children[1]
value0 = self.temp_varname()
self.compile_expression(value, code)
# value0 := value
code.push(('popTo', value0), near=tree)
lend = GbsLabel()
next_label = None
for branch in tree.children[2].children:
if next_label is not None:
code.push(('label', next_label), near=tree)
if branch.children[0] == 'branch':
lits = [parse_literal(lit) for lit in branch.children[1].children]
next_label = GbsLabel()
# if value0 in LitsI
code.push(('pushFrom', value0), near=tree)
code.push(('jumpIfNotIn', lits, next_label), near=tree)
# BodyI
self.compile_block(branch.children[2], code)
code.push(('jump', lend), near=tree)
else: # defaultBranch
# BodyElse
self.compile_block(branch.children[1], code)
code.push(('label', lend), near=tree)
def compile_match(self, tree, code):
"Compile a match statement."
# match (<Expr-V>) of
# <Case-1> -> <Expr-1>
# <Case-2> -> <Expr-2>
# ...
# <Case-N> -> <Expr-N>
# _ -> <Expr-Else>
#
# Compiles to code corresponding to:
#
# case := _extract_case(<Expr-V>)
# if (case == <Case-1>) <Expr-1>
# elif (case == <Case-2>) <Expr-2>
# ...
# elif (case == <Case-N>) <Expr-N>
# else <Expr-Else>
value = tree.children[1]
value0 = self.temp_varname()
self.compile_expression(value, code)
# This is a runtime function to extract type name
code.push(('call', '_extract_case', 1), near=tree)
# value0 := value
code.push(('popTo', value0), near=tree)
lend = GbsLabel()
next_label = None
default_branch = False
for branch in tree.children[2].children:
if not next_label is None:
code.push(('label', next_label), near=tree)
if branch.children[0] == 'branch':
case_i = parse_literal(branch.children[1])
next_label = GbsLabel()
# if value0 in LitsI
code.push(('pushFrom', value0), near=tree)
code.push(('pushConst', case_i), near=tree)
code.push(('call', '==', 2), near=tree)
code.push(('jumpIfFalse', next_label), near=tree)
# BodyI
self.compile_expression(branch.children[2], code)
code.push(('jump', lend), near=tree)
else: # defaultBranch
# BodyElse
default_branch = True
self.compile_expression(branch.children[1], code)
if not default_branch:
code.push(('label', next_label), near=tree)
code.push(('THROW_ERROR', '"' + i18n.i18n('Expression has no matching branch.') + '"'), near=tree)
code.push(('label', lend), near=tree)
def compile_while(self, tree, code):
"Compile a while statement."
lbegin = GbsLabel()
lend = GbsLabel()
code.push(('label', lbegin), near=tree)
self.compile_expression(tree.children[1], code) # cond
code.push(('jumpIfFalse', lend), near=tree)
self.compile_block(tree.children[2], code) # body
code.push(('jump', lbegin), near=tree)
code.push(('label', lend), near=tree)
def compile_repeat(self, tree, code):
"Compile a repeat statement."
#
# repeat (<Expr>) <Block>
#
# Compiles to code corresponding to
# the following fragment:
#
# counter := <Expr>
# while (true) {
# if (not (counter > 0)) { break }
# <Block>
# counter := counter - 1
# }
#
times = tree.children[1]
body = tree.children[2]
counter = self.temp_varname()
lbegin = GbsLabel()
lend = GbsLabel()
# counter := <Expr>
self.compile_expression(times, code)
code.push(('popTo', counter), near=tree)
# while (true) {
code.push(('label', lbegin), near=tree)
        # if (not (counter > 0)) { break }
code.push(('pushFrom', counter), near=tree)
code.push(('pushConst', 0), near=tree)
code.push(('call', '>', 2), near=tree)
code.push(('jumpIfFalse', lend), near=tree)
# <Block>
self.compile_block(body, code)
# counter := counter - 1
code.push(('pushFrom', counter), near=tree)
code.push(('pushConst', 1), near=tree)
code.push(('call', '-', 2), near=tree)
code.push(('popTo', counter), near=tree)
# end while
code.push(('jump', lbegin), near=tree)
code.push(('label', lend), near=tree)
code.push(('delVar', counter), near=tree)
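    # Illustrative instruction listing (not emitted verbatim by the sources
    # above): for a source fragment `repeat (3) { ... }` the push() calls in
    # compile_repeat produce, roughly:
    #
    #   ('pushConst', 3)
    #   ('popTo', counter)
    #   ('label', lbegin)
    #   ('pushFrom', counter)
    #   ('pushConst', 0)
    #   ('call', '>', 2)
    #   ('jumpIfFalse', lend)
    #   ... compiled body ...
    #   ('pushFrom', counter)
    #   ('pushConst', 1)
    #   ('call', '-', 2)
    #   ('popTo', counter)
    #   ('jump', lbegin)
    #   ('label', lend)
    #   ('delVar', counter)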
def compile_foreach(self, tree, code):
"Compile a foreach statement."
#
# foreach <Index> in <List> <Block>
#
# Compiles to code corresponding to
# the following fragment:
#
# xs0 := <List>
# while (true) {
# if (isEmpty(xs0)) break;
# <Index> := head(xs0)
# setImmutable(<Index>)
# <Block>
# unsetImmutable(<Index>)
# xs0 := tail(xs)
# }
#
def jumpIfIsEmpty(var, label):
code.push(('pushFrom', var), near=tree)
code.push(('call', i18n.i18n('isEmpty'), 1), near=tree)
code.push(('call', 'not', 1), near=tree)
code.push(('jumpIfFalse', label), near=tree)
def head(listVar, var):
code.push(('pushFrom', listVar), near=tree)
code.push(('call', i18n.i18n('head'), 1), near=tree)
code.push(('popTo', var), near=tree)
def tail(listVar, var):
code.push(('pushFrom', listVar), near=tree)
code.push(('call', i18n.i18n('tail'), 1), near=tree)
code.push(('popTo', var), near=tree)
index = tree.children[1].value
list_ = tree.children[2]
body = tree.children[3]
xs0 = self.temp_varname()
lbegin = GbsLabel()
lend = GbsLabel()
lend2 = GbsLabel()
# xs0 := <List>
self.compile_expression(list_, code)
code.push(('popTo', xs0), near=tree)
# while (true) {
code.push(('label', lbegin), near=tree)
# if (isEmpty(xs0)) break;
jumpIfIsEmpty(xs0, lend)
# <Index> := head(xs0)
head(xs0, index)
# setImmutable(<Index>)
code.push(('setImmutable', index), near=tree)
# <Block>
self.compile_block(body, code)
# setImmutable(<Index>)
code.push(('unsetImmutable', index), near=tree)
# xs0 := tail(xs0)
tail(xs0, xs0)
# }
code.push(('jump', lbegin), near=tree)
code.push(('label', lend2), near=tree)
code.push(('delVar', index), near=tree)
code.push(('label', lend), near=tree)
def compile_repeat_with(self, tree, code):
"Compile a repeatWith statement."
#
# repeatWith i in Lower..Upper {BODY}
#
# Compiles to code corresponding to
# the following fragment:
#
# i := Lower
# upper0 := Upper
# if (i <= upper0) {
# while (true) {
# {BODY}
# if (i == upper0) break;
# i := next(i)
# }
# }
#
def call_next():
"""Add a VM instruction for calling the builtin 'next' function,
which operates on any iterable value.
"""
name = i18n.i18n('next')
if hasattr(tree, 'index_type_annotation'):
name = lang.gbs_builtins.polyname(
name,
[repr(tree.index_type_annotation)])
code.push(('call', name, 1), near=tree)
# upper0 is preserved in the stack
i = tree.children[1].value
limit_lower = tree.children[2].children[1]
limit_upper = tree.children[2].children[2]
body = tree.children[3]
upper0 = self.temp_varname()
lbegin = GbsLabel()
lend = GbsLabel()
# i := Lower
self.compile_expression(limit_lower, code)
code.push(('popTo', i), near=tree)
code.push(('setImmutable', i), near=tree)
# upper0 := Upper
self.compile_expression(limit_upper, code)
code.push(('popTo', upper0), near=tree)
# if i <= upper0
code.push(('pushFrom', i), near=tree)
code.push(('pushFrom', upper0), near=tree)
code.push(('call', '<=', 2), near=tree)
code.push(('jumpIfFalse', lend), near=tree)
# while true
code.push(('label', lbegin), near=tree)
# body
self.compile_block(body, code)
# if (i == upper0) break
code.push(('pushFrom', i), near=tree)
code.push(('pushFrom', upper0), near=tree)
code.push(('call', '/=', 2), near=tree)
code.push(('jumpIfFalse', lend), near=tree)
# i := next(i)
code.push(('pushFrom', i), near=tree)
call_next()
code.push(('unsetImmutable', i), near=tree)
code.push(('popTo', i), near=tree)
code.push(('setImmutable', i), near=tree)
# end while
code.push(('jump', lbegin), near=tree)
code.push(('label', lend), near=tree)
code.push(('delVar', i), near=tree)
def compile_block(self, tree, code):
"Compile a block statement."
self.compile_commands(tree.children[1], code)
def compile_return(self, tree, code):
"Compile a return statement."
vals = tree.children[1].children
for val in vals:
self.compile_expression(val, code)
if self._current_def_name == 'program':
vrs = []
expr_count = 1
for v in tree.children[1].children:
if v.children[0] == 'varName':
vrs.append(v.children[1].value)
else:
vrs.append("#%s" % (expr_count,))
expr_count += 1
if hasattr(tree, 'type_annot'):
# Decorate the return variables with their types.
types = [
repr(subtype)
for subtype in tree.type_annot.subtypes()
]
vrs = [
lang.gbs_builtins.polyname(vname, [vtype])
for vname, vtype in zip(vrs, types)
]
code.push(('returnVars', len(vals), vrs), near=tree)
else:
code.push(('return', len(vals)), near=tree)
#### Expressions
def compile_expression(self, tree, code):
"Compile an expression."
exptype = tree.children[0]
dispatch = {
'or': self.compile_or,
'and': self.compile_and,
'not': self.compile_not,
'relop': self.compile_binary_op,
'addsub': self.compile_binary_op,
'mul': self.compile_binary_op,
'divmod': self.compile_binary_op,
'pow': self.compile_binary_op,
'listop': self.compile_binary_op,
'projection': self.compile_binary_op,
'constructor': self.compile_func_call,
'varName': self.compile_var_name,
'funcCall': self.compile_func_call,
'match': self.compile_match,
'unaryMinus': self.compile_unary_minus,
'literal': self.compile_literal,
'type': self.compile_type,
}
if exptype in dispatch:
dispatch[exptype](tree, code)
else:
msg = i18n.i18n('Unknown expression: %s') % (exptype,)
area = common.position.ProgramAreaNear(tree)
raise GbsCompileException(msg, area)
def get_type_annotation(self, tree):
if hasattr(tree, 'type_annotation'):
return tree.type_annotation
else:
return None
def compile_binary_op(self, tree, code):
"Compile a binary operator expression."
type_annotation = self.get_type_annotation(tree)
self.compile_expression(tree.children[2], code)
self.compile_expression(tree.children[3], code)
code.push(('call', tree.children[1].value, 2), near=tree)
def compile_not(self, tree, code):
"Compile a boolean not expression."
self.compile_expression(tree.children[1], code)
code.push(('call', 'not', 1), near=tree)
def compile_or(self, tree, code):
"Compile a short-circuiting disjunction."
lcontinue = GbsLabel()
lend = GbsLabel()
type_annotation = self.get_type_annotation(tree)
self.compile_expression(tree.children[2], code)
code.push(('jumpIfFalse', lcontinue), near=tree)
code.push(('pushConst', lang.gbs_builtins.parse_constant('True')),
near=tree)
code.push(('jump', lend), near=tree)
code.push(('label', lcontinue), near=tree)
self.compile_expression(tree.children[3], code)
code.push(('label', lend), near=tree)
def compile_and(self, tree, code):
"Compile a short-circuiting conjunction."
lcontinue = GbsLabel()
lend = GbsLabel()
type_annotation = self.get_type_annotation(tree)
self.compile_expression(tree.children[2], code)
code.push(('jumpIfFalse', lcontinue), near=tree)
self.compile_expression(tree.children[3], code)
code.push(('jump', lend), near=tree)
code.push(('label', lcontinue), near=tree)
code.push(('pushConst', lang.gbs_builtins.parse_constant('False')),
near=tree)
code.push(('label', lend), near=tree)
def compile_unary_minus(self, tree, code):
"Compile a unary minus expression."
funcname = 'unary-'
args = tree.children[1:]
self._compile_func_call_poly(tree, funcname, args, code)
def compile_var_name(self, tree, code):
"Compile a variable name expression."
offsets = tree.children[2].children
var = tree.children[1].value
code.push(('pushFrom', var), near=tree)
if len(offsets) > 0:
self.compile_projectable_var_check(tree, code, var)
#calculate assignment reference
for offset in offsets:
self.compile_expression(offset.children[1], code)
code.push(('call', '_getRef', 2), near=tree)
code.push(('call', '_getRefValue', 1), near=tree)
def compile_func_call(self, tree, code):
"Compile a function call."
funcname = tree.children[1].value
args = tree.children[2].children
if lang.gbs_builtins.is_defined(funcname) or funcname in self.user_defined_routine_names:
self._compile_func_call_poly(tree, funcname, args, code)
else:
self._compile_field_getter(tree, funcname, args, code)
def _compile_field_getter(self, tree, field_name, args, code):
self.compile_expression(args[0], code)
field = tree.children[1]
field.type = 'symbol'
code.push(('pushConst', parse_literal(field)), near=tree)
code.push(('call', '_get_field', 2), near=tree)
def _compile_func_call_poly(self, tree, funcname, args, code):
"Compile a potentially polymorphic function call."
polys = lang.gbs_builtins.BUILTINS_POLYMORPHIC
annotate = True
annotate = annotate and funcname in polys
annotate = annotate and hasattr(tree, 'type_annotation')
annotate = annotate and isinstance(tree.type_annotation, list)
type_annotation = None
if hasattr(tree, 'type_annotation'):
type_annotation = tree.type_annotation
for i, arg in zip(range(len(args)),args):
self.compile_expression(arg, code)
if annotate:
funcname = lang.gbs_builtins.polyname(
funcname,
[repr(ann) for ann in tree.type_annotation])
code.push(('call', funcname, len(args)), near=tree)
def compile_literal(self, tree, code):
"Compile a constant expression."
tok = tree.children[1]
code.push(('pushConst', parse_literal(tok)), near=tree)
def compile_program(tree):
"Compile a full Gobstones program."
compiler = GbsCompiler()
return compiler.compile_program(tree)
| gpl-3.0 | -6,050,560,207,559,410,000 | 37.899338 | 110 | 0.548367 | false |
altair-viz/altair | altair/utils/core.py | 1 | 20886 | """
Utility routines
"""
from collections.abc import Mapping
from copy import deepcopy
import json
import itertools
import re
import sys
import traceback
import warnings
import jsonschema
import pandas as pd
import numpy as np
from .schemapi import SchemaBase, Undefined
try:
from pandas.api.types import infer_dtype as _infer_dtype
except ImportError:
# Import for pandas < 0.20.0
from pandas.lib import infer_dtype as _infer_dtype
def infer_dtype(value):
"""Infer the dtype of the value.
This is a compatibility function for pandas infer_dtype,
with skipna=False regardless of the pandas version.
"""
if not hasattr(infer_dtype, "_supports_skipna"):
try:
_infer_dtype([1], skipna=False)
except TypeError:
            # pandas < 0.21.0 doesn't support the skipna keyword
infer_dtype._supports_skipna = False
else:
infer_dtype._supports_skipna = True
if infer_dtype._supports_skipna:
return _infer_dtype(value, skipna=False)
else:
return _infer_dtype(value)
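# Illustrative behaviour of the wrapper above (editor's sketch, not part of the
# original module): because skipna=False is forced, missing values are not
# dropped before inference, so a column containing None alongside other values
# can report a different kind (e.g. 'mixed') than its non-null values alone would.
#
#   >>> infer_dtype(pd.Series([1.0, 2.0]))
#   'floating'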
TYPECODE_MAP = {
"ordinal": "O",
"nominal": "N",
"quantitative": "Q",
"temporal": "T",
"geojson": "G",
}
INV_TYPECODE_MAP = {v: k for k, v in TYPECODE_MAP.items()}
# aggregates from vega-lite version 4.6.0
AGGREGATES = [
"argmax",
"argmin",
"average",
"count",
"distinct",
"max",
"mean",
"median",
"min",
"missing",
"product",
"q1",
"q3",
"ci0",
"ci1",
"stderr",
"stdev",
"stdevp",
"sum",
"valid",
"values",
"variance",
"variancep",
]
# window aggregates from vega-lite version 4.6.0
WINDOW_AGGREGATES = [
"row_number",
"rank",
"dense_rank",
"percent_rank",
"cume_dist",
"ntile",
"lag",
"lead",
"first_value",
"last_value",
"nth_value",
]
# timeUnits from vega-lite version 4.6.0
TIMEUNITS = [
"utcyear",
"utcquarter",
"utcmonth",
"utcday",
"utcdate",
"utchours",
"utcminutes",
"utcseconds",
"utcmilliseconds",
"utcyearquarter",
"utcyearquartermonth",
"utcyearmonth",
"utcyearmonthdate",
"utcyearmonthdatehours",
"utcyearmonthdatehoursminutes",
"utcyearmonthdatehoursminutesseconds",
"utcquartermonth",
"utcmonthdate",
"utcmonthdatehours",
"utchoursminutes",
"utchoursminutesseconds",
"utcminutesseconds",
"utcsecondsmilliseconds",
"year",
"quarter",
"month",
"day",
"date",
"hours",
"minutes",
"seconds",
"milliseconds",
"yearquarter",
"yearquartermonth",
"yearmonth",
"yearmonthdate",
"yearmonthdatehours",
"yearmonthdatehoursminutes",
"yearmonthdatehoursminutesseconds",
"quartermonth",
"monthdate",
"monthdatehours",
"hoursminutes",
"hoursminutesseconds",
"minutesseconds",
"secondsmilliseconds",
]
def infer_vegalite_type(data):
"""
From an array-like input, infer the correct vega typecode
('ordinal', 'nominal', 'quantitative', or 'temporal')
Parameters
----------
data: Numpy array or Pandas Series
"""
# Otherwise, infer based on the dtype of the input
typ = infer_dtype(data)
# TODO: Once this returns 'O', please update test_select_x and test_select_y in test_api.py
if typ in [
"floating",
"mixed-integer-float",
"integer",
"mixed-integer",
"complex",
]:
return "quantitative"
elif typ in ["string", "bytes", "categorical", "boolean", "mixed", "unicode"]:
return "nominal"
elif typ in [
"datetime",
"datetime64",
"timedelta",
"timedelta64",
"date",
"time",
"period",
]:
return "temporal"
else:
warnings.warn(
"I don't know how to infer vegalite type from '{}'. "
"Defaulting to nominal.".format(typ)
)
return "nominal"
def merge_props_geom(feat):
"""
Merge properties with geometry
* Overwrites 'type' and 'geometry' entries if existing
"""
geom = {k: feat[k] for k in ("type", "geometry")}
try:
feat["properties"].update(geom)
props_geom = feat["properties"]
except (AttributeError, KeyError):
# AttributeError when 'properties' equals None
# KeyError when 'properties' is non-existing
props_geom = geom
return props_geom
def sanitize_geo_interface(geo):
"""Santize a geo_interface to prepare it for serialization.
* Make a copy
* Convert type array or _Array to list
* Convert tuples to lists (using json.loads/dumps)
* Merge properties with geometry
"""
geo = deepcopy(geo)
# convert type _Array or array to list
for key in geo.keys():
if str(type(geo[key]).__name__).startswith(("_Array", "array")):
geo[key] = geo[key].tolist()
# convert (nested) tuples to lists
geo = json.loads(json.dumps(geo))
# sanitize features
if geo["type"] == "FeatureCollection":
geo = geo["features"]
if len(geo) > 0:
for idx, feat in enumerate(geo):
geo[idx] = merge_props_geom(feat)
elif geo["type"] == "Feature":
geo = merge_props_geom(geo)
else:
geo = {"type": "Feature", "geometry": geo}
return geo
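# Hedged usage note (editor's addition): the argument is typically the dict
# exposed by an object's __geo_interface__ attribute, e.g. a geopandas
# GeoDataFrame or a shapely geometry. geopandas is an assumption in this
# sketch, not a dependency of this module.
#
#   >>> geojson = sanitize_geo_interface(gdf.__geo_interface__)  # gdf: GeoDataFrame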
def sanitize_dataframe(df): # noqa: C901
"""Sanitize a DataFrame to prepare it for serialization.
* Make a copy
* Convert RangeIndex columns to strings
* Raise ValueError if column names are not strings
* Raise ValueError if it has a hierarchical index.
* Convert categoricals to strings.
* Convert np.bool_ dtypes to Python bool objects
* Convert np.int dtypes to Python int objects
* Convert floats to objects and replace NaNs/infs with None.
* Convert DateTime dtypes into appropriate string representations
* Convert Nullable integers to objects and replace NaN with None
* Convert Nullable boolean to objects and replace NaN with None
* convert dedicated string column to objects and replace NaN with None
* Raise a ValueError for TimeDelta dtypes
"""
df = df.copy()
if isinstance(df.columns, pd.RangeIndex):
df.columns = df.columns.astype(str)
for col in df.columns:
if not isinstance(col, str):
raise ValueError(
"Dataframe contains invalid column name: {0!r}. "
"Column names must be strings".format(col)
)
if isinstance(df.index, pd.MultiIndex):
raise ValueError("Hierarchical indices not supported")
if isinstance(df.columns, pd.MultiIndex):
raise ValueError("Hierarchical indices not supported")
def to_list_if_array(val):
if isinstance(val, np.ndarray):
return val.tolist()
else:
return val
for col_name, dtype in df.dtypes.iteritems():
if str(dtype) == "category":
# XXXX: work around bug in to_json for categorical types
# https://github.com/pydata/pandas/issues/10778
col = df[col_name].astype(object)
df[col_name] = col.where(col.notnull(), None)
elif str(dtype) == "string":
# dedicated string datatype (since 1.0)
# https://pandas.pydata.org/pandas-docs/version/1.0.0/whatsnew/v1.0.0.html#dedicated-string-data-type
col = df[col_name].astype(object)
df[col_name] = col.where(col.notnull(), None)
elif str(dtype) == "bool":
# convert numpy bools to objects; np.bool is not JSON serializable
df[col_name] = df[col_name].astype(object)
elif str(dtype) == "boolean":
# dedicated boolean datatype (since 1.0)
# https://pandas.io/docs/user_guide/boolean.html
col = df[col_name].astype(object)
df[col_name] = col.where(col.notnull(), None)
elif str(dtype).startswith("datetime"):
# Convert datetimes to strings. This needs to be a full ISO string
# with time, which is why we cannot use ``col.astype(str)``.
# This is because Javascript parses date-only times in UTC, but
# parses full ISO-8601 dates as local time, and dates in Vega and
# Vega-Lite are displayed in local time by default.
# (see https://github.com/altair-viz/altair/issues/1027)
df[col_name] = (
df[col_name].apply(lambda x: x.isoformat()).replace("NaT", "")
)
elif str(dtype).startswith("timedelta"):
raise ValueError(
'Field "{col_name}" has type "{dtype}" which is '
"not supported by Altair. Please convert to "
"either a timestamp or a numerical value."
"".format(col_name=col_name, dtype=dtype)
)
elif str(dtype).startswith("geometry"):
# geopandas >=0.6.1 uses the dtype geometry. Continue here
# otherwise it will give an error on np.issubdtype(dtype, np.integer)
continue
elif str(dtype) in {
"Int8",
"Int16",
"Int32",
"Int64",
"UInt8",
"UInt16",
"UInt32",
"UInt64",
        }:  # nullable integer datatypes (since 0.24.0)
# https://pandas.pydata.org/pandas-docs/version/0.25/whatsnew/v0.24.0.html#optional-integer-na-support
col = df[col_name].astype(object)
df[col_name] = col.where(col.notnull(), None)
elif np.issubdtype(dtype, np.integer):
# convert integers to objects; np.int is not JSON serializable
df[col_name] = df[col_name].astype(object)
elif np.issubdtype(dtype, np.floating):
# For floats, convert to Python float: np.float is not JSON serializable
# Also convert NaN/inf values to null, as they are not JSON serializable
col = df[col_name]
bad_values = col.isnull() | np.isinf(col)
df[col_name] = col.astype(object).where(~bad_values, None)
elif dtype == object:
# Convert numpy arrays saved as objects to lists
# Arrays are not JSON serializable
col = df[col_name].apply(to_list_if_array, convert_dtype=False)
df[col_name] = col.where(col.notnull(), None)
return df
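# Minimal sketch of the effect (editor's addition): float NaN/inf become None
# so the frame serializes to valid JSON, and RangeIndex column labels are
# converted to strings.
#
#   >>> df = pd.DataFrame({'x': [1.0, np.nan]})
#   >>> sanitize_dataframe(df)['x'].tolist()
#   [1.0, None]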
def parse_shorthand(
shorthand,
data=None,
parse_aggregates=True,
parse_window_ops=False,
parse_timeunits=True,
parse_types=True,
):
"""General tool to parse shorthand values
These are of the form:
- "col_name"
- "col_name:O"
- "average(col_name)"
- "average(col_name):O"
Optionally, a dataframe may be supplied, from which the type
will be inferred if not specified in the shorthand.
Parameters
----------
shorthand : dict or string
The shorthand representation to be parsed
data : DataFrame, optional
If specified and of type DataFrame, then use these values to infer the
column type if not provided by the shorthand.
parse_aggregates : boolean
If True (default), then parse aggregate functions within the shorthand.
parse_window_ops : boolean
        If True, then parse window operations within the shorthand (default: False)
parse_timeunits : boolean
If True (default), then parse timeUnits from within the shorthand
parse_types : boolean
If True (default), then parse typecodes within the shorthand
Returns
-------
attrs : dict
a dictionary of attributes extracted from the shorthand
Examples
--------
>>> data = pd.DataFrame({'foo': ['A', 'B', 'A', 'B'],
... 'bar': [1, 2, 3, 4]})
>>> parse_shorthand('name') == {'field': 'name'}
True
>>> parse_shorthand('name:Q') == {'field': 'name', 'type': 'quantitative'}
True
>>> parse_shorthand('average(col)') == {'aggregate': 'average', 'field': 'col'}
True
>>> parse_shorthand('foo:O') == {'field': 'foo', 'type': 'ordinal'}
True
>>> parse_shorthand('min(foo):Q') == {'aggregate': 'min', 'field': 'foo', 'type': 'quantitative'}
True
>>> parse_shorthand('month(col)') == {'field': 'col', 'timeUnit': 'month', 'type': 'temporal'}
True
>>> parse_shorthand('year(col):O') == {'field': 'col', 'timeUnit': 'year', 'type': 'ordinal'}
True
>>> parse_shorthand('foo', data) == {'field': 'foo', 'type': 'nominal'}
True
>>> parse_shorthand('bar', data) == {'field': 'bar', 'type': 'quantitative'}
True
>>> parse_shorthand('bar:O', data) == {'field': 'bar', 'type': 'ordinal'}
True
>>> parse_shorthand('sum(bar)', data) == {'aggregate': 'sum', 'field': 'bar', 'type': 'quantitative'}
True
>>> parse_shorthand('count()', data) == {'aggregate': 'count', 'type': 'quantitative'}
True
"""
if not shorthand:
return {}
valid_typecodes = list(TYPECODE_MAP) + list(INV_TYPECODE_MAP)
units = dict(
field="(?P<field>.*)",
type="(?P<type>{})".format("|".join(valid_typecodes)),
agg_count="(?P<aggregate>count)",
op_count="(?P<op>count)",
aggregate="(?P<aggregate>{})".format("|".join(AGGREGATES)),
window_op="(?P<op>{})".format("|".join(AGGREGATES + WINDOW_AGGREGATES)),
timeUnit="(?P<timeUnit>{})".format("|".join(TIMEUNITS)),
)
patterns = []
if parse_aggregates:
patterns.extend([r"{agg_count}\(\)"])
patterns.extend([r"{aggregate}\({field}\)"])
if parse_window_ops:
patterns.extend([r"{op_count}\(\)"])
patterns.extend([r"{window_op}\({field}\)"])
if parse_timeunits:
patterns.extend([r"{timeUnit}\({field}\)"])
patterns.extend([r"{field}"])
if parse_types:
patterns = list(itertools.chain(*((p + ":{type}", p) for p in patterns)))
regexps = (
re.compile(r"\A" + p.format(**units) + r"\Z", re.DOTALL) for p in patterns
)
# find matches depending on valid fields passed
if isinstance(shorthand, dict):
attrs = shorthand
else:
attrs = next(
exp.match(shorthand).groupdict() for exp in regexps if exp.match(shorthand)
)
# Handle short form of the type expression
if "type" in attrs:
attrs["type"] = INV_TYPECODE_MAP.get(attrs["type"], attrs["type"])
# counts are quantitative by default
if attrs == {"aggregate": "count"}:
attrs["type"] = "quantitative"
# times are temporal by default
if "timeUnit" in attrs and "type" not in attrs:
attrs["type"] = "temporal"
# if data is specified and type is not, infer type from data
if isinstance(data, pd.DataFrame) and "type" not in attrs:
if "field" in attrs and attrs["field"] in data.columns:
attrs["type"] = infer_vegalite_type(data[attrs["field"]])
return attrs
def use_signature(Obj):
"""Apply call signature and documentation of Obj to the decorated method"""
def decorate(f):
# call-signature of f is exposed via __wrapped__.
# we want it to mimic Obj.__init__
f.__wrapped__ = Obj.__init__
f._uses_signature = Obj
# Supplement the docstring of f with information from Obj
if Obj.__doc__:
doclines = Obj.__doc__.splitlines()
if f.__doc__:
doc = f.__doc__ + "\n".join(doclines[1:])
else:
doc = "\n".join(doclines)
try:
f.__doc__ = doc
except AttributeError:
# __doc__ is not modifiable for classes in Python < 3.3
pass
return f
return decorate
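# Typical use (editor's sketch; the decorated name and the MarkDef target are
# illustrative, not a statement about where altair actually applies this):
#
#   @use_signature(core.MarkDef)
#   def mark_point(self, **kwds):
#       ...
#
# The wrapped callable then advertises MarkDef.__init__'s call signature via
# __wrapped__ and has MarkDef's docstring appended to its own.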
def update_subtraits(obj, attrs, **kwargs):
"""Recursively update sub-traits without overwriting other traits"""
# TODO: infer keywords from args
if not kwargs:
return obj
# obj can be a SchemaBase object or a dict
if obj is Undefined:
obj = dct = {}
elif isinstance(obj, SchemaBase):
dct = obj._kwds
else:
dct = obj
if isinstance(attrs, str):
attrs = (attrs,)
if len(attrs) == 0:
dct.update(kwargs)
else:
attr = attrs[0]
trait = dct.get(attr, Undefined)
if trait is Undefined:
trait = dct[attr] = {}
dct[attr] = update_subtraits(trait, attrs[1:], **kwargs)
return obj
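# Worked example (editor's addition), following the recursion above:
#
#   >>> update_subtraits({}, ('encoding', 'x'), scale={'domain': [0, 1]})
#   {'encoding': {'x': {'scale': {'domain': [0, 1]}}}}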
def update_nested(original, update, copy=False):
"""Update nested dictionaries
Parameters
----------
original : dict
the original (nested) dictionary, which will be updated in-place
update : dict
the nested dictionary of updates
copy : bool, default False
if True, then copy the original dictionary rather than modifying it
Returns
-------
original : dict
a reference to the (modified) original dict
Examples
--------
>>> original = {'x': {'b': 2, 'c': 4}}
>>> update = {'x': {'b': 5, 'd': 6}, 'y': 40}
>>> update_nested(original, update) # doctest: +SKIP
{'x': {'b': 5, 'c': 4, 'd': 6}, 'y': 40}
>>> original # doctest: +SKIP
{'x': {'b': 5, 'c': 4, 'd': 6}, 'y': 40}
"""
if copy:
original = deepcopy(original)
for key, val in update.items():
if isinstance(val, Mapping):
orig_val = original.get(key, {})
if isinstance(orig_val, Mapping):
original[key] = update_nested(orig_val, val)
else:
original[key] = val
else:
original[key] = val
return original
def display_traceback(in_ipython=True):
exc_info = sys.exc_info()
if in_ipython:
from IPython.core.getipython import get_ipython
ip = get_ipython()
else:
ip = None
if ip is not None:
ip.showtraceback(exc_info)
else:
traceback.print_exception(*exc_info)
def infer_encoding_types(args, kwargs, channels):
"""Infer typed keyword arguments for args and kwargs
Parameters
----------
args : tuple
List of function args
kwargs : dict
Dict of function kwargs
channels : module
The module containing all altair encoding channel classes.
Returns
-------
kwargs : dict
All args and kwargs in a single dict, with keys and types
based on the channels mapping.
"""
# Construct a dictionary of channel type to encoding name
# TODO: cache this somehow?
channel_objs = (getattr(channels, name) for name in dir(channels))
channel_objs = (
c for c in channel_objs if isinstance(c, type) and issubclass(c, SchemaBase)
)
channel_to_name = {c: c._encoding_name for c in channel_objs}
name_to_channel = {}
for chan, name in channel_to_name.items():
chans = name_to_channel.setdefault(name, {})
key = "value" if chan.__name__.endswith("Value") else "field"
chans[key] = chan
# First use the mapping to convert args to kwargs based on their types.
for arg in args:
if isinstance(arg, (list, tuple)) and len(arg) > 0:
type_ = type(arg[0])
else:
type_ = type(arg)
encoding = channel_to_name.get(type_, None)
if encoding is None:
raise NotImplementedError("positional of type {}" "".format(type_))
if encoding in kwargs:
raise ValueError("encoding {} specified twice.".format(encoding))
kwargs[encoding] = arg
def _wrap_in_channel_class(obj, encoding):
try:
condition = obj["condition"]
except (KeyError, TypeError):
pass
else:
if condition is not Undefined:
obj = obj.copy()
obj["condition"] = _wrap_in_channel_class(condition, encoding)
if isinstance(obj, SchemaBase):
return obj
if isinstance(obj, str):
obj = {"shorthand": obj}
if isinstance(obj, (list, tuple)):
return [_wrap_in_channel_class(subobj, encoding) for subobj in obj]
if encoding not in name_to_channel:
warnings.warn("Unrecognized encoding channel '{}'".format(encoding))
return obj
classes = name_to_channel[encoding]
cls = classes["value"] if "value" in obj else classes["field"]
try:
# Don't force validation here; some objects won't be valid until
# they're created in the context of a chart.
return cls.from_dict(obj, validate=False)
except jsonschema.ValidationError:
# our attempts at finding the correct class have failed
return obj
return {
encoding: _wrap_in_channel_class(obj, encoding)
for encoding, obj in kwargs.items()
}
| bsd-3-clause | 6,587,024,043,765,292,000 | 29.269565 | 114 | 0.584123 | false |
seanbell/opensurfaces | server/photos/migrations/0011_auto__del_field_flickruser_displayname__del_field_flickruser_subname__.py | 1 | 29209 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'FlickrUser.displayname'
db.delete_column(u'photos_flickruser', 'displayname')
# Deleting field 'FlickrUser.subname'
db.delete_column(u'photos_flickruser', 'subname')
# Adding field 'FlickrUser.display_name'
db.add_column(u'photos_flickruser', 'display_name',
self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True),
keep_default=False)
# Adding field 'FlickrUser.sub_name'
db.add_column(u'photos_flickruser', 'sub_name',
self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True),
keep_default=False)
def backwards(self, orm):
# Adding field 'FlickrUser.displayname'
db.add_column(u'photos_flickruser', 'displayname',
self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True),
keep_default=False)
# Adding field 'FlickrUser.subname'
db.add_column(u'photos_flickruser', 'subname',
self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True),
keep_default=False)
# Deleting field 'FlickrUser.display_name'
db.delete_column(u'photos_flickruser', 'display_name')
# Deleting field 'FlickrUser.sub_name'
db.delete_column(u'photos_flickruser', 'sub_name')
models = {
u'accounts.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'always_approve': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'blocked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'blocked_reason': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'exclude_from_aggregation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mturk_worker_id': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'user'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['auth.User']"})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'licenses.license': {
'Meta': {'object_name': 'License'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'cc_attribution': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'cc_no_deriv': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'cc_noncommercial': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'cc_share_alike': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'creative_commons': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'publishable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'})
},
u'mturk.experiment': {
'Meta': {'ordering': "['slug', 'variant']", 'unique_together': "(('slug', 'variant'),)", 'object_name': 'Experiment'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'completed_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'cubam_dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'examples_group_attr': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'has_tutorial': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'module': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'new_hit_settings': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'experiments'", 'null': 'True', 'to': u"orm['mturk.ExperimentSettings']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'template_dir': ('django.db.models.fields.CharField', [], {'default': "'mturk/experiments'", 'max_length': '255'}),
'test_contents_per_assignment': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'variant': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'version': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
u'mturk.experimentsettings': {
'Meta': {'object_name': 'ExperimentSettings'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'auto_add_hits': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_approval_delay': ('django.db.models.fields.IntegerField', [], {'default': '2592000'}),
'content_filter': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'experiment_settings_in'", 'to': u"orm['contenttypes.ContentType']"}),
'contents_per_hit': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'duration': ('django.db.models.fields.IntegerField', [], {'default': '1800'}),
'feedback_bonus': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '4', 'blank': 'True'}),
'frame_height': ('django.db.models.fields.IntegerField', [], {'default': '800'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'lifetime': ('django.db.models.fields.IntegerField', [], {'default': '2678400'}),
'max_active_hits': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'max_total_hits': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'min_output_consensus': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'num_outputs_max': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'out_content_attr': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'out_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'experiment_settings_out'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'out_count_ratio': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'qualifications': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'requirements': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'reward': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '4'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'mturk.experimenttestcontent': {
'Meta': {'ordering': "['-id']", 'object_name': 'ExperimentTestContent'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'test_contents'", 'to': u"orm['mturk.Experiment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'priority': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'})
},
u'mturk.mtassignment': {
'Meta': {'object_name': 'MtAssignment'},
'accept_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'action_log': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'approval_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'approve_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'auto_approval_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'bonus': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'bonus_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'feedback': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'feedback_bonus_given': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_feedback': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hit': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': u"orm['mturk.MtHit']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'primary_key': 'True'}),
'manually_rejected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'num_test_contents': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'num_test_correct': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'num_test_incorrect': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'partially_completed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'post_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'post_meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'reject_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'rejection_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'screen_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'screen_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'submission_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'submit_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'test_contents': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'assignments'", 'symmetrical': 'False', 'to': u"orm['mturk.ExperimentTestContent']"}),
'time_active_ms': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_load_ms': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_ms': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user_agent': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'wage': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'worker': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.UserProfile']", 'null': 'True', 'blank': 'True'})
},
u'mturk.mthit': {
'Meta': {'object_name': 'MtHit'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'all_submitted_assignments': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'any_submitted_assignments': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'compatible_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hit_status': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'hit_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hits'", 'to': u"orm['mturk.MtHitType']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'primary_key': 'True'}),
'incompatible_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'lifetime': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'max_assignments': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'num_assignments_available': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'num_assignments_completed': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'num_assignments_pending': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'num_contents': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'out_count_ratio': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'review_status': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'sandbox': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'mturk.mthittype': {
'Meta': {'object_name': 'MtHitType'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'auto_approval_delay': ('django.db.models.fields.IntegerField', [], {'default': '2592000'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'duration': ('django.db.models.fields.IntegerField', [], {'default': '3600'}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hit_types'", 'to': u"orm['mturk.Experiment']"}),
'experiment_settings': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hit_types'", 'to': u"orm['mturk.ExperimentSettings']"}),
'external_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'feedback_bonus': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'frame_height': ('django.db.models.fields.IntegerField', [], {'default': '800'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'primary_key': 'True'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'reward': ('django.db.models.fields.DecimalField', [], {'default': "'0.01'", 'max_digits': '8', 'decimal_places': '4'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'photos.flickruser': {
'Meta': {'ordering': "['-id']", 'object_name': 'FlickrUser'},
'blacklisted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'family_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'given_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sub_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '127'}),
'website_name': ('django.db.models.fields.CharField', [], {'max_length': '1023', 'blank': 'True'}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '1023', 'blank': 'True'})
},
u'photos.photo': {
'Meta': {'ordering': "['aspect_ratio', '-id']", 'object_name': 'Photo'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'aspect_ratio': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'exif': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'flickr_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'flickr_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'photos'", 'null': 'True', 'to': u"orm['photos.FlickrUser']"}),
'focal_y': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'fov': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_orig': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'inappropriate': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'photos'", 'null': 'True', 'to': u"orm['licenses.License']"}),
'light_stack': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'photos'", 'null': 'True', 'to': u"orm['photos.PhotoLightStack']"}),
'md5': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'median_intrinsic_error': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'nonperspective': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'num_intrinsic_comparisons': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'num_intrinsic_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'num_shapes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'num_vertices': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'orig_height': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'orig_width': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'rotated': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'scene_category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'photos'", 'null': 'True', 'to': u"orm['photos.PhotoSceneCategory']"}),
'scene_category_correct': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'scene_category_correct_method': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'scene_category_correct_score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'stylized': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'synthetic': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.UserProfile']"}),
'vanishing_length': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'vanishing_lines': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'vanishing_points': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'whitebalanced': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'whitebalanced_score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'photos.photolightstack': {
'Meta': {'ordering': "['-id']", 'object_name': 'PhotoLightStack'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'photos.photoscenecategory': {
'Meta': {'ordering': "['name']", 'object_name': 'PhotoSceneCategory'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '127'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['photos.PhotoSceneCategory']", 'null': 'True', 'blank': 'True'})
},
u'photos.photoscenequalitylabel': {
'Meta': {'ordering': "['photo', '-time_ms']", 'object_name': 'PhotoSceneQualityLabel'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'correct': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mturk_assignment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['mturk.MtAssignment']"}),
'photo': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'scene_qualities'", 'to': u"orm['photos.Photo']"}),
'quality_method': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'reward': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '4', 'blank': 'True'}),
'sandbox': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'time_active_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'time_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.UserProfile']"})
},
u'photos.photowhitebalancelabel': {
'Meta': {'ordering': "['photo', '-time_ms']", 'object_name': 'PhotoWhitebalanceLabel'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'chroma_median': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mturk_assignment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['mturk.MtAssignment']"}),
'num_points': ('django.db.models.fields.IntegerField', [], {}),
'photo': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'whitebalances'", 'to': u"orm['photos.Photo']"}),
'points': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'quality_method': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'reward': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '4', 'blank': 'True'}),
'sandbox': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'time_active_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'time_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.UserProfile']"}),
'whitebalanced': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
}
}
complete_apps = ['photos'] | mit | 2,888,323,342,558,164,000 | 86.717718 | 209 | 0.561471 | false |
sacharya/nova | nova/api/openstack/compute/schemas/v3/flavor_access_schema.py | 1 | 1776 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
add_tenant_access = {
'type': 'object',
'properties': {
'add_tenant_access': {
'type': 'object',
'properties': {
'tenant_id': {
# defined from project_id in instance_type_projects table
'type': 'string', 'minLength': 1, 'maxLength': 255,
},
},
'required': ['tenant_id'],
'additionalProperties': False,
},
},
'required': ['add_tenant_access'],
'additionalProperties': False,
}
remove_tenant_access = {
'type': 'object',
'properties': {
'remove_tenant_access': {
'type': 'object',
'properties': {
'tenant_id': {
# defined from project_id in instance_type_projects table
'type': 'string', 'minLength': 1, 'maxLength': 255,
},
},
'required': ['tenant_id'],
'additionalProperties': False,
},
},
'required': ['remove_tenant_access'],
'additionalProperties': False,
}
| apache-2.0 | -7,900,809,347,432,931,000 | 31.888889 | 78 | 0.560248 | false |
msg/g2ools | nord/g2/file.py | 1 | 33799 | #!/usr/bin/env python2
#
# Copyright (c) 2006,2007 Matt Gerassimoff
#
# This file is part of g2ools.
#
# g2ools is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# g2ools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with g2ools; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import string, sys
from struct import pack, unpack
import nord.g2.modules
from nord import printf
from nord.module import Module
from nord.file import hexdump, binhexdump
from nord.file import Patch, Performance, Note, Cable, Knob, Ctrl, MorphMap
from nord.g2 import modules
from nord.g2.crc import crc
from nord.g2.bits import setbits, getbits, BitStream
section_debug = 0 # outputs section debug
title_section = 0 # replace end of section with section title
NVARIATIONS = 9 # 1-8, init
NMORPHS = 8 # 8 morphs
NKNOBS = 120 # 120 knob settings
NMORPHMAPS = 25 # max morphmaps per variation
FX, VOICE, SETTINGS = 0, 1, 2
class G2Error(Exception):
'''G2Error - exception for throwing an unrecoverable error.'''
def __init__(self, value):
Exception.__init__(self)
self.value = value
def __str__(self):
return repr(self.value)
def read_string(bitstream, l, pad=False):
read_str = bitstream.read_str
if pad == True:
return read_str(l).strip('\0')
else:
s = bytearray(l)
for i in xrange(l):
s[i] = read_str(1)
if s[i] == 0:
return str(s[:i])
return str(s[:l])
def format_string(s, l, pad=False):
if len(s) < l:
s = s + '\0'
if pad == True:
s = s.ljust(l, '\0')
return s[:l] # in case the string is longer
def write_string(bitstream, s, l, pad=False):
bitstream.write_str(format_string(s, l, pad))
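# Editor's sketch of the string helpers above: fields are NUL terminated and,
# when pad is set, NUL padded out to the fixed field width; longer names are
# silently truncated.
#
#   >>> format_string('OscB', 7, pad=True)
#   'OscB\x00\x00\x00'
#   >>> format_string('VeryLongName', 7)
#   'VeryLon'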
def get_patch_area(patch, area):
return [patch.fx, patch.voice][area]
class Section(object):
'''Section abstract class that represents a section of .pch2 file.
all sections objects have parse() and format() methods.
'''
default = [0] * (2 << 10) # max 64k section size
def __init__(self, **kw):
self.__dict__ = kw
self.data = bytearray(64<<10)
class SectionManager(object):
def add(self, class_):
self.__dict__[class_.type] = class_
def get(self, type, default=None):
return self.__dict__.get(type, default)
section_manager = SectionManager()
class Description(object):
'''Description class for patch/performance description.'''
pass
class PatchDescription(Section):
'''PatchDescription Section subclass'''
type = 0x21
description_attrs = [
['reserved', 5], ['voices', 5], ['height', 14], ['unk2', 3],
['red', 1], ['blue', 1], ['yellow', 1], ['orange', 1],
['green', 1], ['purple', 1], ['white', 1],
['monopoly', 2], ['variation', 8], ['category', 8],
]
def parse_description(self, description, bitstream):
for name, nbits in self.description_attrs:
setattr(description, name, bitstream.read_bits(nbits))
def format_description(self, description, bitstream):
for name, nbits in self.description_attrs:
bitstream.write_bits(nbits, getattr(description, name))
bitstream.write_bits(8, 0)
def parse(self, patch, data):
description = patch.description = Description() # G2Patch
bitstream = BitStream(data, 7*8)
self.parse_description(patch.description, bitstream)
def format(self, patch, data):
bitstream = BitStream(data, 7*8)
self.format_description(patch.description, bitstream)
return bitstream.tell_bit()
section_manager.add(PatchDescription)
class ModuleList(Section):
'''ModuleList Section subclass'''
type = 0x4a
  # NOTE: module.leds seems to be related to a group of modules. I cannot
  #       see the relationship, but I have a list of modules
# that require this to be set. This will probably be handled
# without this property but added to the module types that
# set it.
# make sure leds bit is set for specific modules
  #  - some earlier generated .pch2 files were different;
  #    these were empirically determined.
# NOTE2: these should be in nord/g2/modules.py
ledtypes = [
3, 4, 17, 38, 42, 48, 50, 57, 59, 60, 68, 69,
71, 75, 76, 81, 82, 83, 85,
105, 108, 112, 115, 141, 142, 143, 147, 148, 149, 150,
156, 157, 170, 171, 178, 188, 189, 198, 199, 208,
]
def fixleds(self, module):
module.leds = 0
#if module.type.id in ModuleList.ledtypes:
# module.leds = 1
#else:
# module.leds = 0
module_params = [
['index', 8 ], ['horiz', 7], ['vert', 7], ['color', 8],
['uprate', 1 ], ['leds', 1], ['reserved', 6],
]
def parse_area(self, area, bitstream):
read_bits = bitstream.read_bits
nmodules = read_bits(8)
area.modules = [ None ] * nmodules
for i in xrange(nmodules):
id = read_bits(8)
module = Module(modules.fromid(id), area)
area.modules[i] = module
for attr, nbits in self.module_params:
setattr(module, attr, read_bits(nbits))
nmodes = read_bits(4)
self.fixleds(module)
# mode data for module (if there is any)
for mode in module.modes:
mode.value = read_bits(6)
# add missing mode data. some .pch2 versions didn't contain
# all the modes in version 23 BUILD 266
module_type = module.type
if len(module.modes) < len(module_type.modes):
for mode in xrange(len(module.modes), len(module_type.modes)):
module.modes[mode].value = module_type.modes[mode].type.default
def format_area(self, area, bitstream):
write_bits = bitstream.write_bits
write_bits(2, self.area)
write_bits(8, len(area.modules))
for module in area.modules:
write_bits(8, module.type.id)
module.reserved = 0 # just in case is wasn't set
for attr, nbits in self.module_params:
bitstream.write_bits(nbits, getattr(module, attr))
self.fixleds(module)
write_bits(4, len(module.modes))
for mode in module.modes:
write_bits(6, mode.value)
def parse(self, patch, data):
bitstream = BitStream(data)
self.area = bitstream.read_bits(2)
area = get_patch_area(patch, self.area)
self.parse_area(area, bitstream)
def format(self, patch, data):
bitstream = BitStream(data)
area = get_patch_area(patch, self.area)
self.format_area(area, bitstream)
return bitstream.tell_bit()
section_manager.add(ModuleList)
class CurrentNote(Section):
'''CurrentNote Section subclass'''
type = 0x69
def parse(self, patch, data):
bitstream = BitStream(data)
lastnote = patch.lastnote = Note() # G2Patch
values = bitstream.read_bitsa([7] * 3)
lastnote.note, lastnote.attack, lastnote.release = values
nnotes = bitstream.read_bits(5) + 1
notes = patch.notes = [ Note() for i in xrange(nnotes) ] # G2Patch
for note in notes:
note.note, note.attack, note.release = bitstream.read_bitsa([7, 7, 7])
def format(self, patch, data):
bitstream = BitStream(data)
if len(patch.notes):
lastnote = patch.lastnote
if not lastnote:
values = [ 64, 0, 0 ]
else:
values = [ lastnote.note, lastnote.attack, lastnote.release ]
bitstream.write_bitsa([7, 7, 7], values)
bitstream.write_bits(5, len(patch.notes)-1)
for note in patch.notes:
bitstream.write_bitsa([7, 7, 7], [note.note, note.attack, note.release])
else:
bitstream.write_bits(24, 0x800000)
bitstream.write_bits(24, 0x200000)
return bitstream.tell_bit()
def invalid_cable(smodule, sconn, direction, dmodule, dconn):
'''invalid_cable(area, smodule, sconn, direction, dmodule, dconn) -> bool
if connection valid return 0, otherwise error.
'''
if direction == 1: # verify from
if sconn >= len(smodule.outputs): # out -> in
return 1
elif sconn >= len(smodule.inputs): # in -> in
return 2
if dconn >= len(dmodule.inputs): # verify to
return 3
return 0 # if we got here, everything's cool.
section_manager.add(CurrentNote)
class CableList(Section):
'''CableList Section subclass'''
type = 0x52
def parse_area(self, area, bitstream):
_, ncables = bitstream.read_bitsa([6, 16])
area.cables = [ None ] * ncables
for i in xrange(ncables):
cable = Cable(area)
cable.color, source, src_conn, direction, dest, dest_conn = \
bitstream.read_bitsa([3, 8, 6, 1, 8, 6])
src_module = area.find_module(source)
dest_module = area.find_module(dest)
if invalid_cable(src_module, src_conn, direction, dest_module, dest_conn):
printf('Invalid cable %d: "%s"(%d,%d) -%d-> "%s"(%d,%d)\n',
i, src_module.type.shortnm, src_module.index, src_conn, direction,
dest_module.type.shortnm, dest_module.index, dest_conn)
continue
if direction == 1:
cable.source = src_module.outputs[src_conn]
else:
cable.source = src_module.inputs[src_conn]
cable.dest = dest_module.inputs[dest_conn]
area.cables[i] = cable
cable.source.cables.append(cable)
cable.dest.cables.append(cable)
area.netlist.add(cable.source, cable.dest)
def format_area(self, area, bitstream):
bitstream.write_bitsa([2, 6, 16], [area.index, 0, len(area.cables)])
for cable in area.cables:
bitstream.write_bitsa([3, 8, 6, 1, 8, 6],
[ cable.color, cable.source.module.index, cable.source.index,
cable.source.direction, cable.dest.module.index, cable.dest.index])
def parse(self, patch, data):
bitstream = BitStream(data)
self.area = bitstream.read_bits(2)
area = get_patch_area(patch, self.area)
self.parse_area(area, bitstream)
def format(self, patch, data):
bitstream = BitStream(data)
area = get_patch_area(patch, self.area)
self.format_area(area, bitstream)
return bitstream.tell_bit()
section_manager.add(CableList)
class SettingsArea(object):
def __init__(self):
self.index = SETTINGS
self.name = 'settings'
class ParameterModule(object):
def __init__(self, area, index):
self.area = area
self.index = index
class Parameter(object):
'''Parameter class for module parameters/settings.'''
def __init__(self, area, mod_index, index, default=0, name='', module=None):
self.area = area
self.index = index
self.variations = [default]*NVARIATIONS
self.name = name
self.module = ParameterModule(area, mod_index)
self.knob = None
self.mmap = None
self.ctrl = None
class Morph(object):
'''Morph class for morph settings.'''
def __init__(self, area, index):
self.name = 'morph%d' % (index+1)
self.maps = [[] for variation in xrange(NVARIATIONS) ]
self.index = index
self.area = area
# morph "module" has 2 parameters dial and mode
self.dial = Parameter(area, 1, index, 0, name='dial')
self.mode = Parameter(area, 1, index+NMORPHS, 1, name='mode')
class Settings(object):
'''Settings class for patch settings.'''
groups = [
[ 'patchvol', 'activemuted' ],
[ 'glide', 'glidetime' ],
[ 'bend', 'semi' ],
[ 'vibrato', 'cents', 'rate' ],
[ 'arpeggiator', 'arptime', 'arptype', 'octaves' ],
[ 'octaveshift', 'sustain' ],
]
def __init__(self):
self.area = SettingsArea()
for i, group in enumerate(self.groups, 2):
for j, name in enumerate(group):
setattr(self, name, Parameter(self.area, i, j, name=name))
self.morphs = [ Morph(self.area, morph+1) for morph in xrange(NMORPHS) ]
self.morphmaps = [ [] for variation in xrange(NVARIATIONS) ]
class Parameters(Section):
'''Parameters Section subclass'''
type = 0x4d
def parse_settings(self, settings, bitstream):
read_bits = bitstream.read_bits
read_bitsa = bitstream.read_bitsa
nsections, nvariations, section, nentries = read_bitsa([8, 8, 8, 7])
# nentries: 16 parameters per variation: 8 dials, 8 modes
for i in xrange(nvariations): # usually 9
variation = read_bits(8)
for morph in settings.morphs:
dial = read_bits(7)
if variation < NVARIATIONS:
morph.dial.variations[variation] = dial
for morph in settings.morphs:
mode = read_bits(7)
if variation < NVARIATIONS:
morph.mode.variations[variation] = mode
for group in settings.groups:
section, nentries = read_bitsa([8, 7])
for i in xrange(nvariations):
variation = read_bits(8)
for entry in xrange(nentries):
value = read_bits(7)
if variation < NVARIATIONS:
getattr(settings, group[entry]).variations[variation] = value
def format_settings(self, settings, bitstream):
write_bits = bitstream.write_bits
write_bitsa = bitstream.write_bitsa
# 1 for morph--. .-- 16/var
write_bitsa([2, 8, 8, 8, 7], [SETTINGS, 7, NVARIATIONS, 1, 16])
for variation in xrange(NVARIATIONS): # morph groups
write_bits(8, variation)
for morph in settings.morphs:
write_bits(7, morph.dial.variations[variation])
for morph in settings.morphs:
write_bits(7, morph.mode.variations[variation])
section = 2 # starts at 2 (above: morph is section 1)
for group in settings.groups:
nentries = len(group)
write_bitsa([8, 7], [section, nentries])
for variation in xrange(NVARIATIONS):
write_bits(8, variation)
for entry in xrange(nentries):
value = getattr(settings, group[entry]).variations[variation]
write_bits(7, value)
section += 1
return bitstream.tell_bit()
def parse_area(self, area, bitstream):
read_bits = bitstream.read_bits
nmodules, nvariations = bitstream.read_bitsa([8, 8])
for i in xrange(nmodules):
index, nparams = bitstream.read_bitsa([8, 7])
module = area.find_module(index)
params = module.params
for i in xrange(nvariations):
variation = read_bits(8)
for param in xrange(nparams):
value = read_bits(7)
if param < len(params) and variation < NVARIATIONS:
params[param].variations[variation] = value
def format_area(self, area, bitstream):
modules = []
for module in area.modules:
# skip modules that have no params attribute or an empty parameter list
if not getattr(module, 'params', None):
continue
modules.append(module)
modules.sort(lambda a, b: cmp(a.index, b.index))
write_bits = bitstream.write_bits
mlen = len(modules)
bitstream.write_bitsa([2, 8], [area.index, mlen])
if mlen == 0:
write_bits(8, 0)
return bitstream.tell_bit()
write_bits(8, NVARIATIONS)
for module in modules:
write_bits(8, module.index)
params = module.params
write_bits(7, len(params))
for variation in xrange(NVARIATIONS):
write_bits(8, variation)
for param in params:
write_bits(7, param.variations[variation])
def parse(self, patch, data):
bitstream = BitStream(data)
self.area = bitstream.read_bits(2)
if self.area == SETTINGS:
patch.settings = Settings() # G2Patch
self.parse_settings(patch.settings, bitstream)
else:
area = get_patch_area(patch, self.area)
self.parse_area(area, bitstream)
def format(self, patch, data):
bitstream = BitStream(data)
if self.area == SETTINGS:
self.format_settings(patch.settings, bitstream)
else:
area = get_patch_area(patch, self.area)
self.format_area(area, bitstream)
return bitstream.tell_bit()
section_manager.add(Parameters)
def get_settings_param(patch, index, param):
if index < 2:
morph = patch.settings.morphs[param & 7]
if param < 8:
return morph.dial
else:
return morph.mode
else:
group = patch.settings.groups[index - 2]
return getattr(patch.settings, group[param])
class MorphParameters(Section):
'''MorphParameters Section subclass'''
type = 0x65
def parse(self, patch, data):
bitstream = BitStream(data)
read_bits = bitstream.read_bits
nvariations, nmorphs, _, _ = bitstream.read_bitsa([8, 4, 10, 10])
# variations seem to be 9 bytes with first nibble variation # from 0 ~ 8
# number of morph parameters starts at byte 7-bit 0 for 5-bits
morphs = patch.settings.morphs
morphmaps = patch.settings.morphmaps
for i in xrange(nvariations):
variation = read_bits(4)
bitstream.seek_bit(4 + (6*8) + 4, 1) # zeros
nmorphs = read_bits(8)
for j in xrange(nmorphs):
morph_map = MorphMap()
area, index, param, morph = bitstream.read_bitsa([2, 8, 7, 4])
morph_map.range = read_bits(8, 1)
module = get_patch_area(patch, area).find_module(index)
morph_map.param = module.params[param]
morph_map.variation = variation
morph_map.morph = morphs[morph-1]
morph_map.morph.maps[variation].append(morph_map)
morphmaps[variation].append(morph_map)
reserved = read_bits(4) # always 0
def format(self, patch, data):
bitstream = BitStream(data)
write_bits = bitstream.write_bits
bitstream.write_bitsa([8, 4, 10, 10], [ NVARIATIONS, NMORPHS, 0, 0])
# variations seem to be 9 bytes with first nibble variation # from 0 ~ 8
# number of morph parameters starts at byte 7-bit 0 for 5-bits
morphs = patch.settings.morphs
for variation in xrange(NVARIATIONS):
write_bits(4, variation)
bitstream.seek_bit(4 + (6 * 8) + 4, 1)
# collect all morph_maps of this variation into 1 array
morph_maps = []
for morph in morphs:
morph_maps.extend(morph.maps[variation])
def mod_param_index_cmp(a, b):
return cmp(a.param.module.index, b.param.module.index)
morph_maps.sort(mod_param_index_cmp)
write_bits(8, len(morph_maps))
for morph_map in morph_maps:
values = [
morph_map.param.module.area.index, morph_map.param.module.index,
morph_map.param.index, morph_map.morph.index, morph_map.range,
] # range is signed
bitstream.write_bitsa([2, 8, 7, 4, 8], values)
write_bits(4, 0) # always 0
bitstream.seek_bit(-4, 1) # remove last 4-bits
return bitstream.tell_bit()
section_manager.add(MorphParameters)
class KnobAssignments(Section):
'''KnobAssignments Section subclass'''
type = 0x62
def parse(self, patch, data):
bitstream = BitStream(data)
nknobs = bitstream.read_bits(16)
patch.knobs = [ Knob() for i in xrange(nknobs)] # G2Patch / G2Performance
for knob in patch.knobs:
knob.assigned = bitstream.read_bits(1)
if not knob.assigned:
continue
area, index, knob.isled, param = bitstream.read_bitsa([2, 8, 2, 7])
if type(patch) == Performance:
knob.slot = bitstream.read_bits(2)
perf = patch
patch = perf.slots[knob.slot].patch
else:
knob.slot = 0
if area == SETTINGS:
knob.param = get_settings_param(patch, index, param)
else:
module = get_patch_area(patch, area).find_module(index)
if module:
knob.param = module.params[param]
else:
knob.assigned = 0
continue
knob.param.knob = knob
def format(self, patch, data):
bitstream = BitStream(data)
bitstream.write_bits(16, NKNOBS)
for knob in patch.knobs:
bitstream.write_bits(1, knob.assigned)
if not knob.assigned:
continue
module = knob.param.module
bitstream.write_bitsa([2, 8, 2, 7],
[ module.area.index, module.index, knob.isled, knob.param.index ])
if type(patch) == Performance:
bitstream.write_bits(2, knob.slot)
return bitstream.tell_bit()
section_manager.add(KnobAssignments)
class CtrlAssignments(Section):
'''CtrlAssignments Section subclass'''
type = 0x60
def parse(self, patch, data):
bitstream = BitStream(data)
nctrls = bitstream.read_bits(7)
patch.ctrls = [ Ctrl() for i in xrange(nctrls)] # G2Patch? / G2Ctrl?
for ctrl in patch.ctrls:
ctrl.midicc, area, index, param = bitstream.read_bitsa([7, 2, 8, 7])
if area == SETTINGS:
ctrl.param = get_settings_param(patch, index, param)
else:
module = get_patch_area(patch, area).find_module(index)
ctrl.param = module.params[param]
ctrl.param.ctrl = ctrl
def format(self, patch, data):
bitstream = BitStream(data)
bitstream.write_bits(7, len(patch.ctrls))
for ctrl in patch.ctrls:
param = ctrl.param
bitstream.write_bitsa([7, 2, 8, 7], [ ctrl.midicc,
param.module.area.index, param.module.index, param.index ])
return bitstream.tell_bit()
section_manager.add(CtrlAssignments)
class Labels(Section):
'''Labels Section subclass'''
type = 0x5b
def parse_morphs(self, morphs, bitstream):
nentries, entry, length = bitstream.read_bitsa([8, 8, 8]) # 1, 1, 0x50
for morph in morphs:
index, morphlen, entry = bitstream.read_bytes(3)
morph.label = read_string(bitstream, 7, pad=True)
def format_morphs(self, morphs, bitstream):
bitstream.write_bits(2, SETTINGS)
bitstream.write_str('\1\1\x50')
s = bytearray([1, 1, 0])
for morph in morphs:
s[2] = 8 + morph.index
bitstream.write_str(str(s))
write_string(bitstream, morph.label, 7, pad=True)
return bitstream.tell_bit()
def parse_module(self, module, bitstream):
modlen = bitstream.read_bits(8)
if module.type.id == 121: # SeqNote
# extra editor parameters
# [0, 1, mag, 0, 1, octave]
# mag: 0=3-octaves, 1=2-octaves, 2=1-octave
# octave: 0-9 (c0-c9)
module.editmodes = bitstream.read_bytes(modlen)
else:
while modlen > 0:
stri, paramlen, parami = bitstream.read_bitsa([8, 8, 8])
modlen -= 3
param = module.params[parami]
paramlen -= 1 # decrease because we got param index
if paramlen:
param.labels = [ read_string(bitstream, 7, pad=True)
for i in xrange(paramlen / 7) ]
modlen -= paramlen
else:
param.labels = ['']
if section_debug:
printf('%d %s %d %d %s\n', module.index, module.type.shortnm,
paramlen, parami, param.labels)
def parse_area(self, area, bitstream):
read_bits = bitstream.read_bits
nmodules = read_bits(8)
for i in xrange(nmodules):
index = read_bits(8)
module = area.find_module(index)
self.parse_module(module, bitstream)
def format_module(self, module, bitstream):
s = ''
if module.type.id == 121: # SeqNote
s += str(bytearray(module.editmodes))
else:
# build up the labels and then write them
for i, param in enumerate(module.params):
if not hasattr(param, 'labels'):
continue
if section_debug:
printf('%d %s %d %d %s\n', module.index, module.type.shortnm,
7*len(param.labels), i, param.labels)
labels = [format_string(lbl, 7, pad=True) for lbl in param.labels]
ps = chr(i) + ''.join(labels)
s += chr(1)+chr(len(ps))+ps
bitstream.write_bitsa([8, 8], [module.index, len(s)])
bitstream.write_str(s)
def format_area(self, area, bitstream):
# collect all modules with parameters that have labels
modules = []
for module in area.modules:
if hasattr(module, 'params'):
for param in module.params:
if hasattr(param, 'labels'):
modules.append(module)
break
elif hasattr(module, 'editmodes'):
modules.append(module)
bitstream.write_bitsa([2, 8], [area.index, len(modules)])
for module in modules:
self.format_module(module, bitstream)
return bitstream.tell_bit()
def parse(self, patch, data):
bitstream = BitStream(data)
self.area = bitstream.read_bits(2)
if self.area == SETTINGS:
self.parse_morphs(patch.settings.morphs, bitstream)
else:
area = get_patch_area(patch, self.area)
self.parse_area(area, bitstream)
def format(self, patch, data):
bitstream = BitStream(data)
if self.area == SETTINGS:
return self.format_morphs(patch.settings.morphs, bitstream)
else:
area = get_patch_area(patch, self.area)
return self.format_area(area, bitstream)
section_manager.add(Labels)
class ModuleNames(Section):
'''ModuleNames Section subclass'''
type = 0x5a
def parse_area(self, area, bitstream):
areai, nmodules = bitstream.read_bitsa([6, 8])
for i in xrange(nmodules):
module = area.find_module(bitstream.read_bits(8))
module.name = read_string(bitstream, 16)
def parse(self, patch, data):
bitstream = BitStream(data)
self.area = bitstream.read_bits(2)
area = get_patch_area(patch, self.area)
self.parse_area(area, bitstream)
def format_area(self, area, bitstream):
bitstream.write_bitsa([2, 6, 8], [area.index, self.area, len(area.modules)])
for module in area.modules:
bitstream.write_bits(8, module.index)
write_string(bitstream, module.name, 16)
def format(self, patch, data):
bitstream = BitStream(data)
area = get_patch_area(patch, self.area)
self.format_area(area, bitstream)
return bitstream.tell_bit()
section_manager.add(ModuleNames)
class TextPad(Section):
'''TextPad Section subclass'''
type = 0x6f
def parse(self, patch, data):
patch.textpad = data
def format(self, patch, data):
bitstream = BitStream(data)
bitstream.write_str(patch.textpad)
return bitstream.tell_bit()
section_manager.add(TextPad)
class PerformanceDescription(Section):
'''PerformanceDescription Section subclass'''
type = 0x11
description_attrs = [
['unk1', 8],
['unk2', 4], ['focus', 2], [ 'unk3', 2 ],
['rangesel', 8], ['bpm', 8],
['split', 8], ['clock', 8], ['unk4', 8], ['unk5', 8],
]
slot_attrs = [
['active', 8], ['keyboard', 8], ['hold', 8], ['bank', 8 ], [ 'patch', 8 ],
['keylow', 8], ['keyhigh', 8], ['unk3', 8], ['unk4', 8], ['unk5', 8],
]
def parse(self, performance, data):
description = performance.description = Description() # G2Performance
bitstream = BitStream(data)
read_bits = bitstream.read_bits
for name, nbits in self.description_attrs:
value = read_bits(nbits)
setattr(description, name, value)
for slot in performance.slots:
slot.description = Description()
slot.name = read_string(bitstream, 16)
for name, nbits in self.slot_attrs:
value = read_bits(nbits)
setattr(slot.description, name, value)
def format(self, performance, data):
bitstream = BitStream(data)
write_bits = bitstream.write_bits
description = performance.description
for name, nbits in self.description_attrs:
write_bits(nbits, getattr(description, name))
for slot in performance.slots:
write_string(bitstream, slot.name, 16)
for name, nbits in self.slot_attrs:
write_bits(nbits, getattr(slot.description, name))
return bitstream.tell_bit()
section_manager.add(PerformanceDescription)
class GlobalKnobAssignments(KnobAssignments):
'''GlobalKnobAssignments Section subclasss'''
type = 0x5f
class Pch2File(object):
'''Pch2File(filename) - main reading/writing object for .pch2 files
this may become generic G2 file for .pch2 and .prf2 files
just by handling the performance sections (and perhaps others)
and parsing all 4 patches within the .prf2 file.
'''
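# Example usage (sketch; 'example.pch2' is a hypothetical file path):
#   pch2 = Pch2File('example.pch2')    # parses header and sections, checks the CRC
#   patch = pch2.patch                 # Patch object with modules, cables and settings
#   pch2.write('copy.pch2')            # re-serializes the patch to disk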
patch_sections = [
PatchDescription(),
ModuleList(area=1),
ModuleList(area=0),
CurrentNote(),
CableList(area=1),
CableList(area=0),
Parameters(area=2),
Parameters(area=1),
Parameters(area=0),
MorphParameters(area=2),
KnobAssignments(),
CtrlAssignments(),
Labels(area=2),
Labels(area=1),
Labels(area=0),
ModuleNames(area=1),
ModuleNames(area=0),
TextPad(),
]
standard_text_header = '''Version=Nord Modular G2 File Format 1\r
Type=%s\r
Version=%d\r
Info=BUILD %d\r
\0''' # needs the null byte
binary_version = 23
build_version = 266
def __init__(self, filename=None):
self.type = 'Patch'
self.binary_revision = 0
self.patch = Patch(nord.g2.modules.fromname)
if filename:
self.read(filename)
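# Each section of the file body is a type-length-value record: a 1-byte section
# type, a 2-byte big-endian payload length, then the payload handled by the
# matching Section subclass (see parse_section/format_section below).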
def parse_section(self, section, patch_or_perf, memview):
type, l = unpack('>BH', memview[:3])
l += 3
if section_debug:
nm = section.__class__.__name__
printf('0x%02x %-25s len:0x%04x\n', type, nm, l)
printf('%s\n', binhexdump(memview[:l].tobytes()))
section.parse(patch_or_perf, memview[3:l])
return memview[l:]
def parse_patch(self, patch, memview):
memview = self.parse_section(PatchDescription(), patch, memview)
while len(memview) > 0:
type = ord(memview[0])
if type == PatchDescription.type: # prf2 concats patches
break
section_class = section_manager.get(type, None)
if not section_class:
break
memview = self.parse_section(section_class(), patch, memview)
return memview
def parse(self, memview):
return self.parse_patch(self.patch, memview)
def parse_header(self, memview, filename):
header2x = bytearray(memview[:2*len(self.standard_text_header)])
null = header2x.find('\0')
if null < 0:
raise G2Error('Invalid G2File "%s" missing null terminator.' % filename)
self.txthdr = str(header2x[:null])
self.binhdr = header2x[null+1], header2x[null+2]
if self.binhdr[0] != self.binary_version:
printf('Warning: %s version %d\n', filename, self.binhdr[0])
printf(' version %d supported. it may fail to load.\n',
self.binary_version)
return memview[null+1:] # include binhdr for crc
# read - this is where the rubber meets the road. it starts here....
def read(self, filename):
self.filename = filename
self.data = bytearray(open(filename, 'rb').read())
memview = self.parse_header(memoryview(self.data), filename)
bytes = len(self.parse(memview[2:-2]))
ecrc = unpack('>H', self.data[-2:])[0]
acrc = crc(memview[:-2])
if ecrc != acrc:
printf('Bad CRC 0x%x 0x%x\n' % (ecrc, acrc))
def format_section(self, section, patch_or_perf, memview):
#print section.__class__.__name__
bits = section.format(patch_or_perf, memview[3:]) # skip type, size
bytes = (bits + 7) >> 3
# write type, size
memview[:3] = pack('>BH', section.type, bytes)
if section_debug:
nm = section.__class__.__name__
printf('0x%02x %-25s len:0x%04x\n', section.type, nm, bytes)
tbl = string.maketrans(string.ascii_lowercase, ' '*26)
nm = nm.translate(tbl).replace(' ', '')
printf('%s\n', nm)
#if title_section and len(nm) < len(f):
# f = nm+f[len(nm):]
return memview[bytes + 3:]
def format_patch(self, patch, memview):
for section in Pch2File.patch_sections:
memview = self.format_section(section, patch, memview)
return memview
def format(self, memview):
return self.format_patch(self.patch, memview)
def format_file(self):
data = bytearray(64<<10)
memview = memoryview(data)
hdr = Pch2File.standard_text_header % (self.type,
self.binary_version, self.build_version)
memview[:len(hdr)] = hdr
memview = memview[len(hdr):]
#memview = self.format_header(memview)
memview[0] = chr(self.binary_version)
memview[1] = chr(self.binary_revision)
fmemview = self.format(memview[2:])
bytes = len(memview) - len(fmemview)
data_crc = crc(memview[:bytes])
memview[bytes:bytes+2] = pack('>H', data_crc)
bytes = len(data) - len(fmemview) + 2
return data[:bytes]
# write - this looks a lot easier than read ehhhh???
def write(self, filename=None):
out = open(filename, 'wb')
out.write(str(self.format_file()))
class Prf2File(Pch2File):
'''Prf2File(filename) -> load a nord modular g2 performance.'''
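# Example usage (sketch; 'example.prf2' is a hypothetical file path):
#   prf2 = Prf2File('example.prf2')
#   for slot in prf2.performance.slots:   # the four performance slots
#       patch = slot.patch                # each slot carries its own Patch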
def __init__(self, filename=None):
self.type = 'Performance'
self.binary_revision = 1
self.performance = Performance(nord.g2.modules.fromname)
self.performance_section = PerformanceDescription()
self.globalknobs_section = GlobalKnobAssignments()
if filename:
self.read(filename)
def parse(self, memview):
performance = self.performance
performance_section = self.performance_section
globalknobs_section = self.globalknobs_section
memview = self.parse_section(performance_section, performance, memview)
for slot in performance.slots:
memview = self.parse_patch(slot.patch, memview)
memview = self.parse_section(globalknobs_section, performance, memview)
return memview
def format_performance(self, memview):
performance = self.performance
performance_section = self.performance_section
globalknobs_section = self.globalknobs_section
memview = self.format_section(performance_section, performance, memview)
for slot in performance.slots:
memview = self.format_patch(slot.patch, memview)
memview = self.format_section(globalknobs_section, performance, memview)
return memview
def format(self, memview):
return self.format_performance(memview)
if __name__ == '__main__':
prog = sys.argv.pop(0)
filename = sys.argv.pop(0)
printf('"%s"\n', filename)
pch2 = Pch2File(filename)
#pch2.write(sys.argv.pop(0))
| gpl-2.0 | -1,795,341,959,768,795,100 | 32.564052 | 80 | 0.641912 | false |
pdftables/python-pdftables-api | test/test_pdftables_api.py | 1 | 7244 | # Copyright 2016 The Sensible Code Company
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
from tempfile import NamedTemporaryFile
import requests_mock
from unittest import TestCase
from pdftables_api import Client, APIException
class TestEnsureExtFormat(TestCase):
def test_wrong_format(self):
self.assertRaises(ValueError, Client.ensure_format_ext, 'foo.csv', 'txt')
self.assertRaises(ValueError, Client('key').dump, 'foo.pdf', 'txt')
def test_unmodified(self):
self.assertEqual(('foo.csv', 'csv'),
Client.ensure_format_ext('foo.csv', 'csv'))
self.assertEqual(('foo.xlsx', 'xlsx-multiple'),
Client.ensure_format_ext('foo.xlsx', 'xlsx-multiple'))
self.assertEqual(('foo.xlsx', 'xlsx-multiple'),
Client.ensure_format_ext('foo.xlsx', 'xlsx-multiple'))
self.assertEqual(('foo.xml', 'xml'),
Client.ensure_format_ext('foo.xml', 'xml'))
self.assertEqual(('foo.html', 'html'),
Client.ensure_format_ext('foo.html', 'html'))
def test_missing_format(self):
self.assertEqual(('foo.xlsx', 'xlsx-multiple'),
Client.ensure_format_ext('foo', None))
self.assertEqual(('foo.txt.xlsx', 'xlsx-multiple'),
Client.ensure_format_ext('foo.txt', None))
self.assertEqual(('foo.xlsx', 'xlsx-multiple'),
Client.ensure_format_ext('foo.xlsx', None))
def test_missing_ext(self):
self.assertEqual(('foo.csv', 'csv'),
Client.ensure_format_ext('foo', 'csv'))
def test_incorrect_ext(self):
self.assertEqual(('foo.txt.csv', 'csv'),
Client.ensure_format_ext('foo.txt', 'csv'))
self.assertEqual(('foo.xlsx.csv', 'csv'),
Client.ensure_format_ext('foo.xlsx', 'csv'))
def test_stdout(self):
self.assertEqual((None, 'xlsx-multiple'),
Client.ensure_format_ext(None, None))
self.assertEqual((None, 'csv'),
Client.ensure_format_ext(None, 'csv'))
class TestRequests(TestCase):
def test_successful_conversion(self):
with requests_mock.mock() as m:
m.post('https://pdftables.com/api?key=fake_key', text='xlsx output')
pdf_fo = io.BytesIO(b'pdf content')
c = Client('fake_key')
with NamedTemporaryFile(suffix="test.pdf") as tf:
filename = tf.name
tf.write(b"Hello world")
tf.file.close()
filename_out = filename.replace(".pdf", ".xlsx")
try:
s = c.convert(filename, filename_out)
with open(filename_out) as fd:
self.assertEqual(fd.read(), "xlsx output")
finally:
try:
os.unlink(filename_out)
except OSError:
pass
def test_successful_conversion_bytes(self):
with requests_mock.mock() as m:
m.post('https://pdftables.com/api?key=fake_key', content=b'xlsx output')
with NamedTemporaryFile(suffix="test.pdf") as tf:
filename = tf.name
tf.write(b"Hello world")
tf.file.close()
output = Client('fake_key').convert(filename)
self.assertEqual(b'xlsx output', output)
def test_successful_conversion_string(self):
with requests_mock.mock() as m:
m.post('https://pdftables.com/api?key=fake_key', text='csv output')
with NamedTemporaryFile(suffix="test.pdf") as tf:
filename = tf.name
tf.write(b"Hello world")
tf.file.close()
output = Client('fake_key').convert(filename, out_format="csv")
self.assertEqual('csv output', output)
def test_different_api_url(self):
with requests_mock.mock() as m:
m.post('http://example.com/api?key=fake_key', text='xlsx output')
pdf_fo = io.BytesIO(b'pdf content')
c = Client('fake_key', api_url='http://example.com/api')
s = c.dump(pdf_fo, 'csv')
self.assertEqual(b'xlsx output', consume(s))
def test_missing_api_key(self):
with requests_mock.mock() as m:
m.post('https://pdftables.com/api?key=fake_key', text='xlsx output')
pdf_fo = io.BytesIO(b'pdf content')
c = Client('')
self.assertRaisesRegexp(APIException, 'Invalid API key', c.dump, pdf_fo, 'csv')
def test_invalid_format(self):
with requests_mock.mock() as m:
m.post('https://pdftables.com/api?key=fake_key', text='xlsx output')
pdf_fo = io.BytesIO(b'pdf content')
c = Client('fake_key')
self.assertRaisesRegexp(ValueError, 'Invalid output format', c.dump, pdf_fo, 'invalid_format')
def test_remaining(self):
with requests_mock.mock() as m:
m.get('https://pdftables.com/api/remaining?key=fake_key', text='8584')
c = Client('fake_key')
self.assertEqual(c.remaining(), 8584)
def test_response_invalid_format(self):
with requests_mock.mock() as m:
m.post('https://pdftables.com/api?key=fake_key', status_code=400)
pdf_fo = io.BytesIO(b'pdf content')
c = Client('fake_key')
self.assertRaisesRegexp(APIException, 'Unknown file format', c.dump, pdf_fo)
def test_response_unauthorized(self):
with requests_mock.mock() as m:
m.post('https://pdftables.com/api?key=wrong_key', status_code=401)
pdf_fo = io.BytesIO(b'pdf content')
c = Client('wrong_key')
self.assertRaisesRegexp(APIException, 'Unauthorized API key', c.dump, pdf_fo)
def test_response_limit_exceeded(self):
with requests_mock.mock() as m:
m.post('https://pdftables.com/api?key=fake_key', status_code=402)
pdf_fo = io.BytesIO(b'pdf content')
c = Client('fake_key')
self.assertRaisesRegexp(APIException, 'Usage limit exceeded', c.dump, pdf_fo)
def test_response_unknown_file_format(self):
with requests_mock.mock() as m:
m.post('https://pdftables.com/api?key=fake_key', status_code=403)
png_fo = io.BytesIO(b'png content')
c = Client('fake_key')
self.assertRaisesRegexp(APIException, 'Unknown format requested', c.dump, png_fo)
def consume(s):
r = b''
for chunk in s:
r += chunk
return r
| bsd-3-clause | -621,245,369,902,256,800 | 37.531915 | 106 | 0.576615 | false |
rossant/spiky | spiky/colors.py | 1 | 2971 | import numpy as np
# from matplotlib.colors import hsv_to_rgb, rgb_to_hsv
__all__ = ['COLORMAP', 'HIGHLIGHT_COLORMAP', 'COLORS', 'COLORS_COUNT', 'generate_colors']
# Color creation routines
# -----------------------
def hue(H):
H = H.reshape((-1, 1))
R = np.abs(H * 6 - 3) - 1
G = 2 - np.abs(H * 6 - 2)
B = 2 - np.abs(H * 6 - 4)
return np.clip(np.hstack((R,G,B)), 0, 1)
def hsv_to_rgb(HSV):
a = HSV[:,1].reshape((-1, 1))
b = HSV[:,2].reshape((-1, 1))
a = np.tile(a, (1, 3))
b = np.tile(b, (1, 3))
return ((hue(HSV[:,0]) - 1) * a + 1) * b
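# Quick sanity check (sketch): hue 0 at full saturation and value maps to red,
#   hsv_to_rgb(np.array([[0., 1., 1.]])) -> approximately [[1., 0., 0.]]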
def generate_hsv(n0=20):
H = np.linspace(0., 1., n0)
i = np.arange(n0)
H = H[~((i==5) | (i==7) | (i==10) | (i==12) | (i==15) |(i==17) | (i==18) | (i==19))]
# H = H[((i==15) |(i==17) | (i==18) | (i==19))]
H = np.repeat(H, 4)
n = len(H)
S = np.ones(n)
V = np.ones(n)
# change V for half of the colors
V[1::2] = .75
# change S for half of the colors
S[2::4] = .75
S[3::4] = .75
hsv = np.zeros((n, 3))
hsv[:,0] = H
hsv[:,1] = S
hsv[:,2] = V
return hsv
# Global variables with all colors
# --------------------------------
# generate a list of RGB values for each color
hsv = generate_hsv()
hsv = np.clip(hsv, 0, 1)
# hsv = hsv.reshape((1, -1, 3))
COLORS = hsv_to_rgb(hsv)
COLORS = np.clip(COLORS, 0, 1)
COLORS_COUNT = len(COLORS)
step = 17 # needs to be prime with COLORS_COUNT
perm = np.mod(np.arange(0, step * 24, step), 24)
perm = np.hstack((2 * perm, 2 * perm + 1))
COLORMAP = COLORS[perm, ...]
# COLORMAP = np.array(COLORS)
# Highlight color map
# rgb = COLORMAP.reshape((1, -1, 3))
# hsv = rgb_to_hsv(rgb)
# decrease saturation, increase value
hsv[:,1] -= .5
hsv[:,2] += .5
hsv = np.clip(hsv, 0, 1)
hsv = hsv[perm, ...]
HIGHLIGHT_COLORMAP = hsv_to_rgb(hsv)
def generate_colors(n=None):
if n is None:
n = COLORS_COUNT
if n < COLORS_COUNT:
return COLORS[:n]
else:
return [COLORS[i % COLORS_COUNT] for i in xrange(n)]
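# Example (sketch): generate_colors(3) returns the first three RGB triples of
# COLORS; asking for more than COLORS_COUNT colors cycles through the palette.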
if __name__ == "__main__":
def hsv_rect(hsv, coords):
col = hsv_to_rgb(hsv)
col = np.clip(col, 0, 1)
rgb_rect(col, coords)
def rgb_rect(rgb, coords):
x0, y0, x1, y1 = coords
a = 2./len(rgb)
c = np.zeros((len(rgb), 4))
c[:,0] = np.linspace(x0, x1-a, len(rgb))
c[:,1] = y0
c[:,2] = np.linspace(x0+a, x1, len(rgb))
c[:,3] = y1
rectangles(coordinates=c, color=rgb)
from galry import *
figure(constrain_navigation=False)
rgb_rect(COLORMAP, (-1,0,1,1))
rgb_rect(HIGHLIGHT_COLORMAP, (-1,-1,1,0))
ylim(-1,1)
show()
# hsv = generate_hsv()
# hsv_rect(hsv, (-1,0,1,1))
# highlight
# hsv[:,1] -= 0.5 # white -> color
# hsv[:,2] += 0.5 # black -> white
# hsv[:,1] -= 0.25 # white -> color
# hsv[:,2] += 0.5 # black -> white
# hsv_rect(hsv, (-1,-1,1,0)) | bsd-3-clause | -7,645,004,919,115,126,000 | 23.97479 | 89 | 0.499159 | false |
foxmask/django-th | django_th/management/commands/read.py | 1 | 1331 | #!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
from concurrent.futures import ThreadPoolExecutor
# django
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db.models import Q
# trigger happy
from django_th.models import TriggerService
from django_th.read import Read
from logging import getLogger
# create logger
logger = getLogger('django_th.trigger_happy')
class Command(BaseCommand):
help = 'Trigger all the services and put them in cache'
def handle(self, *args, **options):
"""
get all the triggers that need to be handled
"""
from django.db import connection
connection.close()
failed_tries = settings.DJANGO_TH.get('failed_tries', 10)
trigger = TriggerService.objects.filter(
Q(provider_failed__lte=failed_tries) |
Q(consumer_failed__lte=failed_tries),
status=True,
user__is_active=True,
provider__name__status=True,
consumer__name__status=True,
).select_related('consumer__name', 'provider__name')
with ThreadPoolExecutor(max_workers=settings.DJANGO_TH.get('processes')) as executor:
r = Read()
for t in trigger:
executor.submit(r.reading, t)
| bsd-3-clause | 6,266,242,189,755,051,000 | 31.463415 | 93 | 0.6574 | false |
jmesteve/saas3 | openerp/addons_extra/l10n_es_payment_order/wizard/converter.py | 1 | 5200 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2006 ACYSOS S.L. (http://acysos.com) All Rights Reserved.
# Pedro Tarrafeta <[email protected]>
# Copyright (c) 2008 Pablo Rocandio. All Rights Reserved.
# Copyright (c) 2009 Zikzakmedia S.L. (http://zikzakmedia.com) All Rights Reserved.
# Jordi Esteve <[email protected]>
# Copyright (c) 2009 NaN (http://www.nan-tic.com) All Rights Reserved.
# Albert Cervera i Areny <[email protected]>
# $Id$
# Refactoring. Acysos S.L. (http://www.acysos.com) 2012
# Ignacio Ibeas <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
from log import *
class payment_converter_spain(osv.osv):
_name= 'payment.converter.spain'
_auto = False
def digits_only(self, cr, uid, cc_in):
"""Discards non-numeric chars"""
cc = ""
for i in cc_in or '':
try:
int(i)
cc += i
except ValueError:
pass
return cc
def to_ascii(self, cr, uid, text):
"""Converts special characters such as those with accents to their ASCII equivalents"""
old_chars = ['á','é','í','ó','ú','à','è','ì','ò','ù','ä','ë','ï','ö','ü','â','ê','î','ô','û','Á','É','Í','Ú','Ó','À','È','Ì','Ò','Ù','Ä','Ë','Ï','Ö','Ü','Â','Ê','Î','Ô','Û','ñ','Ñ','ç','Ç','ª','º','·','\n']
new_chars = ['a','e','i','o','u','a','e','i','o','u','a','e','i','o','u','a','e','i','o','u','A','E','I','U','O','A','E','I','O','U','A','E','I','O','U','A','E','I','O','U','n','N','c','C','a','o','.',' ']
for old, new in zip(old_chars, new_chars):
text = text.replace(unicode(old,'UTF-8'), new)
return text
def convert_text(self, cr, uid, text, size, justified='left'):
if justified == 'left':
return self.to_ascii(cr,uid,text)[:size].ljust(size)
else:
return self.to_ascii(cr,uid,text)[:size].rjust(size)
def convert_float(self, cr, uid, number, size, context):
text = str( int( round( number * 100, 0 ) ) )
if len(text) > size:
raise Log(_('Error:\n\nCan not convert float number %(number).2f to fit in %(size)d characters.') % {
'number': number,
'size': size
})
return text.zfill(size)
def convert_int(self, cr, uid, number, size, context):
text = str( number )
if len(text) > size:
raise Log( _('Error:\n\nCan not convert integer number %(number)d to fit in %(size)d characters.') % {
'number': number,
'size': size
})
return text.zfill(size)
def convert(self, cr, uid, value, size, context, justified='left'):
if value == False:
return self.convert_text(cr, uid, '', size)
elif isinstance(value, float):
return self.convert_float(cr, uid, value, size, context)
elif isinstance(value, int):
return self.convert_int(cr, uid, value, size, context)
else:
return self.convert_text(cr, uid, value, size, justified)
def convert_bank_account(self, cr, uid, value, partner_name, context):
if not isinstance(value, basestring):
raise Log( _('User error:\n\nThe bank account number of %s is not defined.') % partner_name )
ccc = self.digits_only(cr, uid, value)
if len(ccc) != 20:
raise Log( _('User error:\n\nThe bank account number of %s does not have 20 digits.') % partner_name )
return ccc
def bank_account_parts(self, cr, uid, value, partner_name, context):
if not isinstance(value, basestring):
raise Log( _('User error:\n\nThe bank account number of %s is not defined.') % partner_name )
ccc = self.digits_only(cr, uid, value)
if len(ccc) != 20:
raise Log( _('User error:\n\nThe bank account number of %s does not have 20 digits.') % partner_name )
return {'bank':ccc[:4],
'office': ccc[4:8],
'dc': ccc[8:10],
'account': ccc[10:]}
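# Example (sketch, hypothetical digits): for a 20-digit CCC '12345678901234567890'
# this returns bank '1234', office '5678', dc '90' and account '1234567890'.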
payment_converter_spain()
| agpl-3.0 | -6,711,844,283,154,323,000 | 44.59292 | 214 | 0.542508 | false |
veblush/PyAuParser | sample/tutorial2.py | 1 | 1053 | import os
import sys
import pyauparser
def main():
g = pyauparser.Grammar.load_file("data/operator.egt")
# every production has a lambda handler which evaluates value from childs.
# Because LALR is a bottom-up parser, handler would be called from bottom.
h = pyauparser.ProductionHandler({
'<E> ::= <E> + <M>': lambda c: c[0] + c[2],
'<E> ::= <E> - <M>': lambda c: c[0] - c[2],
'<E> ::= <M>': lambda c: c[0],
'<M> ::= <M> * <N>': lambda c: c[0] * c[2],
'<M> ::= <M> / <N>': lambda c: c[0] / c[2],
'<M> ::= <N>': lambda c: c[0],
'<N> ::= - <V>': lambda c: -c[1],
'<N> ::= <V>': lambda c: c[0],
'<V> ::= Num': lambda c: int(c[0].lexeme),
'<V> ::= ( <E> )': lambda c: c[1],
}, g)
try:
pyauparser.parse_string(g, "-2*(3+4)-5", handler=h)
print "Result = {0}".format(h.result)
except pyauparser.ParseError as e:
print e
if __name__ == "__main__":
main()
| mit | 3,278,430,495,155,954,700 | 30.90625 | 78 | 0.449193 | false |
mattcaldwell/djangopypi | userpypi/migrations/0001_initial.py | 1 | 10447 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Classifier'
db.create_table('userpypi_classifier', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
))
db.send_create_signal('userpypi', ['Classifier'])
# Adding model 'Project'
db.create_table('userpypi_project', (
('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('license', self.gf('django.db.models.fields.TextField')(blank=True)),
('metadata_version', self.gf('django.db.models.fields.CharField')(default=1.0, max_length=64)),
('author', self.gf('django.db.models.fields.CharField')(max_length=128, blank=True)),
('home_page', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('download_url', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
('summary', self.gf('django.db.models.fields.TextField')(blank=True)),
('author_email', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('owner', self.gf('django.db.models.fields.related.ForeignKey')(related_name='projects', to=orm['auth.User'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
))
db.send_create_signal('userpypi', ['Project'])
# Adding M2M table for field classifiers on 'Project'
db.create_table('userpypi_project_classifiers', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('project', models.ForeignKey(orm['userpypi.project'], null=False)),
('classifier', models.ForeignKey(orm['userpypi.classifier'], null=False))
))
db.create_unique('userpypi_project_classifiers', ['project_id', 'classifier_id'])
# Adding model 'Release'
db.create_table('userpypi_release', (
('upload_time', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('md5_digest', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('filetype', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('pyversion', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('project', self.gf('django.db.models.fields.related.ForeignKey')(related_name='releases', to=orm['userpypi.Project'])),
('platform', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('version', self.gf('django.db.models.fields.CharField')(max_length=128)),
('signature', self.gf('django.db.models.fields.CharField')(max_length=128, blank=True)),
('distribution', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('userpypi', ['Release'])
# Adding unique constraint on 'Release', fields ['project', 'version', 'platform', 'distribution', 'pyversion']
db.create_unique('userpypi_release', ['project_id', 'version', 'platform', 'distribution', 'pyversion'])
def backwards(self, orm):
# Deleting model 'Classifier'
db.delete_table('userpypi_classifier')
# Deleting model 'Project'
db.delete_table('userpypi_project')
# Removing M2M table for field classifiers on 'Project'
db.delete_table('userpypi_project_classifiers')
# Deleting model 'Release'
db.delete_table('userpypi_release')
# Removing unique constraint on 'Release', fields ['project', 'version', 'platform', 'distribution', 'pyversion']
db.delete_unique('userpypi_release', ['project_id', 'version', 'platform', 'distribution', 'pyversion'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'userpypi.classifier': {
'Meta': {'object_name': 'Classifier'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'userpypi.project': {
'Meta': {'object_name': 'Project'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'author_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'classifiers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['userpypi.Classifier']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'download_url': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'home_page': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'license': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'metadata_version': ('django.db.models.fields.CharField', [], {'default': '1.0', 'max_length': '64'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'projects'", 'to': "orm['auth.User']"}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'userpypi.release': {
'Meta': {'unique_together': "(('project', 'version', 'platform', 'distribution', 'pyversion'),)", 'object_name': 'Release'},
'distribution': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'filetype': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'md5_digest': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'releases'", 'to': "orm['userpypi.Project']"}),
'pyversion': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'signature': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'upload_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '128'})
}
}
complete_apps = ['userpypi']
| bsd-3-clause | -2,172,781,878,352,863,500 | 67.281046 | 139 | 0.577965 | false |
tecan/xchat-rt | plugins/scripts/ss-autoaway.py | 1 | 2072 | #!/usr/bin/env python
#
# screensaverAutoAway.py - X-Chat script to monitor for the DBUS message
# emitted when the screensaver is activated and de-activated and set the user
# away.
#
# To install:
# o Copy this file to your ~/.xchat2/ directory and it will be loaded on startup.
# o To load without restart, run: /py load screensaverAutoAway.py
# (If you don't want to put it in your ~/.xchat2, then specify the full path.)
#
# If running the '/py' command above results in a message 'py :Unknown command',
# then you do not have the Python plugin installed.
# Written by Wil Cooley <[email protected]>
# Began 26 Aug 2008
#
# $Id$
#todo idle sensor
import dbus
from dbus.mainloop.glib import DBusGMainLoop
try:
import xchat
except ImportError:
# Allow for external tests
pass
__author__ = 'Wil Cooley <wcooley at nakedape.cc>'
__module_name__ = 'screensaverAutoAway'
__module_version__ = '0.2'
__module_description__ = 'Sets user away when the GNOME screensaver is activated'
def screensaver_changed(state):
''' Called when screensaver stops or starts
state is either:
- True: Screensaver activated
- False: Screensaver deactivated
'''
if state:
set_away()
else:
set_back()
def set_away():
away_msg = '%s (Auto-away by %s, version %s)' % \
(xchat.get_prefs('away_reason'),
__module_name__ ,
__module_version__)
if xchat.get_info('away') is None:
xchat.command('away ' + away_msg)
def set_back():
if xchat.get_info('away') is not None:
xchat.command('back')
def setup_session():
DBusGMainLoop(set_as_default=True)
sesbus = dbus.SessionBus()
sesbus.add_signal_receiver(screensaver_changed,
'SessionIdleChanged', 'org.mate.ScreenSaver')
if __name__ == '__main__':
setup_session()
xchat.prnt('%s version %s by %s loaded' % \
(__module_name__, __module_version__, __author__) )
| gpl-2.0 | 1,878,460,759,486,318,300 | 27.383562 | 82 | 0.615347 | false |
Bajoo/client-pc | bajoo/filesync/task_builder.py | 1 | 5313 | # -*- coding: utf-8 -*-
from ..index.folder_node import FolderNode
from ..index.hints import DeletedHint, DestMoveHint, SourceMoveHint
from ..index.hint_builder import HintBuilder
from .added_local_files_task import AddedLocalFilesTask
from .added_remote_files_task import AddedRemoteFilesTask
from .folder_task import FolderTask
from .moved_local_files_task import MovedLocalFilesTask
from .removed_local_files_task import RemovedLocalFilesTask
from .removed_remote_files_task import RemovedRemoteFilesTask
class TaskBuilder(object):
"""Create sync task by acquiring node and release node when task is done.
A task is created from the node, depending of the node's type and its
state (new or existing node, type of hints).
When the task has been executed, it can release the reserved node(s).
A task can reserve several nodes, by example when there is a "Move" hint.
"""
@classmethod
def build_from_node(cls, local_container, node):
"""Create the best suited sync task for the target node.
The type of task depends on the type of node and the hints set by
external events.
After this call, the node is not yet acquired. `acquire_from_task()`
should be called before executing the task.
Note:
- This method must be called with the IndexTree's lock acquired.
Args:
local_container (LocalContainer): container owning the node.
node (BaseNode): node to sync.
Returns:
Task: sync task, executable by the filesync service.
"""
container = local_container.container
node_path = node.get_full_path()
if isinstance(node, FolderNode):
task = FolderTask(local_container, node)
else:
if node.local_hint:
if isinstance(node.local_hint, DestMoveHint):
node = node.local_hint.source_node
node_path = node.get_full_path()
if isinstance(node.local_hint, SourceMoveHint):
dest_path = node.local_hint.dest_node.get_full_path()
task = MovedLocalFilesTask(container,
(node_path, dest_path,),
local_container)
elif isinstance(node.local_hint, DeletedHint):
task = RemovedLocalFilesTask(container, (node_path,),
local_container)
else: # ModifiedHint
task = AddedLocalFilesTask(container, (node_path,),
local_container)
elif node.remote_hint:
if isinstance(node.remote_hint, DestMoveHint):
node = node.remote_hint.source_node
node_path = node.get_full_path()
# if isinstance(node.remote_hint, SourceMoveHint):
# # TODO: no support for remove Move events.
# dest_path = node.remote_hint.dest_node.get_full_path()
if isinstance(node.remote_hint, DeletedHint):
task = RemovedRemoteFilesTask(container, (node_path,),
local_container)
else: # ModifiedHint
task = AddedRemoteFilesTask(container, (node_path,),
local_container)
else:
task = AddedLocalFilesTask(container, (node_path,),
local_container)
return task
@classmethod
def acquire_from_task(cls, node, task):
"""Acquire the node and all related nodes used by the task.
For most tasks, only the primary node is acquired. If there are
"Move" hints, hint pairs can be split into a (Deleted, Modified)
couple.
If the task is of type "MovedLocalFilesTask", both source and
destination nodes are acquired by the task.
Note:
- This method must be called with the IndexTree's lock acquired.
- After an acquisition, nodes's hints are reset to None. If they
are needed by the task, they should be copied before that.
Args:
node (BaseNode): primary target of the task.
task: sync task that will take care of the node(s).
"""
if isinstance(task, MovedLocalFilesTask):
HintBuilder.break_coupled_hints(node, HintBuilder.SCOPE_REMOTE)
if isinstance(node.local_hint, SourceMoveHint):
# acquire destination node
dest_node = node.local_hint.dest_node
dest_node.task = task
dest_node.remote_hint = None
dest_node.local_hint = None
else:
# acquire source node
source_node = node.local_hint.source_node
source_node.task = task
source_node.remote_hint = None
source_node.local_hint = None
else:
HintBuilder.break_coupled_hints(node)
# acquire target node
node.task = task
node.remote_hint = None
node.local_hint = None
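# Typical flow (sketch): with the IndexTree's lock held,
#   task = TaskBuilder.build_from_node(local_container, node)
#   TaskBuilder.acquire_from_task(node, task)
# after which the filesync service executes the task.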
| gpl-3.0 | -5,430,126,019,375,362,000 | 41.166667 | 78 | 0.576699 | false |
lord63/wangyi_music_top100 | crawler/database.py | 1 | 1097 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
crawler/database.py
~~~~~~~~~~~~~~~~~~~
provide some convenient ways to get data and communicate with
the redis server
"""
from __future__ import absolute_import
from datetime import datetime
from crawler import config
class Database(object):
def __init__(self):
self.redis = config.redis_server
def set_update_time(self):
self.redis.set('wangyi:latest_update',
datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S'))
@property
def songlists(self):
return self.redis.lrange('wangyi:songlists', 0, -1)
@property
def comments_ranklist(self):
return self.redis.lrange('wangyi:ranklist:comments', 0, -1)
@property
def palys_ranklist(self):
return self.redis.lrange('wangyi:ranklist:plays', 0, -1)
@property
def favourites_ranklist(self):
return self.redis.lrange('wangyi:ranklist:favourites', 0, -1)
@property
def shares_ranklist(self):
return self.redis.lrange('wangyi:ranklist:shares', 0, -1)
| mit | -4,058,596,468,989,847,000 | 23.377778 | 78 | 0.625342 | false |
maximz/cooperate-without-looking | src/cwl.py | 1 | 24981 | # -*- coding: utf-8 -*-
"""Module cwl.
Produces simulation calculation and figures for the Cooperate With/Without Looking project.
Usage:
python cwl.py {recalculate?}
Examples:
python cwl.py run using pre-calculated saved data
python cwl.py recalculate run with freshly calculated data
@author: Maxim Zaslavsky <[email protected]>
@author: Erez Yoeli <[email protected]>
"""
### GENERAL
# system imports
import sys, os
import numpy as np
import matplotlib
matplotlib.use("pdf") # save as PDFs
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from collections import defaultdict
from random import sample as random_sample
from math import floor
import cPickle as pickle
# Choose whether to recalculate or to use saved data
Calculate = False
if __name__ == "__main__":
try:
if sys.argv[1] == 'recalculate':
Calculate = True
except: # Interactive mode
pass
output_dir = '../bin/'
print 'Welcome to the CW(O)L Simulations and Figures toolkit.'
print
#######################################################
#######################################################
# Game theory methods
def are_assumptions_valid(a, b, c1, c2, d, p, w):
#P1 and P2 prefer a cooperative interaction to no interaction
statement_1 = a > 0 and b > 0
#P1 gets short-term gains from defection
statement_2 = c1 > a and c2 > a
#P2 doesn't want to interact with P1 if he expects P1 to defect in either game.
statement_3 = b * p + d * (1 - p) < 0 and d * p + b * (1 - p) < 0
#wlog it is more tempting to defect in state 2.
statement_4 = c2 > c1
#all of this must hold
return statement_1 and statement_2 and statement_3 and statement_4
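# Example (sketch): a=1, b=1, c1=2, c2=4, d=-6, p=0.6, w=0.9 satisfies all four
# statements, so are_assumptions_valid(1, 1, 2, 4, -6, 0.6, 0.9) returns True.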
def get_game_population_1(a, b, c1, c2, d, p, w):
"""
Game for population 1 of CWL
"""
if not are_assumptions_valid(a, b, c1, c2, d, p, w):
raise ValueError("This parameters do not comply with assumptions")
A = np.empty(shape=(4, 3))
A[0, 0] = (a * p + a * (1.0 - p)) / (1.0 - w)
A[0, 1] = (a * p + a * (1.0 - p)) / (1.0 - w)
A[0, 2] = (a * p + a * (1.0 - p))
A[1, 0] = (a * p + a * (1.0 - p))
A[1, 1] = (a * p + a * (1.0 - p)) / (1 - w)
A[1, 2] = (a * p + a * (1.0 - p))
A[2, 0] = (a * p + c2 * (1.0 - p))
A[2, 1] = (a * p + c2 * (1.0 - p)) / (1 - p * w)
A[2, 2] = (a * p + c2 * (1.0 - p))
A[3, 0] = (c1 * p + c2 * (1.0 - p))
A[3, 1] = (c1 * p + c2 * (1.0 - p))
A[3, 2] = (c1 * p + c2 * (1.0 - p))
return A
def get_game_population_2(a, b, c1, c2, d, p, w):
"""
Game for population 2 of CWL
"""
if not are_assumptions_valid(a, b, c1, c2, d, p, w):
raise ValueError("This parameters do not comply with assumptions")
B = np.empty(shape=(4, 3))
B[0, 0] = (b * p + b * (1.0 - p)) / (1.0 - w)
B[0, 1] = (b * p + b * (1.0 - p)) / (1.0 - w)
B[0, 2] = (b * p + b * (1.0 - p))
B[1, 0] = (b * p + b * (1.0 - p))
B[1, 1] = (b * p + b * (1.0 - p)) / (1.0 - w)
B[1, 2] = (b * p + b * (1.0 - p))
B[2, 0] = (b * p + d * (1.0 - p))
B[2, 1] = (b * p + d * (1.0 - p)) / (1.0 - p * w)
B[2, 2] = (b * p + d * (1.0 - p))
B[3, 0] = (d * p + d * (1.0 - p))
B[3, 1] = (d * p + d * (1.0 - p))
B[3, 2] = (d * p + d * (1.0 - p))
return B.T
# replicator
def __replicator_equation_two_populations(x, t, game1, game2, number__of_strategies_population_1, number__of_strategies_population_2):
"""
This auxiliary function codes the replicator dynamics step. Typically it is only called from replicator_trajectory_two_populations()
Parameters
----------
x: ndarray initial state (concatenated from the two populations)
t: time
game1: ndarray, game for population 1
game2: ndarray, game for population 2
number__of_strategies_population_1: int
number__of_strategies_population_2: int
Returns:
out: ndarray next state (concatenated from the two populations)
"""
x_population_1 = x[0:number__of_strategies_population_1]
#the first piece of y corresponds to population 1
x_population_2 = x[number__of_strategies_population_1:number__of_strategies_population_1 + number__of_strategies_population_2] # the second piece of y corresponds to population 2
#First Ay
fitness_vector_1 = np.dot(game1, x_population_2)
# and Bx (see equation above)
fitness_vector_2 = np.dot(game2, x_population_1)
#Now xAy
average_fitness_1 = np.dot(x_population_1, fitness_vector_1)
#And yBx
average_fitness_2 = np.dot(x_population_2, fitness_vector_2)
#the next lines correspond to equations 10.5 and 10.6 of Hofbauer and Sigmund (page 116)
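# in symbols: dx_i/dt = x_i * ((A*y)_i - x.A*y) and dy_j/dt = y_j * ((B*x)_j - y.B*x)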
new_population_1 = x_population_1 * (fitness_vector_1 - average_fitness_1)
new_population_2 = x_population_2 * (fitness_vector_2 - average_fitness_2)
return np.array(new_population_1.tolist() + new_population_2.tolist())
def replicator_trajectory_two_populations(game_matrix_1, game_matrix_2, x_0, y_0, t_vector, **kwargs):
"""
Computes a replicator trajectory for two populations, given two games, starting points and time vector.
It uses scipy's odeint.
Parameters
----------
game_matrix_1: numpy matrix (for population 1)
game_matrix_2: numpy matrix (for population 2)
x_0: ndarray
y_0: ndarray
t_vector: time array
Returns
-------
out: list
Examples
--------
#TODO: Write examples
"""
#join initial populations to fit signature of replicator_equation
start = np.array(x_0.tolist() + y_0.tolist())
number__of_strategies_population_1 = len(x_0)
number__of_strategies_population_2 = len(y_0)
#solve
soln = odeint(__replicator_equation_two_populations, start, t_vector, args=(game_matrix_1, game_matrix_2, number__of_strategies_population_1, number__of_strategies_population_2), **kwargs)
return [soln[:, i] for i in xrange(number__of_strategies_population_1 + number__of_strategies_population_2)]
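# Example (sketch; a hypothetical pair of 2x2 games rather than the CWL payoff matrices):
#   A = np.array([[1.0, 0.0], [0.0, 1.0]])
#   B = np.array([[1.0, 0.0], [0.0, 1.0]])
#   t = np.linspace(0.0, 10.0, 100)
#   traj = replicator_trajectory_two_populations(A, B, np.array([0.6, 0.4]), np.array([0.5, 0.5]), t)
#   traj[0] then holds the frequency of population 1's first strategy at each time step.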
def get_random_point_inside_simplex(dimension):
"""
Returns a vector that sums up to one, where components have been uniformly chosen.
Parameters:
----------
dimension:int
"""
exponencial = np.random.exponential(size=dimension)
exponencial /= np.sum(exponencial, dtype=float)
return exponencial
def adjusted_solution(a, b, c1, c2, d, p, w, x_0, y_0, max_t, **kwargs):
"""
Returns a steady state, by dynamically adjusting the step size and total error.
"""
tolerance = 1e-4
added_factor_vector = [10.0, 20.0, 50.0, 100.0]
game_1 = get_game_population_1(a, b, c1, c2, d, p, w)
game_2 = get_game_population_2(a, b, c1, c2, d, p, w)
t = np.linspace(0.0, max_t, 2000)
if x_0 is None or y_0 is None:
(x_0, y_0) = (get_random_point_inside_simplex(4), get_random_point_inside_simplex(3))
for added_factor in added_factor_vector:
sol = replicator_trajectory_two_populations(added_factor + game_1, added_factor + game_2, x_0, y_0, t, atol=tolerance, **kwargs)
end_point = [sol[i][-1] for i in xrange(0, 7)]
if np.allclose(sum(end_point), 2.0, atol=tolerance):
return end_point
raise ValueError("Numerics: x = {}, y = {}, a = {}, b = {}, c1 = {}, c2 = {}, d = {}, p = {}, w = {}".format(x_0.tolist(), y_0.tolist(), a, b, c1, c2, d, p, w))
def determine_outcome(solution):
tolerance = 1e-3
if not np.allclose(np.sum(solution), 2.0, atol=tolerance):
raise ValueError("Probabilities don't add up: {} ".format(solution))
elif player1_CWOL(solution, atol=tolerance) and player2_sometimes_exits_if_looks_or_defects(solution, atol=tolerance):
return (1, solution)
elif player1_alwaysD(solution, atol=tolerance) and (player2_pure_strategy(solution, atol=tolerance) or player2_mixes(solution, atol=tolerance)):
return (2, solution)
elif player2_exitifdefect(solution, atol=tolerance) and (player1_CWOL(solution, atol=tolerance) or player1_CWL(solution, atol=tolerance) or player1_CWOL_or_CWL(solution, atol=tolerance)):
return (3, solution)
else:
return (4, solution)
def determine_random_outcome(a, b, c1, c2, d, p, w, max_t, **kwargs):
"""
Starting in a random point tries to determine the outcome, given parameters.
This is the main function to be called from montecarlo procedures
"""
x_0 = get_random_point_inside_simplex(4)
y_0 = get_random_point_inside_simplex(3)
solution = adjusted_solution(a, b, c1, c2, d, p, w, x_0, y_0, max_t)
return determine_outcome(solution)
def montecarlo(a, b, c1, c2, d, p, w, max_t=300, repetitions=5000):
"""
    Takes samples for a given point in the space, counting the occurrences
    of the different outcomes, and returns them in a dictionary with the
    following keys:
    1 - Outcome 1
    2 - Outcome 2
    3 - Outcome 3
    4 - Not categorized
    5 - Numerical failure (a ValueError was raised)
"""
ans = defaultdict(int)
sum_of_solution = np.zeros(7)
for i in xrange(0, repetitions):
try:
outcome, solution = determine_random_outcome(a, b, c1, c2, d, p, w, max_t)
ans[outcome] = ans[outcome]+1
sum_of_solution += solution
except ValueError, e:
print e
ans[5] = ans[5] + 1
avg_of_solution = sum_of_solution/repetitions
return (ans, sum_of_solution)
#--------- THEORY CHECKING FUNCTIONS ----------
def is_coop_wihtout_looking_an_equilibrium(a, b, c1, c2, d, p, w):
return c1*p+c2*(1.0 - p) < a / (1.0 - w)
def is_coop_looking_an_equilibrium(a, b, c1, c2, d, p, w):
return c2 < a / (1.0 - w)
def number_of_equlibria(a, b, c1, c2, d, p, w):
CWOL = is_coop_wihtout_looking_an_equilibrium(a, b, c1, c2, d, p, w)
CWL = is_coop_looking_an_equilibrium(a, b, c1, c2, d, p, w)
if CWOL and CWL:
return 3
    elif CWOL or CWL:
return 2
else:
return 1
#--- classifier functions
def player1_CWOL(solution, atol=1e-3):
player1_plays_desired_pure_strategy = np.allclose(solution[0], 1.0, atol)
return player1_plays_desired_pure_strategy
def player1_CWL(solution, atol=1e-3):
player1_plays_desired_pure_strategy = np.allclose(solution[1], 1.0, atol)
return player1_plays_desired_pure_strategy
def player1_Cin1(solution, atol=1e-3):
player1_plays_desired_pure_strategy = np.allclose(solution[2], 1.0, atol)
return player1_plays_desired_pure_strategy
def player1_alwaysD(solution, atol=1e-3):
player1_plays_desired_pure_strategy = np.allclose(solution[3], 1.0, atol)
return player1_plays_desired_pure_strategy
def player1_pure_strategy(solution, atol=1e-3):
return (player1_CWOL(solution, atol) or player1_CWL(solution, atol) or player1_Cin1(solution, atol) or player1_alwaysD(solution, atol))
def player1_CWOL_or_CWL(solution, atol=1e-3):
#solution[0:1] is now solution[0:2]
player1_mixes_CWL_CWOL = np.allclose(np.sum(solution[0:2]), 1.0, atol)
return player1_mixes_CWL_CWOL and not player1_pure_strategy(solution, atol)
def player1_mixes(solution, atol=1e-3):
#solution[0:3] is now solution[0:4]
player1_mixes = np.allclose(np.sum(solution[0:4]), 1.0, atol)
return player1_mixes and not player1_pure_strategy(solution, atol)
def player2_exitiflook(solution, atol=1e-3):
player2_plays_desired_pure_strategy = np.allclose(solution[4], 1.0, atol)
return player2_plays_desired_pure_strategy
def player2_exitifdefect(solution, atol=1e-3):
player2_plays_desired_pure_strategy = np.allclose(solution[5], 1.0, atol)
return player2_plays_desired_pure_strategy
def player2_alwaysexit(solution, atol=1e-3):
player2_plays_desired_pure_strategy = np.allclose(solution[6], 1.0, atol)
return player2_plays_desired_pure_strategy
def player2_pure_strategy(solution, atol=1e-3):
    return (player2_exitifdefect(solution, atol=atol) or player2_exitiflook(solution, atol=atol) or player2_alwaysexit(solution, atol=atol))
def player2_mixes(solution, atol=1e-3):
#solution[4:6] is now changed to solution[4:7], please verify.
player2_mixes = np.allclose(np.sum(solution[4:7]), 1.0, atol)
return player2_mixes and not player2_pure_strategy(solution, atol=atol)
def player2_sometimes_exits_if_looks_or_defects(solution, atol=1e-3):
player2_sometimes_exits_if_looks = not np.allclose(solution[4], 0.0, atol)
player2_sometimes_exits_if_defects = not np.allclose(solution[5], 0.0, atol)
return player2_sometimes_exits_if_looks or player2_sometimes_exits_if_defects
# Additional plot beautifier functions:
def summarize_binary_list(lista):
"""
    Determines the edges (start, end) of each run of 1's in a binary list.
"""
ans = []
x_0 = None
tamano = len(lista)
for i in xrange(tamano):
if lista[i] == 1 and x_0 is None:
x_0 = i
end_of_sequence = lista[i] == 0
end_of_array = i == (tamano-1) and lista[i] == 1
if (end_of_sequence or end_of_array) and x_0 is not None:
if end_of_sequence:
ans.append((x_0, i-1))
if end_of_array:
ans.append((x_0, i))
x_0 = None
return ans
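#For example, summarize_binary_list([0, 1, 1, 0, 1]) returns [(1, 2), (4, 4)]:
#one run of 1's spanning indices 1-2 and a single 1 at index 4.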
#######################################################
#######################################################
### FIGURE 2 PREPARATION
def clear_past_figs():
plt.close()
plt.clf()
plt.cla()
plt.close()
#del f, fig_all
#gc.collect()
def export_graph(f_i, f_name):
#f_i.savefig(output_dir+f_name+'.png',dpi=300)
#f_i.savefig(output_dir+f_name+'.png',dpi=600)
f_i.savefig(output_dir+f_name+'.pdf', dpi=600) # This one looks the best
print f_name, 'exported as pdf at 600 dpi.' # 300dpi_png, 600dpi_png,
# Figure 2B and 2C calculations:
print 'Calculating or loading values for Figure 2B and Figure 2C'
p = 0.5 + 0.01
b = 1.0
c1 = 4.0
c2 = 12.0
d = -10.0
w = 7.0/8.0 + 0.02
repetitions = 10000
number_of_points = 50
if Calculate:
a_interval = np.linspace(0.0+0.1, 2.0, number_of_points, endpoint=False)
a_interval_tight = np.linspace(0.0+0.1, 2.0, number_of_points) # TODO: change to 300?
#lets plot the theory predictions first as a shade
calculated_equilibria=[number_of_equlibria(a, b, c1, c2, d, p, w) for a in a_interval_tight]
one_equilibrium_region = summarize_binary_list([ce == 1 for ce in calculated_equilibria])
two_equilibria_region = summarize_binary_list([ce == 2 for ce in calculated_equilibria])
three_equilibria_region = summarize_binary_list([ce == 3 for ce in calculated_equilibria])
#first the sampling
outcome_1 = []
outcome_2 = []
outcome_3 = []
outcome_4 = []
no_outcome = []
strategy_1 = []
strategy_2 = []
strategy_3 = []
strategy_4 = []
strategy_5 = []
strategy_6 = []
strategy_7 = []
for a in a_interval_tight: # TODO: should this be a_interval?
diccionario, avg_strategy_frequency = montecarlo(a, b, c1, c2, d, p, w, repetitions=repetitions)
outcome_1.append(diccionario[1])
outcome_2.append(diccionario[2])
outcome_3.append(diccionario[3])
outcome_4.append(diccionario[4])
no_outcome.append(diccionario[5])
strategy_1.append(avg_strategy_frequency[0])
strategy_2.append(avg_strategy_frequency[1])
strategy_3.append(avg_strategy_frequency[2])
strategy_4.append(avg_strategy_frequency[3])
strategy_5.append(avg_strategy_frequency[4])
strategy_6.append(avg_strategy_frequency[5])
strategy_7.append(avg_strategy_frequency[6])
stuff = [a_interval, a_interval_tight, one_equilibrium_region, two_equilibria_region, three_equilibria_region, outcome_1, outcome_2, outcome_3, outcome_4, no_outcome, strategy_1, strategy_2, strategy_3, strategy_4, strategy_5, strategy_6, strategy_7]
pickle.dump( stuff, open( output_dir+"Figure 2_B and C_strategy frequency.saved_data", "wb" ) )
else:
(a_interval, a_interval_tight, one_equilibrium_region, two_equilibria_region, three_equilibria_region, outcome_1, outcome_2, outcome_3, outcome_4, no_outcome, strategy_1, strategy_2, strategy_3, strategy_4, strategy_5, strategy_6, strategy_7) = pickle.load(open(output_dir+"Figure 2_B and C_strategy frequency.saved_data", "r"))
# Plotting:
clear_past_figs()
def process_ax(ax):
'''
Shades figure to correspond to equilibria regions.
'''
# hack to fill white space in the middle:
midpoint = (a_interval_tight[one_equilibrium_region[0][1]] + a_interval_tight[two_equilibria_region[0][0]])/2
midpoint1 = (a_interval_tight[two_equilibria_region[0][1]] + a_interval_tight[three_equilibria_region[0][0]])/2
for dupla in one_equilibrium_region:
#ax.axvspan(p_interval_tight[dupla[0]], p_interval_tight[dupla[1]], facecolor='red', alpha=0.2)
ax.axvspan(a_interval_tight[dupla[0]], midpoint, facecolor='white', alpha=1) # red, alpha=0.2
print 'one', dupla, a_interval_tight[dupla[0]], a_interval_tight[dupla[1]]
for dupla in two_equilibria_region:
#ax.axvspan(p_interval_tight[dupla[0]], p_interval_tight[dupla[1]], facecolor='blue', alpha=0.2)
ax.axvspan(midpoint, midpoint1, facecolor='0.50', alpha=0.2) # blue or .80
print 'two', dupla, a_interval_tight[dupla[0]], a_interval_tight[dupla[1]]
for dupla in three_equilibria_region:
ax.axvspan(midpoint1, a_interval_tight[dupla[1]], facecolor='0.10', alpha=0.2) # yellow or .20
print 'three', dupla, a_interval_tight[dupla[0]], a_interval_tight[dupla[1]]
avoid_end = -1 # remove last 1 point
#######################################################
#######################################################
### PLOT FIGURE 2(B): Frequency vs. a-value
print
print 'Plotting Figure 2B'
clear_past_figs()
f = plt.figure(figsize=(10,10))
process_ax(f.gca())
plt.plot(a_interval[:avoid_end], (np.array(outcome_1)/(float(repetitions)-np.array(no_outcome)))[:avoid_end], 'bo-', label='Cooperate without looking')
plt.plot(a_interval[:avoid_end], (np.array(outcome_2)/(float(repetitions)-np.array(no_outcome)))[:avoid_end], 'ro-', label='Always defect')
plt.plot(a_interval[:avoid_end], (np.array(outcome_3)/(float(repetitions)-np.array(no_outcome)))[:avoid_end], 'yo-', label='Cooperate with looking')
plt.plot(a_interval[:avoid_end], (np.array(outcome_4)/(float(repetitions)-np.array(no_outcome)))[:avoid_end], 'ko-', label='Other')
plt.grid()
plt.legend(loc='best')
plt.ylim((-0.01, 1.01))
plt.xlim((a_interval[0]-0.01, a_interval[-1]+0.01))
plt.xlabel('a')
plt.ylabel('Frequency')
plt.title('Frequency vs a')
export_graph(f, 'Figure_2B')
#######################################################
#######################################################
### PLOT FIGURE 2(C): Average frequency of strategies for players 1 and 2
print
print 'Plotting Figure 2C'
clear_past_figs()
fig_all, (ax1, ax2) = plt.subplots(2,1, sharex=False, sharey=False) # make 2x1 grid of subplots
fig_all.set_size_inches(10, 15)
#plt.subplots_adjust(wspace=0.30, hspace=0.15)
#prepare plots
for ax in (ax1, ax2):
ax.grid()
ax.legend(loc='best')
ax.set_ylim((-0.01, 1.01))
ax.set_xlim((a_interval[0]-0.01, a_interval[-1]+0.01))
ax.set_xlabel('a')
ax.set_ylabel('Frequency')
process_ax(ax)
plt.tight_layout()
#player1
ax1.plot(a_interval[:avoid_end], (np.array(strategy_1)/(float(repetitions)-np.array(no_outcome)))[:avoid_end], 'bo-', label='P1 CWOL')
ax1.plot(a_interval[:avoid_end], (np.array(strategy_2)/(float(repetitions)-np.array(no_outcome)))[:avoid_end], 'ro-', label='P1 CWL')
ax1.plot(a_interval[:avoid_end], (np.array(strategy_3)/(float(repetitions)-np.array(no_outcome)))[:avoid_end], 'yo-', label='P1 C in 1')
ax1.plot(a_interval[:avoid_end], (np.array(strategy_4)/(float(repetitions)-np.array(no_outcome)))[:avoid_end], 'ko-', label='P1 All D')
ax1.set_title('Average Frequency of Strategies - Player 1')
ax1.legend(loc='best')
#player2
ax2.plot(a_interval[:avoid_end], (np.array(strategy_5)/(float(repetitions)-np.array(no_outcome)))[:avoid_end], 'co-', label='P2 Exit if Look')
ax2.plot(a_interval[:avoid_end], (np.array(strategy_6)/(float(repetitions)-np.array(no_outcome)))[:avoid_end], 'mo-', label='P2 Exit if Defect')
ax2.plot(a_interval[:avoid_end], (np.array(strategy_7)/(float(repetitions)-np.array(no_outcome)))[:avoid_end], 'go-', label='P2 Always Exit')
ax2.set_title('Average Frequency of Strategies - Player 2')
ax2.legend(loc='best')
fig_all.tight_layout()
export_graph(fig_all, 'Figure_2C')
#######################################################
#######################################################
### PLOT FIGURE 2(A): Player 1 and 2 strategy replicator trajectories from single simulation run
print
print 'Calculating or loading values for Figure 2A'
# Decide which a-values to use and plot.
def get_a_value_from_interval(bounds):
for (bound_x, bound_y) in bounds:
i_chosen = int(floor((bound_x+bound_y)/2.0))
yield a_interval_tight[i_chosen]
a_selected = list(get_a_value_from_interval([one_equilibrium_region[0], two_equilibria_region[0], three_equilibria_region[0]]))
# This setup supports having multiple columns, i.e. one column for each a-value.
# The below is currently configured to hide all but the second column - however, we could easily disable this to return to all-column view, simply by commenting out the following line:
a_selected = a_selected[1:2]
print 'Using these a-values:', a_selected
# Randomly seed strategy frequencies:
if Calculate:
tolerance_current=1e-2 # previously, 1e-3. arbitrary designation.
x_0 = get_random_point_inside_simplex(4) # random frequency
y_0 = get_random_point_inside_simplex(3) # random frequency
t_vector = np.linspace(0.0, 30.0, 1000) # time values
parameters_saved = [x_0, y_0, t_vector, tolerance_current, b, c1, c2, d, p, w] # a_selected is not necessary
pickle.dump( parameters_saved, open( output_dir+"Figure 2_A_single simulation run of strategy replicator trajectories.saved_data", "wb" ) )
else: # load previous working version
(x_0, y_0, t_vector, tolerance_current, b, c1, c2, d, p, w) = pickle.load(open(output_dir+"Figure 2_A_single simulation run of strategy replicator trajectories.saved_data", "r"))
# Begin plot:
print
print 'Plotting Figure 2A'
clear_past_figs()
fig_all, ax_arr = plt.subplots(2,len(a_selected), sharex=False, sharey=False, figsize=(10,20)) # make 2 rows x 3 columns grid of subplots; (30, 20) size when 3x2
for i in range(len(a_selected)):
    if len(a_selected) == 1: # Treat situation differently based on whether we are comparing a-values or not.
(ax_p1, ax_p2) = (ax_arr[0], ax_arr[1])
else:
(ax_p1, ax_p2) = (ax_arr[0,i], ax_arr[1,i])
a_cur = a_selected[i]
solution = replicator_trajectory_two_populations(get_game_population_1(a_cur, b, c1, c2, d, p, w), get_game_population_2(a_cur, b, c1, c2, d, p, w), x_0, y_0, t_vector, atol=tolerance_current)
for ax in (ax_p1, ax_p2):
ax.set_ylim((-0.1, 1.1))
ax.set_xlim(0,10)
ax.set_ylabel('Frequency')
ax.set_xlabel('Time')
ax.grid(True)
ax_p1.plot(t_vector, solution[0], 'b-', label='P1 C wout looking', linewidth=2.0)
ax_p1.plot(t_vector, solution[1], 'g-', label='P1 Observe and C', linewidth=2.0)
ax_p1.plot(t_vector, solution[2], 'y-', label='P1 Observe and C only if 1 is chosen', linewidth=2.0)
ax_p1.plot(t_vector, solution[3], 'r-', label='P1 ALLD', linewidth=2.0)
ax_p2.plot(t_vector, solution[4], 'm--', label='P2 Continue iff P1 C wout looking', linewidth=2.0)
ax_p2.plot(t_vector, solution[5], 'y--', label='P2 Continue iff P1 C', linewidth=2.0)
ax_p2.plot(t_vector, solution[6], 'r--', label='P2 Exit', linewidth=2.0)
ax_p1.set_title('Player 1 Strategies') # 'Player 1. a = '+str(a_cur)+'.'
ax_p2.set_title('Player 2 Strategies') # 'Player 2. a = '+str(a_cur)+'.'
ax_p1.legend(loc='best')
ax_p2.legend(loc='best')
#fig_all.suptitle('Single simulation run, replicator trajectory; tolerance = '+str(tolerance_current)+'.', fontsize=24)
fig_all.tight_layout()
fig_all.subplots_adjust(top=0.85)
# fig_all.show()
export_graph(fig_all, 'Figure_2A')
#######################################################
#######################################################
print
print 'CW(O)L Simulation Calculations and Figures Complete.'
| mit | -9,027,010,238,727,414,000 | 38.818627 | 332 | 0.616148 | false |
walkr/cryex | cryex/coins/poloniex.py | 1 | 2638 | POLONIEX_REPAIRS = {
"1cr": "1CR",
"aby": "ABY",
"adn": "ADN",
"amp": "AMP",
"arch": "ARCH",
"bbr": "BBR",
"bcn": "BCN",
"bcy": "BCY",
"bela": "BELA",
"bitcny": "BITCNY",
"bits": "BITS",
"bitusd": "BITUSD",
"blk": "BLK",
"block": "BLOCK",
"btcd": "BTCD",
"btm": "BTM",
"bts": "BTS",
"burst": "BURST",
"c2": "C2",
"cga": "CGA",
"clam": "CLAM",
"cnmt": "CNMT",
"cure": "CURE",
"dash": "DASH",
"dgb": "DGB",
"diem": "DIEM",
"doge": "DOGE",
"emc2": "EMC2",
"eth": "ETH",
"exe": "EXE",
"exp": "EXP",
"fct": "FCT",
"fibre": "FIBRE",
"fldc": "FLDC",
"flo": "FLO",
"flt": "FLT",
"gap": "GAP",
"gemz": "GEMZ",
"geo": "GEO",
"gmc": "GMC",
"grc": "GRC",
"grs": "GRS",
"huc": "HUC",
"hyp": "HYP",
"hz": "HZ",
"index": "INDEX",
"ioc": "IOC",
"lqd": "LQD",
"ltbc": "LTBC",
"ltc": "LTC",
"maid": "MAID",
"mcn": "MCN",
"mil": "MIL",
"mint": "MINT",
"mmc": "MMC",
"mmnxt": "MMNXT",
"mrs": "MRS",
"myr": "MYR",
"naut": "NAUT",
"nav": "NAV",
"nbt": "NBT",
"neos": "NEOS",
"nmc": "NMC",
"nobl": "NOBL",
"note": "NOTE",
"noxt": "NOXT",
"nsr": "NSR",
"nxt": "NXT",
"omni": "OMNI",
"piggy": "PIGGY",
"pink": "PINK",
"pot": "POT",
"ppc": "PPC",
"pts": "PTS",
"qbk": "QBK",
"qora": "QORA",
"qtl": "QTL",
"rads": "RADS",
"rby": "RBY",
"rdd": "RDD",
"ric": "RIC",
"sc": "SC",
"sdc": "SDC",
"silk": "SILK",
"sjcx": "SJCX",
"str": "STR",
"swarm": "SWARM",
"sync": "SYNC",
"sys": "SYS",
"unity": "UNITY",
"via": "VIA",
"vrc": "VRC",
"vtc": "VTC",
"wdc": "WDC",
"xbc": "XBC",
"xc": "XC",
"xch": "XCH",
"xcn": "XCN",
"xcp": "XCP",
"xcr": "XCR",
"xdn": "XDN",
"xdp": "XDP",
"xem": "XEM",
"xmg": "XMG",
"xmr": "XMR",
"xpb": "XPB",
"xpm": "XPM",
"xrp": "XRP",
"xst": "XST",
"xvc": "XVC",
"yacc": "YACC",
}
def update():
new_pairs = {}
# Add *_BTC pair
for down, up in POLONIEX_REPAIRS.items():
new_key = '_'.join((down, 'btc'))
new_value = '_'.join(('BTC', up))
new_pairs[new_key] = new_value
# Add *_USD pair
for down in ['btc', 'eth', 'ltc', 'xmr', 'dash', 'xrp', 'nxt', 'str']:
up = down.upper()
new_key = '_'.join((down, 'usd'))
new_value = '_'.join(('USDT', up))
new_pairs[new_key] = new_value
POLONIEX_REPAIRS.update(new_pairs)
update()
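# After update() the repair table resolves both quote currencies, e.g.
# POLONIEX_REPAIRS['eth_btc'] -> 'BTC_ETH' and POLONIEX_REPAIRS['eth_usd'] -> 'USDT_ETH'.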
| mit | 6,310,330,788,044,527,000 | 18.686567 | 74 | 0.392722 | false |
chrislit/abydos | tests/fingerprint/test_fingerprint_occurrence.py | 1 | 1890 | # Copyright 2018-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.tests.fingerprint.test_fingerprint_occurrence.
This module contains unit tests for abydos.fingerprint.Occurrence
"""
import unittest
from abydos.fingerprint import Occurrence
class OccurrenceFingerprintTestCases(unittest.TestCase):
"""Test Cisłak & Grabowski's occurrence fingerprint functions.
abydos.fingerprint.Occurrence
"""
fp = Occurrence()
def test_occurrence_fingerprint(self):
"""Test abydos.fingerprint.Occurrence."""
# Base case
self.assertEqual(self.fp.fingerprint(''), '0' * 16)
# https://arxiv.org/pdf/1711.08475.pdf
self.assertEqual(self.fp.fingerprint('instance'), '1110111000010000')
self.assertEqual(self.fp.fingerprint('inst'), '0100111000000000')
self.assertEqual(
Occurrence(15).fingerprint('instance'), '111011100001000'
)
self.assertEqual(
Occurrence(32).fingerprint('instance'),
'11101110000100000000000000000000',
)
self.assertEqual(
Occurrence(64).fingerprint('instance'),
'11101110000100000000000000000000' + '0' * 32,
)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 6,633,291,817,443,212,000 | 31.568966 | 77 | 0.692959 | false |
Droriel/python_training | generator/contact.py | 1 | 7547 | # -*- coding: utf-8 -*-
import random
import string
from model.contact import PersonalData, PhoneNumbers, Emails, Www, AdditionalData, Notes, ContactBaseData, \
ContactAllData, BirthDate, AnniversaryDate
import jsonpickle
import os.path
import sys
import getopt
try:
opts, args=getopt.getopt(sys.argv[1:], 'n:f:', ['number of contacts', 'file'])
except getopt.GetoptError as err:
    print(err)  # getopt has no usage() helper; report the parse error before exiting
sys.exit(2)
n = 5
f = 'data/contacts.json'
for o, a in opts:
if o == '-n':
n = int(a)
elif o == '-f':
f = a
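# Typical invocation (illustrative, assuming the script is run directly):
#   python contact.py -n 10 -f data/contacts.json
# generates 10 random contacts plus the fixed edge cases and writes them to that file.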
def random_string(maxlen):
symbols = string.ascii_letters + ' '*13 + '-'*3 + '_'*3
# + "'"*3
return ''.join([random.choice(symbols) for i in range(random.randrange(maxlen))])
def random_string_with_new_line(prefix, maxlen):
symbols = string.ascii_letters + string.digits + ' '*15 + '\n'*5 + '-'*3 + '_'*3
# + string.punctuation
return prefix + ''.join([random.choice(symbols) for i in range(random.randrange(maxlen))])
def random_email(maxlen):
symbols = string.ascii_letters + '-' * 3 + '_' * 3
return ''.join([random.choice(symbols) for i in range(random.randrange(maxlen+5))]) + '@' +\
''.join([random.choice(symbols) for i in range(random.randrange(maxlen))]) + '.' +\
''.join([random.choice(string.ascii_letters) for i in range(random.randrange(2,4))])
def random_phone_number(maxlen):
symbols = str(string.digits) * 4 + '('+ ')' + '+' + '-' + ' '
return ''.join([random.choice(symbols) for i in range(random.randrange(maxlen))])
def random_www(maxlen):
symbols = string.ascii_letters + '-'
return 'www.' + ''.join([random.choice(symbols) for i in range(random.randrange(maxlen))]) + '.'+\
''.join([random.choice(string.ascii_letters) for i in range(random.randrange(2,4))])
def random_day():
return random.randrange(1, 31)
def random_month():
return random.randrange(1, 12)
def random_year():
symbols = string.digits
return ''.join([random.choice(symbols) for i in range(4)])
testData = [ContactAllData(contactBaseData=ContactBaseData(firstname=random_string(10), lastname=random_string(18)),
personalData=PersonalData(middlename=random_string(10), nickname=random_string(10),
title=random_string(10), company=random_string(20),
address=random_string_with_new_line('Adres podstawowy: ', 30)),
phoneNumbers=PhoneNumbers(home=random_phone_number(12), mobile=random_phone_number(16),
work=random_phone_number(12), fax=random_phone_number(10)),
emails=Emails(email1=random_email(8), email2=random_email(5), email3=random_email(6)),
www=Www(www=random_www(30)),
birthDate=BirthDate(day=random_day(), month=random_month(), year=random_year()),
anniversaryDate=AnniversaryDate(day=random_day(), month=random_month(), year=random_year()),
additionalData=AdditionalData(address=random_string_with_new_line('Adres dodatkowy: ', 30) ,
phone=random_phone_number(12)),
notes=Notes(notes=random_string_with_new_line('n', 100)))
for i in range(n)]\
+ \
[ContactAllData(contactBaseData=ContactBaseData(firstname='', lastname=''),
personalData=PersonalData(middlename='', nickname='',
title='', company='',
address=''),
phoneNumbers=PhoneNumbers(home='', mobile='',
work='', fax=''),
emails=Emails(email1='', email2='', email3=''),
www=Www(www=''),
birthDate=BirthDate(day=-1, month=0, year=''),
anniversaryDate=AnniversaryDate(day=-1, month=0, year=''),
additionalData=AdditionalData(address='' ,
phone=''),
notes=Notes(notes=''))]\
+\
[ContactAllData(contactBaseData=ContactBaseData(firstname=random_string(10), lastname=random_string(18)),
personalData=PersonalData(middlename=random_string(10), nickname=random_string(10),
title=random_string(10), company=random_string(20),
address=''),
phoneNumbers=PhoneNumbers(home='', mobile='',
work='', fax=''),
emails=Emails(email1='', email2='', email3=''),
www=Www(www=''),
birthDate=BirthDate(day=31, month=12, year='1999'),
anniversaryDate=AnniversaryDate(day=1, month=1, year='2010'),
additionalData=AdditionalData(address='',
phone=''),
notes=Notes(notes=random_string_with_new_line('n', 100)))]\
+ \
[ContactAllData(contactBaseData=ContactBaseData(firstname=' a ', lastname=' b '),
personalData=PersonalData(middlename='', nickname='',
title='', company='',
address=''),
phoneNumbers=PhoneNumbers(home='', mobile='',
work='', fax=''),
emails=Emails(email1='', email2='', email3=''),
www=Www(www=''),
birthDate=BirthDate(day=-1, month=0, year=''),
anniversaryDate=AnniversaryDate(day=-1, month=0, year=''),
additionalData=AdditionalData(address='',
phone=''),
notes=Notes(notes=''))] \
+ \
[ContactAllData(contactBaseData=ContactBaseData(firstname='a b', lastname='c d'),
personalData=PersonalData(middlename='', nickname='',
title='', company='',
address=''),
phoneNumbers=PhoneNumbers(home='', mobile='',
work='', fax=''),
emails=Emails(email1='', email2='', email3=''),
www=Www(www=''),
birthDate=BirthDate(day=-1, month=0, year=''),
anniversaryDate=AnniversaryDate(day=-1, month=0, year=''),
additionalData=AdditionalData(address='',
phone=''),
notes=Notes(notes=''))]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..' , f)
with open(file, 'w', encoding='utf8') as out:
jsonpickle.set_encoder_options('json', indent=2)
out.write(jsonpickle.encode(testData)) | apache-2.0 | -4,843,098,957,106,365,000 | 50.69863 | 119 | 0.481118 | false |
asoliveira/NumShip | source/Navio-back.py | 1 | 56391 | # -*- coding: utf-8 -*-
#
#This file is part of a program called NumShip
#Copyright (C) 2011,2012 Alex Sandro Oliveira
#NumShip is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from Casco import *
from Leme import *
import scipy as sp
from scipy import linalg
from scipy import stats
from Prop import *
class inte(object):
"""
Classe que realisa a integração no tempo
:version:191010
:author: Alex
"""
def __init__(self):
"""
"""
pass
def rk4(self, function, x, t0, dt, par = None):
"""
Integrador runge-kutta
"""
k1 = function(x, t0, par)
k2 = function(x + 1./2*dt*k1, t0 + 1./2*dt, par)
k3 = function(x + 1./2*dt*k2, t0 + 1./2*dt, par)
k4 = function(x + dt*k3, t0 + dt, par)
xt = x + 1./6*(k1+ 2.*k2+ 2.*k3+ k4)*dt
return xt
def euler(self, f, x, t0, dt, par= None ):
"""
"""
return x + f(x, t0, par)*dt
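    #Minimal usage sketch (toy ODE, not part of the ship model): both steppers expect
    #f(x, t, par) and advance the state by a single step of size dt, e.g.
    #   integrador = inte()
    #   decaimento = lambda x, t, par: -par * x
    #   x1 = integrador.rk4(decaimento, sp.array([1.0]), 0., 0.1, par=0.5)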
class navio:
"""
Classe de navios
"""
tipo = 'Escolhido de acordo com self.Tipo'
data = '10-11-2010'
autor = 'Alex'
def __init__(self, DicionarioDerivadas, Nome = 'Teste', Tipo = 'TP'):
""""
Construtor do navio
__________________________
Variáveis de entrada:
Nome (string)-- Nome do Navio. Não possui relevância;
Tipo ('TP')-- Tipo de modelo numérico adotado para a construção do Leme
"""
self.nome = Nome
self.vel = sp.zeros((6, 1))
self.acel = sp.zeros((6, 1))
self.pos = sp.zeros((6, 1))
self.dic = DicionarioDerivadas
self.tipo = Tipo
self.integrador = inte()
self.uc = sp.array(self.dic['unom'])
if Tipo == 'TP':
self.leme = lemeTris(DicionarioDerivadas)
self.casco = cascoTris(DicionarioDerivadas)
self.prop = prop()
elif Tipo == 'MARAD':
self.leme = lemeMarAd(DicionarioDerivadas)
self.casco = cascoMarAd(DicionarioDerivadas)
self.prop = propMarAd(DicionarioDerivadas)
def MostraVel(self):
"""
Retorna a Velocidade da embarcação
"""
return self.vel
def MostraAcel(self):
"""
Retorna a aceleração da embarcação
"""
return self.acel
def MostraLeme(self):
"""
Retorna o leme em rad da embarcação
"""
return self.leme.MostraLeme()
def MostraLemeCom(self):
"""
Retorna o leme em rad da embarcação
"""
return self.leme.MostraLemeCom()
def MostraPos(self):
"""
Retorna a posição da embarcação
"""
return self.pos
def MostraRotCom(self):
"""
Retorna a rotação comandada
"""
return self.prop.MostraRotCom()
def MostraRot(self):
"""
Retorna a rotação
"""
return self.prop.MostraRot()
def MostraVelCom(self):
"""
Retorna a velocidade comandada
"""
return self.uc
def MudaVelCom(self, uc):
"""
Muda a velocidade comandada
"""
self.uc = uc.copy()
self.prop.MudaVelCom(uc)
def MudaLemeCom(self, AngLeme):
"""
Muda o leme comandado da embarcação
__________________________
Variáveis de entrada:
"""
temp = AngLeme.copy()
self.leme.MudaLemeCom(temp)
def MudaVel(self, Velocidade):
"""
Muda a velocidade da embarcação
__________________________
Variáveis de entrada:
Velocidade -- velocidade (m/s)
"""
temp = Velocidade.copy()
self.vel = temp
self.casco.MudaVel(temp)
self.leme.MudaVel(temp)
self.prop.MudaVel(temp)
def MudaPos(self, Posicao):
"""
Muda a posição da embarcação
__________________________
Variáveis de entrada:
Posição -- posição (m)
"""
temp = Posicao.copy()
self.pos = temp
self.casco.MudaPos(temp)
self.leme.MudaPos(temp)
self.prop.MudaPos(temp)
def MudaRotCom(self, Rot):
"""
Muda a rotação Comandada da embarcação
"""
self.prop.MudaRotCom(Rot)
def CalcFx(self):
"""
Calcula a força em Surge
"""
m = self.dic['m']*(self.dic['rho']*(self.dic['lpp']**3)/2)
u = self.MostraVel()[0]
v = self.MostraVel()[1]
p = self.MostraVel()[3]
r = self.MostraVel()[5]
xg = self.dic['xg']
zg = self.dic['zg']
cori = m*(v*r + xg*(r**2) - zg*p*r)
if self.tipo == 'MARAD':
saida = (self.casco.Fx() + self.prop.Fx() +
self.leme.Fx(self.MostraRot(),
self.MostraVelCom()/self.MostraVel()[0]) + cori)
elif self.tipo == 'TP':
saida = self.casco.Fx() + self.leme.Fx() + self.prop.Fx() + cori
return saida
def CalcFy(self):
"""
Calcula a força em Sway
"""
m = self.dic['m']*(self.dic['rho']*(self.dic['lpp']**3)/2)
u = self.MostraVel()[0]
v = self.MostraVel()[1]
p = self.MostraVel()[3]
r = self.MostraVel()[5]
xg = self.dic['xg']
zg = self.dic['zg']
cori = -m*u*r
if self.tipo == 'MARAD':
saida = (self.casco.Fy() + self.leme.Fy(self.MostraRot()) +
self.prop.Fy() + cori)
elif self.tipo == 'TP':
saida = self.casco.Fy() + self.leme.Fy() + self.prop.Fy() + cori
return saida
def CalcK(self):
"""
Calcula o momento de Roll
"""
m = self.dic['m']*(self.dic['rho']*(self.dic['lpp']**3)/2)
u = self.MostraVel()[0]
v = self.MostraVel()[1]
p = self.MostraVel()[3]
r = self.MostraVel()[5]
xg = self.dic['xg']
zg = self.dic['zg']
cori = m*zg*u*r
if self.tipo == 'MARAD':
saida = (self.casco.K() + self.leme.K(self.MostraRot()) +
self.prop.K() + cori)
elif self.tipo == 'TP':
saida = self.casco.K() + self.leme.K() + self.prop.K() + cori
return saida
def CalcN(self):
"""
Calcula o momento de Yaw
"""
m = self.dic['m']*(self.dic['rho']*(self.dic['lpp']**3)/2)
u = self.MostraVel()[0]
v = self.MostraVel()[1]
p = self.MostraVel()[3]
r = self.MostraVel()[5]
xg = self.dic['xg']
zg = self.dic['zg']
cori = -m*xg*u*r
if self.tipo == 'MARAD':
saida = (self.casco.N() + self.leme.N(self.MostraRot()) +
self.prop.N() + cori)
elif self.tipo == 'TP':
saida = self.casco.N() + self.leme.N() + self.prop.N() + cori
return saida
def VetF(self, p=None):
"""
Vetor de forças
_________________________
Variáveis de entrada:
p -- tupla
p[0] (integer)-- Graus de liberdade
p[1] (tupla)-- Com pesos
"""
if p == None:
GrausDeLib =4
peso = None
elif len(p)==1:
GrausDeLib =p[0]
peso = None
elif len(p)==2:
GrausDeLib =p[0]
peso = p[1]
if peso == None:
if GrausDeLib == 4:
saida = sp.array([self.CalcFx(), self.CalcFy(),
self.CalcK(), self.CalcN()])
elif GrausDeLib == 3:
saida = sp.array([self.CalcFx(), self.CalcFy(), self.CalcN()])
else:
lemearq = self.MostraLeme()
velarq = self.MostraVel()
uc = self.MostraVelCom()
####################
self.leme.MudaLemeDir(sp.array(0.))
self.MudaVelCom(velarq[0]) #condição eta=1
## ####################
## Aquilo que depende somente de u
##
## ####################
veltemp = sp.zeros((6, 1))
veltemp[0] = velarq[0]
self.MudaVel(veltemp)
fu = self.VetF((GrausDeLib, ))
####################
veltemp = sp.zeros((6, 1))
veltemp[0] = velarq[0]
veltemp[1] = velarq[1]
self.MudaVel(veltemp)
# leme = 0 e eta = 1
fbeta = self.VetF((GrausDeLib, )) - fu
it = 0
fbeta1 = fbeta.copy()
for arg in peso[0]:
fbeta[it] = arg* fbeta[it]
it +=1
####################
veltemp = sp.zeros((6, 1))
veltemp[5] = velarq[5]
veltemp[0] = velarq[0]
self.MudaVel(veltemp)
fr = self.VetF((GrausDeLib, )) - fu
fr1 = fr.copy()
it = 0
for arg in peso[1]:
fr[it] = arg* fr[it]
it +=1
####################
self.leme.MudaLemeDir(lemearq)
veltemp = sp.zeros((6, 1))
veltemp[0] = velarq[0]
self.MudaVel(veltemp)
fleme = self.VetF((GrausDeLib, )) - fu
fleme1 = fleme.copy()
it = 0
for arg in peso[2]:
fleme[it] = arg* fleme[it]
it +=1
####################
self.MudaVel(velarq)
self.MudaVelCom(uc)
fbetarl = self.VetF((GrausDeLib, )) - (fbeta1 + fr1 + fleme1)
it = 0
for arg in peso[3]:
fbetarl[it] = arg* fbetarl[it]
it +=1
del it
saida = fbeta + fr + fleme + fbetarl
return saida
def H (self, GrausDeLib=4):
"""
Matriz de massa menos matriz de massa adicional
_________________________
Variáveis de entrada:
GrausDeLib (integer)-- Graus de liberdade
"""
H = None
H = self.casco.M(GrausDeLib) - self.casco.Ma(GrausDeLib)
return sp.mat(H)
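        #This is the left-hand side of (M - Ma) * dv/dt = F; f2() and f() below build
        #their system matrix from it and invert it with linalg.solve at each step.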
def MatRot(self, p=None):
"""
Retorna a matrix de rotação de do referêncial solidárial para o
inercial
"""
if p== None:
roll= self.MostraPos()[3]
pitch = self.MostraPos()[4]
yaw = self.MostraPos()[5]
else:
roll= p[0]
pitch = p[1]
yaw = p[2]
Rot = sp.array([[sp.cos(yaw)*sp.cos(pitch),
-sp.sin(yaw)*sp.cos(roll) + sp.cos(yaw)*sp.sin(pitch)*sp.sin(roll),
sp.sin(yaw)*sp.sin(roll) + sp.cos(yaw)*sp.cos(roll)*sp.sin(pitch) ],
[sp.sin(yaw)*sp.cos(pitch),
sp.cos(yaw)*sp.cos(roll) + sp.sin(roll)*sp.sin(pitch)*sp.sin(yaw),
-sp.cos(yaw)*sp.sin(roll) + sp.sin(yaw)*sp.cos(roll)*sp.sin(pitch) ],
[-sp.sin(pitch), sp.cos(pitch)*sp.sin(roll),
sp.cos(pitch)*sp.cos(roll)] ])
Rot.shape = (3, 3)
Rot= sp.matrix(Rot)
return Rot
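        #Usage sketch: the returned matrix maps body-fixed linear velocities into the
        #inertial frame, as done in fvein() and in the simulators, e.g.
        #   VelIn = self.MatRot() * sp.matrix(self.MostraVel()[0:3])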
def f2 (self, VetF, H):
"""
Calcula o valor de f(x) na equação
x' = f(x)
onde x são é o vetor de velocidades no sistema solidário
_________________________
Variáveis de entrada:
GrausDeLib (integer)-- Graus de liberdade
"""
GrausDeLib = len(VetF)
if GrausDeLib == 4:
a= sp.zeros((6, 6))
a[5, 5] = 1.
a[4, 4] = 1.
a[:4, :4]= H
b= sp.zeros((6, 1))
b [4, 0] = self.vel[3]
b [5, 0] = self.vel[5]*sp.cos(self.MostraPos()[3])
b[:4, :]= VetF
elif GrausDeLib == 3:
a= sp.zeros((4, 4))
a[3, 3] = 1.
a[:3, :3]= H
b= sp.zeros((4, 1))
b[:3, :]= VetF
b[3, 0] = self.MostraVel()[5]
saida = linalg.solve(a, b )
return saida
def f(self, velocidade=None, t=None, p=(4, )):
"""
O p é uma tupla com o valor dos graus de liberdade
"""
GrausDeLib = p[0]
if velocidade !=None:
velarq = self.MostraVel()
posarq = self.MostraPos()
veltemp = sp.zeros((6, 1))
postemp = sp.zeros((6, 1))
if GrausDeLib==3:
veltemp[:2] = velocidade[:2]
veltemp[5] = velocidade[2]
postemp[5] = velocidade[3]
elif GrausDeLib==4:
veltemp[:2] = velocidade[:2]
veltemp[3] = velocidade[2]
veltemp[5] = velocidade[3]
postemp[3] = velocidade[4]
postemp[5] = velocidade[5]
self.MudaVel(veltemp)
self.MudaPos(postemp)
if GrausDeLib == 4:
a= sp.zeros((6, 6))
a[5, 5] = 1.
a[4, 4] = 1.
a[:4, :4]= self.H(GrausDeLib)
b= sp.zeros((6, 1))
b [4, 0] = self.vel[3]
b [5, 0] = self.vel[5]*sp.cos(self.MostraPos()[3])
b[:4, :]= self.VetF(p)
elif GrausDeLib == 3:
a= sp.zeros((4, 4))
a[3, 3] = 1.
a[:3, :3]= self.H(GrausDeLib)
b= sp.zeros((4, 1))
b[:3, :]= self.VetF(p)
b[3, 0] = self.MostraVel()[5]
saida = linalg.solve(a, b)
if velocidade !=None:
self.MudaVel(velarq)
self.MudaPos(posarq)
return saida
def fvein(self, x, t, p):
"""
x = sp.array(u, v , w)
p = ( roll, pitch, yaw)
"""
return sp.array(self.MatRot(p[0])*p[1])
def simula (self, met='euler', t0=0., dt=0.5, t=100., GrausDeLib=4,
velocidade=None, tipo='ZigZag', leme=sp.array(20.),
proa=sp.array(20.), RotCom =sp.array(1), osa=sp.array(0.05),
ospath=sp.array(150), erro=sp.array(0.05),
errotf=sp.array(0.05), errotd=sp.array(0.05)):
"""
Simulador de manobras padrão
_________________________
Variáveis de entrada:
GrausDeLib (integer)-- Graus de liberdade;
met -- Método de integração. Default- Euler;
t0 -- Tempo inicial;
dt -- Passo no tempo;
t -- Tempo final
tipo - tipo de manobra simulada. Zig-Zag10/10 e Curva_de_Giro_port ou
Curva_de_Giro_starboard . Default -Zig-Zag
__________________________
Saída:
Tupla de sp.array
(veloHis, posHis, acelHis, fHis, veloInerHis, lemeHis)
Em cada elemento da tupla:
A primeira coluna é o passo de tempo e as demais são as variáveis
veloHis -- histórico de velocidades;
posHis -- histórico de posições
acelHis --- histórico de acelerações
fHis -- histórico de forças
veloInerHis -- histórico de velocidades no sistema inercial
lemeHis -- histórico do comando de leme
"""
#
# Tipo de Simulação a ser realizada:
#
self.MudaPos( sp.array([ [0.], [0.], [0.], [0.], [0.], [0.] ]))
self.MudaVel(sp.array([ [self.dic['unom']], [0.], [0.], [0.], [0.],
[0.] ]))
self.MudaRotCom(RotCom)
self.MudaVelCom(self.dic['unom'])
#Log é o parâmetro que indica quando a simulação armazenou os dados do
#relatório
if tipo == 'Curva_de_Giro_port':
self.MudaLemeCom(sp.array(leme*sp.pi/180))
log = False
elif tipo == 'Curva_de_Giro_starboard':
self.MudaLemeCom(sp.array(-leme*sp.pi/180))
log = False
elif tipo == 'ZigZag':
self.MudaLemeCom(sp.array(leme*sp.pi/180))
exe = 0
###############################
##
## Dados relacionados a curva de zizag
##
###############################
if (tipo == 'ZigZag' and (((exe%2 == 0) and self.MostraPos()[5] <=
-(proa*sp.pi/180) ) or (exe%2 != 0 and self.MostraPos()[5] >=
(proa*sp.pi/180) ))):
self.MudaLemeCom(self.MostraLeme()*(-1))
if exe!=0:
dic['reach'] = erro
dic['ospath'] = ospath
dic['osangle'] = abs(osa - dic['proa'])
dados.append(dic.copy())
exe += 1
dic['exeNummber'] = exe
dic['time'] = tp - sp.array(dt)
dic['path'] = self.MostraPos()[1]
dic['proa'] = self.MostraPos()[5]
if tipo=='ZigZag' and exe!=0:
if abs(self.MostraPos()[1]- dic['path'])>ospath:
ospath = abs(self.MostraPos()[1]- dic['path'])
if abs(self.MostraPos()[5])>abs(osa):
osa = self.MostraPos()[5]
if abs(self.MostraPos()[5] - PosIni[5]) < erro:
erro = abs(self.MostraPos()[5] - PosIni[5])
###############################
##
## Dados relacionados a curva de Giro
##
###############################
if ((tipo == 'Curva_de_Giro_port' or
tipo == 'Curva_de_Giro_starboard') and not log):
if (abs(abs(self.MostraPos()[5] - PosIni[5]) -
(sp.array(90)*sp.pi/180)) <= errotf):
errotf = (abs(abs(self.MostraPos()[5] - PosIni[5]) -
(sp.array(90)*sp.pi/180)))
dic['transfer'] = abs(self.MostraPos()[1] - PosIni[1])
dic['advance'] = abs(self.MostraPos()[0] - PosIni[0])
if abs(abs(self.MostraPos()[5] - PosIni[5]) - sp.pi) <= errotd:
errotd = abs(abs(self.MostraPos()[5] - PosIni[5]) - sp.pi)
dic['taticalDiameter'] = abs(self.MostraPos()[1] -
PosIni[1])
if abs(self.MostraPos()[5] - PosIni[5]) > sp.pi :
log = True
dados.append(dic)
Rot = self.MatRot()
#
# inc = Velocidades Lineares no Sistema Inecial
#
VelIn = Rot*sp.matrix(self.vel[0:3])
PosIne = self.MostraPos()[0:3]
##################################
#
# Guardando os parâmetros
#
##################################
# Velocidade Inercial
d= sp.hstack(VelIn)
veloInerHis[cont, 1:] = d #
veloInerHis[cont, 0] = tp #
# Histórico Leme
lemeHis[cont, 0] = tp
lemeHis[cont, 1] = self.MostraLeme()
# Histórico da posição
temp = sp.hstack(self.MostraPos())
posHis[cont, :] = sp.hstack((tp, temp))
# Histórico da Velocidade
temp = sp.hstack(self.MostraVel())
veloHis[cont, :] = sp.hstack((tp, temp))
# Histórico das Forças
temp =sp.hstack(sp.array(self.VetF(GrausDeLib)))
if GrausDeLib == 4:
fHis[cont, :] = sp.hstack((tp, temp))
elif GrausDeLib == 3:
fHis[cont, :3] = sp.hstack((tp, temp[:2]))
fHis[cont, 4] = temp[2]
# Histórico Propulsor
propHis[cont, :] = sp.hstack((tp, self.MostraRot()))
# Histórico das Acelerações
Acel = self.f(GrausDeLib)
if GrausDeLib == 4:
vetor = sp.zeros((6, 1))
vetor[:2] = Acel[:2]
vetor[3] = Acel[2]
vetor [5] = Acel[3]
elif GrausDeLib == 3:
vetor = sp.zeros((6, 1))
vetor[:2] = Acel[:2]
vetor [5] = Acel[2]
acelHis[cont, :] = sp.hstack((tp, sp.hstack(vetor)))
del temp
##############################
#
# Criação de vetor de graus de liberdade
#
##############################
if GrausDeLib == 4:
xIn = sp.zeros([6, 1])
xIn [0] = self.MostraVel()[0]
xIn [1] = self.MostraVel()[1]
xIn [2] = self.MostraVel()[3]
xIn [3] = self.MostraVel()[5]
xIn [4] = self.MostraPos()[3]
xIn [5] = self.MostraPos()[5]
elif GrausDeLib == 3:
xIn = sp.zeros([4, 1])
xIn [0] = self.MostraVel()[0]
xIn [1] = self.MostraVel()[1]
xIn [2] = self.MostraVel()[5]
xIn [3] = self.MostraPos()[5]
##################################
#
# Integração da Aceleração solidária
#
##################################
if met == 'euler':
xIn = self.integrador.euler(Acel, xIn, dt )
elif met =='rk4':
xIn = self.integrador.rk4(self.facel, dt, tp, xIn)
##################################
if GrausDeLib == 4:
x = sp.zeros((6, 1))
x[0] = xIn[0]
x[1] = xIn[1]
x[3] = xIn[2]
x[5] = xIn[3]
elif GrausDeLib==3:
x = sp.zeros((6, 1))
x[0] = xIn[0]
x[1] = xIn[1]
x[5] = xIn[2]
self.MudaVel(x)
del x
##################################
##
## Integração da velocidade inercial
##
###################################
posfutura = sp.zeros((6, 1))
posfutura[:3] = self.integrador.euler(VelIn, PosIne, dt)
##################################
if GrausDeLib== 4:
posfutura[3] = xIn[4]
posfutura[5] = xIn[5]
elif GrausDeLib== 3:
posfutura[5] = xIn[3]
self.MudaPos(posfutura)
cont += 1
del posfutura
self.prop.MudaRot(tp)
self.leme.MudaLeme(tp)
return (veloHis, posHis, acelHis, fHis, veloInerHis, lemeHis, propHis,
dados)
def getCurvaGiro(self, peso=None, met='euler', t0=0., dt=0.5, t=100.,
GrausDeLib=3, tipo='port', leme=sp.array(20.),
RotCom=None, VelCom= None, Vel=None, Eta='vel',
PosIne=sp.array([[0.], [0.], [0.], [0.], [0.], [0.] ]),
errotf=sp.array(0.05), errotd=sp.array(0.05),
errosr=sp.array(0.001), saida='txt'):
"""
"""
if RotCom == None:
RotCom = self.dic['rotnom']
if VelCom == None:
VelCom = self.dic['unom']
if Vel == None:
Vel = sp.array([ [self.dic['unom']], [0.], [0.], [0.], [0.], [0.]
])
self.MudaPos( PosIne)
self.MudaVel(Vel)
self.MudaRotCom(RotCom)
self.MudaVelCom(VelCom)
#Log é o parâmetro que indica quando a simulação armazenou os dados do
#relatório
if tipo == 'port':
self.MudaLemeCom(sp.array(leme*sp.pi/180))
log = False
log1 = False
elif tipo == 'starboard':
self.MudaLemeCom(sp.array(-leme*sp.pi/180))
log = False
log1 = False
#
# Criando espaço na memória para armazenar os parâmetros da curva
#
nlin = len(sp.arange(t0, t, dt)) #Número de linhas das colunas a serem
#criadas
if saida == 'mem':
lemeHis = sp.zeros((nlin, 2)) #historico do leme
veloHis = sp.zeros((nlin, 7)) #histórico da velocidade
veloInerHis = sp.zeros((nlin, 4))#histórico da velocidade no
#sistema inercial Verificar depois a necessidade
posHis = sp.zeros([nlin, 7]) #histórico da posição no sistema
#inercial
fHis = sp.zeros((nlin, 5)) #histórico de forças
acelHis = sp.zeros((nlin, 7)) #histórico de acelerações
propHis = sp.zeros((nlin, 2)) #histórico Máquina
EtaHis = sp.zeros((nlin, 2)) #histórico Eta
betaHis = sp.zeros((nlin, 2)) #histórico beta
elif saida == 'txt':
os.makedirs('./saida/CurvaGiro')
os.chdir('./saida/CurvaGiro')
lemeHis = open('leme.dat', 'w')#historico do leme
            lemeHis.write('#Navio ' + self.nome + '\n' +
                          '#Manobra de Curva Giro\n#\n')
lemeHis.write('#Valor do leme em rad\n')
lemeHis.write('#temp'.center(5) + ' ' + 'leme'.rjust(8) + ' ' +
'\n')
veloHis = open('velo.dat', 'w') #histórico da velocidade
            veloHis.write('#Navio ' + self.nome + '\n' +
                          '#Manobra de Curva Giro\n#\n')
veloHis.write('#Velocidade Sistema Solidário \n#\n')
            veloHis.write('#temp'.center(5) + ' ' + 'u'.rjust(11) + ' ' +
                          'v'.rjust(11) + ' ' + 'w'.rjust(11) + ' ' +
                          'dot roll'.rjust(11) + ' ' + ' dot pitch'.rjust(11) +
                          ' ' + 'dot yaw'.rjust(11) + ' ' + '\n')
veloInerHis = open('veloiner.dat', 'w')#histórico da velocidade no
#sistema inercial Verificar depois a necessidade
            veloInerHis.write('#Navio ' + self.nome + '\n' +
                              '#Manobra de Curva Giro\n#\n')
veloInerHis.write('#Velocidade Inercial\n#\n')
veloInerHis.write('#temp'.center(5) + ' ' + 'u'.rjust(11) + ' ' +
'v'.rjust(11) + ' ' + 'r'.rjust(11) + '\n')
posHis = open('pos.dat', 'w')#histórico da posição no sistema
#inercial
            posHis.write('#Navio ' + self.nome + '\n' +
                         '#Manobra de Curva Giro\n#\n')
posHis.write('#Posição e Orientação\n#\n')
posHis.write('#temp'.center(5) + ' ' + 'x'.rjust(11) + ' ' +
'y'.rjust(11) + ' ' + 'z'.rjust(11) + ' ' + 'roll'.rjust(11) + ' ' +
'pitch'.rjust(11) + ' ' + 'yaw'.rjust(11) + ' ' + '\n')
fHis = open('forcas.dat', 'w') #histórico de forças
            fHis.write('#Navio ' + self.nome + '\n' +
                       '#Manobra de Curva Giro\n#\n')
fHis.write('#Forças e Momentos\n#\n')
fHis.write('#temp'.center(5) + ' ' + 'X'.rjust(11) + ' ' +
'Y'.rjust(11) + ' ' + 'K'.rjust(11) + ' ' + 'N'.rjust(11) + ' ' + '\n')
acelHis = open('acel.dat', 'w') #histórico de acelerações
            acelHis.write('#Navio ' + self.nome + '\n' +
                          '#Manobra de Curva Giro\n#\n')
acelHis.write('#Aceleração\n#\n')
            acelHis.write('#temp'.center(5) + ' ' + 'u'.rjust(11) + ' ' +
                          'v'.rjust(11) + ' ' + 'w'.rjust(11) + ' ' +
                          'ddotroll'.rjust(11) + ' ' + ' ddotpitch'.rjust(11) +
                          ' ' + 'ddotyaw'.rjust(11) + ' ' + '\n')
propHis = open('propulsor.dat', 'w') #histórico Máquina
            propHis.write('#Navio ' + self.nome + '\n' +
                          '#Manobra de Curva Giro\n#\n')
propHis.write('#Rotações do propulsor\n#\n')
propHis.write('#temp'.center(5) + ' ' + 'rot'.rjust(8) + '\n')
EtaHis = open('Eta.dat', 'w') #histórico Eta
            EtaHis.write('#Navio ' + self.nome + '\n' +
                         '#Manobra de Curva Giro\n#\n')
EtaHis.write('#Eta \n#\n')
EtaHis.write('#temp'.center(5) + ' ' + 'rot'.rjust(8) + ' ' + '\n')
betaHis = open('beta.dat', 'w') #histórico Eta
            betaHis.write('#Navio ' + self.nome + '\n' +
                          '#Manobra de Curva Giro\n#\n')
betaHis.write('#Beta \n#\n')
betaHis.write('#temp'.center(5) + ' ' + 'rot'.rjust(8) + ' ' +
'\n')
os.chdir('..')
os.chdir('..')
dados = []
dic = {}
PosIni = self.MostraPos().copy()
del nlin #não preciso mais
cont =0 #Contador
if peso == None:
par = (GrausDeLib, )
else:
par = (GrausDeLib, peso)
#
# Iteração
#
for tp in sp.arange(t0, t, dt):
if not log1:
if cont == 0:
V1 = sp.sqrt(self.MostraVel()[0]**2 +
self.MostraVel()[1]**2)
elif cont == 1:
V2 = sp.sqrt(self.MostraVel()[0]**2 +
self.MostraVel()[1]**2)
elif cont == 2:
V3 = sp.sqrt(self.MostraVel()[0]**2 +
self.MostraVel()[1]**2)
elif cont == 3:
V4 = sp.sqrt(self.MostraVel()[0]**2 +
self.MostraVel()[1]**2)
else:
V1 = V2
V2 = V3
V3 = V4
V4 = sp.sqrt(self.MostraVel()[0]**2 +
self.MostraVel()[1]**2)
if log:
if stats.tstd((V1, V2, V3, V4))<errosr:
dic['steadytr'] = (sp.sqrt(self.MostraVel()[0]**2 +
self.MostraVel()[1]**2) /
self.MostraVel()[5])
dados.append(dic.copy())
log1= True
if not log:
if (abs(abs(self.MostraPos()[5] - PosIni[5]) - (sp.pi/2))
<= errotf):
errotf = (abs(abs(self.MostraPos()[5] - PosIni[5]) -
(sp.pi/2)))
dic['transfer'] = abs(self.MostraPos()[1] - PosIni[1])
dic['advance'] = abs(self.MostraPos()[0] - PosIni[0])
if (abs(abs(self.MostraPos()[5] - PosIni[5]) - sp.pi) <=
errotd):
errotd = abs(abs(self.MostraPos()[5] - PosIni[5]) -
sp.pi)
dic['taticalDiameter'] = abs(self.MostraPos()[1] -
PosIni[1])
if abs(self.MostraPos()[5] - PosIni[5]) > sp.pi:
log = True
###################################
ft = self.VetF(par)
###################################
##
## inc = Velocidades Lineares no Sistema Inecial
##
###################################
MatRot = self.MatRot()
VelIn = sp.array(MatRot*self.MostraVel()[0:3])
PosIne = self.MostraPos()[0:3]
##################################
##
## Guardando os parâmetros
##
##################################
# Velocidade Inercial
if saida == 'txt':
veloInerHis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in VelIn:
veloInerHis.write('%.5e'.rjust(11)%(arg) + ' ')
veloInerHis.write('\n')
elif saida == 'mem':
d = sp.hstack(VelIn)
veloInerHis[cont, 1:] = d #
veloInerHis[cont, 0] = tp #
# Histórico Leme
if saida == 'txt':
lemeHis.write('%.2f'.rjust(5)%(tp) + ' ')
lemeHis.write('%.2f'.rjust(5)%(self.MostraLeme()) + '\n')
elif saida == 'mem':
lemeHis[cont, 0] = tp
lemeHis[cont, 1] = self.MostraLeme()
# Histórico da posição
if saida == 'txt':
posHis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in self.MostraPos():
posHis.write('%.5e'.rjust(11)%(arg) + ' ')
posHis.write('\n')
elif saida == 'mem':
temp = sp.hstack(self.MostraPos())
posHis[cont, :] = sp.hstack((tp, temp))
del temp
# Histórico da Velocidade
if saida == 'txt':
veloHis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in self.MostraVel():
veloHis.write('%.5e'.rjust(11)%(arg) + ' ')
veloHis.write('\n')
elif saida == 'mem':
temp = sp.hstack(self.MostraVel())
veloHis[cont, :] = sp.hstack((tp, temp))
del temp
# Histórico das Forças
if saida == 'txt':
temp = sp.zeros((4, 1))
if GrausDeLib == 4:
temp= ft
elif GrausDeLib == 3:
temp[:2] = ft[:2]
temp[3] = ft[2]
fHis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in temp:
fHis.write('%.5e'.rjust(11)%(arg) + ' ')
fHis.write('\n')
elif saida == 'mem':
temp = sp.hstack(sp.array(ft))
if GrausDeLib == 4:
fHis[cont, :] = sp.hstack((tp, temp))
elif GrausDeLib == 3:
fHis[cont, :3] = sp.hstack((tp, temp[:2]))
fHis[cont, 4] = temp[2]
# Histórico Propulsor
if saida == 'txt':
propHis.write('%.2f'.rjust(5)%(tp) + ' ')
propHis.write('%.2f'.rjust(5)%self.MostraRot() + '\n')
elif saida == 'mem':
propHis[cont, :] = sp.hstack((tp, self.MostraRot()))
# Histórico Eta
if saida == 'txt':
EtaHis.write('%.2f'.rjust(5)%(tp) + ' ')
if Eta == 'rot':
EtaHis.write('%.2f'.rjust(5) % (self.MostraRotCom() /
self.MostraRot()) + '\n')
elif Eta == 'vel':
EtaHis.write('%.2f'.rjust(5) %
(self.MostraVelCom() / self.MostraVel()[0]) +
'\n')
elif saida == 'mem':
if Eta== 'rot':
EtaHis[cont, :] = sp.hstack((tp, self.MostraRotCom() /
self.MostraRot()))
elif Eta == 'vel':
EtaHis[cont, :] = sp.hstack((tp,
self.MostraVelCom() /
self.MostraVel()[0]))
# Histórico Beta
if saida == 'txt':
betaHis.write('%.2f'.rjust(5)%(tp) + ' ')
betaHis.write('%.2f'.rjust(5)%(sp.arctan(-self.MostraVel()[1]
/ self.MostraVel()[0])) + '\n')
elif saida == 'mem':
betaHis[cont, :] = sp.hstack((tp,
sp.arctan(-self.MostraVel()[1] /
self.MostraVel()[0])))
# Histórico das Acelerações
Acel = self.f2(ft, self.H(GrausDeLib))
vetor = sp.zeros((6, 1))
if GrausDeLib == 4:
vetor[:2] = Acel[:2]
vetor[3] = Acel[2]
vetor [5] = Acel[3]
elif GrausDeLib == 3:
vetor[:2] = Acel[:2]
vetor [5] = Acel[2]
if saida == 'txt':
acelHis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in vetor:
acelHis.write('%.5e'.rjust(11)%(arg[0]) + ' ')
acelHis.write('\n')
elif saida == 'mem':
acelHis[cont, :] = sp.hstack((tp, sp.hstack(vetor)))
del temp
##############################
#
# Criação de vetor de graus de liberdade
#
##############################
if GrausDeLib == 4:
vt = sp.zeros([6, 1])
vt [0] = self.MostraVel()[0]
vt [1] = self.MostraVel()[1]
vt [2] = self.MostraVel()[3]
vt [3] = self.MostraVel()[5]
vt [4] = self.MostraPos()[3]
vt [5] = self.MostraPos()[5]
elif GrausDeLib == 3:
vt = sp.zeros([4, 1])
vt [0] = self.MostraVel()[0]
vt [1] = self.MostraVel()[1]
vt [2] = self.MostraVel()[5]
vt [3] = self.MostraPos()[5]
##################################
##
## Integração da Aceleração solidária
##
##################################
if met == 'euler':
vt = self.integrador.euler(self.f, vt, tp, dt ,par )
elif met =='rk4':
vt = self.integrador.rk4(self.f, vt, tp, dt, par)
##################################
if GrausDeLib == 4:
v = sp.zeros((6, 1))
v[0] = vt[0]
v[1] = vt[1]
v[3] = vt[2]
v[5] = vt[3]
elif GrausDeLib == 3:
v = sp.zeros((6, 1))
v[0] = vt[0]
v[1] = vt[1]
v[5] = vt[2]
self.MudaVel(v)
del v
##################################
##
## Integração da velocidade inercial
##
###################################
x = sp.zeros((6, 1))
if met == 'euler':
x[:3] = self.integrador.euler(self.fvein ,
self.MostraPos()[:3], tp, dt ,
(self.MostraPos()[3:] ,
self.MostraVel()[:3]))
elif met == 'rk4':
x[:3] = self.integrador.rk4(self.fvein, self.MostraPos()[:3],
tp, dt, (self.MostraPos()[3:],
self.MostraVel()[:3]))
##################################
if GrausDeLib == 4:
x[3] = vt[4]
x[5] = vt[5]
elif GrausDeLib == 3:
x[5] = vt[3]
self.MudaPos(x)
del x
cont += 1
self.prop.MudaRot(tp)
self.leme.MudaLeme(tp)
if saida == 'txt':
arq = [veloHis, posHis, acelHis, fHis, veloInerHis, lemeHis,
propHis, EtaHis]
for arg in arq:
arg.close()
return dados
elif saida == 'mem':
return (veloHis, posHis, acelHis, fHis, veloInerHis, lemeHis,
propHis, EtaHis, dados, betaHis)
def getCurvaZigZag(self, peso=None, met='euler', t0=0., dt=0.5, t=100.,
                       GrausDeLib=3, tipo='port', leme=sp.array(20.),
                       RotCom=None, VelCom=None, Vel=None, proa=sp.array([20.]),
                       Eta='vel', PosIne=sp.array([[0.], [0.], [0.], [0.], [0.],
                       [0.]]), osa=sp.array(0.0), ospath=sp.array(0.0),
                       erro=sp.array(0.005), saida='txt'):
"""
Simulador de manobras padrão
_________________________
Variáveis de entrada:
GrausDeLib (integer)-- Graus de liberdade;
met -- Método de integração. Default- Euler;
t0 -- Tempo inicial;
dt -- Passo no tempo;
t -- Tempo final
tipo - tipo de manobra simulada. Zig-Zag10/10 e Curva_de_Giro_port ou
Curva_de_Giro_starboard . Default -Zig-Zag
__________________________
Saída:
Tupla de sp.array
(veloHis, posHis, acelHis, fHis, veloInerHis, lemeHis)
Em cada elemento da tupla:
A primeira coluna é o passo de tempo e as demais são as variáveis
veloHis -- histórico de velocidades;
posHis -- histórico de posições
acelHis --- histórico de acelerações
fHis -- histórico de forças
veloInerHis -- histórico de velocidades no sistema inercial
lemeHis -- histórico do comando de leme
"""
if RotCom == None:
RotCom = self.dic['rotnom']
if VelCom == None:
VelCom = self.dic['unom']
if Vel == None:
Vel = sp.array([[self.dic['unom']], [0.], [0.], [0.], [0.], [0.]
])
self.MudaPos( PosIne)
self.MudaVel(Vel)
self.MudaRotCom(RotCom)
self.MudaVelCom(VelCom)
if tipo == 'port':
self.MudaLemeCom(sp.array(leme*sp.pi/180))
exe=0
elif tipo == 'starboard':
self.MudaLemeCom(sp.array(-leme*sp.pi/180))
exe=1
#
# Criando espaço na memória para armazenar os parâmetros da curva
#
#Número de linhas das colunas a seremcriadas
nlin = len(sp.arange(t0, t, dt))
if saida == 'mem':
lemeHis = sp.zeros((nlin, 2)) #historico do leme
veloHis = sp.zeros((nlin, 7)) #histórico da velocidade
veloInerHis = sp.zeros((nlin, 4))#histórico da velocidade no
#sistema inercial Verificar depois a necessidade
posHis = sp.zeros([nlin, 7]) #histórico da posição no sistema
#inercial
fHis = sp.zeros((nlin, 5)) #histórico de forças
acelHis = sp.zeros((nlin, 7)) #histórico de acelerações
propHis = sp.zeros((nlin, 2)) #histórico Máquina
EtaHis = sp.zeros((nlin, 2)) #histórico Eta
elif saida == 'txt':
os.makedirs('./saida/ZigZag')
os.chdir('./saida/ZigZag')
lemeHis = open('leme.dat', 'w')#historico do leme
            lemeHis.write('#Navio ' + self.nome + '\n' +
                          '#Manobra de Curva Zig-Zag\n#\n')
lemeHis.write('#Valor do leme em rad\n')
lemeHis.write('#temp'.center(5) + ' ' + 'leme'.rjust(8) + ' ' +
'\n')
veloHis = open('velo.dat', 'w') #histórico da velocidade
            veloHis.write('#Navio ' + self.nome + '\n' +
                          '#Manobra de Curva Zig-Zag\n#\n')
veloHis.write('#Velocidade Sistema Solidário \n#\n')
            veloHis.write('#temp'.center(5) + ' ' + 'u'.rjust(11) + ' ' +
                          'v'.rjust(11) + ' ' + 'w'.rjust(11) + ' ' +
                          'dot roll'.rjust(11) + ' ' + ' dot pitch'.rjust(11) +
                          ' ' + 'dot yaw'.rjust(11) + ' ' + '\n')
veloInerHis = open('veloiner.dat', 'w')#histórico da velocidade no
#sistema inercial Verificar depois a necessidade
            veloInerHis.write('#Navio ' + self.nome + '\n' +
                              '#Manobra de Curva Zig-Zag\n#\n')
veloInerHis.write('#Velocidade Inercial\n#\n')
veloInerHis.write('#temp'.center(5) + ' ' + 'u'.rjust(11) + ' ' +
'v'.rjust(11) + ' ' + 'r'.rjust(11) + '\n')
posHis = open('pos.dat', 'w')#histórico da posição no sistema
#inercial
            posHis.write('#Navio ' + self.nome + '\n' +
                         '#Manobra de Curva Zig-Zag\n#\n')
posHis.write('#Posição e Orientação\n#\n')
posHis.write('#temp'.center(5) + ' ' + 'x'.rjust(11) + ' ' +
'y'.rjust(11) + ' ' + 'z'.rjust(11) + ' ' +
'roll'.rjust(11) + ' ' + 'pitch'.rjust(11) + ' ' +
'yaw'.rjust(11) + ' ' + '\n')
fHis = open('forcas.dat', 'w') #histórico de forças
            fHis.write('#Navio ' + self.nome + '\n' +
                       '#Manobra de Curva Zig-Zag\n#\n')
fHis.write('#Forças e Momentos\n#\n')
fHis.write('#temp'.center(5) + ' ' + 'X'.rjust(11) + ' ' +
'Y'.rjust(11) + ' ' + 'K'.rjust(11) + ' ' +
'N'.rjust(11) + ' ' + '\n')
acelHis = open('acel.dat', 'w') #histórico de acelerações
            acelHis.write('#Navio ' + self.nome + '\n' +
                          '#Manobra de Curva Zig-Zag\n#\n')
acelHis.write('#Aceleração\n#\n')
acelHis.write('#temp'.center(5) + ' ' + 'u'.rjust(11) + ' ' +
'v'.rjust(11) + ' ' + 'w'.rjust(11) + ' ' +
'ddotroll'.rjust(11) + ' ' + ' ddotpitch'.rjust(11)
+ ' ' + 'ddotyaw'.rjust(11) + ' ' + '\n')
propHis = open('propulsor.dat', 'w') #histórico Máquina
            propHis.write('#Navio ' + self.nome + '\n' +
                          '#Manobra de Curva Zig-Zag\n#\n')
propHis.write('#Rotações do propulsor\n#\n')
propHis.write('#temp'.center(5) + ' ' + 'rot'.rjust(8) + '\n')
EtaHis = open('Eta.dat', 'w') #histórico Eta
            EtaHis.write('#Navio ' + self.nome + '\n' +
                         '#Manobra de Curva Zig-Zag\n#\n')
EtaHis.write('#Eta \n#\n')
EtaHis.write('#temp'.center(5) + ' ' + 'rot'.rjust(8) + ' ' + '\n')
os.chdir('..')
os.chdir('..')
dados = []
dic = {}
PosIni = self.MostraPos().copy()
del nlin #não preciso mais
cont =0 #Contador
if peso == None:
par = (GrausDeLib, )
else:
par = (GrausDeLib, peso)
#
# Iteração
#
for tp in sp.arange(t0, t, dt):
###############################
##
## Verificando o momento em que será realizada a mudança do leme
##
###############################
if (((exe%2 == 0) and self.MostraPos()[5] <=
-(proa * sp.pi / 180)) or (exe%2 != 0 and
self.MostraPos()[5] >= (proa * sp.pi / 180))):
self.MudaLemeCom(self.MostraLeme() * (-1))
if ((exe != 0 and tipo == 'port') or (exe != 1
and tipo == 'starboard')):
dic['reach'] = erro
dic['ospath'] = ospath
dic['osangle'] = osa
dados.append(dic.copy())
osa = sp.array(0.0)
ospath = sp.array(0)
erro = sp.array(0.05)
logospath = False
logosa = False
exe += 1
if tipo =='port':
dic['exeNummber'] = exe
elif tipo=='starboard':
dic['exeNummber'] = exe - 1
dic['time'] = tp - sp.array(dt)
dic['path'] = self.MostraPos()[1]
dic['proa'] = self.MostraPos()[5]
###############################
##
## Atualizando os parâmetros
##
###############################
if ((exe!=0 and tipo == 'port') or (exe!=1 and tipo ==
'starboard')):
if ((logospath == False) and
(abs(self.MostraPos()[1] - dic['path']) >= ospath)):
#(sp.sign(self.MostraPos()[1])== sp.sign(dic['path'])) and
ospath = abs(self.MostraPos()[1] - dic['path'])
else:
logospath = True
if ((logosa == False) and (abs(self.MostraPos()[5] -
dic['proa']) >= osa)): #(sp.sign(self.MostraPos()[5])==
#sp.sign(dic['proa'])) and
osa = abs(self.MostraPos()[5] - dic['proa'])
else:
logosa = True
if abs(abs(self.MostraPos()[5]) - abs(PosIni[5])) < erro:
erro = abs(self.MostraPos()[5] - PosIni[5])
#
# inc = Velocidades Lineares no Sistema Inecial
#
MatRot = self.MatRot()
VelIn = MatRot * sp.matrix(self.vel[0:3])
PosIne = self.MostraPos()[0:3]
###################################
#################################
##
## Cálculo das forças de Maneira Modular
##
###################################
ft = self.VetF(par)
##################################
##################################
##
## Guardando os parâmetros
##
##################################
# Velocidade Inercial
if saida == 'txt':
veloInerHis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in VelIn:
veloInerHis.write('%.5e'.rjust(11)%(arg) + ' ')
veloInerHis.write('\n')
elif saida == 'mem':
d = sp.hstack(VelIn)
veloInerHis[cont, 1:] = d #
veloInerHis[cont, 0] = tp #
# Histórico Leme
if saida == 'txt':
lemeHis.write('%.2f'.rjust(5)%(tp) + ' ')
lemeHis.write('%.2f'.rjust(5)%(self.MostraLeme()) + '\n')
elif saida == 'mem':
lemeHis[cont, 0] = tp
lemeHis[cont, 1] = self.MostraLeme()
# Histórico da posição
if saida == 'txt':
posHis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in self.MostraPos():
posHis.write('%.5e'.rjust(11)%(arg) + ' ')
posHis.write('\n')
elif saida == 'mem':
temp = sp.hstack(self.MostraPos())
posHis[cont, :] = sp.hstack((tp, temp))
del temp
# Histórico da Velocidade
if saida == 'txt':
veloHis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in self.MostraVel():
veloHis.write('%.5e'.rjust(11)%(arg) + ' ')
veloHis.write('\n')
elif saida == 'mem':
temp = sp.hstack(self.MostraVel())
veloHis[cont, :] = sp.hstack((tp, temp))
del temp
# Histórico das Forças
if saida == 'txt':
temp = sp.zeros((4, 1))
if GrausDeLib == 4:
temp = ft
elif GrausDeLib == 3:
temp[:2] = ft[:2]
temp[3] = ft[2]
fHis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in temp:
fHis.write('%.5e'.rjust(11)%(arg) + ' ')
fHis.write('\n')
elif saida == 'mem':
temp = sp.hstack(sp.array(ft))
if GrausDeLib == 4:
fHis[cont, :] = sp.hstack((tp, temp))
elif GrausDeLib == 3:
fHis[cont, :3] = sp.hstack((tp, temp[:2]))
fHis[cont, 4] = temp[2]
# Histórico Propulsor
if saida == 'txt':
propHis.write('%.2f'.rjust(5)%(tp) + ' ')
propHis.write('%.2f'.rjust(5)%self.MostraRot() + '\n')
elif saida == 'mem':
propHis[cont, :] = sp.hstack((tp, self.MostraRot()))
# Histórico Eta
if saida == 'txt':
EtaHis.write('%.2f'.rjust(5)%(tp) + ' ')
if Eta == 'rot':
EtaHis.write('%.2f'.rjust(5) % (self.MostraRotCom() /
self.MostraRot()) + '\n')
elif Eta == 'vel':
EtaHis.write('%.2f'.rjust(5) % (self.MostraVelCom() /
self.MostraVel()[0]) + '\n')
elif saida == 'mem':
if Eta== 'rot':
EtaHis[cont, :] = sp.hstack((tp, self.MostraRotCom() /
self.MostraRot()))
elif Eta == 'vel':
EtaHis[cont, :] = sp.hstack((tp, self.MostraVelCom() /
self.MostraVel()[0]))
# Histórico das Acelerações
Acel = self.f2(ft, self.H(GrausDeLib))
vetor = sp.zeros((6, 1))
if GrausDeLib == 4:
vetor[:2] = Acel[:2]
vetor[3] = Acel[2]
vetor [5] = Acel[3]
elif GrausDeLib == 3:
vetor[:2] = Acel[:2]
vetor [5] = Acel[2]
if saida == 'txt':
acelHis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in vetor:
acelHis.write('%.5e'.rjust(11)%(arg[0]) + ' ')
acelHis.write('\n')
elif saida == 'mem':
acelHis[cont, :] = sp.hstack((tp, sp.hstack(vetor)))
del vetor
##############################
##
## Criação de vetor de graus de liberdade
##
##############################
if GrausDeLib == 4:
vt = sp.zeros([6, 1])
vt [0] = self.MostraVel()[0]
vt [1] = self.MostraVel()[1]
vt [2] = self.MostraVel()[3]
vt [3] = self.MostraVel()[5]
vt [4] = self.MostraPos()[3]
vt [5] = self.MostraPos()[5]
elif GrausDeLib == 3:
vt = sp.zeros([4, 1])
vt [0] = self.MostraVel()[0]
vt [1] = self.MostraVel()[1]
vt [2] = self.MostraVel()[5]
vt [3] = self.MostraPos()[5]
##################################
##
## Integração da Aceleração solidária
##
##################################
if met == 'euler':
vt = self.integrador.euler(self.f, vt, tp, dt ,par )
elif met =='rk4':
vt = self.integrador.rk4(self.f, vt, tp, dt, par)
##################################
if GrausDeLib == 4:
v = sp.zeros((6, 1))
v[0] = vt[0]
v[1] = vt[1]
v[3] = vt[2]
v[5] = vt[3]
elif GrausDeLib ==3:
v = sp.zeros((6, 1))
v[0] = vt[0]
v[1] = vt[1]
v[5] = vt[2]
self.MudaVel(v)
del v
##################################
##
## Integração da velocidade inercial
##
###################################
x = sp.zeros((6, 1))
if met == 'euler':
x[:3] = self.integrador.euler(self.fvein, self.MostraPos()[:3],
tp, dt, (self.MostraPos()[3:],
self.MostraVel()[:3]))
elif met == 'rk4':
x[:3] = self.integrador.rk4(self.fvein, self.MostraPos()[:3],
tp, dt, (self.MostraPos()[3:],
self.MostraVel()[:3]))
##################################
if GrausDeLib == 4:
x[3] = vt[4]
x[5] = vt[5]
elif GrausDeLib == 3:
x[5] = vt[3]
self.MudaPos(x)
cont += 1
del x
self.prop.MudaRot(tp)
self.leme.MudaLeme(tp)
if saida == 'txt':
arq = [veloHis, posHis, acelHis, fHis, veloInerHis, lemeHis,
propHis, EtaHis]
for arg in arq:
arg.close()
return dados
elif saida == 'mem':
return (veloHis, posHis, acelHis, fHis, veloInerHis, lemeHis,
propHis, EtaHis, dados)
def simulaTestb(self, p, intervalo=sp.array(5.), V= None ):
"""
Retorna uma matrix com o valor das forças variando de acordo com que
varia a velocidade
u= Vcos(beta) v = Vsen(beta) com beta variando de 0 a 180 graus em um
intervalo = intervalo
"""
if V is None:
V = self.dic['unom']
Velocidade = sp.zeros((6, 1))
saida = sp.zeros([len( sp.arange(0., sp.pi, intervalo * sp.pi / 180)),
5])
contlinha = 0
for beta in sp.arange(0., sp.pi, intervalo * sp.pi / 180):
Velocidade[0] = sp.array(V) * sp.cos(beta)
Velocidade[1] = -sp.array(V) * sp.sin(beta)
self.MudaVelCom(Velocidade[0]) #condição que força \eta=1
self.MudaVel(Velocidade)
v = sp.sqrt(Velocidade[0] ** 2 + Velocidade[1] ** 2)
rho = self.dic['rho']
lpp = self.dic['lpp']
vetF = self.VetF((4, p))
# vetF = sp.hstack(vetF)
saida[contlinha, :] = sp.hstack([beta, vetF[0] * (2 / (rho * (lpp *
(v ** 2)))), vetF[1] * (2 / (rho *
(lpp* (v ** 2)))), vetF[2] *
(2 / (rho * ((lpp * v) ** 2))),
vetF[3] * (2 / (rho * ((lpp * v) **
2)))])
contlinha += 1
return saida
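# Illustrative call (a sketch, not part of the original module): ``navio``
# stands for an already constructed ship object and ``p`` for the
# hull-parameter set expected by VetF -- both names are assumptions here.
#
#     tabela = navio.simulaTestb(p, intervalo=sp.array(10.))
#     # each row of ``tabela`` is [beta, X', Y', K', N']: the drift angle
#     # followed by the force/moment coefficients made non-dimensional with
#     # rho, lpp and the speed V, exactly as assembled in the loop above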
| gpl-3.0 | -2,103,145,341,740,217,300 | 33.131995 | 79 | 0.426835 | false |
zombiezen/pymage | pymage/config.py | 1 | 21300 | #!/usr/bin/env python
#
# config.py
# pymage
#
# Copyright (C) 2006-2007 Ross Light
#
# This file is part of pymage.
#
# pymage is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# pymage is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
# more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
#
"""
Provide configuration for your game.
The typical use of this module is to use the `setup` function and give set up
parameters. From there, the module sets up the correct parts of the pymage
library.
This module is not strictly necessary, but pymage's reasoning is as follows:
Resources for the game should be loaded on an location-independent basis.
This provides abstraction and the ability for arbitrary file structure.
However, these files eventually need to be located, so the developer should
create a file (separate from the main code) that locates everything. From
there, there may be a legitimate reason for the user to want to customize the
file locations (perhaps he/she wants the game resources to be on a separate hard
drive). Therefore, a separate file is used to override the behavior of any
given installation of the game. But since the user never touches the
developer's default game setup, any corrupt preferences can be corrected by
deleting the user configuration files.
Here is a sample gamesite.xml file::
<?xml version="1.0"?>
<!-- name is not necessary, but reserved for future use. -->
<game-site name="Sample Game">
<!-- Prepare an image in the ImageManager.
The id attribute is used as the tag.
The path element is required to find the file.
The section and option elements specify how the path can be
overriden in configuration files. -->
<image id="SampleImage">
<section>sprites</section>
<option>sample</option>
<path>img/sample.png</path>
</image>
<!-- Prepare a sound in the SoundManager.
The id attribute is used as the tag.
The path element is required to find the file.
The section and option elements specify how the path can be
overriden in configuration files. -->
<sound id="SampleSound">
<section>sounds</section>
<option>sample</option>
<path>sfx/sample.wav</path>
</sound>
<!-- Prepare a playlist in the MusicManager.
The id attribute is used as the tag.
The path element(s) are required to find the music file(s).
The section and option elements specify how the playlist can be
overriden in configuration files. -->
<music id="song1">
<path>music/song1.wav</path>
</music>
<playlist id="SamplePlaylist">
<section>playlists</section>
<option>sample</option>
<music ref="song1"/>
<path>music/song2.wav</path> <!-- Old way, still works -->
</playlist>
<!-- Prepare a cache group. -->
<group id="SampleGroup">
<section>groups</section>
<option>sample</option>
<image ref="SampleImage"/>
<sound ref="SampleSound"/>
</group>
<!-- Specify additional configuration files. -->
<config-file>userconfig.ini</config-file>
</game-site>
"""
from ConfigParser import ConfigParser
import os
from textwrap import dedent
import warnings
from xml.dom import minidom
from pymage import resman
from pymage import sound
from pymage import states
from pymage import vfs
__author__ = 'Ross Light'
__date__ = 'August 10, 2006'
__all__ = ['GameSiteWarning',
'load',
'save',
'getOption',
'setOption',
'registerType',
'unregisterType',
'setup',]
__docformat__ = 'reStructuredText'
# Globals
_gsPrims = {'resource': resman.Resource,
'image': resman.ImageResource,
'sound': resman.SoundResource,
'music': resman.MusicResource,}
class GameSiteWarning(UserWarning):
"""Warning emitted when odd game site constructs are used."""
pass
## CONFIG PARSER ##
class CaseConfigParser(ConfigParser):
"""A ``ConfigParser`` that is case-sensitive."""
def optionxform(self, optstr):
"""Return the string in the same case."""
return optstr
def load(*args, **kw):
"""
Load a series of configuration files.
Any keyword values are used as variables.
:Keywords:
convert : bool
Whether the function should interpret the values as what they seem
to be (e.g. ``float``, ``int``). The default is ``True``.
:Returns: Loaded configuration
:ReturnType: dict
"""
# Retrieve convert keyword
convertValues = kw.get('convert', True)
try:
del kw['convert']
except KeyError:
pass
# Parse the files
parser = CaseConfigParser(kw)
for configFile in args:
close = False
if isinstance(configFile, (basestring, vfs.Path)):
# Open strings as paths
game = states.Game.getGame()
if game is None:
# Use physical filesystem
configFile = os.path.normpath(os.path.expanduser(configFile))
if os.path.exists(configFile):
configFile = open(configFile)
else:
continue
else:
# Use virtual filesystem
if game.filesystem.exists(configFile):
configFile = game.filesystem.open(configFile)
else:
continue
close = True
parser.readfp(configFile)
if close:
configFile.close()
# Assemble dictionary
configDict = {}
for section in parser.sections():
sectionDict = {}
for option in parser.options(section):
value = parser.get(section, option)
if convertValues: # Interpret values
value = _getValue(value)
sectionDict[option] = value
configDict[section] = sectionDict
return configDict
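# Example (a sketch; the file, section, and option names are assumed, not
# part of pymage): later files override earlier ones, and keyword arguments
# become ConfigParser defaults visible to every section.
#
#     config = load('defaults.ini', '~/.mygame/user.ini', data_dir='data')
#     fullscreen = getOption(config, 'display', 'fullscreen', False)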
def save(config, config_file):
"""
Saves a configuration dictionary to a file.
:Parameters:
config : dict
Configuration dictionary
config_file : string or file
File to write configuration to
"""
# Create configuration writer
parser = CaseConfigParser()
for section, values in config.iteritems():
parser.add_section(section)
for option, value in values.iteritems():
parser.set(section, option, value)
# Determine file to write
close = False
if isinstance(config_file, (basestring, vfs.Path)):
game = states.Game.getGame()
if game is not None:
config_file = game.filesystem.open(config_file)
else:
if isinstance(config_file, vfs.Path):
config_file = str(config_file)
path = os.path.normpath(os.path.expanduser(config_file))
config_file = open(path, 'w')
close = True
# Write file and close
parser.write(config_file)
if close:
config_file.close()
def _getValue(value_string):
"""
Retrieves a value from a ``ConfigParser`` string.
:Parameters:
value_string : string
Option string to convert
:Returns: The string's value, converted into an int, bool, float, or string
"""
boolLiterals = {'false': False,
'no': False,
'off': False,
'true': True,
'yes': True,
'on': True,}
if value_string.isdigit():
# Integer
return int(value_string)
elif value_string.lower() in boolLiterals:
# Boolean
return boolLiterals[value_string.lower()]
elif _isFloat(value_string):
# Float
return float(value_string)
else:
# String
return str(value_string)
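# A few conversions performed by _getValue (illustrative):
#
#     _getValue('42') -> 42 (int)
#     _getValue('off') -> False (boolean literal)
#     _getValue('3.14') -> 3.14 (float)
#     _getValue('hello') -> 'hello' (plain string)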
def _isFloat(value_string):
"""
Returns whether the string is a ``float``.
The format for a float is::
int[.[fraction]]
:Parameters:
value_string : string
String to test for floatiness
:ReturnType: bool
"""
if value_string.isdigit():
# int
return True
elif value_string.count('.') == 1:
# has a decimal point
value_string = value_string.replace('.', '')
if value_string.isdigit():
# int[.[fraction]]
return True
else:
return False
else:
return False
def getOption(config, section, option, default=None):
"""
Retrieves an option from the configuration dictionary.
:Parameters:
config : dict
Configuration dictionary to read from
section : string
Configuration section
option : string
Option name
default
Default value to return if option is not found
:Returns: The requested option's value
"""
try:
section = config[section]
except KeyError:
return default
else:
return section.get(option, default)
def setOption(config, section, option, value):
"""
Changes an option in the configuration dictionary.
:Parameters:
config : dict
Configuration dictionary to modify
section : string
Configuration section
option : string
Option name
value
Value to change the option to
"""
section = config.setdefault(section, {})
section[option] = value
## GAME SITE ##
def registerType(tag, factory):
"""
Register a custom game site resource type.
:Parameters:
tag : string
Name of the XML element
factory
Callable that takes one positional argument: the path to the
resource. Any additional attributes found on the XML element are
passed as keyword arguments. The value of this parameter is
typically the constructor of a `pymage.resman.Resource` subclass.
"""
_gsPrims[tag] = factory
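# Illustrative registration of a custom resource type. ``FontResource`` is an
# assumed subclass of ``resman.Resource`` (it is not provided by pymage); once
# registered, <font id="..."><path>...</path></font> elements in gamesite.xml
# are handled like the built-in primitives.
#
#     registerType('font', FontResource)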
def unregisterType(tag):
"""
Unregister a custom game site resource type.
:Parameters:
tag : string
Name of the XML element
"""
del _gsPrims[tag]
def setup(site='gamesite.xml', *config_files, **kw):
"""
Sets up a game from the specified parameters.
The additional arguments are program default configuration files. These
are parsed before any inside the game site file (therefore giving the site
configuration files higher precedence).
:Parameters:
site : string or file
Game site file
:Keywords:
configSound : bool
Whether the sound manager volume should be configured automatically
configMusic : bool
Whether the music manager volume should be configured automatically
:Returns: The game's configuration
:ReturnType: dict
"""
# Get keyword arguments
configSound = kw.pop('configSound', True)
configMusic = kw.pop('configMusic', True)
if kw:
raise TypeError("Invalid keyword argument")
# See if we can use the game's filesystem
if isinstance(site, (basestring, vfs.Path)):
game = states.Game.getGame()
if game is not None:
site = game.filesystem.open(site)
elif isinstance(site, vfs.Path):
site = str(site)
# Parse game site file
doc = minidom.parse(site)
config = _getSiteConfig(doc, config_files)
# Load configuration
if configSound:
_processSoundOptions(config)
if configMusic:
_processMusicOptions(config)
# Process resources
_processGameSite(doc, config)
# Return configuration dictionary
return config
def _getSiteConfig(doc, config_files):
"""
Obtains full configuration.
The configuration files passed in are put first in the list, so the ones
specified in the game site file take precedence.
:Parameters:
doc : DOM document
Game site DOM tree
config_files : list of strings or files
Configuration files to load
:Returns: The loaded configuration dictionary
:ReturnType: dict
"""
siteConfigs = []
for child in doc.documentElement.childNodes:
if (child.nodeType == minidom.Node.ELEMENT_NODE and
child.tagName == 'config-file'):
siteConfigs.append(_getText(child))
config_files = list(config_files) + list(siteConfigs)
return load(*config_files)
def _processSoundOptions(config):
"""
Configure the sound manager with the default configuration keys.
:Parameters:
config : dict
The configuration dictionary
"""
sound.sound.shouldPlay = bool(getOption(config, 'sound', 'play', True))
sound.sound.volume = float(getOption(config, 'sound', 'volume', 1.0))
def _processMusicOptions(config):
"""
Configure the music manager with the default configuration keys.
:Parameters:
config : dict
The configuration dictionary
"""
sound.music.shouldPlay = bool(getOption(config, 'music', 'play', True))
sound.music.volume = bool(getOption(config, 'music', 'volume', 0.5))
sound.music.loop = bool(getOption(config, 'music', 'loop', True))
def _processGameSite(doc, config):
"""
Run through game site file and add resources to manager.
:Parameters:
doc : DOM document
Game site DOM tree
config : dict
Configuration dictionary
"""
handlers = {'playlist': _handlePlaylist,
'group': _handleGroup,}
handlers.update(dict.fromkeys(_gsPrims, _handlePrimitive))
for child in doc.documentElement.childNodes:
if (child.nodeType == minidom.Node.ELEMENT_NODE and
child.tagName in handlers):
# Call handler
handler = handlers[child.tagName]
handler(child, config)
def _handlePrimitive(elem, config):
"""
Handle a basic resource (i.e. images, sound effects, and custom resources).
:Parameters:
elem : DOM node
Element to handle
config : dict
Configuration dictionary
:Returns: Resource's key
:ReturnType: string
"""
attr = _attributes(elem, include_ns=False, ascii=True)
pathChild = _childNamed(elem, 'path')
if pathChild is None:
warnings.warn("Primitive without a path", GameSiteWarning)
return
# Get ID
try:
key = attr.pop('id')
except KeyError:
pathChild = _childNamed(elem, 'path')
if pathChild:
key = _getText(pathChild)
else:
warnings.warn("Primitive without a key", GameSiteWarning)
return
# Get resource information
resType = _gsPrims[elem.tagName]
section = _getText(_childNamed(elem, 'section'))
option = _getText(_childNamed(elem, 'option'))
path = _getText(pathChild)
# Create resource
if section is not None and option is not None:
path = getOption(config, section, option, path)
resman.resman.addResource(key, resType(path, **attr))
# Return key
return key
def _handlePlaylist(elem, config):
"""
Handle a playlist element.
:Parameters:
elem : DOM node
Element to handle
config : dict
Configuration dictionary
:Returns: Playlist's key
:ReturnType: string
"""
key = elem.getAttribute('id')
section = _getText(_childNamed(elem, 'section'))
option = _getText(_childNamed(elem, 'option'))
playlistKeys = []
# Get playlist keys
for sub in elem.childNodes:
if sub.nodeType == minidom.Node.ELEMENT_NODE:
if sub.tagName == 'path':
# Old-school path approach
warnings.warn("%s using old path-based playlist" % (key),
GameSiteWarning)
if sub.hasAttribute('id'):
musicKey = sub.getAttribute('id')
musicPath = _getText(sub)
else:
musicKey = musicPath = _getText(sub)
resman.resman.addResource(musicKey, resman.MusicResource(musicPath))
playlistKeys.append(musicKey)
elif sub.tagName == 'music':
# New-school music reference/declaration approach
if sub.hasAttribute('ref'):
musicKey = sub.getAttribute('ref')
else:
musicKey = _handlePrimitive(sub, config)
playlistKeys.append(musicKey)
# Create playlist
if section is not None and option is not None:
configKeys = getOption(config, section, option)
if configKeys is not None:
playlistKeys = configKeys.split(',')
sound.music.addPlaylist(key, playlistKeys)
# Return key
return key
def _handleGroup(elem, config):
"""
Handle a group element (a cache group).
:Parameters:
elem : DOM node
Element to handle
config : dict
Configuration dictionary
:Returns: Cache group's key
:ReturnType: string
"""
key = elem.getAttribute('id')
section = _getText(_childNamed(elem, 'section'))
option = _getText(_childNamed(elem, 'option'))
groupKeys = set()
# Get group keys
for sub in elem.childNodes:
if (sub.nodeType == minidom.Node.ELEMENT_NODE and
sub.tagName in _gsPrims):
if sub.hasAttribute('ref'):
resourceKey = sub.getAttribute('ref')
else:
resourceKey = _handlePrimitive(sub, config)
groupKeys.add(resourceKey)
# Create group
if section is not None and option is not None:
configKeys = getOption(config, section, option)
if configKeys is not None:
groupKeys = configKeys.split(',')
resman.resman.addCacheGroup(key, groupKeys)
# Return key
return key
def _getText(elem, post=True):
"""
Retrieve text from a DOM node, stripping indents, if asked.
This function does honor the ``xml:space`` attribute, and if
``xml:space="preserve"`` is specified, it takes precendence over the
``post`` argument.
:Parameters:
elem : DOM node
The element to get text from
:Keywords:
post : bool
Whether to strip indents
:Returns: The element's text
:ReturnType: string
"""
xmlNS = 'http://www.w3.org/XML/1998/namespace'
if elem is None:
return None
text = ''
for child in elem.childNodes:
if child.nodeType == minidom.Node.TEXT_NODE:
text += child.wholeText
preserve = (elem.hasAttributeNS(xmlNS, 'space') and
elem.getAttributeNS(xmlNS, 'space') == 'preserve')
if post and not preserve:
text = dedent(text)
if text.startswith('\n'):
text = text[1:]
if text.endswith('\n'):
text = text[:-1]
return text
def _childNamed(elem, name):
"""
Returns the first child with the given name.
:Parameters:
elem : DOM node
The element to search in
name : string
The name to search for
:Returns: The named child, or ``None`` if not found
:ReturnType: DOM node
"""
for child in elem.childNodes:
if (child.nodeType == minidom.Node.ELEMENT_NODE and
child.tagName == name):
return child
else:
return None
def _attributes(elem, include_ns=True, ascii=False):
"""
Retrieves the attributes of a DOM node as a dictionary.
If ``include_ns`` is ``False``, attributes with a namespace prefix are discarded. If ``ascii`` is ``True``, attribute names are converted to ASCII and attributes whose names cannot be converted are discarded.
:Parameters:
elem : DOM node
The element to extract attributes from
:Keywords:
include_ns : bool
Whether attributes with a namespace prefix are kept; if ``False``
they are discarded
ascii : bool
Whether the attribute names are converted to ASCII. If an attribute
name cannot be converted, the entire attribute is discarded.
"""
nodemap = elem.attributes
attrDict = {}
for index in xrange(nodemap.length):
attr = nodemap.item(index)
if not include_ns and attr.prefix:
continue
name = attr.localName
if ascii:
try:
name = str(name)
except UnicodeError:
continue
attrDict[name] = attr.value
return attrDict
| gpl-3.0 | -6,821,084,749,787,888,000 | 31.223903 | 80 | 0.601596 | false |
ASMlover/study | cplusplus/wren_cc/test/bench_fib.py | 1 | 1588 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2019 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import time
def fib(n):
if n < 2:
return n
return fib(n - 1) + fib(n - 2)
start = time.clock()
for i in range(5):
print(fib(20))
print("use: " + str(time.clock() - start))
| bsd-2-clause | -9,005,991,868,441,173,000 | 37.731707 | 70 | 0.742443 | false |
CCS-Lab/hBayesDM | Python/hbayesdm/models/_dd_hyperbolic_single.py | 1 | 9923 | from typing import Sequence, Union, Any
from collections import OrderedDict
from numpy import Inf, exp
import pandas as pd
from hbayesdm.base import TaskModel
from hbayesdm.preprocess_funcs import dd_single_preprocess_func
__all__ = ['dd_hyperbolic_single']
class DdHyperbolicSingle(TaskModel):
def __init__(self, **kwargs):
super().__init__(
task_name='dd',
model_name='hyperbolic',
model_type='single',
data_columns=(
'subjID',
'delay_later',
'amount_later',
'delay_sooner',
'amount_sooner',
'choice',
),
parameters=OrderedDict([
('k', (0, 0.1, 1)),
('beta', (0, 1, 5)),
]),
regressors=OrderedDict([
]),
postpreds=['y_pred'],
parameters_desc=OrderedDict([
('k', 'discounting rate'),
('beta', 'inverse temperature'),
]),
additional_args_desc=OrderedDict([
]),
**kwargs,
)
_preprocess_func = dd_single_preprocess_func
def dd_hyperbolic_single(
data: Union[pd.DataFrame, str, None] = None,
niter: int = 4000,
nwarmup: int = 1000,
nchain: int = 4,
ncore: int = 1,
nthin: int = 1,
inits: Union[str, Sequence[float]] = 'vb',
ind_pars: str = 'mean',
model_regressor: bool = False,
vb: bool = False,
inc_postpred: bool = False,
adapt_delta: float = 0.95,
stepsize: float = 1,
max_treedepth: int = 10,
**additional_args: Any) -> TaskModel:
"""Delay Discounting Task - Hyperbolic Model
Individual Bayesian Modeling of the Delay Discounting Task
using Hyperbolic Model [Mazur1987]_ with the following parameters:
"k" (discounting rate), "beta" (inverse temperature).
.. [Mazur1987] Mazur, J. E. (1987). An adjustment procedure for studying delayed reinforcement.
User data should contain the behavioral data-set of all subjects of interest for
the current analysis. When loading from a file, the datafile should be a
**tab-delimited** text file, whose rows represent trial-by-trial observations
and columns represent variables.
For the Delay Discounting Task, there should be 6 columns of data
with the labels "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice". It is not necessary for the columns to be
in this particular order; however, it is necessary that they be labeled
correctly and contain the information below:
- "subjID": A unique identifier for each subject in the data-set.
- "delay_later": An integer representing the delayed days for the later option (e.g. 1, 6, 28).
- "amount_later": A floating point number representing the amount for the later option (e.g. 10.5, 13.4, 30.9).
- "delay_sooner": An integer representing the delayed days for the sooner option (e.g. 0).
- "amount_sooner": A floating point number representing the amount for the sooner option (e.g. 10).
- "choice": If amount_later was selected, choice == 1; else if amount_sooner was selected, choice == 0.
.. note::
User data may contain other columns of data (e.g. ``ReactionTime``,
``trial_number``, etc.), but only the data within the column names listed
above will be used during the modeling. As long as the necessary columns
mentioned above are present and labeled correctly, there is no need to
remove other miscellaneous data columns.
.. note::
``adapt_delta``, ``stepsize``, and ``max_treedepth`` are advanced options that
give the user more control over Stan's MCMC sampler. It is recommended that
only advanced users change the default values, as alterations can profoundly
change the sampler's behavior. See [Hoffman2014]_ for more information on the
sampler control parameters. One can also refer to 'Section 34.2. HMC Algorithm
Parameters' of the `Stan User's Guide and Reference Manual`__.
.. [Hoffman2014]
Hoffman, M. D., & Gelman, A. (2014).
The No-U-Turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo.
Journal of Machine Learning Research, 15(1), 1593-1623.
__ https://mc-stan.org/users/documentation/
Parameters
----------
data
Data to be modeled. It should be given as a Pandas DataFrame object,
a filepath for a data file, or ``"example"`` for example data.
Data columns should be labeled as: "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice".
niter
Number of iterations, including warm-up. Defaults to 4000.
nwarmup
Number of iterations used for warm-up only. Defaults to 1000.
``nwarmup`` is a numerical value that specifies how many MCMC samples
should not be stored upon the beginning of each chain. For those
familiar with Bayesian methods, this is equivalent to burn-in samples.
Due to the nature of the MCMC algorithm, initial values (i.e., where the
sampling chains begin) can have a heavy influence on the generated
posterior distributions. The ``nwarmup`` argument can be set to a
higher number in order to curb the effects that initial values have on
the resulting posteriors.
nchain
Number of Markov chains to run. Defaults to 4.
``nchain`` is a numerical value that specifies how many chains (i.e.,
independent sampling sequences) should be used to draw samples from
the posterior distribution. Since the posteriors are generated from a
sampling process, it is good practice to run multiple chains to ensure
that a reasonably representative posterior is attained. When the
sampling is complete, it is possible to check the multiple chains for
convergence by running the following line of code:
.. code:: python
output.plot(type='trace')
ncore
Number of CPUs to be used for running. Defaults to 1.
nthin
Every ``nthin``-th sample will be used to generate the posterior
distribution. Defaults to 1. A higher number can be used when
auto-correlation within the MCMC sampling is high.
``nthin`` is a numerical value that specifies the "skipping" behavior
of the MCMC sampler. That is, only every ``nthin``-th sample is used to
generate posterior distributions. By default, ``nthin`` is equal to 1,
meaning that every sample is used to generate the posterior.
inits
String or list specifying how the initial values should be generated.
Options are ``'fixed'`` or ``'random'``, or your own initial values.
ind_pars
String specifying how to summarize the individual parameters.
Current options are: ``'mean'``, ``'median'``, or ``'mode'``.
model_regressor
Whether to export model-based regressors. Currently not available for this model.
vb
Whether to use variational inference to approximately draw from a
posterior distribution. Defaults to ``False``.
inc_postpred
Include trial-level posterior predictive simulations in
model output (may greatly increase file size). Defaults to ``False``.
adapt_delta
Floating point value representing the target acceptance probability of a new
sample in the MCMC chain. Must be between 0 and 1. See note below.
stepsize
Integer value specifying the size of each leapfrog step that the MCMC sampler
can take on each new iteration. See note below.
max_treedepth
Integer value specifying how many leapfrog steps the MCMC sampler can take
on each new iteration. See note below.
**additional_args
Not used for this model.
Returns
-------
model_data
An ``hbayesdm.TaskModel`` instance with the following components:
- ``model``: String value that is the name of the model ('dd_hyperbolic_single').
- ``all_ind_pars``: Pandas DataFrame containing the summarized parameter values
(as specified by ``ind_pars``) for each subject.
- ``par_vals``: OrderedDict holding the posterior samples over different parameters.
- ``fit``: A PyStan StanFit object that contains the fitted Stan model.
- ``raw_data``: Pandas DataFrame containing the raw data used to fit the model,
as specified by the user.
Examples
--------
.. code:: python
from hbayesdm import rhat, print_fit
from hbayesdm.models import dd_hyperbolic_single
# Run the model and store results in "output"
output = dd_hyperbolic_single(data='example', niter=2000, nwarmup=1000, nchain=4, ncore=4)
# Visually check convergence of the sampling chains (should look like "hairy caterpillars")
output.plot(type='trace')
# Plot posterior distributions of the hyper-parameters (distributions should be unimodal)
output.plot()
# Check Rhat values (all Rhat values should be less than or equal to 1.1)
rhat(output, less=1.1)
# Show the LOOIC and WAIC model fit estimates
print_fit(output)
"""
return DdHyperbolicSingle(
data=data,
niter=niter,
nwarmup=nwarmup,
nchain=nchain,
ncore=ncore,
nthin=nthin,
inits=inits,
ind_pars=ind_pars,
model_regressor=model_regressor,
vb=vb,
inc_postpred=inc_postpred,
adapt_delta=adapt_delta,
stepsize=stepsize,
max_treedepth=max_treedepth,
**additional_args)
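# Sketch of inspecting the result (continues the docstring example above; the
# attribute names follow the Returns section, the parameter key is assumed):
#
#     output.all_ind_pars # per-subject summaries of k and beta
#     output.par_vals['k'] # posterior samples of the discounting rate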
| gpl-3.0 | 7,724,618,514,999,984,000 | 40.518828 | 145 | 0.640331 | false |
kashev/pysc | util/anagram_dict_builder.py | 1 | 1580 | #!/usr/bin/env python3
# pysc
# Kashev Dalmia | @kashev | [email protected]
# anagram_dict_builder.py
""" A script which builds an anagram dictionary from a dictionary. """
# Credit: Jeff Knupp
# https://github.com/jeffknupp/presser/blob/master/make_anagrams.py
import collections
import os
import string
def build_anagram_dict(infile, outfile):
with open(infile, 'r') as file_handle:
words = collections.defaultdict(list)
letters = set(string.ascii_lowercase + '\n')
for word in file_handle:
# Check to see if the word contains only letters in the set
# (no apostraphies, only an issue if using a poor dictionary)
# and that the word is of a reasonable length
if len(set(word) - letters) == 0 and len(word) < 20:
word = word.strip()
letter_key = ''.join(sorted(word))
words[letter_key].append(word)
anagram_dictionary = [' '.join([key] + value)
for key, value in words.items()]
anagram_dictionary.sort()
with open(outfile, 'w') as file_handle:
file_handle.write('\n'.join(anagram_dictionary))
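# Sketch (not part of the original script): once an anagram dictionary has
# been written, finding the anagrams of a word is a lookup on its
# sorted-letter key, because every output line is "<sorted letters> <word>...".
def find_anagrams(word, anagram_file):
    """ Return the anagrams of `word` recorded in `anagram_file`. """
    key = ''.join(sorted(word))
    with open(anagram_file, 'r') as file_handle:
        for line in file_handle:
            parts = line.split()
            if parts and parts[0] == key:
                return parts[1:]
    return []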
def main():
""" main function. """
# Change to script directory.
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
for sd in ["sowpods", "twl", "wwf"]:
infile = '../dict/{}.txt'.format(sd)
outfile = '../dict/{}_anagram.txt'.format(sd)
build_anagram_dict(infile, outfile)
if __name__ == '__main__':
main()
| mit | 3,450,408,199,887,984,000 | 31.244898 | 73 | 0.605063 | false |
dbenoit17/ansible_agnostic_deployer | ansible/inventory/ravello.py | 1 | 7162 | #!/usr/bin/python
'''
Ravello external inventory script
==================================================
Generates inventory that Ansible can understand by making an API request to Ravello.
Modeled after https://raw.githubusercontent.com/jameslabocki/ansible_api/master/python/ansible_tower_cloudforms_inventory.py
Required: Ravello Python SDK https://github.com/ravello/python-sdk
Useful: https://www.ravellosystems.com/ravello-api-doc/
Notes: In my testing, with >200 applications and ~1,000 virtual machines this took 30 seconds to execute.
If the get_applications call in the Ravello Python SDK supported dumping design information this could be dramatically reduced.
jlabocki <at> redhat.com or @jameslabocki on twitter
'''
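# How grouping works (summary of get_app below; host names are illustrative):
# each Ravello VM lists its Ansible groups as "tag:<group>" lines in its
# description field, and --list then emits JSON shaped roughly like
#
#     {
#         "_meta": {"hostvars": {"bastion-1.example.com": {"externalFqdn": "..."}}},
#         "bastion": {"hosts": ["bastion-1.example.com"]},
#         "nodes": {"hosts": ["node-1.example.com"]}
#     }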
import os
import re
import argparse
import ConfigParser
import requests
import json
from argparse import ArgumentParser
import base64
import getpass
import logging
import logging.handlers
from ravello_sdk import *
def get_credentials():
with open(os.path.expanduser("~/.ravello_login"),"r") as pf:
username = pf.readline().strip()
encrypted_password = pf.readline().strip()
password = base64.b64decode(encrypted_password).decode()
return username,password
def get_user_credentials(username):
password = None
if username:
password = getpass.getpass('Enter a Password: ')
else:
#read user credentials from .ravello_login file in user HOMEDIR
username,password = get_credentials()
if not username or not password:
log.error('User credentials are not set')
print('Error: User credentials are not set')
return None,None
return username,password
def connect(username, password):
client = RavelloClient()
try:
client.login(username, password)
except Exception as e:
print('Error: Invalid user credentials, username {0}'.format(username))
return None
return client
def get_app_id(app_name,client):
app_id=0
for app in client.get_applications():
if app['name'].lower() == app_name.lower():
app_id = app['id']
break
return app_id
class RavelloInventory(object):
def _empty_inventory(self):
return {"_meta" : {"hostvars" : {}}}
def __init__(self):
''' Main execution path '''
# Inventory grouped by instance IDs, tags, security groups, regions,
# and availability zones
self.inventory = self._empty_inventory()
# Index of hostname (address) to instance ID
self.index = {}
# Read CLI arguments
self.read_settings()
self.parse_cli_args()
# If --apps is set then run get_apps_all
#if self.args.apps is True:
# self.get_apps_all()
# If --list is set then run get_app with ID of application
if self.args.list:
self.get_app()
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Ravello')
parser.add_argument('--apps', action='store_false',
help='List all app names (default: False)')
parser.add_argument('--list', action='store', default=False,
help='Get the group(s) and hostname(s) from a specific application by specifying the app name')
self.args = parser.parse_args()
def read_settings(self):
''' Reads the settings from the ravello.ini file '''
config = ConfigParser.SafeConfigParser()
config_paths = [
os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ravello.ini'),
"/etc/ansible/ravello.ini",
]
env_value = os.environ.get('RAVELLO_INI_PATH')
if env_value is not None:
config_paths.append(os.path.expanduser(os.path.expandvars(env_value)))
config.read(config_paths)
# Get Auth from INI
INI=True
if config.has_option('ravello', 'username'):
self.ravello_username = config.get('ravello', 'username')
else:
self.ravello_username = "none"
INI=False
if config.has_option('ravello', 'password'):
self.ravello_password = config.get('ravello', 'password')
else:
self.ravello_password = "none"
INI=False
if INI is False:
self.ravello_username, self.ravello_password = get_user_credentials(None)
if not self.ravello_username or not self.ravello_password:
print("ERROR: Could not get Ravello credentials from INI file or .ravello_login (SDK Auth)")
exit(1)
def get_apps_all(self):
#Connect to Ravello
client = connect(self.ravello_username, self.ravello_password)
if not client:
exit (1)
apps = client.get_applications()
names = []
for app in apps:
#Only get the published apps
if app['published']:
myname = (json.dumps(app['name']))
names.append(myname)
for name in names:
print name
def get_app(self):
#Connect to Ravello
myappname = self.args.list
client = connect(self.ravello_username, self.ravello_password)
if not client:
exit (1)
apps = client.get_applications()
myappid = ""
for app in apps:
#Only get the published apps
if app['published']:
if str(app['name']) == myappname:
myappid = app['id']
#First, define empty lists for the tags, groups, subgroups for tags/vms, and the formatted list for tower.
groups = {}
groups['_meta'] = {}
groups['_meta']['hostvars'] = {}
app = client.get_application(myappid, aspect="deployment")
if app['deployment']:
appname = app['name']
vmsFlag = True if "vms" in app["deployment"] else False
if vmsFlag == True:
vms = app['deployment']['vms']
for vm in vms:
#if 'externalFqdn' in vm:
# hostname = vm['externalFqdn']
#else:
hostnames = vm['hostnames']
hostname = hostnames[0]
desc = vm['description']
for line in desc.splitlines():
if re.match("^tag:", line):
t = line.split(':')
tag = t[1]
if tag in groups.keys():
groups[tag]['hosts'].append(hostname)
else:
groups[tag] = {}
groups[tag]['hosts'] = {}
groups[tag]['hosts'] = [hostname]
if 'externalFqdn' in vm:
groups['_meta']['hostvars'][hostname] = { 'externalFqdn': vm['externalFqdn'] }
if tag == 'bastion' and 'externalFqdn' in vm:
groups['_meta']['hostvars'][hostname].update({ 'bastion': True })
print json.dumps(groups, indent=5)
#Run the script
RavelloInventory()
| gpl-3.0 | 1,995,015,316,470,555,400 | 32.004608 | 134 | 0.586289 | false |
guillaume-havard/testdjango | sitetest/stats/middleware.py | 1 | 1123 | from django.db.models import F
from stats.models import Page
class StatsMiddleware(object):
def process_view(self, request, view_func, view_args, view_kwargs):
""" Incrémente le nombre de page vues à chaque appel de vues """
try:
# Fetch the counter linked to this page and increment it
p = Page.objects.get(url=request.path)
p.nb_visites = F('nb_visites') + 1
p.save()
except Page.DoesNotExist:
# Create a new counter, starting at 1 by default
Page(url=request.path).save()
def process_response(self, request, response):
""" Affiche le nombre de fois que la page a été vue """
if response.status_code == 200:
p = Page.objects.get(url=request.path)
response.content += bytes(
"Cette page a été vue {0} fois.".format(p.nb_visites), "utf8")
return response
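# To enable the counter (a sketch; it assumes this file lives in the ``stats``
# application of the project), add the class to the middleware setting:
#
#     MIDDLEWARE_CLASSES = (
#         ...
#         'stats.middleware.StatsMiddleware',
#     )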
# Here the F object lets the update run directly in the database (faster)
# Careful: normally we should not modify the response content from a middleware!! | mit | -3,092,915,929,755,777,500 | 39.925926 | 92 | 0.631341 | false |
pythonistas-tw/academy | web-api/maomao/web_test.py | 1 | 3005 | import os
import web
import unittest
import tempfile
from lxml import etree
case = [
"/count?op={}&value1=1&value2=1", "/count?op={}&value1=52&value2=7",
"/count?op={}&value1=100.5&value2=33.1", "/count?op={}",
"/count?op={}&value1=1", "/count?op={}&value2=10",
"/count?op={}&value1=&value2=10", "/count?op={}&value1=kk&value2=1",
"/count?op={}&value1=9&value2=ss1", "/count?op={}&value1=9p&value2=121"
]
sum_answer = ["The Answer of 1 + 1 is 2.0", "The Answer of 52 + 7 is 59.0", "The Answer of 100.5 + 33.1 is 133.6"]
sum_answer += ["Missing values"]*4
sum_answer += ["Invalid values"]*3
minus_answer = ["The Answer of 1 - 1 is 0.0", "The Answer of 52 - 7 is 45.0", "The Answer of 100.5 - 33.1 is 67.4"]
minus_answer += ["Missing values"]*4
minus_answer += ["Invalid values"]*3
multiply_answer = ["The Answer of 1 * 1 is 1.0", "The Answer of 52 * 7 is 364.0", "The Answer of 100.5 * 33.1 is 3326.55"]
multiply_answer += ["Missing values"]*4
multiply_answer += ["Invalid values"]*3
divide_answer = ["The Answer of 1 / 1 is 1.0", "The Answer of 52 / 7 is 7.428571428571429", "The Answer of 100.5 / 33.1 is 3.036253776435045"]
divide_answer += ["Missing values"]*4
divide_answer += ["Invalid values"]*3
class FlaskrTestCase(unittest.TestCase):
def setUp(self):
web.app.config['TESTING'] = True
self.app = web.app.test_client()
def test_sum(self):
num = 0
for c in case:
rv = self.app.get(c.format("sum"))
page = etree.HTML(rv.data)
for i in page.xpath(u"//em"):
assert i.text == sum_answer[num], "The Response of {} isn't correct.".format(c.format("sum"))
num += 1
def test_minus(self):
num = 0
for c in case:
rv = self.app.get(c.format("minus"))
page = etree.HTML(rv.data)
for i in page.xpath(u"//em"):
assert i.text == minus_answer[num], "The Response of {} isn't correct.".format(c.format("minus"))
num += 1
def test_multiply(self):
num = 0
for c in case:
rv = self.app.get(c.format("multiply"))
page = etree.HTML(rv.data)
for i in page.xpath(u"//em"):
assert i.text == multiply_answer[num], "The Response of {} isn't correct.".format(c.format("multiply"))
num += 1
def test_divide(self):
num = 0
for c in case:
rv = self.app.get(c.format("divide"))
page = etree.HTML(rv.data)
for i in page.xpath(u"//em"):
assert i.text == divide_answer[num], "The Response of {} isn't correct.".format(c.format("divide"))
num += 1
rv = self.app.get("/count?op=divide&value1=22&value2=0")
page = etree.HTML(rv.data)
for i in page.xpath(u"//em"):
assert i.text == "Zero Division Error", "The Response of {} isn't correct.".format("/count?op=divide&value1=22&value2=0")
if __name__ == '__main__':
unittest.main() | gpl-2.0 | 7,145,401,281,587,108,000 | 37.050633 | 142 | 0.570383 | false |
priyom/priyomdb | Schema/Patches/patch_3.py | 1 | 1691 | """
File name: patch_3.py
This file is part of: priyomdb
LICENSE
The contents of this file are subject to the Mozilla Public License
Version 1.1 (the "License"); you may not use this file except in
compliance with the License. You may obtain a copy of the License at
http://www.mozilla.org/MPL/
Software distributed under the License is distributed on an "AS IS"
basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
License for the specific language governing rights and limitations under
the License.
Alternatively, the contents of this file may be used under the terms of
the GNU General Public license (the "GPL License"), in which case the
provisions of GPL License are applicable instead of those above.
FEEDBACK & QUESTIONS
For feedback and questions about priyomdb please e-mail one of the
authors:
Jonas Wielicki <[email protected]>
"""
def apply(store):
statements = [
"""CREATE TABLE `eventClass` (
`ID` INT NOT NULL AUTO_INCREMENT,
`Title` VARCHAR(255) NOT NULL COMMENT 'title of the event class',
PRIMARY KEY (`ID`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8""",
"""CREATE TABLE `events` (
`ID` INT NOT NULL AUTO_INCREMENT,
`Created` BIGINT NOT NULL COMMENT 'creation date of row',
`Modified` BIGINT NOT NULL COMMENT 'last modification date of row',
`StationID` INT NOT NULL COMMENT 'station to which the ID is associated',
`EventClassID` INT DEFAULT NULL COMMENT 'event class, NULL for raw event',
`Description` TEXT NOT NULL COMMENT 'descriptive text of the event',
PRIMARY KEY (`ID`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8"""
]
for statement in statements:
store.execute(statement)
| gpl-3.0 | -6,247,857,135,208,938,000 | 34.978723 | 78 | 0.735068 | false |
pedrohml/smartbot | smartbot/joke_behaviour.py | 1 | 2199 | # coding: utf-8
from smartbot import Behaviour
from smartbot import Utils
from smartbot import ExternalAPI
import re
import os
import random
class JokeBehaviour(Behaviour):
def __init__(self, bot):
super(JokeBehaviour, self).__init__(bot)
self.language = self.bot.config.get('main', 'language') if self.bot.config.has_option('main', 'language') else 'en-US'
def addHandlers(self):
self.bot.addCommandHandler('joke', self.jokeSearch)
self.bot.addCommandHandler('jalk', self.jalkSearch)
def removeHandlers(self):
self.bot.removeCommandHandler('joke', self.jokeSearch)
self.bot.removeCommandHandler('jalk', self.jalkSearch)
def jokeSearch(self, telegramBot, update):
p = re.compile('([^ ]*) (.*)')
query = (p.match(update.message.text).groups()[1] or '').strip()
self.logDebug(u'Joke search (chat_id: %s, query: %s)' % (update.message.chat_id, query or 'None'))
jokes = ExternalAPI.searchJoke(query)
if jokes:
self.bot.sendMessage(chat_id=update.message.chat_id, text=random.choice(jokes))
def jalkSearch(self, telegramBot, update):
p = re.compile('([^ ]*) (.*)')
query = (p.match(update.message.text).groups()[1] or '').strip()
self.logDebug(u'Jalk search (chat_id: %s, query: %s)' % (update.message.chat_id, query or 'None'))
jokes = ExternalAPI.searchJoke(query)
if jokes:
jokes = filter(lambda c: len(re.split(r'\W+', c, flags=re.MULTILINE)) < 200, jokes)
jokes = sorted(jokes, lambda x, y: len(x) - len(y))
if jokes:
joke = jokes[0]
audioFile = ExternalAPI.textToSpeech(joke, language=self.language, encode='mp3')
if os.path.exists(audioFile) and os.path.getsize(audioFile) > 0:
self.bot.sendAudio(chat_id=update.message.chat_id, audio=audioFile, performer=self.bot.getInfo().username)
else:
self.bot.sendMessage(chat_id=update.message.chat_id, text=u'Não consigo contar')
else:
self.bot.sendMessage(chat_id=update.message.chat_id, text=u'Não encontrei piada curta')
| mit | 4,034,820,672,840,083,500 | 44.770833 | 126 | 0.622667 | false |
nop33/indico-plugins | importer/indico_importer/controllers.py | 1 | 4381 | # This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from datetime import datetime
from dateutil.relativedelta import relativedelta
from flask import jsonify, request
from flask_pluginengine import current_plugin
from pytz import timezone, utc
from werkzeug.exceptions import NotFound
from indico.core.db import db
from indico.modules.events.timetable.controllers import RHManageTimetableBase
from indico.modules.events.timetable.models.entries import TimetableEntry, TimetableEntryType
from indico.legacy.webinterface.rh.base import RHProtected
class RHGetImporters(RHProtected):
def _process(self):
importers = {k: importer.name for k, (importer, _) in current_plugin.importer_engines.iteritems()}
return jsonify(importers)
class RHImportData(RHProtected):
def _process(self):
size = request.args.get('size', 10)
query = request.args.get('query')
importer, plugin = current_plugin.importer_engines.get(request.view_args['importer_name'])
with plugin.plugin_context():
data = {'records': importer.import_data(query, size)}
return jsonify(data)
class RHEndTimeBase(RHManageTimetableBase):
"""Base class for the importer operations"""
normalize_url_spec = {
'locators': {
lambda self: self.event_new
},
'preserved_args': {
'importer_name'
}
}
@staticmethod
def _find_latest_end_dt(entries):
latest_dt = None
for entry in entries:
if latest_dt is None or entry.end_dt > latest_dt:
latest_dt = entry.end_dt
return latest_dt
def _format_date(self, date):
return date.astimezone(timezone(self.event_new.timezone)).strftime('%H:%M')
class RHDayEndTime(RHEndTimeBase):
"""Get the end_dt of the latest timetable entry or the event start_dt if no entry exist on that date"""
def _checkParams(self, params):
RHEndTimeBase._checkParams(self, params)
self.date = self.event_new.tzinfo.localize(datetime.strptime(request.args['selectedDay'], '%Y/%m/%d')).date()
def _process(self):
event_start_date = db.cast(TimetableEntry.start_dt.astimezone(self.event_new.tzinfo), db.Date)
entries = self.event_new.timetable_entries.filter(event_start_date == self.date)
latest_end_dt = self._find_latest_end_dt(entries)
if latest_end_dt is None:
event_start = self.event_new.start_dt
latest_end_dt = utc.localize(datetime.combine(self.date, event_start.time()))
return self._format_date(latest_end_dt)
class RHBlockEndTime(RHEndTimeBase):
"""Return the end_dt of the latest timetable entry inside the block or the block start_dt if it is empty"""
normalize_url_spec = {
'locators': {
lambda self: self.timetable_entry
},
'preserved_args': {
'importer_name'
}
}
def _checkParams(self, params):
RHEndTimeBase._checkParams(self, params)
self.date = timezone(self.event_new.timezone).localize(datetime.strptime(request.args['selectedDay'],
'%Y/%m/%d'))
self.timetable_entry = self.event_new.timetable_entries.filter_by(
type=TimetableEntryType.SESSION_BLOCK, id=request.view_args['entry_id']).first_or_404()
def _process(self):
entries = self.timetable_entry.children
latest_end_dt = self._find_latest_end_dt(entries)
if latest_end_dt is None:
latest_end_dt = self.timetable_entry.start_dt
return self._format_date(latest_end_dt)
| gpl-3.0 | -1,073,251,264,224,585,700 | 37.769912 | 117 | 0.671764 | false |
mhubig/intelhex | scripts/hex2dump.py | 1 | 3960 | #!/usr/bin/python
# Copyright (c) 2008,2010,2011,2012,2013 Alexander Belchenko
# All rights reserved.
#
# Redistribution and use in source and binary forms,
# with or without modification, are permitted provided
# that the following conditions are met:
#
# * Redistributions of source code must retain
# the above copyright notice, this list of conditions
# and the following disclaimer.
# * Redistributions in binary form must reproduce
# the above copyright notice, this list of conditions
# and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the author nor the names
# of its contributors may be used to endorse
# or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Show content of hex file as hexdump."""
VERSION = '1.5.1'
USAGE = '''hex2dump: show content of hex file as hexdump.
Usage:
python hex2dump.py [options] HEXFILE
Options:
-h, --help this help message.
-v, --version version info.
-r, --range=START:END specify address range for dumping
(ascii hex value).
Range can be in form 'START:' or ':END'.
Arguments:
HEXFILE name of hex file for processing (use '-' to read
from stdin)
'''
import sys
def hex2dump(hexfile, start=None, end=None):
import intelhex
if hexfile == '-':
hexfile = sys.stdin
try:
ih = intelhex.IntelHex(hexfile)
except (IOError, intelhex.IntelHexError), e:
sys.stderr.write('Error reading file: %s\n' % e)
return 1
if not (start is None and end is None):
ih = ih[slice(start,end)]
ih.dump()
return 0
def main(argv=None):
import getopt
if argv is None:
argv = sys.argv[1:]
start = None
end = None
try:
opts, args = getopt.getopt(argv, "hvp:r:",
["help", "version", "range="])
for o, a in opts:
if o in ("-h", "--help"):
print(USAGE)
return 0
elif o in ("-v", "--version"):
print(VERSION)
return 0
elif o in ("-r", "--range"):
try:
l = a.split(":")
if l[0] != '':
start = int(l[0], 16)
if l[1] != '':
end = int(l[1], 16)
except:
raise getopt.GetoptError('Bad range value(s)')
if not args:
raise getopt.GetoptError('Hex file is not specified')
if len(args) > 1:
raise getopt.GetoptError('Too many arguments')
except getopt.GetoptError, msg:
txt = 'ERROR: '+str(msg) # that's required to get not-so-dumb result from 2to3 tool
print(txt)
print(USAGE)
return 2
try:
return hex2dump(args[0], start, end)
except IOError, e:
import errno
if e.errno not in (0, errno.EPIPE):
raise
if __name__ == '__main__':
import sys
sys.exit(main())
| bsd-3-clause | -4,853,036,239,676,083,000 | 31.727273 | 92 | 0.603283 | false |
oliver-sanders/cylc | cylc/flow/network/schema.py | 1 | 52672 | # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) 2008-2019 NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""GraphQL API schema via Graphene implementation."""
import asyncio
from functools import partial
import logging
from textwrap import dedent
from typing import Callable, AsyncGenerator, Any
from graphene import (
Boolean, Field, Float, ID, InputObjectType, Int,
List, Mutation, ObjectType, Schema, String, Union, Enum
)
from graphene.types.generic import GenericScalar
from graphene.utils.str_converters import to_snake_case
from cylc.flow.task_state import (
TASK_STATUSES_ORDERED,
TASK_STATUS_DESC,
# TASK_STATUS_RUNAHEAD,
TASK_STATUS_WAITING,
TASK_STATUS_QUEUED,
TASK_STATUS_EXPIRED,
TASK_STATUS_READY,
TASK_STATUS_SUBMIT_FAILED,
TASK_STATUS_SUBMIT_RETRYING,
TASK_STATUS_SUBMITTED,
TASK_STATUS_RETRYING,
TASK_STATUS_RUNNING,
TASK_STATUS_FAILED,
TASK_STATUS_SUCCEEDED
)
from cylc.flow.data_store_mgr import (
ID_DELIM, FAMILIES, FAMILY_PROXIES,
JOBS, TASKS, TASK_PROXIES
)
from cylc.flow.suite_status import StopMode
def sstrip(text):
"""Simple function to dedent and strip text.
Examples:
>>> print(sstrip('''
... foo
... bar
... baz
... '''))
foo
bar
baz
"""
return dedent(text).strip()
PROXY_NODES = 'proxy_nodes'
NODE_MAP = {
'Task': TASKS,
'TaskProxy': TASK_PROXIES,
'Family': FAMILIES,
'FamilyProxy': FAMILY_PROXIES,
'Job': JOBS,
'Node': PROXY_NODES,
}
CYCLING_TYPES = [
'family_proxies',
'family_proxy',
'jobs',
'job',
'task_proxies',
'task_proxy',
]
PROXY_TYPES = [
'family_proxies',
'family_proxy',
'task_proxies',
'task_proxy',
]
DEF_TYPES = [
'families',
'family',
'tasks',
'task',
]
def parse_workflow_id(item):
"""Split workflow id argument to individual workflow attributes.
Args:
item (owner|workflow:status):
Workflows of other owners can be addressed; the owner defaults to
the UI Server owner, and ``*`` may be used as a glob for the
workflow name.
Returns:
A tuple of id components in respective order. For example:
(owner, name, status)
"""
owner, workflow, status = (None, None, None)
if ':' in item:
head, status = item.rsplit(':', 1)
else:
head, status = (item, None)
if head.count(ID_DELIM):
owner, workflow = head.split(ID_DELIM, 1)
else:
# more common to filter on workflow (with owner constant)
workflow = head
return (owner, workflow, status)
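# Example (illustrative, not from the original source), assuming ID_DELIM is
# the '|' character used in the IDs documented above:
#
#   parse_workflow_id('sue|baro:running')  # -> ('sue', 'baro', 'running')
#   parse_workflow_id('baro')              # -> (None, 'baro', None)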
def parse_node_id(item, node_type=None):
"""Parse definition, job, or proxy id argument returning components.
Args:
item (str): A string representing a node ID. Partial job IDs fill
out cycle|name|submit_num first; cycle is irrelevant to Def
(definition) IDs; owner|workflow are the last components to be
filled out.
For example:
name
cycle|na*
workflow|cycle|name
owner|workflow|cycle|name|submit_num:state
cycle|*|submit_num
node_type (str):
the type of the node to be parsed.
Returns:
A tuple of string id components in respective order. For example:
(owner, workflow, cycle, name, submit_num, state)
None type is set for missing components.
"""
if ':' in item:
head, state = item.rsplit(':', 1)
else:
head, state = (item, None)
if ID_DELIM in head:
dil_count = head.count(ID_DELIM)
parts = head.split(ID_DELIM, dil_count)
else:
return (None, None, None, head, None, state)
if node_type in DEF_TYPES:
owner, workflow, name = [None] * (2 - dil_count) + parts
parts = [owner, workflow, None, name, None]
elif node_type in PROXY_TYPES:
parts = [None] * (3 - dil_count) + parts + [None]
elif dil_count < 4:
if dil_count < 3:
parts = [None, None] + parts + [None] * (2 - dil_count)
else:
parts = [None] * (4 - dil_count) + parts
parts += [state]
return tuple(parts)
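# Example (illustrative, not from the original source), again assuming '|' as
# ID_DELIM; a bare name returns with every other component unset:
#
#   parse_node_id('foo', node_type='task_proxies')
#   # -> (None, None, None, 'foo', None, None)
#   parse_node_id('20100101T00|foo:failed', node_type='task_proxies')
#   # -> (None, None, '20100101T00', 'foo', None, 'failed')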
# ** Query Related **#
# Field args (i.e. for queries etc):
class SortArgs(InputObjectType):
keys = List(String, default_value=['id'])
reverse = Boolean(default_value=False)
jobs_args = dict(
ids=List(ID, default_value=[]),
exids=List(ID, default_value=[]),
states=List(String, default_value=[]),
exstates=List(String, default_value=[]),
sort=SortArgs(default_value=None),
)
all_jobs_args = dict(
workflows=List(ID, default_value=[]),
exworkflows=List(ID, default_value=[]),
ids=List(ID, default_value=[]),
exids=List(ID, default_value=[]),
states=List(String, default_value=[]),
exstates=List(String, default_value=[]),
sort=SortArgs(default_value=None),
)
def_args = dict(
ids=List(ID, default_value=[]),
exids=List(ID, default_value=[]),
mindepth=Int(default_value=-1),
maxdepth=Int(default_value=-1),
sort=SortArgs(default_value=None),
)
all_def_args = dict(
workflows=List(ID, default_value=[]),
exworkflows=List(ID, default_value=[]),
ids=List(ID, default_value=[]),
exids=List(ID, default_value=[]),
mindepth=Int(default_value=-1),
maxdepth=Int(default_value=-1),
sort=SortArgs(default_value=None),
)
proxy_args = dict(
ghosts=Boolean(default_value=False),
ids=List(ID, default_value=[]),
exids=List(ID, default_value=[]),
states=List(String, default_value=[]),
exstates=List(String, default_value=[]),
is_held=Boolean(),
mindepth=Int(default_value=-1),
maxdepth=Int(default_value=-1),
sort=SortArgs(default_value=None),
)
all_proxy_args = dict(
ghosts=Boolean(default_value=False),
workflows=List(ID, default_value=[]),
exworkflows=List(ID, default_value=[]),
ids=List(ID, default_value=[]),
exids=List(ID, default_value=[]),
states=List(String, default_value=[]),
exstates=List(String, default_value=[]),
is_held=Boolean(),
mindepth=Int(default_value=-1),
maxdepth=Int(default_value=-1),
sort=SortArgs(default_value=None),
)
edge_args = dict(
ids=List(ID, default_value=[]),
exids=List(ID, default_value=[]),
states=List(String, default_value=[]),
exstates=List(String, default_value=[]),
mindepth=Int(default_value=-1),
maxdepth=Int(default_value=-1),
sort=SortArgs(default_value=None),
)
all_edge_args = dict(
workflows=List(ID, default_value=[]),
exworkflows=List(ID, default_value=[]),
sort=SortArgs(default_value=None),
)
nodes_edges_args = dict(
ghosts=Boolean(default_value=False),
ids=List(ID, default_value=[]),
exids=List(ID, default_value=[]),
states=List(String, default_value=[]),
exstates=List(String, default_value=[]),
is_held=Boolean(),
distance=Int(default_value=1),
mindepth=Int(default_value=-1),
maxdepth=Int(default_value=-1),
sort=SortArgs(default_value=None),
)
nodes_edges_args_all = dict(
ghosts=Boolean(default_value=False),
workflows=List(ID, default_value=[]),
exworkflows=List(ID, default_value=[]),
ids=List(ID, default_value=[]),
exids=List(ID, default_value=[]),
states=List(String, default_value=[]),
exstates=List(String, default_value=[]),
is_held=Boolean(),
distance=Int(default_value=1),
mindepth=Int(default_value=-1),
maxdepth=Int(default_value=-1),
sort=SortArgs(default_value=None),
)
# Resolvers are used to collate data needed for query resolution.
# Treated as implicit static methods;
# https://docs.graphene-python.org/en/latest/types
# /objecttypes/#implicit-staticmethod
# they can exist inside or outside the query object types.
#
# Here we define them outside the queries so they can be used with
# multiple resolution calls, both at root query or object field level.
#
# The first argument has a naming convention;
# https://docs.graphene-python.org/en/latest/types
# /objecttypes/#naming-convention
# with name 'root' used here, it provides context to the resolvers.
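# As an illustration (not from the original source), a root query such as
#
#   query { workflows(ids: ["*"]) { id status taskProxies { id state } } }
#
# is served by get_workflows below, while the nested taskProxies field is
# served by get_nodes_by_ids with the parent workflow passed in as `root`.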
# Resolvers:
async def get_workflows(root, info, **args):
args['workflows'] = [parse_workflow_id(w_id) for w_id in args['ids']]
args['exworkflows'] = [parse_workflow_id(w_id) for w_id in args['exids']]
resolvers = info.context.get('resolvers')
return await resolvers.get_workflows(args)
async def get_nodes_all(root, info, **args):
"""Resolver for returning job, task, family nodes"""
field_name = to_snake_case(info.field_name)
field_ids = getattr(root, field_name, None)
if args.get('id'):
args['ids'] = [args.get('id')]
if field_ids:
args['ids'] = field_ids
elif field_ids == []:
return []
try:
obj_type = str(info.return_type.of_type).replace('!', '')
except AttributeError:
obj_type = str(info.return_type)
node_type = NODE_MAP[obj_type]
args['ids'] = [parse_node_id(n_id, node_type) for n_id in args['ids']]
args['exids'] = [parse_node_id(n_id, node_type) for n_id in args['exids']]
args['workflows'] = [
parse_workflow_id(w_id) for w_id in args['workflows']]
args['exworkflows'] = [
parse_workflow_id(w_id) for w_id in args['exworkflows']]
resolvers = info.context.get('resolvers')
return await resolvers.get_nodes_all(node_type, args)
async def get_nodes_by_ids(root, info, **args):
"""Resolver for returning job, task, family node"""
field_name = to_snake_case(info.field_name)
field_ids = getattr(root, field_name, None)
if args.get('id'):
args['ids'] = [args.get('id')]
if field_ids:
if isinstance(field_ids, str):
field_ids = [field_ids]
args['native_ids'] = field_ids
elif field_ids == []:
return []
try:
obj_type = str(info.return_type.of_type).replace('!', '')
except AttributeError:
obj_type = str(info.return_type)
node_type = NODE_MAP[obj_type]
args['ids'] = [parse_node_id(n_id, node_type) for n_id in args['ids']]
args['exids'] = [parse_node_id(n_id, node_type) for n_id in args['exids']]
resolvers = info.context.get('resolvers')
return await resolvers.get_nodes_by_ids(node_type, args)
async def get_node_by_id(root, info, **args):
"""Resolver for returning job, task, family node"""
field_name = to_snake_case(info.field_name)
if field_name == 'source_node':
field_id = getattr(root, 'source', None)
elif field_name == 'target_node':
field_id = getattr(root, 'target', None)
else:
field_id = getattr(root, field_name, None)
if field_id:
args['id'] = field_id
if args.get('id', None) is None:
return None
try:
obj_type = str(info.return_type.of_type).replace('!', '')
except AttributeError:
obj_type = str(info.return_type)
resolvers = info.context.get('resolvers')
return await resolvers.get_node_by_id(NODE_MAP[obj_type], args)
async def get_edges_all(root, info, **args):
args['workflows'] = [
parse_workflow_id(w_id) for w_id in args['workflows']]
args['exworkflows'] = [
parse_workflow_id(w_id) for w_id in args['exworkflows']]
resolvers = info.context.get('resolvers')
return await resolvers.get_edges_all(args)
async def get_edges_by_ids(root, info, **args):
field_name = to_snake_case(info.field_name)
field_ids = getattr(root, field_name, None)
if field_ids:
args['native_ids'] = list(field_ids)
elif field_ids == []:
return []
resolvers = info.context.get('resolvers')
return await resolvers.get_edges_by_ids(args)
async def get_nodes_edges(root, info, **args):
"""Resolver for returning job, task, family nodes"""
node_type = NODE_MAP['TaskProxy']
workflow = getattr(root, 'id', None)
if workflow:
args['workflows'] = [parse_workflow_id(workflow)]
args['exworkflows'] = []
else:
args['workflows'] = [
parse_workflow_id(w_id) for w_id in args['workflows']]
args['exworkflows'] = [
parse_workflow_id(w_id) for w_id in args['exworkflows']]
args['ids'] = [parse_node_id(n_id, node_type) for n_id in args['ids']]
args['exids'] = [parse_node_id(n_id, node_type) for n_id in args['exids']]
resolvers = info.context.get('resolvers')
root_nodes = await resolvers.get_nodes_all(node_type, args)
return await resolvers.get_nodes_edges(root_nodes, args)
def resolve_state_totals(root, info, **args):
state_totals = {state: 0 for state in TASK_STATUSES_ORDERED}
# Update with converted protobuf map container
state_totals.update(
dict(getattr(root, to_snake_case(info.field_name), {})))
return state_totals
# Types:
class DefMeta(ObjectType):
class Meta:
description = """
Meta data fields,
including custom fields in a generic user-defined dump"""
title = String(default_value=None)
description = String(default_value=None)
URL = String(default_value=None)
user_defined = List(String, default_value=[])
class TimeZone(ObjectType):
class Meta:
description = """Time zone info."""
hours = Int()
minutes = Int()
string_basic = String()
string_extended = String()
class Workflow(ObjectType):
class Meta:
description = """Global workflow info."""
id = ID(required=True)
name = String()
status = String()
status_msg = String()
host = String()
port = Int()
owner = String()
tasks = List(
lambda: Task,
description="""Task definitions.""",
args=def_args,
resolver=get_nodes_by_ids)
families = List(
lambda: Family,
description="""Family definitions.""",
args=def_args,
resolver=get_nodes_by_ids)
task_proxies = List(
lambda: TaskProxy,
description="""Task cycle instances.""",
args=proxy_args,
resolver=get_nodes_by_ids)
family_proxies = List(
lambda: FamilyProxy,
description="""Family cycle instances.""",
args=proxy_args,
resolver=get_nodes_by_ids)
edges = Field(
lambda: Edges,
args=edge_args,
description="""Graph edges""")
nodes_edges = Field(
lambda: NodesEdges,
args=nodes_edges_args,
resolver=get_nodes_edges)
api_version = Int()
cylc_version = String()
last_updated = Float()
meta = Field(DefMeta)
newest_runahead_cycle_point = String()
newest_cycle_point = String()
oldest_cycle_point = String()
reloaded = Boolean()
run_mode = String()
is_held_total = Int()
state_totals = GenericScalar(resolver=resolve_state_totals)
workflow_log_dir = String()
time_zone_info = Field(TimeZone)
tree_depth = Int()
ns_defn_order = List(String)
job_log_names = List(String)
states = List(String)
class Job(ObjectType):
class Meta:
description = """Jobs."""
id = ID(required=True)
submit_num = Int()
state = String()
# name and cycle_point for filtering/sorting
name = String(required=True)
cycle_point = String(required=True)
task_proxy = Field(
lambda: TaskProxy,
description="""Associated Task Proxy""",
required=True,
resolver=get_node_by_id)
submitted_time = String()
started_time = String()
finished_time = String()
batch_sys_job_id = ID()
batch_sys_name = String()
env_script = String()
err_script = String()
exit_script = String()
execution_time_limit = Float()
host = String()
init_script = String()
job_log_dir = String()
owner = String()
post_script = String()
pre_script = String()
script = String()
work_sub_dir = String()
batch_sys_conf = List(String)
environment = List(String)
directives = List(String)
param_env_tmpl = List(String)
param_var = List(String)
extra_logs = List(String)
messages = List(String)
class Task(ObjectType):
class Meta:
description = """Task definition, static fields"""
id = ID(required=True)
name = String(required=True)
meta = Field(DefMeta)
mean_elapsed_time = Float()
depth = Int()
proxies = List(
lambda: TaskProxy,
description="""Associated cycle point proxies""",
args=proxy_args,
resolver=get_nodes_by_ids)
namespace = List(String, required=True)
class PollTask(ObjectType):
class Meta:
description = """Polling task edge"""
local_proxy = ID(required=True)
workflow = String()
remote_proxy = ID(required=True)
req_state = String()
graph_string = String()
class Condition(ObjectType):
class Meta:
description = """Prerequisite conditions."""
task_proxy = Field(
lambda: TaskProxy,
description="""Associated Task Proxy""",
resolver=get_node_by_id)
expr_alias = String()
req_state = String()
satisfied = Boolean()
message = String()
class Prerequisite(ObjectType):
class Meta:
description = """Task prerequisite."""
expression = String()
conditions = List(
Condition,
description="""Condition monomers of a task prerequisites.""")
cycle_points = List(String)
satisfied = Boolean()
class TaskProxy(ObjectType):
class Meta:
description = """Task cycle instance."""
id = ID(required=True)
task = Field(
Task,
description="""Task definition""",
required=True,
resolver=get_node_by_id)
state = String()
cycle_point = String()
is_held = Boolean()
spawned = Boolean()
depth = Int()
job_submits = Int()
latest_message = String()
outputs = List(String, default_value=[])
broadcasts = List(String, default_value=[])
# name & namespace for filtering/sorting
name = String(required=True)
namespace = List(String, required=True)
prerequisites = List(Prerequisite)
jobs = List(
Job,
description="""Task jobs.""",
args=jobs_args,
resolver=get_nodes_by_ids)
parents = List(
lambda: FamilyProxy,
description="""Task parents.""",
args=proxy_args,
resolver=get_nodes_by_ids)
first_parent = Field(
lambda: FamilyProxy,
description="""Task first parent.""",
args=proxy_args,
resolver=get_node_by_id)
ancestors = List(
lambda: FamilyProxy,
description="""First parent ancestors.""",
args=proxy_args,
resolver=get_nodes_by_ids)
class Family(ObjectType):
class Meta:
description = """Task definition, static fields"""
id = ID(required=True)
name = String(required=True)
meta = Field(DefMeta)
depth = Int()
proxies = List(
lambda: FamilyProxy,
description="""Associated cycle point proxies""",
args=proxy_args,
resolver=get_nodes_by_ids)
parents = List(
lambda: Family,
description="""Family definition parent.""",
args=def_args,
resolver=get_nodes_by_ids)
child_tasks = List(
Task,
description="""Descendant definition tasks.""",
args=def_args,
resolver=get_nodes_by_ids)
child_families = List(
lambda: Family,
description="""Descendant desc families.""",
args=def_args,
resolver=get_nodes_by_ids)
class FamilyProxy(ObjectType):
class Meta:
description = """Family composite."""
id = ID(required=True)
cycle_point = String()
# name & namespace for filtering/sorting
name = String(required=True)
family = Field(
Family,
description="""Family definition""",
required=True,
resolver=get_node_by_id)
state = String()
is_held = Boolean()
depth = Int()
parents = List(
lambda: FamilyProxy,
description="""Family parent proxies.""",
args=proxy_args,
resolver=get_nodes_by_ids)
child_tasks = List(
TaskProxy,
description="""Descendant task proxies.""",
args=proxy_args,
resolver=get_nodes_by_ids)
child_families = List(
lambda: FamilyProxy,
description="""Descendant family proxies.""",
args=proxy_args,
resolver=get_nodes_by_ids)
first_parent = Field(
lambda: FamilyProxy,
description="""Task first parent.""",
args=proxy_args,
resolver=get_node_by_id)
ancestors = List(
lambda: FamilyProxy,
description="""First parent ancestors.""",
args=proxy_args,
resolver=get_nodes_by_ids)
class Node(Union):
class Meta:
types = (TaskProxy, FamilyProxy)
@classmethod
def resolve_type(cls, instance, info):
if hasattr(instance, 'task'):
return TaskProxy
return FamilyProxy
class Edge(ObjectType):
class Meta:
description = """Dependency edge task/family proxies"""
id = ID(required=True)
source = ID()
source_node = Field(
Node,
resolver=get_node_by_id)
target = ID()
target_node = Field(
Node,
resolver=get_node_by_id)
suicide = Boolean()
cond = Boolean()
class Edges(ObjectType):
class Meta:
description = """Dependency edge"""
edges = List(
Edge,
required=True,
args=edge_args,
resolver=get_edges_by_ids)
workflow_polling_tasks = List(PollTask)
leaves = List(String)
feet = List(String)
class NodesEdges(ObjectType):
class Meta:
description = """Related Nodes & Edges."""
nodes = List(
TaskProxy,
description="""Task nodes from and including root.""")
edges = List(
Edge,
description="""Edges associated with the nodes.""")
# Query declaration
class Queries(ObjectType):
class Meta:
description = """Multi-Workflow root level queries."""
workflows = List(
Workflow,
description=Workflow._meta.description,
ids=List(ID, default_value=[]),
exids=List(ID, default_value=[]),
resolver=get_workflows)
job = Field(
Job,
description=Job._meta.description,
id=ID(required=True),
resolver=get_node_by_id)
jobs = List(
Job,
description=Job._meta.description,
args=all_jobs_args,
resolver=get_nodes_all)
task = Field(
Task,
description=Task._meta.description,
id=ID(required=True),
resolver=get_node_by_id)
tasks = List(
Task,
description=Task._meta.description,
args=all_def_args,
resolver=get_nodes_all)
task_proxy = Field(
TaskProxy,
description=TaskProxy._meta.description,
id=ID(required=True),
resolver=get_node_by_id)
task_proxies = List(
TaskProxy,
description=TaskProxy._meta.description,
args=all_proxy_args,
resolver=get_nodes_all)
family = Field(
Family,
description=Family._meta.description,
id=ID(required=True),
resolver=get_node_by_id)
families = List(
Family,
description=Family._meta.description,
args=all_def_args,
resolver=get_nodes_all)
family_proxy = Field(
FamilyProxy,
description=FamilyProxy._meta.description,
id=ID(required=True),
resolver=get_node_by_id)
family_proxies = List(
FamilyProxy,
description=FamilyProxy._meta.description,
args=all_proxy_args,
resolver=get_nodes_all)
edges = List(
Edge,
description=Edge._meta.description,
args=all_edge_args,
resolver=get_edges_all)
nodes_edges = Field(
NodesEdges,
description=NodesEdges._meta.description,
args=nodes_edges_args_all,
resolver=get_nodes_edges)
# ** Mutation Related ** #
# Generic containers
class GenericResponse(ObjectType):
class Meta:
description = """Container for command queued response"""
result = GenericScalar()
# Mutators are used to call the internals of the parent program in the
# resolution of mutation requests (or can make external calls themselves).
# Like query resolvers (read above), they are treated as implicit
# static methods, with object context pass in as the first argument.
# Mutators:
async def mutator(root, info, command=None, workflows=None,
exworkflows=None, **args):
"""Call the resolver method that act on the workflow service
via the internal command queue."""
if workflows is None:
workflows = []
if exworkflows is None:
exworkflows = []
w_args = {}
w_args['workflows'] = [parse_workflow_id(w_id) for w_id in workflows]
w_args['exworkflows'] = [parse_workflow_id(w_id) for w_id in exworkflows]
if args.get('args', False):
args.update(args.get('args', {}))
args.pop('args')
resolvers = info.context.get('resolvers')
res = await resolvers.mutator(info, command, w_args, args)
return GenericResponse(result=res)
async def nodes_mutator(root, info, command, ids, workflows=None,
exworkflows=None, **args):
"""Call the resolver method, dealing with multiple node id arguments,
which acts on the workflow service via the internal command queue."""
if command == 'put_messages':
node_type = 'jobs'
else:
node_type = 'task_proxy'
ids = [parse_node_id(n_id, node_type) for n_id in ids]
# if the workflows arg is empty extract from proxy args
if workflows is None:
workflows = set()
for owner, workflow, _, _, _, _ in ids:
if owner and workflow:
workflows.add(f'{owner}{ID_DELIM}{workflow}')
elif workflow:
workflows.add(workflow)
if not workflows:
return GenericResponse(result="Error: No given Workflow(s)")
if exworkflows is None:
exworkflows = []
w_args = {}
w_args['workflows'] = [parse_workflow_id(w_id) for w_id in workflows]
w_args['exworkflows'] = [parse_workflow_id(w_id) for w_id in exworkflows]
if args.get('args', False):
args.update(args.get('args', {}))
args.pop('args')
resolvers = info.context.get('resolvers')
res = await resolvers.nodes_mutator(info, command, ids, w_args, args)
return GenericResponse(result=res)
# Input types:
class WorkflowID(String):
"""A registered workflow."""
class CyclePoint(String):
"""An integer or date-time cyclepoint."""
class CyclePointGlob(String):
"""A glob for integer or date-time cyclepoints.
The wildcard character (`*`) can be used to perform globbing.
For example `2000*` might match `2000-01-01T00:00Z`.
"""
class RuntimeConfiguration(String):
"""A configuration item for a task or family e.g. `script`."""
class BroadcastSetting(InputObjectType):
"""A task/family runtime setting as a key, value pair."""
key = RuntimeConfiguration(
description=sstrip('''
The cylc namespace for the setting to modify.
e.g. `[environment]variable_name`.
'''),
required=True
)
value = String(
description='The value of the modification',
required=True
)
class BroadcastMode(Enum):
Set = 'put_broadcast'
Clear = 'clear_broadcast'
@property
def description(self):
if self == BroadcastMode.Set:
return 'Create a new broadcast.'
if self == BroadcastMode.Clear:
return 'Revoke an existing broadcast.'
return ''
class TaskStatus(Enum):
"""The status of a task in a workflow."""
# NOTE: this is an enumeration purely for the GraphQL schema
# TODO: the task statuses should be formally declared in a Python
# enumeration rendering this class unnecessary
# NOTE: runahead purposefully omitted to hide users from the task pool
# Runahead = TASK_STATUS_RUNAHEAD
Waiting = TASK_STATUS_WAITING
Queued = TASK_STATUS_QUEUED
Expired = TASK_STATUS_EXPIRED
Ready = TASK_STATUS_READY
SubmitFailed = TASK_STATUS_SUBMIT_FAILED
SubmitRetrying = TASK_STATUS_SUBMIT_RETRYING
Submitted = TASK_STATUS_SUBMITTED
Retrying = TASK_STATUS_RETRYING
Running = TASK_STATUS_RUNNING
Failed = TASK_STATUS_FAILED
Succeeded = TASK_STATUS_SUCCEEDED
@property
def description(self):
return TASK_STATUS_DESC.get(self.value, '')
class TaskState(InputObjectType):
"""The state of a task, a combination of status and other fields."""
status = TaskStatus()
is_held = Boolean(description=sstrip('''
If a task is held no new job submissions will be made
'''))
class TaskName(String):
"""The name a task.
* Must be a task not a family.
* Does not include the cycle point.
* Any parameters must be expanded (e.g. can't be `foo<bar>`).
"""
class NamespaceName(String):
"""The name of a task or family."""
class NamespaceIDGlob(String):
"""A glob search for an active task or family.
Can use the wildcard character (`*`), e.g. `foo*` might match `foot`.
"""
class TaskID(String):
"""The name of an active task."""
class JobID(String):
"""A job submission from an active task."""
class TimePoint(String):
"""A date-time in the ISO8601 format."""
LogLevels = Enum(
'LogLevels',
list(logging._nameToLevel.items()),
description=lambda x: f'Python logging level: {x.name} = {x.value}.'
if x else ''
)
class SuiteStopMode(Enum):
"""The mode used to stop a running workflow."""
# Note: contains only the REQUEST_* values from StopMode
Clean = StopMode.REQUEST_CLEAN
Now = StopMode.REQUEST_NOW
NowNow = StopMode.REQUEST_NOW_NOW
@property
def description(self):
return StopMode(self.value).describe()
# Mutations:
# TODO: re-instate:
# - get-broadcast (can just use GraphQL query BUT needs CLI access too)
# - expire-broadcast
class Broadcast(Mutation):
class Meta:
description = sstrip('''
Override or add new [runtime] config in targeted namespaces in
a running suite.
Uses for broadcast include making temporary changes to task
behaviour, and task-to-downstream-task communication via
environment variables.
A broadcast can target any [runtime] namespace for all cycles or
for a specific cycle. If a task is affected by specific-cycle and
all-cycle broadcasts at once, the specific takes precedence. If
a task is affected by broadcasts to multiple ancestor
namespaces, the result is determined by normal [runtime]
inheritance. In other words, it follows this order:
`all:root -> all:FAM -> all:task -> tag:root -> tag:FAM ->
tag:task`
Broadcasts persist, even across suite restarts, until they expire
when their target cycle point is older than the oldest current in
the suite, or until they are explicitly cancelled with this
command. All-cycle broadcasts do not expire.
For each task the final effect of all broadcasts to all namespaces
is computed on the fly just prior to job submission. The
`--cancel` and `--clear` options simply cancel (remove) active
broadcasts, they do not act directly on the final task-level
result. Consequently, for example, you cannot broadcast to "all
cycles except Tn" with an all-cycle broadcast followed by a cancel
to Tn (there is no direct broadcast to Tn to cancel); and you
cannot broadcast to "all members of FAMILY except member_n" with a
general broadcast to FAMILY followed by a cancel to member_n (there
is no direct broadcast to member_n to cancel).
''')
resolver = partial(mutator, command='broadcast')
class Arguments:
workflows = List(WorkflowID, required=True)
mode = BroadcastMode(
default_value=1,
required=True
)
cycle_points = List(
CyclePoint,
description=sstrip('''
List of cycle points to target or `*` to cancel all all-cycle
broadcasts without canceling all specific-cycle broadcasts.
'''),
default_value=['*'])
tasks = List(
NamespaceName,
description='Target namespaces.',
default_value=['root']
)
settings = List(
BroadcastSetting,
description='Target settings.'
)
# TODO: work out how to implement this feature, it needs to be
# handled client-side which makes it slightly awkward in
# api-on-the-fly land
# files = List(
# String,
# description=sstrip('''
# File with config to broadcast. Can be used multiple times
# ''')
# )
result = GenericScalar()
class Hold(Mutation):
class Meta:
description = sstrip('''
Hold a workflow or tasks within it.
''')
resolver = partial(mutator, command='hold')
class Arguments:
workflows = List(WorkflowID, required=True)
tasks = List(
NamespaceIDGlob,
description='Hold the specified tasks rather than the workflow.'
)
time = TimePoint(description=sstrip('''
Get the workflow to hold after the specified wallclock time
has passed.
'''))
result = GenericScalar()
class Nudge(Mutation):
class Meta:
description = sstrip('''
Cause the Cylc task processing loop to be invoked on a running
suite.
This happens automatically when the state of any task changes
such that task processing (dependency negotiation etc.)
is required, or if a clock-trigger task is ready to run.
''')
resolver = partial(mutator, command='nudge')
class Arguments:
workflows = List(WorkflowID, required=True)
result = GenericScalar()
class Ping(Mutation):
class Meta:
description = sstrip('''
Send a test message to a running suite.
''')
resolver = partial(mutator, command='ping_suite')
class Arguments:
workflows = List(WorkflowID, required=True)
result = GenericScalar()
class Message(Mutation):
class Meta:
description = sstrip('''
Record task job messages.
Send task job messages to:
- The job stdout/stderr.
- The job status file, if there is one.
- The suite server program, if communication is possible.
Task jobs use this to record and report status such
as success and failure. Applications run by task jobs can use
this command to report messages and to report registered task
outputs.
''')
resolver = partial(nodes_mutator, command='put_messages')
class Arguments:
workflows = List(WorkflowID, required=True)
task_job = String(required=True)
event_time = String(default_value=None)
messages = List(
List(String),
description="""List in the form `[[severity, message], ...]`.""",
default_value=None
)
result = GenericScalar()
class Release(Mutation):
class Meta:
description = sstrip('''
Release a held workflow or tasks within it.
See also the opposite command `hold`.
''')
resolver = partial(mutator, command='release')
class Arguments:
workflows = List(WorkflowID, required=True)
tasks = List(
NamespaceIDGlob,
description=sstrip('''
Release matching tasks rather than the workflow as a whole.
''')
)
result = GenericScalar()
class Reload(Mutation):
class Meta:
description = sstrip('''
Tell a suite to reload its definition at run time.
All settings including task definitions, with the
exception of suite log configuration, can be changed on reload.
Note that defined tasks can be added to or removed from a
running suite using "insert" and "remove" without reloading. This
command also allows addition and removal of actual task
definitions, and therefore insertion of tasks that were not defined
at all when the suite started (you will still need to manually
insert a particular instance of a newly defined task). Live task
proxies that are orphaned by a reload (i.e. their task definitions
have been removed) will be removed from the task pool if they have
not started running yet. Changes to task definitions take effect
immediately, unless a task is already running at reload time.
If the suite was started with Jinja2 template variables
set on the command line (cylc run --set FOO=bar REG) the same
template settings apply to the reload (only changes to the suite.rc
file itself are reloaded).
If the modified suite definition does not parse,
failure to reload will be reported but no harm will be done to the
running suite.
''')
resolver = partial(mutator, command='reload_suite')
class Arguments:
workflows = List(WorkflowID, required=True)
result = GenericScalar()
class SetVerbosity(Mutation):
class Meta:
description = sstrip('''
Change the logging severity level of a running suite.
Only messages at or above the chosen severity level will be logged;
for example, if you choose `WARNING`, only warnings and critical
messages will be logged.
''')
resolver = partial(mutator, command='set_verbosity')
class Arguments:
workflows = List(WorkflowID, required=True)
level = LogLevels(required=True)
result = GenericScalar()
class Stop(Mutation):
class Meta:
description = sstrip(f'''
Tell a suite server program to shut down.
By default suites wait for all submitted and running tasks to
complete before shutting down. You can change this behaviour
with the "mode" option.
''')
resolver = partial(mutator, command='stop_workflow')
class Arguments:
workflows = List(WorkflowID, required=True)
mode = SuiteStopMode(
# TODO default
)
cycle_point = CyclePoint(
description='Stop after the suite reaches this cycle.'
)
clock_time = TimePoint(
description='Stop after wall-clock time passes this point.'
)
task = TaskID(
description='Stop after this task succeeds.'
)
result = GenericScalar()
class Checkpoint(Mutation):
class Meta:
description = 'Tell the suite to checkpoint its current state.'
resolver = partial(mutator, command='take_checkpoints')
class Arguments:
workflows = List(WorkflowID, required=True)
name = String(
description='The checkpoint name.',
required=True
)
result = GenericScalar()
class ExtTrigger(Mutation):
class Meta:
description = sstrip('''
Report an external event message to a suite server program.
It is expected that a task in the suite has registered the same
message as an external trigger - a special prerequisite to be
satisfied by an external system, via this command, rather than by
triggering off other tasks.
The ID argument should uniquely distinguish one external trigger
event from the next. When a task's external trigger is satisfied by
an incoming message, the message ID is broadcast to all downstream
tasks in the cycle point as `$CYLC_EXT_TRIGGER_ID` so that they can
use it - e.g. to identify a new data file that the external
triggering system is responding to.
Use the retry options in case the target suite is down or out of
contact.
Note: To manually trigger a task use "Trigger" not
"ExtTrigger".
''')
resolver = partial(mutator, command='put_ext_trigger')
class Arguments:
workflows = List(WorkflowID, required=True)
message = String(
description='External trigger message.',
required=True
)
id = String(
description='Unique trigger ID.',
required=True
)
result = GenericScalar()
class TaskMutation:
class Arguments:
workflows = List(
WorkflowID,
required=True
)
tasks = List(
NamespaceIDGlob,
required=True
)
result = GenericScalar()
class DryRun(Mutation, TaskMutation):
class Meta:
description = sstrip('''
[For internal use] Prepare the job file for a task.
''')
resolver = partial(mutator, command='dry_run_tasks')
class Arguments(TaskMutation.Arguments):
check_syntax = Boolean(
description='Check shell syntax.',
default_value=True
)
class Insert(Mutation, TaskMutation):
class Meta:
description = sstrip('''
Insert new task proxies into the task pool of a running workflow.
For example to enable re-triggering earlier tasks already removed
from the pool.
Note: inserted cycling tasks cycle on as normal, even if another
instance of the same task exists at a later cycle (instances of the
same task at different cycles can coexist, but a newly spawned task
will not be added to the pool if it catches up to another task with
the same ID).
See also "Submit", for running tasks without the scheduler.
''')
resolver = partial(mutator, command='insert_tasks')
class Arguments(TaskMutation.Arguments):
check_point = Boolean(
description=sstrip('''
Check that the provided cycle point is on one of the task's
recurrences as defined in the suite configuration before
inserting.
'''),
default_value=True
)
stop_point = CyclePoint(
description='hold/stop cycle point for inserted task.'
)
class Kill(Mutation, TaskMutation):
# TODO: This should be a job mutation?
class Meta:
description = sstrip('''
Kill jobs of active tasks and update their statuses accordingly.
''')
resolver = partial(mutator, command='kill_tasks')
class Poll(Mutation, TaskMutation):
class Meta:
description = sstrip('''
Poll (query) task jobs to verify and update their statuses.
''')
resolver = partial(mutator, command='poll_tasks')
class Arguments(TaskMutation.Arguments):
poll_succeeded = Boolean(
description='Allow polling of succeeded tasks.',
default_value=False
)
class Remove(Mutation, TaskMutation):
class Meta:
description = sstrip('''
Remove one or more task instances from a running workflow.
Tasks will be forced to spawn successors before removal if they
have not done so already, unless you change the `spawn` option.
''')
resolver = partial(mutator, command='remove_tasks')
class Arguments(TaskMutation.Arguments):
spawn = Boolean(
description='Spawn successors before removal.',
default_value=True
)
class Reset(Mutation, TaskMutation):
class Meta:
description = sstrip(f'''
Force task instances to a specified state.
Outputs are automatically updated to reflect the new task state,
except for custom message outputs which can be manipulated directly
with `outputs`.
Prerequisites reflect the state of other tasks; they are not
changed except to unset them on resetting state to
`{TASK_STATUS_WAITING}` or earlier.
Note: To hold and release tasks use "Hold" and "Release", not this
command.
''')
resolver = partial(mutator, command='reset_task_states')
class Arguments(TaskMutation.Arguments):
state = TaskStatus(
description='Reset the task status to this.'
)
outputs = List(
String,
description=sstrip('''
Find task output by message string or trigger string, set
complete or incomplete with `!OUTPUT`, `*` to set all
complete, `!*` to set all incomplete.
''')
)
class Spawn(Mutation, TaskMutation):
class Meta:
description = sstrip(f'''
Force task proxies to spawn successors at their own next cycle
point.
Tasks normally spawn on reaching the {TASK_STATUS_SUBMITTED}
status. Spawning them early allows running successive instances of
the same task out of order. See also the `spawn to max active
cycle points` workflow configuration.
Note this command does not operate on tasks at any arbitrary point
in the abstract workflow graph - tasks not already in the pool must
be inserted first with "Insert".
''')
resolver = partial(mutator, command='spawn_tasks')
class Trigger(Mutation, TaskMutation):
class Meta:
description = sstrip('''
Manually trigger tasks.
TODO: re-implement edit functionality!
For single tasks you can use `edit` to edit the generated job
script before it submits, to apply one-off changes. A diff between
the original and edited job script will be saved to the task job
log directory.
Warning: waiting tasks that are queue-limited will be queued if
triggered, to submit as normal when released by the queue; queued
tasks will submit immediately if triggered, even if that violates
the queue limit (so you may need to trigger a queue-limited task
twice to get it to submit immediately).
Note: tasks not already in the pool must be inserted first with
"Insert" in order to be matched.
''')
resolver = partial(mutator, command='trigger_tasks')
class Arguments(TaskMutation.Arguments):
# back_out = Boolean()
# TODO: remove or re-implement?
pass
# Mutation declarations
class Mutations(ObjectType):
# workflow actions
broadcast = Broadcast.Field(description=Broadcast._meta.description)
ext_trigger = ExtTrigger.Field(
description=ExtTrigger._meta.description)
hold = Hold.Field(description=Hold._meta.description)
nudge = Nudge.Field(description=Nudge._meta.description)
message = Message.Field(description=Message._meta.description)
ping = Ping.Field(description=Ping._meta.description)
release = Release.Field(description=Release._meta.description)
reload = Reload.Field(description=Reload._meta.description)
set_verbosity = SetVerbosity.Field(
description=SetVerbosity._meta.description)
stop = Stop.Field(description=Stop._meta.description)
checkpoint = Checkpoint.Field(
description=Checkpoint._meta.description)
# task actions
dry_run = DryRun.Field(description=DryRun._meta.description)
insert = Insert.Field(description=Insert._meta.description)
kill = Kill.Field(description=Kill._meta.description)
poll = Poll.Field(description=Poll._meta.description)
remove = Remove.Field(description=Remove._meta.description)
reset = Reset.Field(description=Reset._meta.description)
spawn = Spawn.Field(description=Spawn._meta.description)
trigger = Trigger.Field(description=Trigger._meta.description)
# job actions
# TODO
# ** Subscription Related ** #
def to_subscription(func: Callable, sleep_seconds: float = 5.) -> Callable:
"""Wraps a function in a while-true-sleep, transforming
the function into an async-generator, used by the
websockets/subscriptions.
Args:
func (Callable): a callable.
sleep_seconds (float): asyncio sleep interval in seconds.
Returns:
Callable: a callable async-generator wrapping the original callable.
"""
async def gen(*args: Any, **kwargs: Any) -> AsyncGenerator[Any, None]:
"""
Args:
*args: Variable length argument list, varies as per schema.
**kwargs: Arbitrary keyword arguments, varies as per schema.
Returns:
AsyncGenerator[Any, None]: an async generator that will
yield values from resolvers.
"""
while True:
yield await func(*args, **kwargs)
await asyncio.sleep(sleep_seconds)
return gen
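# Rough usage sketch (not from the original source): wrapping a resolver turns
# it into an async generator that re-polls on a fixed interval, e.g.
#
#   workflows_sub = to_subscription(get_workflows, sleep_seconds=2.0)
#
# which is how the subscription fields below reuse the same resolvers as the
# plain queries.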
class Subscriptions(ObjectType):
"""Defines the subscriptions available in the schema."""
class Meta:
description = """Multi-Workflow root level subscriptions."""
workflows = List(
Workflow,
description=Workflow._meta.description,
ids=List(ID, default_value=[]),
exids=List(ID, default_value=[]),
resolver=to_subscription(get_workflows))
job = Field(
Job,
description=Job._meta.description,
id=ID(required=True),
resolver=to_subscription(get_node_by_id))
jobs = List(
Job,
description=Job._meta.description,
args=all_jobs_args,
resolver=to_subscription(get_nodes_all))
task = Field(
Task,
description=Task._meta.description,
id=ID(required=True),
resolver=to_subscription(get_node_by_id))
tasks = List(
Task,
description=Task._meta.description,
args=all_def_args,
resolver=to_subscription(get_nodes_all))
task_proxy = Field(
TaskProxy,
description=TaskProxy._meta.description,
id=ID(required=True),
resolver=to_subscription(get_node_by_id))
task_proxies = List(
TaskProxy,
description=TaskProxy._meta.description,
args=all_proxy_args,
resolver=to_subscription(get_nodes_all))
family = Field(
Family,
description=Family._meta.description,
id=ID(required=True),
resolver=to_subscription(get_node_by_id))
families = List(
Family,
description=Family._meta.description,
args=all_def_args,
resolver=to_subscription(get_nodes_all))
family_proxy = Field(
FamilyProxy,
description=FamilyProxy._meta.description,
id=ID(required=True),
resolver=to_subscription(get_node_by_id))
family_proxies = List(
FamilyProxy,
description=FamilyProxy._meta.description,
args=all_proxy_args,
resolver=to_subscription(get_nodes_all))
edges = List(
Edge,
description=Edge._meta.description,
args=all_edge_args,
resolver=to_subscription(get_edges_all))
nodes_edges = Field(
NodesEdges,
description=NodesEdges._meta.description,
args=nodes_edges_args_all,
resolver=to_subscription(get_nodes_edges))
schema = Schema(query=Queries, subscription=Subscriptions, mutation=Mutations)
| gpl-3.0 | 7,159,439,198,366,642,000 | 30.371054 | 79 | 0.621867 | false |
yuanming-hu/taichi | examples/mgpcg_advanced.py | 1 | 9080 | import math
import time
import numpy as np
import taichi as ti
@ti.data_oriented
class MGPCG:
'''
Grid-based MGPCG solver for the Poisson equation.
See `examples/stable_fluid.py <https://github.com/taichi-dev/taichi/blob/master/examples/stable_fluid.py>`_ for a usage example.
.. note::
This solver only runs on CPU and CUDA backends since it requires the
``pointer`` SNode.
'''
def __init__(self, dim=2, N=512, n_mg_levels=6, real=float):
'''
:parameter dim: Dimensionality of the fields.
:parameter N: Grid resolution.
:parameter n_mg_levels: Number of multigrid levels.
'''
# grid parameters
self.use_multigrid = True
self.N = N
self.n_mg_levels = n_mg_levels
self.pre_and_post_smoothing = 2
self.bottom_smoothing = 50
self.dim = dim
self.real = real
self.N_ext = self.N // 2  # number of ext cells set so that the total grid size is still a power of 2
self.N_tot = 2 * self.N
# setup sparse simulation data arrays
self.r = [ti.field(dtype=self.real)
for _ in range(self.n_mg_levels)] # residual
self.z = [ti.field(dtype=self.real)
for _ in range(self.n_mg_levels)] # M^-1 self.r
self.x = ti.field(dtype=self.real) # solution
self.p = ti.field(dtype=self.real) # conjugate gradient
self.Ap = ti.field(dtype=self.real) # matrix-vector product
self.alpha = ti.field(dtype=self.real) # step size
self.beta = ti.field(dtype=self.real) # step size
self.sum = ti.field(dtype=self.real) # storage for reductions
indices = ti.ijk if self.dim == 3 else ti.ij
self.grid = ti.root.pointer(indices, [self.N_tot // 4]).dense(
indices, 4).place(self.x, self.p, self.Ap)
for l in range(self.n_mg_levels):
self.grid = ti.root.pointer(indices,
[self.N_tot // (4 * 2**l)]).dense(
indices,
4).place(self.r[l], self.z[l])
ti.root.place(self.alpha, self.beta, self.sum)
@ti.func
def init_r(self, I, r_I):
I = I + self.N_ext
self.r[0][I] = r_I
self.z[0][I] = 0
self.Ap[I] = 0
self.p[I] = 0
self.x[I] = 0
@ti.kernel
def init(self, r: ti.template(), k: ti.template()):
'''
Set up the solver for $\\nabla^2 x = k r$, a scaled Poisson problem.
:parameter k: (scalar) A scaling factor of the right-hand side.
:parameter r: (ti.field) Unscaled right-hand side.
'''
for I in ti.grouped(ti.ndrange(*[self.N] * self.dim)):
self.init_r(I, r[I] * k)
@ti.func
def get_x(self, I):
I = I + self.N_ext
return self.x[I]
@ti.kernel
def get_result(self, x: ti.template()):
'''
Get the solution field.
:parameter x: (ti.field) The field to store the solution
'''
for I in ti.grouped(ti.ndrange(*[self.N] * self.dim)):
x[I] = self.get_x(I)
@ti.func
def neighbor_sum(self, x, I):
ret = ti.cast(0.0, self.real)
for i in ti.static(range(self.dim)):
offset = ti.Vector.unit(self.dim, i)
ret += x[I + offset] + x[I - offset]
return ret
@ti.kernel
def compute_Ap(self):
for I in ti.grouped(self.Ap):
self.Ap[I] = 2 * self.dim * self.p[I] - self.neighbor_sum(
self.p, I)
@ti.kernel
def reduce(self, p: ti.template(), q: ti.template()):
self.sum[None] = 0
for I in ti.grouped(p):
self.sum[None] += p[I] * q[I]
@ti.kernel
def update_x(self):
for I in ti.grouped(self.p):
self.x[I] += self.alpha[None] * self.p[I]
@ti.kernel
def update_r(self):
for I in ti.grouped(self.p):
self.r[0][I] -= self.alpha[None] * self.Ap[I]
@ti.kernel
def update_p(self):
for I in ti.grouped(self.p):
self.p[I] = self.z[0][I] + self.beta[None] * self.p[I]
@ti.kernel
def restrict(self, l: ti.template()):
for I in ti.grouped(self.r[l]):
res = self.r[l][I] - (2 * self.dim * self.z[l][I] -
self.neighbor_sum(self.z[l], I))
self.r[l + 1][I // 2] += res * 0.5
@ti.kernel
def prolongate(self, l: ti.template()):
for I in ti.grouped(self.z[l]):
self.z[l][I] = self.z[l + 1][I // 2]
@ti.kernel
def smooth(self, l: ti.template(), phase: ti.template()):
# phase = red/black Gauss-Seidel phase
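# (I.sum() & 1) picks one colour of the checkerboard, so each phase only
# updates cells whose neighbours are left untouched within that phase.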
for I in ti.grouped(self.r[l]):
if (I.sum()) & 1 == phase:
self.z[l][I] = (self.r[l][I] + self.neighbor_sum(
self.z[l], I)) / (2 * self.dim)
def apply_preconditioner(self):
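# One multigrid V-cycle used as the preconditioner: smooth and restrict
# the residual down the level hierarchy, apply extra smoothing at the
# coarsest level, then prolongate and post-smooth back up.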
self.z[0].fill(0)
for l in range(self.n_mg_levels - 1):
for i in range(self.pre_and_post_smoothing << l):
self.smooth(l, 0)
self.smooth(l, 1)
self.z[l + 1].fill(0)
self.r[l + 1].fill(0)
self.restrict(l)
for i in range(self.bottom_smoothing):
self.smooth(self.n_mg_levels - 1, 0)
self.smooth(self.n_mg_levels - 1, 1)
for l in reversed(range(self.n_mg_levels - 1)):
self.prolongate(l)
for i in range(self.pre_and_post_smoothing << l):
self.smooth(l, 1)
self.smooth(l, 0)
def solve(self,
max_iters=-1,
eps=1e-12,
abs_tol=1e-12,
rel_tol=1e-12,
verbose=False):
'''
Solve a Poisson problem.
:parameter max_iters: Specify the maximum number of iterations; -1 for no limit.
:parameter eps: Specify a non-zero value to prevent ZeroDivisionError.
:parameter abs_tol: Specify the absolute tolerance of loss.
:parameter rel_tol: Specify the tolerance of loss relative to initial loss.
'''
self.reduce(self.r[0], self.r[0])
initial_rTr = self.sum[None]
tol = max(abs_tol, initial_rTr * rel_tol)
# self.r = b - Ax = b since self.x = 0
# self.p = self.r = self.r + 0 self.p
if self.use_multigrid:
self.apply_preconditioner()
else:
self.z[0].copy_from(self.r[0])
self.update_p()
self.reduce(self.z[0], self.r[0])
old_zTr = self.sum[None]
# Conjugate gradients
iter = 0
while max_iters == -1 or iter < max_iters:
# self.alpha = rTr / pTAp
self.compute_Ap()
self.reduce(self.p, self.Ap)
pAp = self.sum[None]
self.alpha[None] = old_zTr / (pAp + eps)
# self.x = self.x + self.alpha self.p
self.update_x()
# self.r = self.r - self.alpha self.Ap
self.update_r()
# check for convergence
self.reduce(self.r[0], self.r[0])
rTr = self.sum[None]
if verbose:
print(f'iter {iter}, |residual|_2={math.sqrt(rTr)}')
if rTr < tol:
break
# self.z = M^-1 self.r
if self.use_multigrid:
self.apply_preconditioner()
else:
self.z[0].copy_from(self.r[0])
# self.beta = new_rTr / old_rTr
self.reduce(self.z[0], self.r[0])
new_zTr = self.sum[None]
self.beta[None] = new_zTr / (old_zTr + eps)
# self.p = self.z + self.beta self.p
self.update_p()
old_zTr = new_zTr
iter += 1
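# Minimal usage sketch (illustrative; `rhs` and `out` stand for hypothetical
# ti.field objects matching the solver's dim and N):
#
#   solver = MGPCG(dim=2, N=512)
#   solver.init(rhs, 1.0)
#   solver.solve(max_iters=100, verbose=True)
#   solver.get_result(out)
#
# MGPCG_Example below runs the same pipeline end to end on a synthetic
# right-hand side.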
class MGPCG_Example(MGPCG):
def __init__(self):
super().__init__(dim=3, N=128, n_mg_levels=4)
self.N_gui = 512 # gui resolution
self.pixels = ti.field(dtype=float,
shape=(self.N_gui, self.N_gui)) # image buffer
@ti.kernel
def init(self):
for I in ti.grouped(ti.ndrange(*[self.N] * self.dim)):
r_I = 5.0
for k in ti.static(range(self.dim)):
r_I *= ti.cos(5 * np.pi * I[k] / self.N)
self.init_r(I, r_I)
@ti.kernel
def paint(self):
if ti.static(self.dim == 3):
kk = self.N_tot * 3 // 8
for i, j in self.pixels:
ii = int(i * self.N / self.N_gui) + self.N_ext
jj = int(j * self.N / self.N_gui) + self.N_ext
self.pixels[i, j] = self.x[ii, jj, kk] / self.N_tot
def run(self, verbose=False):
self.init()
self.solve(max_iters=400, verbose=verbose)
self.paint()
ti.imshow(self.pixels)
ti.kernel_profiler_print()
if __name__ == '__main__':
ti.init(kernel_profiler=True)
solver = MGPCG_Example()
t = time.time()
solver.run(verbose=True)
print(f'Solver time: {time.time() - t:.3f} s')
| mit | 2,937,493,401,941,731,300 | 30.527778 | 128 | 0.508921 | false |
Brunel-Visualization/Brunel | python/brunel/brunel_util.py | 1 | 1472 | # Copyright (c) 2015 IBM Corporation and others.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# If the JVM cannot be located automatically, it can be supplied via the BRUNEL_CONFIG
# environment variable (see the parsing below). The value should be the fully qualified
# path to the JVM library, typically jvm.dll on Windows or libjvm.so on Unix.
import os
JVM_PATH = ""
D3_LOC = "https://cdnjs.cloudflare.com/ajax/libs/d3/4.13.0/d3.min"
TOPO_JSON_LOC = "https://cdnjs.cloudflare.com/ajax/libs/topojson/1.6.20/topojson.min"
JS_LOC = "/nbextensions/brunel_ext"
BRUNEL_CONFIG = os.getenv("BRUNEL_CONFIG", "")
opts = BRUNEL_CONFIG.strip().split(";")
for opt in opts:
keyval = opt.strip().split("=")
if keyval[0].strip().lower() == "jvm":
JVM_PATH = keyval[1]
elif keyval[0].strip().lower() == "locd3":
D3_LOC = keyval[1]
elif keyval[0].strip().lower() == "locjavascript":
JS_LOC = keyval[1]
elif keyval[0].strip().lower() == "loctopojson":
TOPO_JSON_LOC = keyval[1]
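# Example configuration (illustrative values, not defaults): options are read from a
# single semicolon-separated environment variable, e.g.
#
#   BRUNEL_CONFIG="jvm=/usr/lib/jvm/default/lib/server/libjvm.so;locjavascript=/static/brunel_ext"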
| apache-2.0 | 7,878,519,849,403,592,000 | 39.888889 | 137 | 0.69769 | false |
scenarios/tensorflow | tensorflow/contrib/distributions/python/ops/bijector.py | 2 | 92884 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Bijector Ops.
An API for invertible, differentiable transformations of random variables.
## Background
Differentiable, bijective transformations of continuous random variables alter
the calculations made in the cumulative/probability distribution functions and
sample function. This module provides a standard interface for making these
manipulations.
For more details and examples, see the `Bijector` docstring.
To apply a `Bijector`, use `distributions.TransformedDistribution`.
## Bijectors
@@Affine
@@AffineLinearOperator
@@Bijector
@@Chain
@@CholeskyOuterProduct
@@Exp
@@Identity
@@Inline
@@Invert
@@SigmoidCentered
@@SoftmaxCentered
@@Softplus
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import contextlib
import itertools
import math
import re
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import operator_pd_cholesky
from tensorflow.contrib.distributions.python.ops import operator_pd_diag
from tensorflow.contrib.distributions.python.ops import operator_pd_identity
from tensorflow.contrib.distributions.python.ops import operator_pd_vdvt_update
from tensorflow.contrib.distributions.python.ops.shape import _DistributionShape
from tensorflow.contrib.linalg.python.ops import linear_operator
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
__all__ = [
"Affine",
"AffineLinearOperator",
"Bijector",
"Chain",
"CholeskyOuterProduct",
"Exp",
"Identity",
"Inline",
"Invert",
"PowerTransform",
"SigmoidCentered",
"SoftmaxCentered",
"Softplus",
]
def _as_tensor(x, name):
"""Convenience to convert to `Tensor` or leave as `None`."""
return None if x is None else ops.convert_to_tensor(x, name=name)
class _Mapping(collections.namedtuple("_Mapping",
["x", "y", "ildj", "condition_kwargs"])):
"""Helper class to make it easier to manage caching in `Bijector`."""
def __new__(cls, x=None, y=None, ildj=None, condition_kwargs=None):
"""Custom __new__ so namedtuple items have defaults.
Args:
x: `Tensor`. Forward.
y: `Tensor`. Inverse.
ildj: `Tensor`. Inverse log det Jacobian.
condition_kwargs: Python dictionary. Extra args supplied to
forward/inverse/etc functions.
Returns:
mapping: New instance of _Mapping.
"""
return super(_Mapping, cls).__new__(cls, x, y, ildj, condition_kwargs)
@property
def x_key(self):
"""Returns key used for caching Y=g(X)."""
return (self.x,) + self._deep_tuple(tuple(sorted(
self.condition_kwargs.items())))
@property
def y_key(self):
"""Returns key used for caching X=g^{-1}(Y)."""
return (self.y,) + self._deep_tuple(tuple(sorted(
self.condition_kwargs.items())))
def merge(self, x=None, y=None, ildj=None,
condition_kwargs=None, mapping=None):
"""Returns new _Mapping with args merged with self.
Args:
x: `Tensor`. Forward.
y: `Tensor`. Inverse.
ildj: `Tensor`. Inverse log det Jacobian.
condition_kwargs: Python dictionary. Extra args supplied to
forward/inverse/etc functions.
mapping: Instance of _Mapping to merge. Can only be specified if no other
arg is specified.
Returns:
mapping: New instance of `_Mapping` which has inputs merged with self.
Raises:
ValueError: if mapping and any other arg is not `None`.
"""
if mapping is None:
mapping = _Mapping(x=x, y=y, ildj=ildj,
condition_kwargs=condition_kwargs)
elif not all(arg is None for arg in [x, y, ildj, condition_kwargs]):
raise ValueError("Cannot specify mapping and individual args.")
return _Mapping(
x=self._merge(self.x, mapping.x),
y=self._merge(self.y, mapping.y),
ildj=self._merge(self.ildj, mapping.ildj),
condition_kwargs=self._merge(self.condition_kwargs,
mapping.condition_kwargs))
def _merge(self, old, new):
"""Helper to merge which handles merging one value."""
if old is None:
return new
elif new is not None and old != new:
raise ValueError("Incompatible values: %s != %s" % (old, new))
return old
def _deep_tuple(self, x):
"""Converts lists of lists to tuples of tuples."""
return (tuple(map(self._deep_tuple, x))
if isinstance(x, (list, tuple)) else x)
@six.add_metaclass(abc.ABCMeta)
class Bijector(object):
"""Interface for transforming a `Distribution` sample.
A `Bijector` implements a
[diffeomorphism](https://en.wikipedia.org/wiki/Diffeomorphism), i.e., a
bijective, differentiable function. A `Bijector` is used by
`TransformedDistribution` but can be generally used for transforming a
`Distribution` generated `Tensor`. A `Bijector` is characterized by three
operations:
1. Forward Evaluation
Useful for turning one random outcome into another random outcome from a
different distribution.
2. Inverse Evaluation
Useful for "reversing" a transformation to compute one probability in
terms of another.
3. (log o det o Jacobian o inverse)(x)
"The log of the determinant of the matrix of all first-order partial
derivatives of the inverse function."
Useful for inverting a transformation to compute one probability in terms
of another. Geometrically, the det(Jacobian) is the volume of the
transformation and is used to scale the probability.
By convention, transformations of random variables are named in terms of the
forward transformation. The forward transformation creates samples, the
inverse is useful for computing probabilities.
Example Use:
- Basic properties:
```python
x = ... # A tensor.
# Evaluate forward transformation.
fwd_x = my_bijector.forward(x)
x == my_bijector.inverse(fwd_x)
x != my_bijector.forward(fwd_x) # Not equal because g(x) != g(g(x)).
```
- Computing a log-likelihood:
```python
def transformed_log_pdf(bijector, log_pdf, x):
return (bijector.inverse_log_det_jacobian(x) +
log_pdf(bijector.inverse(x)))
```
- Transforming a random outcome:
```python
def transformed_sample(bijector, x):
return bijector.forward(x)
```
Example transformations:
- "Exponential"
```
Y = g(X) = exp(X)
X ~ Normal(0, 1) # Univariate.
```
Implies:
```
g^{-1}(Y) = log(Y)
|Jacobian(g^{-1})(y)| = 1 / y
Y ~ LogNormal(0, 1), i.e.,
prob(Y=y) = |Jacobian(g^{-1})(y)| * prob(X=g^{-1}(y))
= (1 / y) Normal(log(y); 0, 1)
```
Here is an example of how one might implement the `Exp` bijector:
```
class Exp(Bijector):
def __init__(self, event_ndims=0, validate_args=False, name="exp"):
super(Exp, self).__init__(batch_ndims=0, event_ndims=event_ndims,
validate_args=validate_args, name=name)
def _forward(self, x):
return math_ops.exp(x)
def _inverse_and_inverse_log_det_jacobian(self, y):
x = math_ops.log(y)
return x, -self._forward_log_det_jacobian(x)
def _forward_log_det_jacobian(self, x):
if self.shaper is None:
raise ValueError("Jacobian requires known event_ndims.")
_, _, event_dims = self.shaper.get_dims(x)
return math_ops.reduce_sum(x, reduction_indices=event_dims)
```
- "Affine"
```
Y = g(X) = sqrtSigma * X + mu
X ~ MultivariateNormal(0, I_d)
```
Implies:
```
g^{-1}(Y) = inv(sqrtSigma) * (Y - mu)
|Jacobian(g^{-1})(y)| = det(inv(sqrtSigma))
Y ~ MultivariateNormal(mu, sqrtSigma) , i.e.,
prob(Y=y) = |Jacobian(g^{-1})(y)| * prob(X=g^{-1}(y))
= det(sqrtSigma)^(-d) *
MultivariateNormal(inv(sqrtSigma) * (y - mu); 0, I_d)
```
Example of why a `Bijector` needs to understand sample, batch, event
partitioning:
- Consider the `Exp` `Bijector` applied to a `Tensor` which has sample, batch,
and event (S, B, E) shape semantics. Suppose
the `Tensor`'s partitioned-shape is `(S=[4], B=[2], E=[3, 3])`.
For `Exp`, the shape of the `Tensor` returned by `forward` and `inverse` is
unchanged, i.e., `[4, 2, 3, 3]`. However the shape returned by
`inverse_log_det_jacobian` is `[4, 2]` because the Jacobian is a reduction
over the event dimensions.
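For example, a minimal shape sketch using the `Exp` bijector defined in this
module (only the shapes are the point here):
```python
exp = Exp(event_ndims=2)
x = tf.ones([4, 2, 3, 3])            # Partitioned as (S=[4], B=[2], E=[3, 3]).
exp.forward(x)                       # shape: [4, 2, 3, 3]
exp.inverse_log_det_jacobian(x)      # shape: [4, 2]
```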
Subclass Requirements:
- Typically subclasses implement `_forward` and one or both of:
- `_inverse`, `_inverse_log_det_jacobian`,
- `_inverse_and_inverse_log_det_jacobian`.
- If the `Bijector`'s use is limited to `TransformedDistribution` (or friends
like `QuantizedDistribution`) then depending on your use, you may not need
to implement all of `_forward` and `_inverse` functions. Examples:
1. Sampling (e.g., `sample`) only requires `_forward`.
2. Probability functions (e.g., `prob`, `cdf`, `survival`) only require
`_inverse` (and related).
3. Only calling probability functions on the output of `sample` means
`_inverse` can be implemented as a cache lookup.
See `Example Use` [above] which shows how these functions are used to
transform a distribution. (Note: `_forward` could theoretically be
implemented as a cache lookup but this would require controlling the
underlying sample generation mechanism.)
- If computation can be shared among `_inverse` and
`_inverse_log_det_jacobian` it is preferable to implement
`_inverse_and_inverse_log_det_jacobian`. This usually reduces
graph-construction overhead because a `Distribution`'s implementation of
`log_prob` will need to evaluate both the inverse Jacobian as well as the
inverse function.
- If an additional use case needs just `inverse` or just
`inverse_log_det_jacobian` then it may also be worth implementing these
functions to avoid computing the `inverse_log_det_jacobian` or the
`inverse`, respectively.
- Subclasses should implement `_get_forward_event_shape`,
`_forward_event_shape` (and `inverse` counterparts) if the transformation is
shape-changing. By default the event-shape is assumed unchanged from input.
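For example, a hypothetical shape-changing bijector with vector-valued events
that grows the event dimension by one (in the spirit of `SoftmaxCentered`)
might sketch the dynamic hooks as:
```python
def _forward_event_shape(self, input_shape):
  return input_shape + 1   # event shape [k] -> [k + 1]
def _inverse_event_shape(self, output_shape):
  return output_shape - 1  # event shape [k + 1] -> [k]
```
The static `_get_forward_event_shape`/`_get_inverse_event_shape` hooks would
mirror this with `TensorShape`s.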
Tips for implementing `_inverse` and `_inverse_log_det_jacobian`:
- As case 3 [above] indicates, under some circumstances the inverse function
can be implemented as a cache lookup.
- The inverse `log o det o Jacobian` can be implemented as the negative of the
forward `log o det o Jacobian`. This is useful if the `inverse` is
implemented as a cache or the inverse Jacobian is computationally more
expensive (e.g., `CholeskyOuterProduct` `Bijector`). The following
demonstrates the suggested implementation.
```python
def _inverse_and_inverse_log_det_jacobian(self, y):
x = # ... implement inverse, possibly via cache.
return x, -self._forward_log_det_jacobian(x)  # Note negation.
```
By overriding the `_inverse_and_inverse_log_det_jacobian` function we have access to
the inverse in one call.
The correctness of this approach can be seen from the following claim.
- Claim:
Assume `Y=g(X)` is a bijection whose derivative exists and is nonzero
for its domain, i.e., `d/dX g(X)!=0`. Then:
```none
(log o det o jacobian o g^{-1})(Y) = -(log o det o jacobian o g)(X)
```
- Proof:
From the bijective, nonzero differentiability of `g`, the
[inverse function theorem](
https://en.wikipedia.org/wiki/Inverse_function_theorem)
implies `g^{-1}` is differentiable in the image of `g`.
Applying the chain rule to `y = g(x) = g(g^{-1}(y))` yields
`I = g'(g^{-1}(y))*g^{-1}'(y)`.
The same theorem also implies `g^{-1}'` is non-singular, therefore:
`inv[ g'(g^{-1}(y)) ] = g^{-1}'(y)`.
The claim follows from [properties of determinant](
https://en.wikipedia.org/wiki/Determinant#Multiplicativity_and_matrix_groups).
- If possible, prefer a direct implementation of the inverse Jacobian. This
should have superior numerical stability and will often share subgraphs with
the `_inverse` implementation.
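As a quick check of the claim using the `Exp` sketch above: for `y = exp(x)`
the forward log det Jacobian is `sum(x)` over the event dimensions, while the
inverse log det Jacobian is `-sum(log(y)) = -sum(x)`, i.e., exactly the
negation.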
"""
@abc.abstractmethod
def __init__(self,
batch_ndims=None,
event_ndims=None,
graph_parents=None,
is_constant_jacobian=False,
validate_args=False,
dtype=None,
name=None):
"""Constructs Bijector.
A `Bijector` transforms random variables into new random variables.
Examples:
```python
# Create the Y = g(X) = X transform which operates on 4-Tensors of vectors.
identity = Identity(batch_ndims=4, event_ndims=1)
# Create the Y = g(X) = exp(X) transform which operates on matrices.
exp = Exp(batch_ndims=0, event_ndims=2)
```
See `Bijector` subclass docstring for more details and specific examples.
Args:
batch_ndims: number of dimensions associated with batch coordinates.
event_ndims: number of dimensions associated with event coordinates.
graph_parents: Python list of graph prerequisites of this `Bijector`.
is_constant_jacobian: `Boolean` indicating that the Jacobian is not a
function of the input.
validate_args: `Boolean`, default `False`. Whether to validate input with
asserts. If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
dtype: `tf.dtype` supported by this `Bijector`. `None` means dtype is not
enforced.
name: The name to give Ops created by the initializer.
"""
if batch_ndims is None or event_ndims is None:
self._shaper = None # Apparently subclass will create.
else:
self._shaper = _DistributionShape(
batch_ndims=batch_ndims,
event_ndims=event_ndims,
validate_args=validate_args)
self._graph_parents = graph_parents or []
self._is_constant_jacobian = is_constant_jacobian
self._validate_args = validate_args
self._dtype = dtype
self._from_y = {}
self._from_x = {}
# Using abbreviation ildj for "inverse log det Jacobian."
# This variable is not `None` iff is_constant_jacobian is `True`.
self._constant_ildj = None
if name:
self._name = name
else:
# We want the default convention to be snake_case rather than CamelCase
# since `Chain` uses bijector.name as the condition_kwargs dictionary key.
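# For example, "CholeskyOuterProduct" becomes "cholesky_outer_product".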
def camel_to_snake(name):
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
self._name = camel_to_snake(type(self).__name__)
@property
def shaper(self):
"""Returns shape object used to manage shape constraints."""
return self._shaper
@property
def graph_parents(self):
"""Returns this `Bijector`'s graph_parents as a Python list."""
return self._graph_parents
@property
def is_constant_jacobian(self):
"""Returns true iff the Jacobian is not a function of x.
Note: Jacobian is either constant for both forward and inverse or neither.
Returns:
`Boolean`.
"""
return self._is_constant_jacobian
@property
def validate_args(self):
"""Returns True if Tensor arguments will be validated."""
return self._validate_args
@property
def dtype(self):
"""dtype of `Tensor`s transformable by this distribution."""
return self._dtype
@property
def name(self):
"""Returns the string name of this `Bijector`."""
return self._name
def _forward_event_shape(self, input_shape):
"""Subclass implementation for `forward_event_shape` public function."""
return input_shape
def forward_event_shape(self, input_shape, name="forward_event_shape"):
"""Shape of a single sample from a single batch as an `int32` 1D `Tensor`.
Args:
input_shape: `Tensor`, `int32` vector indicating event-portion shape
passed into `forward` function.
name: name to give to the op
Returns:
forward_event_shape: `Tensor`, `int32` vector indicating event-portion
shape after applying `forward`.
"""
with self._name_scope(name, [input_shape]):
input_shape = ops.convert_to_tensor(input_shape, dtype=dtypes.int32,
name="input_shape")
return self._forward_event_shape(input_shape)
def _get_forward_event_shape(self, input_shape):
"""Subclass implementation for `get_forward_event_shape` public function."""
return input_shape
def get_forward_event_shape(self, input_shape):
"""Shape of a single sample from a single batch as a `TensorShape`.
Same meaning as `forward_event_shape`. May be only partially defined.
Args:
input_shape: `TensorShape` indicating event-portion shape passed into
`forward` function.
Returns:
forward_event_shape: `TensorShape` indicating event-portion shape after
applying `forward`. Possibly unknown.
"""
return self._get_forward_event_shape(tensor_shape.TensorShape(input_shape))
def _inverse_event_shape(self, output_shape):
"""Subclass implementation for `inverse_event_shape` public function."""
return output_shape
def inverse_event_shape(self, output_shape, name="inverse_event_shape"):
"""Shape of a single sample from a single batch as an `int32` 1D `Tensor`.
Args:
output_shape: `Tensor`, `int32` vector indicating event-portion shape
passed into `inverse` function.
name: name to give to the op
Returns:
inverse_event_shape: `Tensor`, `int32` vector indicating event-portion
shape after applying `inverse`.
"""
with self._name_scope(name, [output_shape]):
output_shape = ops.convert_to_tensor(output_shape, dtype=dtypes.int32,
name="output_shape")
return self._inverse_event_shape(output_shape)
def _get_inverse_event_shape(self, output_shape):
"""Subclass implementation for `get_inverse_event_shape` public function."""
return output_shape
def get_inverse_event_shape(self, output_shape):
"""Shape of a single sample from a single batch as a `TensorShape`.
Same meaning as `inverse_event_shape`. May be only partially defined.
Args:
output_shape: `TensorShape` indicating event-portion shape passed into
`inverse` function.
Returns:
inverse_event_shape: `TensorShape` indicating event-portion shape after
applying `inverse`. Possibly unknown.
"""
return self._get_inverse_event_shape(tensor_shape.TensorShape(output_shape))
def _forward(self, x):
"""Subclass implementation for `forward` public function."""
raise NotImplementedError("forward not implemented.")
def forward(self, x, name="forward", **condition_kwargs):
"""Returns the forward `Bijector` evaluation, i.e., X = g(Y).
Args:
x: `Tensor`. The input to the "forward" evaluation.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
`Tensor`.
Raises:
TypeError: if `self.dtype` is specified and `x.dtype` is not
`self.dtype`.
NotImplementedError: if `_forward` is not implemented.
"""
with self._name_scope(name, [x]):
x = ops.convert_to_tensor(x, name="x")
self._maybe_assert_dtype(x)
mapping = self._lookup(x=x, condition_kwargs=condition_kwargs)
if mapping.y is not None:
return mapping.y
mapping = mapping.merge(y=self._forward(x, **condition_kwargs))
self._cache(mapping)
return mapping.y
def _inverse(self, y):
"""Subclass implementation for `inverse` public function."""
raise NotImplementedError("inverse not implemented")
def inverse(self, y, name="inverse", **condition_kwargs):
"""Returns the inverse `Bijector` evaluation, i.e., X = g^{-1}(Y).
Args:
y: `Tensor`. The input to the "inverse" evaluation.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
`Tensor`.
Raises:
TypeError: if `self.dtype` is specified and `y.dtype` is not
`self.dtype`.
NotImplementedError: if neither `_inverse` nor
`_inverse_and_inverse_log_det_jacobian` are implemented.
"""
with self._name_scope(name, [y]):
y = ops.convert_to_tensor(y, name="y")
self._maybe_assert_dtype(y)
mapping = self._lookup(y=y, condition_kwargs=condition_kwargs)
if mapping.x is not None:
return mapping.x
ildj = None
try:
x = self._inverse(y, **condition_kwargs)
except NotImplementedError as original_error:
# Since _inverse was not implemented, try to see if it's implemented
# by the _inverse_and_inverse_log_det_jacobian member.
try:
x, ildj = self._inverse_and_inverse_log_det_jacobian(
y, **condition_kwargs)
except NotImplementedError:
raise original_error
if self._constant_ildj is not None:
ildj = self._constant_ildj # Use the "global" result.
elif self.is_constant_jacobian:
self._constant_ildj = ildj
x = x if mapping.x is None else mapping.x
mapping = mapping.merge(x=x, ildj=ildj)
self._cache(mapping)
return mapping.x
def _inverse_log_det_jacobian(self, y):
"""Subclass implementation for `inverse_log_det_jacobian` public function.""" # pylint: disable=line-too-long
raise NotImplementedError("inverse_log_det_jacobian not implemented.")
def inverse_log_det_jacobian(
self, y, name="inverse_log_det_jacobian", **condition_kwargs):
"""Returns the (log o det o Jacobian o inverse)(y).
Mathematically, returns: `log(det(dX/dY))(Y)`. (Recall that: `X=g^{-1}(Y)`.)
Note that `forward_log_det_jacobian` is the negative of this function.
Args:
y: `Tensor`. The input to the "inverse" Jacobian evaluation.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
`Tensor`.
Raises:
TypeError: if `self.dtype` is specified and `y.dtype` is not
`self.dtype`.
NotImplementedError: if neither `_inverse_log_det_jacobian` nor
`_inverse_and_inverse_log_det_jacobian` are implemented.
"""
with self._name_scope(name, [y]):
if self._constant_ildj is not None:
return self._constant_ildj
y = ops.convert_to_tensor(y, name="y")
self._maybe_assert_dtype(y)
mapping = self._lookup(y=y, condition_kwargs=condition_kwargs)
if mapping.ildj is not None:
return mapping.ildj
try:
x = mapping.x
ildj = self._inverse_log_det_jacobian(y, **condition_kwargs)
except NotImplementedError as original_error:
# Since _inverse_log_det_jacobian was not implemented, try to see if
# it's implemented by the _inverse_and_inverse_log_det_jacobian member.
try:
x, ildj = self._inverse_and_inverse_log_det_jacobian(
y, **condition_kwargs)
except NotImplementedError:
raise original_error
if mapping.x is not None:
x = mapping.x
if self.is_constant_jacobian:
self._constant_ildj = ildj
x = x if mapping.x is None else mapping.x
mapping = mapping.merge(x=x, ildj=ildj)
self._cache(mapping)
return mapping.ildj
def _inverse_and_inverse_log_det_jacobian(self, y):
"""Subclass implementation for `inverse_and_inverse_log_det_jacobian` public function.""" # pylint: disable=line-too-long
raise NotImplementedError(
"inverse_and_inverse_log_det_jacobian not implemented.")
def inverse_and_inverse_log_det_jacobian(
self, y, name="inverse_and_inverse_log_det_jacobian", **condition_kwargs):
"""Returns both the inverse evaluation and inverse_log_det_jacobian.
Enables possibly more efficient calculation when both inverse and
corresponding Jacobian are needed.
See `inverse()`, `inverse_log_det_jacobian()` for more details.
Args:
y: `Tensor`. The input to the "inverse" Jacobian evaluation.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
`Tensor`.
Raises:
TypeError: if `self.dtype` is specified and `y.dtype` is not
`self.dtype`.
NotImplementedError: if neither `_inverse_and_inverse_log_det_jacobian`
nor {`_inverse`, `_inverse_log_det_jacobian`} are implemented.
"""
with self._name_scope(name, [y]):
y = ops.convert_to_tensor(y, name="y")
self._maybe_assert_dtype(y)
mapping = self._lookup(y=y, condition_kwargs=condition_kwargs)
if mapping.x is not None and mapping.ildj is not None:
return mapping.x, mapping.ildj
try:
x, ildj = self._inverse_and_inverse_log_det_jacobian(
y, **condition_kwargs)
except NotImplementedError as original_error:
# Since _inverse_and_inverse_log_det_jacobian was not implemented, try
# to see if we can separately use _inverse and
# _inverse_log_det_jacobian members.
try:
# We want this same try/except to catch either NotImplementedError.
x = self._inverse(y, **condition_kwargs)
if self._constant_ildj is None:
ildj = self._inverse_log_det_jacobian(y, **condition_kwargs)
except NotImplementedError:
raise original_error
if self._constant_ildj is not None:
ildj = self._constant_ildj # Ignore any ildj we may/not have.
elif self.is_constant_jacobian:
self._constant_ildj = ildj
# We use the mapped version of x, even if we re-computed x above with a
# call to self._inverse_and_inverse_log_det_jacobian. This prevents
# re-evaluation of the inverse in a common case.
x = x if mapping.x is None else mapping.x
mapping = mapping.merge(x=x, ildj=ildj)
self._cache(mapping)
return mapping.x, mapping.ildj
def _forward_log_det_jacobian(self, x):
"""Subclass implementation for `forward_log_det_jacobian` public function.""" # pylint: disable=line-too-long
raise NotImplementedError(
"forward_log_det_jacobian not implemented.")
def forward_log_det_jacobian(
self, x, name="forward_log_det_jacobian", **condition_kwargs):
"""Returns both the forward_log_det_jacobian.
Args:
x: `Tensor`. The input to the "forward" Jacobian evaluation.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
`Tensor`.
Raises:
TypeError: if `self.dtype` is specified and `x.dtype` is not
`self.dtype`.
NotImplementedError: if neither `_forward_log_det_jacobian`
nor {`_inverse`, `_inverse_log_det_jacobian`} are implemented.
"""
with self._name_scope(name, [x]):
if self._constant_ildj is not None:
# Need "-1. *" to avoid invalid-unary-operand-type linter warning.
return -1. * self._constant_ildj
x = ops.convert_to_tensor(x, name="x")
self._maybe_assert_dtype(x)
mapping = self._lookup(x=x, condition_kwargs=condition_kwargs)
if mapping.ildj is not None:
return -mapping.ildj
y = None
try:
ildj = -self._forward_log_det_jacobian(x, **condition_kwargs)
except NotImplementedError as original_error:
try:
# We want this same try/except to catch either NotImplementedError.
y = self.inverse(x, **condition_kwargs) if y is None else y
ildj = self.inverse_log_det_jacobian(y, **condition_kwargs)
except NotImplementedError:
raise original_error
if self.is_constant_jacobian:
self._constant_ildj = ildj
y = y if mapping.y is None else mapping.y
mapping = mapping.merge(y=y, ildj=ildj)
self._cache(mapping)
return -mapping.ildj
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(
name, values=(values or []) + self.graph_parents) as scope:
yield scope
def _maybe_assert_dtype(self, x):
"""Helper to check dtype when self.dtype is known."""
if self.dtype is not None and self.dtype.base_dtype != x.dtype.base_dtype:
raise TypeError("Input had dtype %s but expected %s." %
(self.dtype, x.dtype))
def _cache(self, mapping):
"""Helper which stores mapping info in forward/inverse dicts."""
if self._constant_ildj is not None:
# Fold in ildj if known constant Jacobian.
mapping = mapping.merge(ildj=self._constant_ildj)
# Merging from lookup is an added check that we're not overwriting anything
# which is not None.
mapping = mapping.merge(mapping=self._lookup(
mapping.x, mapping.y, mapping.condition_kwargs))
if mapping.x is None and mapping.y is None:
raise ValueError("Caching expects at least one of (x,y) to be known, "
"i.e., not None.")
self._from_x[mapping.x_key] = mapping
self._from_y[mapping.y_key] = mapping
def _lookup(self, x=None, y=None, condition_kwargs=None):
"""Helper which retrieves mapping info from forward/inverse dicts."""
mapping = _Mapping(x=x, y=y, condition_kwargs=condition_kwargs)
# Since _cache requires both x,y to be set, we only need to do one cache
# lookup since the mapping is always in both or neither.
if mapping.x is not None:
return self._from_x.get(mapping.x_key, mapping)
if mapping.y is not None:
return self._from_y.get(mapping.y_key, mapping)
return mapping
class Inline(Bijector):
# pylint: disable=line-too-long
"""Bijector constructed from callables implementing forward, inverse, and inverse_log_det_jacobian.
Example Use:
```python
exp = Inline(
forward_fn=tf.exp,
inverse_fn=tf.log,
inverse_log_det_jacobian_fn=(
lambda y: -tf.reduce_sum(tf.log(y), reduction_indices=-1)),
name="exp")
```
The above example is equivalent to the `Bijector` `Exp(event_ndims=1)`.
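A rough usage sketch (the commented values are what evaluation would yield):
```python
y = exp.forward([0., 1.])               # ==> [1., 2.71828...]
x = exp.inverse(y)                      # ==> [0., 1.]
ildj = exp.inverse_log_det_jacobian(y)  # ==> -(log(1.) + log(e)) = -1.
```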
"""
# pylint: enable=line-too-long
def __init__(self,
forward_fn=None,
inverse_fn=None,
inverse_log_det_jacobian_fn=None,
forward_log_det_jacobian_fn=None,
get_forward_event_shape_fn=None,
forward_event_shape_fn=None,
get_inverse_event_shape_fn=None,
inverse_event_shape_fn=None,
is_constant_jacobian=False,
validate_args=False,
name="inline"):
"""Creates a `Bijector` from callables.
Args:
forward_fn: Python callable implementing the forward transformation.
inverse_fn: Python callable implementing the inverse transformation.
inverse_log_det_jacobian_fn: Python callable implementing the
log o det o jacobian of the inverse transformation.
forward_log_det_jacobian_fn: Python callable implementing the
log o det o jacobian of the forward transformation.
get_forward_event_shape_fn: Python callable implementing non-identical
static event shape changes. Default: shape is assumed unchanged.
forward_event_shape_fn: Python callable implementing non-identical event
shape changes. Default: shape is assumed unchanged.
get_inverse_event_shape_fn: Python callable implementing non-identical
static event shape changes. Default: shape is assumed unchanged.
inverse_event_shape_fn: Python callable implementing non-identical event
shape changes. Default: shape is assumed unchanged.
is_constant_jacobian: `Boolean` indicating that the Jacobian is constant
for all input arguments.
validate_args: `Boolean` indicating whether arguments should be checked
for correctness.
name: `String`, name given to ops managed by this object.
"""
super(Inline, self).__init__(
batch_ndims=0,
event_ndims=0,
is_constant_jacobian=is_constant_jacobian,
validate_args=validate_args,
name=name)
self._forward_fn = forward_fn
self._inverse_fn = inverse_fn
self._inverse_log_det_jacobian_fn = inverse_log_det_jacobian_fn
self._forward_log_det_jacobian_fn = forward_log_det_jacobian_fn
self._get_forward_event_shape_fn = get_forward_event_shape_fn
self._forward_event_shape_fn = forward_event_shape_fn
self._get_inverse_event_shape_fn = get_inverse_event_shape_fn
self._inverse_event_shape_fn = inverse_event_shape_fn
def _get_forward_event_shape(self, input_shape):
if self._get_forward_event_shape_fn is None:
# By default assume shape doesn't change.
return input_shape
return self._get_forward_event_shape_fn(input_shape)
def _forward_event_shape(self, input_shape):
if self._forward_event_shape_fn is None:
# By default assume shape doesn't change.
return input_shape
return self._forward_event_shape_fn(input_shape)
def _get_inverse_event_shape(self, output_shape):
if self._get_inverse_event_shape_fn is None:
# By default assume shape doesn't change.
return output_shape
return self._get_inverse_event_shape_fn(output_shape)
def _inverse_event_shape(self, output_shape):
if self._inverse_event_shape_fn is None:
# By default assume shape doesn't change.
return output_shape
return self._inverse_event_shape_fn(output_shape)
def _forward(self, x, **condition_kwargs):
if not callable(self._forward_fn):
raise NotImplementedError(
"forward_fn is not a callable function.")
return self._forward_fn(x, **condition_kwargs)
def _inverse(self, y, **condition_kwargs):
if not callable(self._inverse_fn):
raise NotImplementedError(
"inverse_fn is not a callable function.")
return self._inverse_fn(y, **condition_kwargs)
def _inverse_log_det_jacobian(self, y, **condition_kwargs):
if not callable(self._inverse_log_det_jacobian_fn):
raise NotImplementedError(
"inverse_log_det_jacobian_fn is not a callable function.")
return self._inverse_log_det_jacobian_fn(y, **condition_kwargs)
def _forward_log_det_jacobian(self, y, **condition_kwargs):
if not callable(self._forward_log_det_jacobian_fn):
raise NotImplementedError(
"forward_log_det_jacobian_fn is not a callable function.")
return self._forward_log_det_jacobian_fn(y, **condition_kwargs)
class Invert(Bijector):
"""Bijector which inverts another Bijector.
Example Use: [ExpGammaDistribution (see Background & Context)](
https://reference.wolfram.com/language/ref/ExpGammaDistribution.html)
models `Y=log(X)` where `X ~ Gamma`.
```python
exp_gamma_distribution = TransformedDistribution(
Gamma(alpha=1., beta=2.),
bijector.Invert(bijector.Exp()))
```
"""
def __init__(self, bijector, validate_args=False, name=None):
"""Creates a `Bijector` which swaps the meaning of `inverse` and `forward`.
Note: An inverted bijector's `inverse_log_det_jacobian` is often more
efficient if the base bijector implements `_forward_log_det_jacobian`. If
`_forward_log_det_jacobian` is not implemented then the following code is
used:
```python
y = self.inverse(x, **condition_kwargs)
return -self.inverse_log_det_jacobian(y, **condition_kwargs)
```
Args:
bijector: Bijector instance.
validate_args: `Boolean` indicating whether arguments should be checked
for correctness.
name: `String`, name given to ops managed by this object.
"""
self._bijector = bijector
super(Invert, self).__init__(
graph_parents=bijector.graph_parents,
is_constant_jacobian=bijector.is_constant_jacobian,
validate_args=validate_args,
dtype=bijector.dtype,
name=name or "_".join(["invert", bijector.name]))
self._shaper = bijector.shaper
def _get_forward_event_shape(self, input_shape):
return self.bijector.get_inverse_event_shape(input_shape)
def _forward_event_shape(self, input_shape):
return self.bijector.inverse_event_shape(input_shape)
def _get_inverse_event_shape(self, output_shape):
return self.bijector.get_forward_event_shape(output_shape)
def _inverse_event_shape(self, output_shape):
return self.bijector.forward_event_shape(output_shape)
@property
def bijector(self):
return self._bijector
def _forward(self, x, **condition_kwargs):
return self.bijector.inverse(x, **condition_kwargs)
def _inverse_and_inverse_log_det_jacobian(self, y, **condition_kwargs):
return (self.bijector.forward(y, **condition_kwargs),
self.bijector.forward_log_det_jacobian(y, **condition_kwargs))
def _forward_log_det_jacobian(self, x, **condition_kwargs):
return self.bijector.inverse_log_det_jacobian(x, **condition_kwargs)
class Chain(Bijector):
"""Bijector which applies a sequence of bijectors.
Example Use:
```python
chain = Chain([Exp(), Softplus()], name="one_plus_exp")
```
Results in:
* Forward:
```python
exp = Exp()
softplus = Softplus()
Chain([exp, softplus]).forward(x)
= exp.forward(softplus.forward(x))
= tf.exp(tf.log(1. + tf.exp(x)))
= 1. + tf.exp(x)
```
* Inverse:
```python
exp = Exp()
softplus = Softplus()
Chain([exp, softplus]).inverse(y)
= softplus.inverse(exp.inverse(y))
= tf.log(tf.exp(tf.log(y)) - 1.)
= tf.log(y - 1.)
```
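Extra keyword arguments passed to `forward`/`inverse` are dispatched to the
constituent bijectors keyed by `bijector.name`. A sketch, assuming a
hypothetical conditional bijector whose `name` is `"shift"` and which accepts
an `offset` argument:
```python
chain = Chain([Exp(), shift_bijector])
y = chain.forward(x, shift={"offset": 2.})  # `offset` reaches shift_bijector.
```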
"""
def __init__(self, bijectors=(), validate_args=False, name=None):
"""Instantiates `Chain` bijector.
Args:
bijectors: Python list of bijector instances. An empty list makes this
bijector equivalent to the `Identity` bijector.
validate_args: `Boolean` indicating whether arguments should be checked
for correctness.
name: `String`, name given to ops managed by this object. Default: E.g.,
`Chain([Exp(), Softplus()]).name == "chain_of_exp_of_softplus"`.
Raises:
ValueError: if bijectors have different dtypes.
"""
self._bijectors = bijectors
dtype = list(set([b.dtype for b in bijectors]))
if len(dtype) > 2:
raise ValueError("incompatible dtypes: %s" % dtype)
elif len(dtype) == 2:
dtype = dtype[1] if dtype[0] is None else dtype[0]
elif len(dtype) == 1:
dtype = dtype[0]
else:
dtype = None
super(Chain, self).__init__(
graph_parents=list(itertools.chain.from_iterable(
b.graph_parents for b in bijectors)),
is_constant_jacobian=all(b.is_constant_jacobian for b in bijectors),
validate_args=validate_args,
dtype=dtype,
name=name or ("identity" if not bijectors else
"_of_".join(["chain"] + [b.name for b in bijectors])))
@property
def bijectors(self):
return self._bijectors
def _shape_helper(self, func_name, input_shape, reverse):
new_shape = input_shape
for b in reversed(self.bijectors) if reverse else self.bijectors:
func = getattr(b, func_name, None)
if func is None:
raise ValueError("unable to call %s on bijector %s (%s)" %
(func_name, b.name, func))
new_shape = func(new_shape)
return new_shape
def _get_forward_event_shape(self, input_shape):
return self._shape_helper("get_forward_event_shape", input_shape,
reverse=True)
def _forward_event_shape(self, input_shape):
return self._shape_helper("forward_event_shape", input_shape, reverse=True)
def _get_inverse_event_shape(self, output_shape):
return self._shape_helper("get_inverse_event_shape", output_shape,
reverse=False)
def _inverse_event_shape(self, output_shape):
return self._shape_helper("inverse_event_shape", output_shape,
reverse=False)
def _forward(self, x, **condition_kwargs):
y = x
for b in reversed(self.bijectors):
y = b.forward(y, **condition_kwargs.get(b.name, {}))
return y
def _inverse_and_inverse_log_det_jacobian(self, y, **condition_kwargs):
x = y
ildj = constant_op.constant(0., dtype=x.dtype,
name="inverse_log_det_jacobian")
for b in self.bijectors:
x, j = b.inverse_and_inverse_log_det_jacobian(
x, **condition_kwargs.get(b.name, {}))
ildj += j
return x, ildj
def _forward_log_det_jacobian(self, x, **condition_kwargs):
y = x
fldj = constant_op.constant(0., dtype=x.dtype,
name="forward_log_det_jacobian")
for b in reversed(self.bijectors):
bijector_condition_kwargs = condition_kwargs.get(b.name, {})
fldj += b.forward_log_det_jacobian(y, **bijector_condition_kwargs)
y = b.forward(y, **bijector_condition_kwargs)
return fldj
class Identity(Bijector):
"""Bijector which computes Y = g(X) = X.
Example Use:
```python
# Create the Y=g(X)=X transform.
identity = Identity()
x = [[1., 2],
[3, 4]]
x == identity.forward(x) == identity.inverse(x)
```
"""
def __init__(self, validate_args=False, name="identity"):
super(Identity, self).__init__(
is_constant_jacobian=True,
validate_args=validate_args,
name=name)
def _forward(self, x):
return x
def _inverse_and_inverse_log_det_jacobian(self, y):
return y, constant_op.constant(0., dtype=y.dtype)
def _forward_log_det_jacobian(self, x):
return constant_op.constant(0., dtype=x.dtype)
class PowerTransform(Bijector):
"""Bijector which computes `Y = g(X) = (1 + X * c)**(1 / c), X >= -1 / c`.
The [power transform](https://en.wikipedia.org/wiki/Power_transform) maps
inputs from `[0, inf]` to `[-1/c, inf]`; this is equivalent to the `inverse`
of this bijector.
This bijector is equivalent to the `Exp` bijector when `c=0`.
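Example Use (a sketch; the commented values are the expected evaluations):
```python
# With power c = 2: Y = g(X) = (1 + 2 * X)**(1 / 2).
pt = PowerTransform(power=2.)
pt.forward(4.)   # ==> 3.  since (1 + 2 * 4)**0.5 = 3
pt.inverse(3.)   # ==> 4.  since (3**2 - 1) / 2 = 4
```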
"""
def __init__(self,
power=0.,
event_ndims=0,
validate_args=False,
name="power_transform"):
"""Instantiates the `PowerTransform` bijector.
Args:
power: Python `float` scalar indicating the transform power, i.e.,
`Y = g(X) = (1 + X * c)**(1 / c)` where `c` is the `power`.
event_ndims: Python scalar indicating the number of dimensions associated
with a particular draw from the distribution.
validate_args: `Boolean` indicating whether arguments should be checked
for correctness.
name: `String` name given to ops managed by this object.
Raises:
ValueError: if `power < 0` or is not known statically.
"""
self._graph_parents = []
self._name = name
self._validate_args = validate_args
with self._name_scope("init", values=[power]):
power = tensor_util.constant_value(
ops.convert_to_tensor(power, name="power"))
if power is None or power < 0:
raise ValueError("`power` must be a non-negative TF constant.")
self._power = power
super(PowerTransform, self).__init__(
batch_ndims=0,
event_ndims=event_ndims,
validate_args=validate_args,
name=name)
@property
def power(self):
"""The `c` in: `Y = g(X) = (1 + X * c)**(1 / c)`."""
return self._power
def _forward(self, x):
x = self._maybe_assert_valid_x(x)
if self.power == 0.:
return math_ops.exp(x)
# TODO(jvdillon): If large x accuracy is an issue, consider using
# (1. + x * self.power)**(1. / self.power) when x >> 1.
return math_ops.exp(math_ops.log1p(x * self.power) / self.power)
def _inverse_and_inverse_log_det_jacobian(self, y):
y = self._maybe_assert_valid_y(y)
if self.shaper is None:
raise ValueError("Jacobian cannot be computed with unknown event_ndims")
_, _, event_dims = self.shaper.get_dims(y)
if self.power == 0.:
x = math_ops.log(y)
ildj = -math_ops.reduce_sum(x, reduction_indices=event_dims)
return x, ildj
# TODO(jvdillon): If large y accuracy is an issue, consider using
# (y**self.power - 1.) / self.power when y >> 1.
x = math_ops.expm1(math_ops.log(y) * self.power) / self.power
ildj = (self.power - 1.) * math_ops.reduce_sum(
math_ops.log(y),
reduction_indices=event_dims)
return x, ildj
def _forward_log_det_jacobian(self, x):
x = self._maybe_assert_valid_x(x)
if self.shaper is None:
raise ValueError("Jacobian cannot be computed with unknown event_ndims")
_, _, event_dims = self.shaper.get_dims(x)
if self.power == 0.:
return math_ops.reduce_sum(x, reduction_indices=event_dims)
return (1. / self.power - 1.) * math_ops.reduce_sum(
math_ops.log1p(x * self.power),
reduction_indices=event_dims)
def _maybe_assert_valid_x(self, x):
if not self.validate_args or self.power == 0.:
return x
is_valid = check_ops.assert_non_negative(
1. + self.power * x,
message="Forward transformation input must be at least {}.".format(
-1. / self.power))
return control_flow_ops.with_dependencies([is_valid], x)
def _maybe_assert_valid_y(self, y):
if not self.validate_args:
return y
is_valid = check_ops.assert_positive(
y, message="Inverse transformation input must be greater than 0.")
return control_flow_ops.with_dependencies([is_valid], y)
class Exp(PowerTransform):
"""Bijector which computes Y = g(X) = exp(X).
Example Use:
```python
# Create the Y=g(X)=exp(X) transform which treats the last two dimensions
# of its input as the event (i.e., a batch of matrices).
exp = Exp(event_ndims=2)
x = [[[1., 2],
[3, 4]],
[[5, 6],
[7, 8]]]
exp(x) == exp.forward(x)
log(x) == exp.inverse(x)
```
Note: the exp(.) is applied element-wise but the Jacobian is a reduction
over the event space.
"""
def __init__(self,
event_ndims=0,
validate_args=False,
name="exp"):
"""Instantiates the `Exp` bijector.
Args:
event_ndims: Scalar `int32` `Tensor` indicating the number of dimensions
associated with a particular draw from the distribution.
validate_args: `Boolean` indicating whether arguments should be checked
for correctness.
name: `String` name given to ops managed by this object.
"""
super(Exp, self).__init__(
event_ndims=event_ndims,
validate_args=validate_args,
name=name)
# TODO(srvasude): Deprecate this class with a dedicated Linear Operator
# corresponding to TriL + V D V.T.
class _TriLPlusVDVTLightweightOperatorPD(object):
"""Helper/hidden class fake an OperatorPD for TriL+VDV.T."""
def __init__(self, tril, v, diag=None, validate_args=False):
"""Creates an instance of _TriLPlusVDVTLightweightOperatorPD.
WARNING: This object is not to be used outside of `Affine` where it is
currently being temporarily used for refactoring purposes.
Args:
tril: `Tensor` of shape `[B1,..,Bb, d, d]`.
v: `Tensor` of shape `[B1,...,Bb, d, k]`.
diag: `Tensor` of shape `[B1,...,Bb, k, k]` or None
validate_args: `Boolean` indicating whether arguments should be checked
for correctness.
"""
self._m = tril
self._v = v
self._validate_args = validate_args
self._inputs = [tril, v]
if diag is not None:
self._inputs += [diag]
self._d = operator_pd_diag.OperatorPDDiag(diag, verify_pd=validate_args)
self._d_inv = operator_pd_diag.OperatorPDDiag(1. / diag,
verify_pd=validate_args)
return
if v.get_shape().is_fully_defined():
v_shape = v.get_shape().as_list()
id_shape = v_shape[:-2] + [v_shape[-1], v_shape[-1]]
else:
v_shape = array_ops.shape(v)
id_shape = array_ops.concat([v_shape[:-2], [v_shape[-1], v_shape[-1]]], 0)
self._d = operator_pd_identity.OperatorPDIdentity(
id_shape, v.dtype, verify_pd=self.validate_args)
self._d_inv = self._d
@property
def inputs(self):
return self._inputs
@property
def dtype(self):
return self._m.dtype.base_dtype
@property
def validate_args(self):
return self._validate_args
def rank(self):
"""Returns `rank(self)`."""
return array_ops.rank(self._m)
def sqrt_matmul(self, x):
"""Computes `matmul(self, x)`.
Doesn't actually do the sqrt! Named as such to agree with API.
Args:
x: `Tensor`
Returns:
self_times_x: `Tensor`
"""
m_x = math_ops.matmul(self._m, x)
vt_x = math_ops.matmul(self._v, x, adjoint_a=True)
d_vt_x = self._d.matmul(vt_x)
v_d_vt_x = math_ops.matmul(self._v, d_vt_x)
return m_x + v_d_vt_x
def sqrt_solve(self, x):
"""Computes `solve(self, x)`.
Doesn't actually do the sqrt! Named as such to agree with API.
To compute inv(M + V D V.T), we use the Woodbury matrix identity:
inv(M + V D V.T) = inv(M) - inv(M) V inv(C) V.T inv(M)
where,
C = inv(D) + V.T inv(M) V.
See: https://en.wikipedia.org/wiki/Woodbury_matrix_identity
Args:
x: `Tensor`
Returns:
inv_of_self_times_x: `Tensor`
"""
minv_x = linalg_ops.matrix_triangular_solve(self._m, x)
vt_minv_x = math_ops.matmul(self._v, minv_x, transpose_a=True)
cinv_vt_minv_x = linalg_ops.matrix_solve(
self._woodbury_sandwiched_term(), vt_minv_x)
v_cinv_vt_minv_x = math_ops.matmul(self._v, cinv_vt_minv_x)
minv_v_cinv_vt_minv_x = linalg_ops.matrix_triangular_solve(
self._m, v_cinv_vt_minv_x)
return minv_x - minv_v_cinv_vt_minv_x
def sqrt_log_abs_det(self):
"""Computes (log o abs o det)(X) for matrix X.
Doesn't actually do the sqrt! Named as such to agree with API.
To compute det(M + V D V.T), we use the matrix determinant lemma:
det(Tril + V D V.T) = det(C) det(D) det(M)
where C is defined as in `sqrt_solve`, i.e.,
C = inv(D) + V.T inv(M) V.
See: https://en.wikipedia.org/wiki/Matrix_determinant_lemma
Returns:
log_abs_det: `Tensor`.
"""
log_det_c = math_ops.log(math_ops.abs(
linalg_ops.matrix_determinant(self._woodbury_sandwiched_term())))
# Reduction is ok because we always prepad inputs to this class.
log_det_m = math_ops.reduce_sum(math_ops.log(math_ops.abs(
array_ops.matrix_diag_part(self._m))), reduction_indices=[-1])
return log_det_c + 2. * self._d.sqrt_log_abs_det() + log_det_m
def _woodbury_sandwiched_term(self):
"""Computes the sandwiched term in the Woodbury identity.
Computes the "`C`" in the the identity:
inv(M + V D V.T) = inv(M) - inv(M) V inv(C) V.T inv(M)
where,
C = inv(D) + V.T inv(M) V.
See: https://en.wikipedia.org/wiki/Woodbury_matrix_identity
Returns:
woodbury_sandwich_term: A `Tensor` to be used like `C`, above.
"""
minv_v = linalg_ops.matrix_triangular_solve(self._m, self._v)
vt_minv_v = math_ops.matmul(self._v, minv_v, adjoint_a=True)
return self._d_inv.add_to_tensor(vt_minv_v)
class Affine(Bijector):
# pylint: disable=line-too-long
"""Bijector which computes `Y = g(X; shift, scale) = matmul(scale, X) + shift` where `scale = c * I + diag(D1) + tril(L) + V @ diag(D2) @ V.T`.
Write `A @ X` for `matmul(A, X)`. In TF parlance, the `scale` term is
logically equivalent to:
```python
scale = (
scale_identity_multiplier * tf.diag(tf.ones(d)) +
tf.diag(scale_diag) +
scale_tril +
scale_perturb_factor @ diag(scale_perturb_diag) @
tf.transpose([scale_perturb_factor])
)
```
The `scale` term is applied without necessarily materializing constituent
matrices, i.e., the matmul is [matrix-free](
https://en.wikipedia.org/wiki/Matrix-free_methods) when possible.
Examples:
```python
# Y = X
b = Affine()
# Y = X + shift
b = Affine(shift=[1., 2, 3])
# Y = 2 * I @ X.T + shift
b = Affine(shift=[1., 2, 3],
scale_identity_multiplier=2.)
# Y = tf.diag(d1) @ X.T + shift
b = Affine(shift=[1., 2, 3],
scale_diag=[-1., 2, 1]) # Implicitly 3x3.
# Y = (I + v * v.T) @ X.T + shift
b = Affine(shift=[1., 2, 3],
scale_perturb_factor=[[1., 0],
[0, 1],
[1, 1]])
# Y = (diag(d1) + v * diag(d2) * v.T) @ X.T + shift
b = Affine(shift=[1., 2, 3],
scale_diag=[1., 3, 3], # Implicitly 3x3.
scale_perturb_diag=[2., 1], # Implicitly 2x2.
scale_perturb_factor=[[1., 0],
[0, 1],
[1, 1]])
```
"""
# pylint: enable=line-too-long
def __init__(self,
shift=None,
scale_identity_multiplier=None,
scale_diag=None,
scale_tril=None,
scale_perturb_factor=None,
scale_perturb_diag=None,
event_ndims=1,
validate_args=False,
name="affine"):
"""Instantiates the `Affine` bijector.
This `Bijector` is initialized with `shift` `Tensor` and `scale` arguments,
giving the forward operation:
```none
Y = g(X) = scale @ X + shift
```
where the `scale` term is logically equivalent to:
```python
scale = (
scale_identity_multiplier * tf.diag(tf.ones(d)) +
tf.diag(scale_diag) +
scale_tril +
scale_perturb_factor @ diag(scale_perturb_diag) @
tf.transpose([scale_perturb_factor])
)
```
If none of `scale_identity_multiplier`, `scale_diag`, or `scale_tril` are
specified then `scale += IdentityMatrix`. Otherwise specifying a
`scale` argument has the semantics of `scale += Expand(arg)`, i.e.,
`scale_diag != None` means `scale += tf.diag(scale_diag)`.
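For example (a sketch of how the terms combine):
```python
# scale = 2 * I + tf.diag([1., 2, 3]) = tf.diag([3., 4, 5])
b = Affine(scale_identity_multiplier=2., scale_diag=[1., 2, 3])
```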
Args:
shift: Numeric `Tensor`. If this is set to `None`, no shift is applied.
scale_identity_multiplier: floating point rank 0 `Tensor` representing a
scaling done to the identity matrix.
When `scale_identity_multiplier = scale_diag = scale_tril = None` then
`scale += IdentityMatrix`. Otherwise no scaled-identity-matrix is added
to `scale`.
scale_diag: Numeric `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k], which represents a k x k
diagonal matrix.
When `None` no diagonal term is added to `scale`.
scale_tril: Numeric `Tensor` representing the lower triangular matrix.
`scale_tril` has shape [N1, N2, ... k, k], which represents a k x k
lower triangular matrix.
When `None` no `scale_tril` term is added to `scale`.
The upper triangular elements above the diagonal are ignored.
scale_perturb_factor: Numeric `Tensor` representing factor matrix with
last two dimensions of shape `(k, r)`.
When `None`, no rank-r update is added to `scale`.
scale_perturb_diag: Numeric `Tensor` representing the diagonal matrix.
`scale_perturb_diag` has shape [N1, N2, ... r], which represents an
r x r Diagonal matrix.
When `None` low rank updates will take the form `scale_perturb_factor *
scale_perturb_factor.T`.
event_ndims: Scalar `int32` `Tensor` indicating the number of dimensions
associated with a particular draw from the distribution. Must be 0 or 1.
validate_args: `Boolean` indicating whether arguments should be checked
for correctness.
name: `String` name given to ops managed by this object.
Raises:
ValueError: if `perturb_diag` is specified but not `perturb_factor`.
TypeError: if `shift` has different `dtype` from `scale` arguments.
"""
self._graph_parents = []
self._name = name
self._validate_args = validate_args
# Ambiguous definition of low rank update.
if scale_perturb_diag is not None and scale_perturb_factor is None:
raise ValueError("When scale_perturb_diag is specified, "
"scale_perturb_factor must be specified.")
# Special case, only handling a scaled identity matrix. We don't know its
# dimensions, so this is special cased.
# We don't check identity_multiplier, since below we set it to 1. if all
# other scale args are None.
self._is_only_identity_multiplier = (scale_tril is None and
scale_diag is None and
scale_perturb_factor is None)
# When no args are specified, pretend the scale matrix is the identity
# matrix.
if self._is_only_identity_multiplier and scale_identity_multiplier is None:
scale_identity_multiplier = 1.
with self._name_scope("init", values=[
shift, scale_identity_multiplier, scale_diag, scale_tril,
scale_perturb_diag, scale_perturb_factor, event_ndims]):
event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
if validate_args:
is_less_than_two = check_ops.assert_less(
event_ndims, 2,
message="event_ndims must be 0 or 1")
event_ndims = control_flow_ops.with_dependencies(
[is_less_than_two], event_ndims)
self._shift = _as_tensor(shift, "shift")
# self._create_scale_operator returns an OperatorPD in all cases except if
# self._is_only_identity_multiplier; in which case it returns a scalar
# Tensor.
self._scale = self._create_scale_operator(
identity_multiplier=scale_identity_multiplier,
diag=scale_diag,
tril=scale_tril,
perturb_diag=scale_perturb_diag,
perturb_factor=scale_perturb_factor,
event_ndims=event_ndims,
validate_args=validate_args)
if (self._shift is not None and
self._shift.dtype.base_dtype != self._scale.dtype.base_dtype):
raise TypeError("shift.dtype({}) does not match scale.dtype({})".format(
self._shift.dtype, self._scale.dtype))
super(Affine, self).__init__(
batch_ndims=self._infer_batch_ndims(),
event_ndims=event_ndims,
graph_parents=(
[event_ndims] +
([self._scale] if contrib_framework.is_tensor(self._scale)
else self._scale.inputs) +
([self._shift] if self._shift is not None else [])),
is_constant_jacobian=True,
validate_args=validate_args,
name=name)
def _create_scale_operator(self, identity_multiplier, diag, tril,
perturb_diag, perturb_factor, event_ndims,
validate_args):
"""Construct `scale` from various components.
Args:
identity_multiplier: floating point rank 0 `Tensor` representing a scaling
done to the identity matrix.
diag: Numeric `Tensor` representing the diagonal matrix. `scale_diag` has
shape [N1, N2, ... k], which represents a k x k diagonal matrix.
tril: Numeric `Tensor` representing the lower triangular matrix. `scale_tril`
has shape [N1, N2, ... k, k], which represents a k x k lower triangular matrix.
perturb_diag: Numeric `Tensor` representing the diagonal matrix of the
low rank update.
perturb_factor: Numeric `Tensor` representing factor matrix.
event_ndims: Scalar `int32` `Tensor` indicating the number of dimensions
associated with a particular draw from the distribution. Must be 0 or 1
validate_args: `Boolean` indicating whether arguments should be checked
for correctness.
Returns:
scale and batch_ndims. In the case of scaling by a constant, scale is a
floating point `Tensor`. Otherwise, scale is an `OperatorPD`.
Raises:
ValueError: if all of `tril`, `diag` and `identity_multiplier` are `None`.
"""
identity_multiplier = _as_tensor(identity_multiplier, "identity_multiplier")
diag = _as_tensor(diag, "diag")
tril = _as_tensor(tril, "tril")
perturb_diag = _as_tensor(perturb_diag, "perturb_diag")
perturb_factor = _as_tensor(perturb_factor, "perturb_factor")
identity_multiplier = self._maybe_validate_identity_multiplier(
identity_multiplier, validate_args)
if perturb_factor is not None:
perturb_factor = self._process_matrix(
perturb_factor, min_rank=2, event_ndims=event_ndims)
if perturb_diag is not None:
perturb_diag = self._process_matrix(
perturb_diag, min_rank=1, event_ndims=event_ndims)
# The following if-statements are ordered by increasingly stronger
# assumptions in the base matrix, i.e., we process in the order:
# TriL, Diag, Identity.
if tril is not None:
tril = self._preprocess_tril(
identity_multiplier, diag, tril, event_ndims)
if perturb_factor is None:
return operator_pd_cholesky.OperatorPDCholesky(
tril, verify_pd=validate_args)
return _TriLPlusVDVTLightweightOperatorPD(
tril=tril, v=perturb_factor, diag=perturb_diag,
validate_args=validate_args)
if diag is not None:
diag = self._preprocess_diag(identity_multiplier, diag, event_ndims)
if perturb_factor is None:
return operator_pd_diag.OperatorPDSqrtDiag(
diag, verify_pd=validate_args)
return operator_pd_vdvt_update.OperatorPDSqrtVDVTUpdate(
operator=operator_pd_diag.OperatorPDDiag(
diag, verify_pd=validate_args),
v=perturb_factor,
diag=perturb_diag,
verify_pd=validate_args)
if identity_multiplier is not None:
if perturb_factor is None:
return identity_multiplier
# Infer the shape from the V and D.
v_shape = array_ops.shape(perturb_factor)
identity_shape = array_ops.concat((v_shape[:-1], (v_shape[-2],)), 0)
scaled_identity = operator_pd_identity.OperatorPDIdentity(
identity_shape,
perturb_factor.dtype.base_dtype,
scale=identity_multiplier,
verify_pd=validate_args)
return operator_pd_vdvt_update.OperatorPDSqrtVDVTUpdate(
operator=scaled_identity,
v=perturb_factor,
diag=perturb_diag,
verify_pd=validate_args)
raise ValueError("One of tril, diag and/or identity_multiplier must be "
"specified.")
def _maybe_validate_identity_multiplier(self, identity_multiplier,
validate_args):
"""Check that the init arg `identity_multiplier` is valid."""
if identity_multiplier is None or not validate_args:
return identity_multiplier
if validate_args:
identity_multiplier = control_flow_ops.with_dependencies(
[check_ops.assert_positive(identity_multiplier)],
identity_multiplier)
return identity_multiplier
def _preprocess_tril(self, identity_multiplier, diag, tril, event_ndims):
"""Helper to preprocess a lower triangular matrix."""
tril = array_ops.matrix_band_part(tril, -1, 0) # Zero out TriU.
if identity_multiplier is None and diag is None:
return self._process_matrix(tril, min_rank=2, event_ndims=event_ndims)
new_diag = array_ops.matrix_diag_part(tril)
if identity_multiplier is not None:
new_diag += identity_multiplier
if diag is not None:
new_diag += diag
tril = array_ops.matrix_set_diag(tril, new_diag)
return self._process_matrix(tril, min_rank=2, event_ndims=event_ndims)
def _preprocess_diag(self, identity_multiplier, diag, event_ndims):
"""Helper to preprocess a diagonal matrix."""
if identity_multiplier is not None:
diag += identity_multiplier
return self._process_matrix(diag, min_rank=1, event_ndims=event_ndims)
def _process_matrix(self, matrix, min_rank, event_ndims):
"""Helper to __init__ which gets matrix in batch-ready form."""
# Pad the matrix so that matmul works in the case of a matrix and vector
# input. Keep track if the matrix was padded, to distinguish between a
# rank 3 tensor and a padded rank 2 tensor.
# TODO(srvasude): Remove side-effects from functions. It's currently unbroken
# but error-prone since the function call order may change in the future.
self._rank_two_event_ndims_one = math_ops.logical_and(
math_ops.equal(array_ops.rank(matrix), min_rank),
math_ops.equal(event_ndims, 1))
left = array_ops.where(self._rank_two_event_ndims_one, 1, 0)
pad = array_ops.concat(
[array_ops.ones(
[left], dtype=dtypes.int32), array_ops.shape(matrix)],
0)
return array_ops.reshape(matrix, pad)
def _infer_batch_ndims(self):
"""Return batch_ndims."""
if self._is_only_identity_multiplier:
return 0
# The real batch dims is one less when we pad in the case of event_ndims =
# 1, and the rank of the underlying scale being 2. This allows us to have
# non-negative sample dims.
return (self._scale.rank() - 2 -
array_ops.where(self._rank_two_event_ndims_one, 1, 0))
@property
def shift(self):
return self._shift
@property
def scale(self):
# TODO(srvasude): Remove this exception once TriLPlusVDVT is properly
# implemented.
if isinstance(self._scale, _TriLPlusVDVTLightweightOperatorPD):
raise NotImplementedError("Cannot access scale when Tril+VDV.T.")
return self._scale
def _forward(self, x):
y = x
if self._is_only_identity_multiplier:
y *= self._scale
if self.shift is not None:
return y + self.shift
return y
y, sample_shape = self.shaper.make_batch_of_event_sample_matrices(y)
y = self._scale.sqrt_matmul(y)
y = self.shaper.undo_make_batch_of_event_sample_matrices(y, sample_shape)
if self.shift is not None:
return y + self.shift
return y
def _inverse(self, y):
x = y
if self.shift is not None:
x -= self.shift
if self._is_only_identity_multiplier:
return x / self._scale
x, sample_shape = self.shaper.make_batch_of_event_sample_matrices(x)
x = self._scale.sqrt_solve(x)
x = self.shaper.undo_make_batch_of_event_sample_matrices(x, sample_shape)
return x
def _inverse_log_det_jacobian(self, y):
return -self._forward_log_det_jacobian(y)
def _forward_log_det_jacobian(self, x):
if self._is_only_identity_multiplier:
# TODO(jvdillon): We don't pad in this case and instead let the fldj be
# applied via broadcast.
d = math_ops.cast(array_ops.shape(x)[-1], dtype=self._scale.dtype)
return math_ops.log(math_ops.abs(self._scale)) * array_ops.where(
math_ops.equal(self.shaper.event_ndims, 0), 1., d)
fldj = self._scale.sqrt_log_abs_det()
# We need to squeeze off the padded dimension.
start = array_ops.where(self._rank_two_event_ndims_one, 1, 0)
return array_ops.reshape(fldj, array_ops.shape(fldj)[start:])
class AffineLinearOperator(Bijector):
"""Bijector which computes `Y = g(X; shift, scale) = scale @ X.T + shift`.
`shift` is a numeric `Tensor` and `scale` is a `LinearOperator`.
If `X` is a scalar then the forward transformation is: `scale * X + shift`
where `*` denotes the scalar product.
Note: we don't always simply transpose `X` (but write it this way for
brevity). Actually the input `X` undergoes the following transformation
before being premultiplied by `scale`:
1. If there are no sample dims, we call `X = tf.expand_dims(X, 0)`, i.e.,
`new_sample_shape = [1]`. Otherwise do nothing.
2. The sample shape is flattened to have one dimension, i.e.,
`new_sample_shape = [n]` where `n = tf.reduce_prod(old_sample_shape)`.
3. The sample dim is cyclically rotated left by 1, i.e.,
`new_shape = [B1,...,Bb, k, n]` where `n` is as above, `k` is the
event_shape, and `B1,...,Bb` are the batch shapes for each of `b` batch
dimensions.
(For more details see `shape.make_batch_of_event_sample_matrices`.)
The result of the above transformation is that `X` can be regarded as a batch
of matrices where each column is a draw from the distribution. After
premultiplying by `scale`, we take the inverse of this procedure. The input
`Y` also undergoes the same transformation before/after premultiplying by
`inv(scale)`.
Example Use:
```python
linalg = tf.contrib.linalg
x = [1., 2, 3]
shift = [-1., 0., 1]
diag = [1., 2, 3]
scale = linalg.LinearOperatorDiag(diag)
affine = AffineLinearOperator(shift, scale)
# In this case, `forward` is equivalent to:
# diag * x + shift
y = affine.forward(x) # [0., 4, 10]
shift = [2., 3, 1]
tril = [[1., 0, 0],
[2, 1, 0],
[3, 2, 1]]
scale = linalg.LinearOperatorTriL(tril)
affine = AffineLinearOperator(shift, scale)
# In this case, `forward` is equivalent to:
# np.squeeze(np.matmul(tril, np.expand_dims(x, -1)), -1) + shift
y = affine.forward(x) # [3., 7, 11]
```
"""
def __init__(self,
shift=None,
scale=None,
event_ndims=1,
validate_args=False,
name="affine_linear_operator"):
"""Instantiates the `AffineLinearOperator` bijector.
Args:
shift: Numeric `Tensor`.
scale: Subclass of `LinearOperator`. Represents the (batch) positive
definite matrix `M` in `R^{k x k}`.
event_ndims: Scalar `integer` `Tensor` indicating the number of dimensions
associated with a particular draw from the distribution. Must be 0 or 1.
validate_args: `Boolean` indicating whether arguments should be checked
for correctness.
name: `String` name given to ops managed by this object.
Raises:
ValueError: if `event_ndims` is not 0 or 1.
TypeError: if `scale` is not a `LinearOperator`.
TypeError: if `shift.dtype` does not match `scale.dtype`.
ValueError: if not `scale.is_non_singular`.
"""
self._graph_parents = []
self._name = name
self._validate_args = validate_args
graph_parents = []
with self._name_scope("init", values=[shift]):
event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
if tensor_util.constant_value(event_ndims) is not None:
event_ndims = tensor_util.constant_value(event_ndims)
if event_ndims not in (0, 1):
raise ValueError("event_ndims({}) was not 0 or 1".format(event_ndims))
else:
if validate_args:
# Shape tool will catch if event_ndims is negative.
event_ndims = control_flow_ops.with_dependencies(
[check_ops.assert_less(
event_ndims, 2, message="event_ndims must be 0 or 1")],
event_ndims)
graph_parents += [event_ndims]
if shift is not None:
shift = ops.convert_to_tensor(shift, name="shift")
graph_parents += [shift]
self._shift = shift
if scale is not None:
if (shift is not None and
shift.dtype.base_dtype != scale.dtype.base_dtype):
raise TypeError(
"shift.dtype({}) is incompatible with scale.dtype({}).".format(
shift.dtype, scale.dtype))
if not isinstance(scale, linear_operator.LinearOperator):
raise TypeError("scale is not an instance of tf.LinearOperator")
if validate_args and not scale.is_non_singular:
raise ValueError("Scale matrix must be non-singular.")
graph_parents += scale.graph_parents
if scale.tensor_rank is not None:
batch_ndims = scale.tensor_rank - 2
else:
batch_ndims = scale.tensor_rank_tensor() - 2
graph_parents += [batch_ndims]
else:
batch_ndims = 0 # We won't need shape inference when scale is None.
self._scale = scale
super(AffineLinearOperator, self).__init__(
batch_ndims=batch_ndims,
event_ndims=event_ndims,
graph_parents=graph_parents,
is_constant_jacobian=True,
validate_args=validate_args,
name=name)
@property
def shift(self):
"""The `shift` `Tensor` in `Y = scale @ X.T + shift`."""
return self._shift
@property
def scale(self):
"""The `scale` `LinearOperator` in `Y = scale @ X.T + shift`."""
return self._scale
def _forward(self, x):
y = x
if self.scale is not None:
y, sample_shape = self.shaper.make_batch_of_event_sample_matrices(
y, expand_batch_dim=False)
with ops.control_dependencies([self.scale.assert_non_singular()] if
self.validate_args else []):
y = self.scale.apply(y)
y = self.shaper.undo_make_batch_of_event_sample_matrices(
y, sample_shape, expand_batch_dim=False)
if self.shift is not None:
y += self.shift
return y
def _inverse(self, y):
x = y
if self.shift is not None:
x -= self.shift
if self.scale is not None:
x, sample_shape = self.shaper.make_batch_of_event_sample_matrices(
x, expand_batch_dim=False)
# Solve fails if the op is singular so we may safely skip this assertion.
x = self.scale.solve(x)
x = self.shaper.undo_make_batch_of_event_sample_matrices(
x, sample_shape, expand_batch_dim=False)
return x
def _inverse_log_det_jacobian(self, y):
return -self._forward_log_det_jacobian(y)
def _forward_log_det_jacobian(self, x): # pylint: disable=unused-argument
if self.scale is None:
return constant_op.constant(0, dtype=x.dtype.base_dtype)
with ops.control_dependencies([self.scale.assert_non_singular()] if
self.validate_args else []):
return self.scale.log_abs_determinant()
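# Illustrative sketch (not part of the original TensorFlow module): a plain
# NumPy re-computation of the diagonal-scale example from the
# `AffineLinearOperator` docstring above; the helper name and the use of
# NumPy instead of the TF linalg ops are editorial assumptions.
def _affine_diag_numpy_sketch():
  """Recomputes `y = scale @ x + shift` for `scale = diag([1, 2, 3])`."""
  import numpy as np
  x = np.array([1., 2., 3.])
  shift = np.array([-1., 0., 1.])
  diag = np.array([1., 2., 3.])
  # For a diagonal operator, `scale @ x` reduces to element-wise `diag * x`.
  y = diag * x + shift  # -> [0., 4., 10.], matching the docstring example
  # The (constant) forward log-det-Jacobian is log|det(scale)| = sum(log|diag|).
  fldj = np.sum(np.log(np.abs(diag)))  # -> log(6) ~= 1.792
  return y, fldj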
class Softplus(Bijector):
"""Bijector which computes `Y = g(X) = Log[1 + exp(X)]`.
The softplus `Bijector` has the following two useful properties:
* The domain is the positive real numbers
* `softplus(x) approx x`, for large `x`, so it does not overflow as easily as
the `Exp` `Bijector`.
Example Use:
```python
# Create the Y=g(X)=softplus(X) transform which works only on Tensors with 1
# batch ndim and 2 event ndims (i.e., vector of matrices).
softplus = Softplus(batch_ndims=1, event_ndims=2)
x = [[[1., 2],
[3, 4]],
[[5, 6],
[7, 8]]]
log(1 + exp(x)) == softplus.forward(x)
log(exp(x) - 1) == softplus.inverse(x)
```
Note: log(.) and exp(.) are applied element-wise but the Jacobian is a
reduction over the event space.
"""
def __init__(self,
event_ndims=0,
validate_args=False,
name="softplus"):
super(Softplus, self).__init__(
batch_ndims=0,
event_ndims=event_ndims,
validate_args=validate_args,
name=name)
def _forward(self, x):
return nn_ops.softplus(x)
def _inverse_and_inverse_log_det_jacobian(self, y):
if self.shaper is None:
raise ValueError("Jacobian cannot be computed with unknown event_ndims")
_, _, event_dims = self.shaper.get_dims(y)
# Could also do:
# ildj = math_ops.reduce_sum(y - distribution_util.softplus_inverse(y),
# reduction_indices=event_dims)
# but the following is more numerically stable. Ie,
# Y = Log[1 + exp{X}] ==> X = Log[exp{Y} - 1]
# ==> dX/dY = exp{Y} / (exp{Y} - 1)
# = 1 / (1 - exp{-Y}),
# which is the most stable for large Y > 0. For small Y, we use
# 1 - exp{-Y} approx Y.
ildj = -math_ops.reduce_sum(math_ops.log(-math_ops.expm1(-y)),
reduction_indices=event_dims)
return distribution_util.softplus_inverse(y), ildj
def _forward_log_det_jacobian(self, x): # pylint: disable=unused-argument
if self.shaper is None:
raise ValueError("Jacobian cannot be computed with unknown event_ndims")
_, _, event_dims = self.shaper.get_dims(x)
return -math_ops.reduce_sum(
nn_ops.softplus(-x), reduction_indices=event_dims)
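# Illustrative sketch (not part of the original module): numerically checks the
# stable inverse log-det-Jacobian form used in `Softplus` above; the scalar
# test value and the helper name are editorial choices.
def _softplus_ildj_numpy_sketch(y=2.0):
  """For Y = log(1 + exp(X)): log|dX/dY| = -log(1 - exp(-Y)) = Y - X."""
  import numpy as np
  x = np.log(np.expm1(y))          # softplus inverse, X = log(exp(Y) - 1)
  stable = -np.log(-np.expm1(-y))  # numerically stable form used above
  naive = y - x                    # alternative form mentioned in the comment above
  assert np.isclose(stable, naive)
  return stable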
class SoftmaxCentered(Bijector):
"""Bijector which computes `Y = g(X) = exp([X 0]) / sum(exp([X 0]))`.
To implement [softmax](https://en.wikipedia.org/wiki/Softmax_function) as a
bijection, the forward transformation appends a value to the input and the
inverse removes this coordinate. The appended coordinate represents a pivot,
e.g., `softmax(x) = exp(x-c) / sum(exp(x-c))` where `c` is the implicit last
coordinate.
  Because we append a coordinate, this bijector only supports `event_ndims` in
  `[0, 1]`, i.e., scalars and vectors.
Example Use:
```python
bijector.SoftmaxCentered(event_ndims=1).forward(tf.log([2, 3, 4]))
# Result: [0.2, 0.3, 0.4, 0.1]
# Extra result: 0.1
bijector.SoftmaxCentered(event_ndims=1).inverse([0.2, 0.3, 0.4, 0.1])
# Result: tf.log([2, 3, 4])
# Extra coordinate removed.
```
At first blush it may seem like the [Invariance of domain](
https://en.wikipedia.org/wiki/Invariance_of_domain) theorem implies this
implementation is not a bijection. However, the appended dimension
makes the (forward) image non-open and the theorem does not directly apply.
"""
def __init__(self,
event_ndims=0,
validate_args=False,
name="softmax_centered"):
self._graph_parents = []
self._name = name
with self._name_scope("init", values=[event_ndims]):
event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
event_ndims = tensor_util.constant_value(event_ndims)
if event_ndims is None or event_ndims not in [0, 1]:
raise ValueError("`event_ndims` must be a TF constant which is 0 or 1")
self._static_event_ndims = event_ndims
super(SoftmaxCentered, self).__init__(
batch_ndims=0, # We'll regard all non-event dims as sample dims.
event_ndims=event_ndims,
validate_args=validate_args,
name=name)
def _get_forward_event_shape(self, input_shape):
if input_shape.ndims is None:
return input_shape
if input_shape.ndims != self._static_event_ndims:
raise ValueError("input_shape.dims = %d != %d" %
(input_shape.ndims, self._static_event_ndims))
if input_shape.ndims == 0:
return tensor_shape.TensorShape([2])
if input_shape.ndims == 1:
return tensor_shape.TensorShape(input_shape[0] + 1)
# Unreachable code:
raise ValueError("event_ndims = %d must be 0 or 1" % input_shape.ndims)
def _forward_event_shape(self, input_shape):
ndims = array_ops.shape(input_shape)
if self.validate_args:
# It is not possible for a negative shape so we need only check <= 1.
is_zero_or_one = check_ops.assert_equal(
ndims, 0 if self._static_event_ndims == 0 else 1,
message="event_ndims must be 0 or 1")
ndims = control_flow_ops.with_dependencies([is_zero_or_one], ndims)
if self._static_event_ndims == 0:
return ops.convert_to_tensor(
[2], dtype=dtypes.int32, name="output_shape")
return input_shape + 1
def _get_inverse_event_shape(self, output_shape):
if output_shape.ndims is None:
return output_shape
if output_shape.ndims != 1:
raise ValueError("output_shape.ndims = %d != 1" % output_shape.ndims)
if self._static_event_ndims == 0:
return tensor_shape.TensorShape([])
return tensor_shape.TensorShape(output_shape[0] - 1)
def _inverse_event_shape(self, output_shape):
ndims = array_ops.shape(output_shape)[0]
if self.validate_args:
# It is not possible for a negative shape so we need only check <= 1.
is_one = check_ops.assert_equal(
ndims, 1, message="event_ndims must be 1")
ndims = control_flow_ops.with_dependencies([is_one], ndims)
if self._static_event_ndims == 0:
return ops.convert_to_tensor([], dtype=dtypes.int32, name="output_shape")
return array_ops.expand_dims(output_shape[0] - 1, dim=0)
def _forward(self, x):
# Pad the last dim with a zeros vector. We need this because it lets us
# infer the scale in the inverse function.
y = array_ops.expand_dims(x, dim=-1) if self._static_event_ndims == 0 else x
ndims = (y.get_shape().ndims if y.get_shape().ndims is not None
else array_ops.rank(y))
y = array_ops.pad(y,
paddings=array_ops.concat(
(array_ops.zeros(
(ndims - 1, 2), dtype=dtypes.int32), [[0, 1]]),
0))
# Set shape hints.
if x.get_shape().ndims is not None:
shape = x.get_shape().as_list()
if self._static_event_ndims == 0:
shape += [2]
elif shape[-1] is not None:
shape[-1] += 1
shape = tensor_shape.TensorShape(shape)
y.get_shape().assert_is_compatible_with(shape)
y.set_shape(shape)
# Since we only support event_ndims in [0, 1] and we do padding, we always
# reduce over the last dimension, i.e., dim=-1 (which is the default).
return nn_ops.softmax(y)
def _inverse(self, y):
# To derive the inverse mapping note that:
# y[i] = exp(x[i]) / normalization
# and
# y[end] = 1 / normalization.
# Thus:
# x[i] = log(exp(x[i])) - log(y[end]) - log(normalization)
# = log(exp(x[i])/normalization) - log(y[end])
# = log(y[i]) - log(y[end])
shape = (np.asarray(y.get_shape().as_list(), dtype=np.int32)
if y.get_shape().is_fully_defined()
else array_ops.shape(y, name="shape"))
ndims = y.get_shape().ndims or math_ops.rank(y, name="ndims")
# Do this first to make sure CSE catches that it'll happen again in
# _inverse_log_det_jacobian.
x = math_ops.log(y)
# We now extract the last coordinate of the rightmost dimension.
# Our trick is to slice from [0,0,...,shape[-1]-1] to shape[:-1]+[1].
begin = array_ops.one_hot(indices=ndims-1,
depth=ndims,
on_value=shape[-1]-np.array(1, dtype=shape.dtype),
dtype=shape.dtype)
size = array_ops.concat((shape[:-1], np.asarray([1], dtype=shape.dtype)), 0)
log_normalization = -array_ops.strided_slice(x, begin, begin + size)
# Here we slice out all but the last coordinate; see above for idea.
begin = array_ops.zeros_like(shape)
size = array_ops.concat((shape[:-1], [shape[-1] - 1]), 0)
x = array_ops.strided_slice(x, begin, begin + size)
x += log_normalization
if self._static_event_ndims == 0:
x = array_ops.squeeze(x, squeeze_dims=[ndims-1])
# Set shape hints.
if y.get_shape().ndims is not None:
shape = y.get_shape().as_list()
if self._static_event_ndims == 0:
shape = shape[:-1]
elif shape[-1] is not None:
shape[-1] -= 1
shape = tensor_shape.TensorShape(shape)
x.get_shape().assert_is_compatible_with(shape)
x.set_shape(shape)
return x
def _inverse_log_det_jacobian(self, y):
# WLOG, consider the vector case:
# x = log(y[:-1]) - log(y[-1])
# where,
# y[-1] = 1 - sum(y[:-1]).
# We have:
# det{ dX/dY } = det{ diag(1 ./ y[:-1]) + 1 / y[-1] }
# = det{ inv{ diag(y[:-1]) - y[:-1]' y[:-1] } } (1)
# = 1 / det{ diag(y[:-1]) - y[:-1]' y[:-1] }
# = 1 / { (1 + y[:-1]' inv(diag(y[:-1])) y[:-1]) *
# det(diag(y[:-1])) } (2)
# = 1 / { y[-1] prod(y[:-1]) }
# = 1 / prod(y)
# (1) - https://en.wikipedia.org/wiki/Sherman%E2%80%93Morrison_formula
# or by noting that det{ dX/dY } = 1 / det{ dY/dX } from Bijector
# docstring "Tip".
# (2) - https://en.wikipedia.org/wiki/Matrix_determinant_lemma
return -math_ops.reduce_sum(math_ops.log(y), reduction_indices=-1)
def _forward_log_det_jacobian(self, x):
if self._static_event_ndims == 0:
return x - 2. * nn_ops.softplus(x)
else:
# This code is similar to nn_ops.log_softmax but different because we have
# an implicit zero column to handle. I.e., instead of:
# reduce_sum(logits - reduce_sum(exp(logits), dim))
# we must do:
# log_normalization = 1 + reduce_sum(exp(logits))
# -log_normalization + reduce_sum(logits - log_normalization)
log_normalization = nn_ops.softplus(
math_ops.reduce_logsumexp(x, reduction_indices=-1, keep_dims=True))
fldj = (-log_normalization +
math_ops.reduce_sum(x - log_normalization,
reduction_indices=-1,
keep_dims=True))
return array_ops.squeeze(fldj, squeeze_dims=-1)
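# Illustrative sketch (not part of the original module): reproduces the
# `SoftmaxCentered` docstring example and the `-sum(log(y))` Jacobian result
# derived above with plain NumPy; names and values are editorial.
def _softmax_centered_numpy_sketch():
  import numpy as np
  x = np.log([2., 3., 4.])
  logits = np.append(x, 0.)                    # append the implicit pivot logit
  y = np.exp(logits) / np.sum(np.exp(logits))  # -> [0.2, 0.3, 0.4, 0.1]
  x_back = np.log(y[:-1]) - np.log(y[-1])      # inverse: log(y[i]) - log(y[-1])
  ildj = -np.sum(np.log(y))                    # inverse log-det-Jacobian
  return y, x_back, ildj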
class SigmoidCentered(SoftmaxCentered):
"""Bijector which computes Y = g(X) = exp([X 0]) / (1 + exp(-X)).
Equivalent to: `bijector.SoftmaxCentered(event_ndims=0)`.
See `bijector.SoftmaxCentered` for more details.
"""
def __init__(self, validate_args=False, name="sigmoid_centered"):
super(SigmoidCentered, self).__init__(
validate_args=validate_args, name=name)
class CholeskyOuterProduct(Bijector):
# pylint: disable=line-too-long
"""Bijector which computes Y = g(X) = X X.T where X is a lower-triangular, positive-diagonal matrix.
`event_ndims` must be 0 or 2, i.e., scalar or matrix.
  Note: the upper-triangular part of X is ignored (whether or not it is zero).
Examples:
```python
bijector.CholeskyOuterProduct(event_ndims=2).forward(x=[[1., 0], [2, 1]])
# Result: [[1, 1], [1, 5]], i.e., x x.T
  bijector.CholeskyOuterProduct(event_ndims=2).inverse(y=[[1., 1], [1, 5]])
# Result: [[1, 0], [2, 1]], i.e., chol(y).
```
"""
# pylint: enable=line-too-long
def __init__(self, event_ndims=2, validate_args=False,
name="cholesky_outer_product"):
"""Instantiates the `CholeskyOuterProduct` bijector.
Args:
event_ndims: `constant` `int32` scalar `Tensor` indicating the number of
dimensions associated with a particular draw from the distribution. Must
be 0 or 2.
validate_args: `Boolean` indicating whether arguments should be checked
for correctness.
name: `String` name given to ops managed by this object.
Raises:
ValueError: if event_ndims is neither 0 or 2.
"""
self._graph_parents = []
self._name = name
with self._name_scope("init", values=[event_ndims]):
event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
event_ndims = tensor_util.constant_value(event_ndims)
if event_ndims is None or event_ndims not in [0, 2]:
raise ValueError("`event_ndims` must be a TF constant which is 0 or 2")
self._static_event_ndims = event_ndims
super(CholeskyOuterProduct, self).__init__(
validate_args=validate_args,
name=name)
def _forward(self, x):
if self._static_event_ndims == 0:
return math_ops.square(x)
if self.validate_args:
is_matrix = check_ops.assert_rank_at_least(x, 2)
shape = array_ops.shape(x)
is_square = check_ops.assert_equal(shape[-2], shape[-1])
x = control_flow_ops.with_dependencies([is_matrix, is_square], x)
# For safety, explicitly zero-out the upper triangular part.
x = array_ops.matrix_band_part(x, -1, 0)
return math_ops.matmul(x, x, adjoint_b=True)
def _inverse_and_inverse_log_det_jacobian(self, y):
x = (math_ops.sqrt(y) if self._static_event_ndims == 0
else linalg_ops.cholesky(y))
return x, -self._forward_log_det_jacobian(x)
def _forward_log_det_jacobian(self, x):
# Let Y be a symmetric, positive definite matrix and write:
# Y = X X.T
# where X is lower-triangular.
#
# Observe that,
# dY[i,j]/dX[a,b]
# = d/dX[a,b] { X[i,:] X[j,:] }
# = sum_{d=1}^p { I[i=a] I[d=b] X[j,d] + I[j=a] I[d=b] X[i,d] }
#
# To compute the Jacobian dX/dY we must represent X,Y as vectors. Since Y is
# symmetric and X is lower-triangular, we need vectors of dimension:
# d = p (p + 1) / 2
# where X, Y are p x p matrices, p > 0. We use a row-major mapping, i.e.,
# k = { i (i + 1) / 2 + j i>=j
# { undef i<j
# and assume zero-based indexes. When k is undef, the element is dropped.
# Example:
# j k
# 0 1 2 3 /
# 0 [ 0 . . . ]
# i 1 [ 1 2 . . ]
# 2 [ 3 4 5 . ]
# 3 [ 6 7 8 9 ]
# Write vec[.] to indicate transforming a matrix to vector via k(i,j). (With
# slight abuse: k(i,j)=undef means the element is dropped.)
#
# We now show d vec[Y] / d vec[X] is lower triangular. Assuming both are
# defined, observe that k(i,j) < k(a,b) iff (1) i<a or (2) i=a and j<b.
# In both cases dvec[Y]/dvec[X]@[k(i,j),k(a,b)] = 0 since:
# (1) j<=i<a thus i,j!=a.
# (2) i=a>j thus i,j!=a.
#
# Since the Jacobian is lower-triangular, we need only compute the product
# of diagonal elements:
# d vec[Y] / d vec[X] @[k(i,j), k(i,j)]
# = X[j,j] + I[i=j] X[i,j]
# = 2 X[j,j].
# Since there is a 2 X[j,j] term for every lower-triangular element of X we
# conclude:
# |Jac(d vec[Y]/d vec[X])| = 2^p prod_{j=0}^{p-1} X[j,j]^{p-j}.
if self._static_event_ndims == 0:
if self.validate_args:
is_positive = check_ops.assert_positive(
x, message="All elements must be positive.")
x = control_flow_ops.with_dependencies([is_positive], x)
return math.log(2.) + math_ops.log(x)
diag = array_ops.matrix_diag_part(x)
if self.validate_args:
is_matrix = check_ops.assert_rank_at_least(
x, 2, message="Input must be a (batch of) matrix.")
shape = array_ops.shape(x)
is_square = check_ops.assert_equal(
shape[-2], shape[-1],
message="Input must be a (batch of) square matrix.")
# Assuming lower-triangular means we only need check diag>0.
is_positive_definite = check_ops.assert_positive(
diag, message="Input must be positive definite.")
x = control_flow_ops.with_dependencies(
[is_matrix, is_square, is_positive_definite], x)
# Create a column vector equal to: [p, p-1, ..., 2, 1].T.
if x.get_shape().ndims is None or x.get_shape()[-1].value is None:
p = array_ops.shape(x)[-1]
else:
p = x.get_shape()[-1].value
exponents = array_ops.expand_dims(
math_ops.linspace(math_ops.cast(p, dtype=x.dtype), 1., p),
dim=1)
sum_weighted_log_diag = array_ops.squeeze(
math_ops.matmul(math_ops.log(diag), exponents), squeeze_dims=-1)
fldj = p * math.log(2.) + sum_weighted_log_diag
if x.get_shape().ndims is not None:
fldj.set_shape(x.get_shape()[:-2])
return fldj
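# Illustrative sketch (not part of the original module): checks the closed form
# |Jac| = 2**p * prod_j diag[j]**(p - j) derived in
# `CholeskyOuterProduct._forward_log_det_jacobian` above on a 2x2 example.
def _cholesky_outer_product_fldj_numpy_sketch():
  import numpy as np
  x = np.array([[1., 0.], [2., 1.]])  # lower triangular, positive diagonal
  p = x.shape[-1]
  diag = np.diag(x)
  fldj = p * np.log(2.) + np.sum((p - np.arange(p)) * np.log(diag))
  return fldj  # log(4): for [[a, 0], [b, c]] the Jacobian determinant is 4*a**2*c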
| apache-2.0 | -2,422,548,313,249,080,300 | 36.574434 | 145 | 0.632617 | false |
byronrau/tweepyScripts | twitterUserTimeline/twitterUserTimeline.py | 1 | 3599 | #!/usr/bin/python
import tweepy
import sys
import os
import codecs
import unicodecsv as csv
# API and ACCESS KEYS
API_KEY = 'jz3feMK2gN0kaN377FsTXY7uY'
API_SECRET = 'sGfCEayfwORloC9SvHy6BmDjifUsUEIF0EF51SgiYUgs054n7H'
# Don't buffer stdout, so we can tail the log output redirected to a file
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
# Max Tweets
maxTweets = int(sys.argv[1])
# Filename
fName = sys.argv[2]
tweetsPerQry = 200
# List of users read from users.txt
users = []
#open users.txt file and gets the list of users
with open('users.txt', 'r') as f:
for line in f:
users.append(line.strip())
sinceId = None
if(len(sys.argv) > 3):
if(sys.argv[3] != '-1'):
sinceId = sys.argv[3]
last_id = -1L
if(len(sys.argv) > 4):
last_id = long(sys.argv[4])
def getHashtags(hashes):
hashStr = ''
for i, h in enumerate(hashes):
if i == len(hashes)-1:
hashStr = hashStr + h['text']
else:
hashStr = hashStr + h['text'] + ','
return hashStr
def getMentions(mentions):
mentionStr = ''
for i, m in enumerate(mentions):
if i == len(mentions)-1:
mentionStr = mentionStr + m['screen_name']
else:
mentionStr = mentionStr + m['screen_name'] + ','
return mentionStr
auth = tweepy.AppAuthHandler(API_KEY, API_SECRET)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
if (not api):
print ("Can't Authenticate Bye!")
sys.exit(-1)
tweetCount = 0
print("Downloading max {0} tweets".format(maxTweets))
with open(fName, 'ab') as f:
writer = csv.writer(f, encoding='utf-8')
writer.writerow(['Username','Created_at','ID','Tweet','RetweetCount','Name','Location','URL','Description','TweetCount','FollowersCount','FriendsCount','hashtags','mentions'])
for user in users:
tweetCount = 0
last_id = 0
while tweetCount < maxTweets:
print 'User is ' + user + ' Tweet count ' + str(tweetCount) + ' max Tweets ' + str(maxTweets) + ' SinceId ' + str(sinceId) + ' last_id ' + str(last_id)
try:
if (last_id <= 0):
if (not sinceId):
new_tweets = api.user_timeline(screen_name=user, count=tweetsPerQry)
else:
new_tweets = api.user_timeline(screen_name=user, count=tweetsPerQry, since_id=sinceId)
else:
if (not sinceId):
new_tweets = api.user_timeline(screen_name=user, count=tweetsPerQry, max_id=str(last_id - 1))
else:
new_tweets = api.user_timeline(screen_name=user, count=tweetsPerQry, max_id=str(last_id - 1), since_id=sinceId)
if not new_tweets:
print("No more tweets found")
break
for tweet in new_tweets:
try:
hashTags = getHashtags(tweet.entities.get('hashtags'))
mentions = getMentions(tweet.entities.get('user_mentions'))
writer.writerow([tweet.user.screen_name,tweet.created_at,tweet.id_str,tweet.text,str(tweet.retweet_count),tweet.user.name, tweet.user.location, str(tweet.user.url),tweet.user.description,str(tweet.user.statuses_count),str(tweet.user.followers_count),str(tweet.user.friends_count),hashTags,mentions])
except tweepy.TweepError as e:
print("some error : " + str(e) + " for user: " + user)
break
tweetCount += len(new_tweets)
print("Downloaded {0} tweets".format(tweetCount))
last_id = new_tweets[-1].id
except tweepy.TweepError as e:
# Just exit if any error
print("some error : " + str(e))
break
print ("Downloaded {0} tweets, Saved to {1}".format(tweetCount, fName)) | mit | 5,696,373,822,382,111,000 | 30.578947 | 311 | 0.639066 | false |
djfroofy/beatlounge | tutor/song5.py | 1 | 2172 | from itertools import cycle
from bl.ugen import W
from bl.arp import ChordPatternArp, OrderedArp, RandomArp, ArpMap
from bl.scheduler import clock
from bl.orchestra.midi import ChordPlayer
from tutor.complib import piano_f
pattern = [3, 3, [3, 1], 1, 2, 1, 2, 1, [3, 2, 1, 0, 4], 0, 1, 2, 3, 4, 3, 2,
[3, 2], 0, 0, [0, 1, 2], 2, 1, 2, 0, [0, 1, 2, 3], 3, 2, 1, 0,
[5, 4, 1], 5, 4, 3, 4, 2, 1, 5, 0, [5, 0]]
notes = cycle([[38, 50, 62, 65, 69, 80],
[38, 50, 62, 65, 69, 84],
[38, 50, 62, 65, 67, 84],
[38, 50, 62, 65, 69, 84],
[36, 50, 62, 65, 69, 84],
[36, 55, 62, 65, 69, 84],
[36, 55, 62, 67, 69, 84],
[36, 55, 60, 67, 69, 84],
[36, 53, 55, 67, 69, 84],
[36, 53, 55, 67, 69, 81],
[36, 53, 55, 65, 69, 81],
[36, 53, 55, 65, 67, 81],
[38, 53, 55, 65, 67, 81],
[38, 53, 55, 67, 69, 81],
[38, 53, 55, 67, 69, 74],
[38, 53, 55, 65, 67, 74],
[36, 53, 55, 65, 67, 74],
[36, 55, 57, 65, 67, 74],
[36, 55, 57, 60, 67, 74],
[36, 55, 57, 60, 64, 74],
[36, 55, 57, 60, 64, 80],
[36, 55, 57, 60, 64, 81],
[36, 55, 57, 60, 64, 84],
[36, 55, 57, 60, 63, 84],
[36, 55, 57, 60, 64, 84],
[36, 55, 57, 60, 69, 84],
[36, 55, 57, 60, 69, 81],
[36, 55, 57, 60, 69, 78],
[36, 53, 55, 60, 69, 78],
[36, 53, 55, 62, 69, 78]])
piano = piano_f()
piano.controlChange(reverb=120, sustain=100, chorus=50, vibrato=15)
r = W((0, 5), (12, 2), (-12, 3))
f = lambda chord: [r() + n for n in chord]
arp = ArpMap(f, ChordPatternArp(notes.next(), pattern))
player = ChordPlayer(piano, arp,
velocity=OrderedArp([127, 80, 90, 80, 90, 120, 120, 80]),
release=RandomArp([11, 10, 9, 8]))
resetter = clock.schedule(lambda: arp.reset(notes.next())
).startAfter((2, 1), (2, 1))
player.resumePlaying()
| mit | 760,528,155,715,385,200 | 36.448276 | 78 | 0.422652 | false |
MathGen/oppgavegenerator | oppgavegen/views/game_views.py | 1 | 8226 | from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response, redirect
from django.http import HttpResponseForbidden
from django.template import RequestContext
from oppgavegen.generation_folder.generation import generate_level
from oppgavegen.view_logic.rating import *
from oppgavegen.view_logic.progress import *
from oppgavegen.models import Set, Chapter, Level
from oppgavegen.forms import QuestionForm
from oppgavegen.view_logic.submit_and_answer import *
@login_required
def game(request, set_id):
context = RequestContext(request)
set = Set.objects.get(pk=set_id)
set_title = set.name
if request.user in set.users.all():
goto = render_to_response('game/screen.html', {'set_id': set_id, 'set_title': set_title}, context)
else:
goto = render_to_response('game/set_notmember.html', {'set': set}, context)
return goto
def chapters(request, set_id):
game_set = Set.objects.get(pk=set_id)
set_title = game_set.name
is_requirement = game_set.is_requirement
is_password_protected = game_set.password_protected
set_chapters = game_set.chapters.all()
context = RequestContext(request)
medals = [] # Both lists get updated in chapter_progress
completed = []
if is_requirement:
# In case we want to do something special if the set is a requirement type set
progress_number = chapter_progress(request.user, game_set, medals, completed)
else:
progress_number = chapter_progress(request.user, game_set, medals, completed)
order = game_set.order
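    # `order` is assumed to be a comma separated list of chapter primary keys;
    # the loop below re-orders the chapters of the set to match that list.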
set_chapters_ordered = []
for x in order.split(','):
for chapter in set_chapters:
if chapter.pk == int(x):
set_chapters_ordered.append(chapter)
break
if request.is_ajax():
response = render_to_response('game/chapters.html',
{'chapters': set_chapters_ordered, 'medals': json.dumps(medals),
'completed': json.dumps(completed), 'progress_number': progress_number,
'set_id': set_id, 'is_requirement': is_requirement,
'is_password_protected': is_password_protected}, context)
else:
response = render_to_response('game/chapters_noajax.html',
{'chapters': set_chapters_ordered, 'medals': json.dumps(medals),
'completed': json.dumps(completed), 'progress_number': progress_number,
'set_id': set_id, "set_title": set_title, "is_requirement": is_requirement,
'is_password_protected': is_password_protected}, context)
return response
def levels(request, chapter_id):
game_chapter = Chapter.objects.get(pk=chapter_id)
in_requirement_set = game_chapter.in_requirement_set
chapter_levels = game_chapter.levels.all()
chapter_title = game_chapter.name
context = RequestContext(request)
if in_requirement_set:
progress_number = len(chapter_levels)
else:
progress_number = calculate_progress(request.user, game_chapter)
star_per_level = get_stars_per_level(request.user, game_chapter)
order = game_chapter.order
chapter_levels_ordered = []
for x in order.split(','):
for chapter in chapter_levels:
if chapter.pk == int(x):
chapter_levels_ordered.append(chapter)
break
if request.is_ajax():
return render_to_response('game/levels.html',
{'levels': chapter_levels_ordered, 'chapter_title': chapter_title,
'progress_number': progress_number, 'spl': star_per_level, 'chapter_id': chapter_id,
'in_requirement_set':in_requirement_set},
context)
else:
return render_to_response('game/levels_noajax.html',
{'levels': chapter_levels_ordered, 'chapter_title': chapter_title,
'progress_number': progress_number, 'spl': star_per_level, 'chapter_id': chapter_id,
'in_requirement_set':in_requirement_set},
context)
@login_required
def get_template(request):
"""Gets a template for a given level"""
context = RequestContext(request)
#if request.method == 'POST':
    context_dict = {'message': 'Noe har gått feil.'}  # "Something has gone wrong."
form = request.POST
    if form.get('level_id') is None:
return redirect('/')
level_id = int(form.get('level_id'))
chapter_id = int(form.get('chapter_id'))
set_id = int(form.get('set_id'))
set = Set.objects.get(pk=set_id)
#if check_for_level_skip(request.user, Chapter.objects.get(pk=chapter_id), level_id):
# return render_to_response('game/template.html', context_dict, context)
context['set_title'] = set.name
context['set_id'] = set_id
context['chapter_id'] = chapter_id
context['chapter_title'] = Chapter.objects.get(pk=chapter_id).name
context['level_title'] = Level.objects.get(pk=level_id).name
context['level_id'] = level_id
context_dict = generate_level(request.user, level_id)
context_dict['rating'] = get_user_rating(request.user)
level = Level.objects.get(pk=level_id)
context_dict['stars'] = get_user_stars_for_level(request.user, level)
context_dict['ulp'] = get_user_rating_for_level(request.user, level)
if request.is_ajax():
return render_to_response('game/template.html', context_dict, context)
else:
return render_to_response('game/template_noajax.html', context_dict, context)
def get_solution(request, level=1):
"""Returns a render of answers.html"""
context = RequestContext(request)
    cheat_message = '\\text{Ulovlig tegn har blitt brukt i svar}'  # "An illegal character was used in the answer"
    required_message = '\\text{Svaret ditt har ikke utfylt alle krav}'  # "Your answer has not fulfilled all requirements"
render_to = 'game/answer.html'
if request.method == 'POST':
form = QuestionForm(request.POST)
if form.is_valid():
form_values = form.process()
template = Template.objects.get(pk=form_values['primary_key'])
user_answer = form_values['user_answer']
try:
disallowed = json.loads(template.disallowed)
except ValueError:
disallowed = []
try:
required = json.loads(template.required)
except ValueError:
required = []
context_dict = make_answer_context_dict(form_values)
if (cheat_check(user_answer, disallowed, form_values['variable_dictionary'].split('§'))) and\
(form_values['template_type'] == 'normal') and (context_dict['user_won']):
context_dict['answer'] = cheat_message
return render_to_response(render_to, context_dict, context)
elif (required_check(user_answer, required, form_values['variable_dictionary'].split('§'))) and \
(form_values['template_type'] == 'normal') and (context_dict['user_won']):
context_dict['answer'] = required_message
return render_to_response(render_to, context_dict, context)
if request.is_ajax():
new_user_rating, new_star = change_level_rating(template, request.user, context_dict['user_won'],
form_values['template_type'], level)
context_dict['chapter_id'] = request.POST['chapter_id']
context_dict['ulp'] = int(new_user_rating)
context_dict['new_star'] = new_star
context_dict['stars'] = get_user_stars_for_level(request.user, Level.objects.get(pk=level))
return render_to_response(render_to, context_dict, context)
else:
change_elo(template, request.user, context_dict['user_won'], form_values['template_type'])
render_to_response(render_to, context_dict, context)
else:
print(form.errors)
| bsd-3-clause | -671,454,598,317,702,500 | 44.181319 | 119 | 0.60501 | false |
mtommasi/pygiftparser | setup.py | 1 | 1462 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
from setuptools import setup
try:
long_description = open("README.rst").read()
except IOError:
long_description = ""
LOCALEDIR = os.path.join('share', 'locale')
setup(
name="pygiftparser",
version="1.1",
url="https://github.com/mtommasi/pygiftparser",
description="GIFT parser in python that parses a Gift source code and loads data in a Question/Answer model for further use in an application",
license="MIT",
author="Marc Tommasi - UdL/INRIA",
author_email="[email protected]",
py_modules=['pygiftparser.parser',
'pygiftparser.i18n',
'pygiftparser.answer',
'pygiftparser.question',
'pygiftparser.utils'],
install_requires=['yattag', 'markdown', 'MarkdownSuperscript'],
long_description=long_description,
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3.4",
"Topic :: Text Processing"
],
data_files=[(os.path.join('share',
'locale',
lang,
'LC_MESSAGES'),
[os.path.join('share',
'locale',
lang,
'LC_MESSAGES',
'pygiftparser.mo')])
for lang in os.listdir(LOCALEDIR)]
)
| mit | -5,657,474,728,144,140,000 | 33 | 147 | 0.53078 | false |
agartland/utils | custom_legends.py | 1 | 2291 | import matplotlib.pyplot as plt
import numpy as np
__all__ = ['colorLegend',
'symbolLegend']
def colorLegend(colors, labels, alphas=None, edgecolor='black', loc='best', axh=None, **legendKwargs):
"""Custom matplotlib legend with colors and labels etc.
Useful in cases where it is awkward to include labels on the appropriate plot() objects.
Parameters specify the characteristics of each line in the legend.
Parameters
----------
colors : list of valid matplotlib colors
labels : list of strings
alphas : list of alpha values
edgecolor : single valid matplotlib color
All remaining kwargs are passed to legend()
"""
if axh is None:
axh = plt.gca()
if alphas is None:
alphas = np.ones(len(colors))
circles = (plt.Circle((0, 0), fc=c, ec=edgecolor, alpha=a) for c, a in zip(colors, alphas))
lh = axh.legend(circles,
labels,
loc=loc,
**legendKwargs)
return lh
def symbolLegend(symbols, labels, facecolors=None, edgecolors=None, alphas=None, loc='best', **legendKwargs):
"""Custom matplotlib legend with lines, symbols and labels etc.
Useful in cases where it is awkward to include labels on the appropriate plot() objects.
Parameters specify the characteristics of each line in the legend.
Parameters
----------
symbols : list of valid matplotlib symbols
E.g. 'xs^*.<>' or other matplotlib.markers
labels : list of strings
facecolors : list of valid matplotlib colors
edgecolors : list of valid matplotlib colors
alphas : list of alpha values
All remaining kwargs are passed to legend()
"""
if alphas is None:
alphas = np.ones(len(symbols))
if edgecolors is None:
edgecolors = ['black'] * len(symbols)
if facecolors is None:
facecolors = ['white'] * len(symbols)
lh = plt.legend((plt.Line2D([0], [0], ls = '', marker = s, markerfacecolor = mfc, markeredgecolor = ec, alpha = a) for s, mfc, ec, a in zip(symbols, facecolors, edgecolors, alphas)),
labels,
loc,
numpoints=1,
**legendKwargs)
return lh
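# Minimal usage sketch; the example data, colors and labels below are made up
# for illustration and are not part of the library interface.
if __name__ == '__main__':
    exampleColors = ['red', 'blue', 'green']
    exampleLabels = ['group A', 'group B', 'group C']
    figh, axh = plt.subplots()
    for c in exampleColors:
        axh.scatter(np.random.rand(10), np.random.rand(10), color=c)
    colorLegend(exampleColors, exampleLabels, axh=axh, loc='upper right')
    plt.show()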
| mit | 6,611,612,067,164,619,000 | 34.951613 | 186 | 0.61196 | false |
radez/packstack | packstack/installer/utils/shell.py | 1 | 4512 | # -*- coding: utf-8 -*-
import re
import types
import logging
import subprocess
from ..exceptions import (ExecuteRuntimeError, ScriptRuntimeError,
NetworkError)
from .strings import mask_string
block_fmt = ("\n============= %(title)s ==========\n%(content)s\n"
"======== END OF %(title)s ========")
def execute(cmd, workdir=None, can_fail=True, mask_list=None,
use_shell=False, log=True):
"""
    Runs shell command cmd. If can_fail is set to True (the default) an
    ExecuteRuntimeError is raised when the command returns a non-zero
    return code. Otherwise the (returncode, output) tuple is returned.
"""
mask_list = mask_list or []
repl_list = [("'", "'\\''")]
if not isinstance(cmd, types.StringType):
import pipes
masked = ' '.join((pipes.quote(i) for i in cmd))
else:
masked = cmd
masked = mask_string(masked, mask_list, repl_list)
if log:
logging.info("Executing command:\n%s" % masked)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=workdir,
shell=use_shell, close_fds=True)
out, err = proc.communicate()
masked_out = mask_string(out, mask_list, repl_list)
masked_err = mask_string(err, mask_list, repl_list)
if log:
logging.debug(block_fmt % {'title': 'STDOUT', 'content': masked_out})
if proc.returncode:
if log:
logging.debug(block_fmt % {'title': 'STDERR',
'content': masked_err})
if can_fail:
msg = 'Failed to execute command: %s' % masked_out
raise ExecuteRuntimeError(msg, stdout=out, stderr=err)
return proc.returncode, out
class ScriptRunner(object):
_pkg_search = 'rpm -q'
def __init__(self, ip=None):
self.script = []
self.ip = ip
def append(self, s):
self.script.append(s)
def clear(self):
self.script = []
def execute(self, can_fail=True, mask_list=None, log=True):
mask_list = mask_list or []
repl_list = [("'", "'\\''")]
script = "\n".join(self.script)
masked = mask_string(script, mask_list, repl_list)
if log:
logging.info("[%s] Executing script:\n%s" %
(self.ip or 'localhost', masked))
_PIPE = subprocess.PIPE # pylint: disable=E1101
if self.ip:
cmd = ["ssh", "-o", "StrictHostKeyChecking=no",
"-o", "UserKnownHostsFile=/dev/null",
"root@%s" % self.ip, "bash -x"]
else:
cmd = ["bash", "-x"]
obj = subprocess.Popen(cmd, stdin=_PIPE, stdout=_PIPE, stderr=_PIPE,
close_fds=True, shell=False)
script = "function t(){ exit $? ; } \n trap t ERR \n" + script
out, err = obj.communicate(script)
masked_out = mask_string(out, mask_list, repl_list)
masked_err = mask_string(err, mask_list, repl_list)
if log:
logging.debug(block_fmt % {'title': 'STDOUT',
'content': masked_out})
if obj.returncode:
if log:
logging.debug(block_fmt % {'title': 'STDERR',
'content': masked_err})
if can_fail:
pattern = (r'^ssh\:')
if re.search(pattern, err):
raise NetworkError(masked_err, stdout=out, stderr=err)
else:
msg = 'Failed to run remote script: %s' % masked_out
raise ScriptRuntimeError(msg, stdout=out, stderr=err)
return obj.returncode, out
def template(self, src, dst, varsdict):
with open(src) as fp:
content = fp.read() % varsdict
self.append("cat > %s <<- EOF\n%s\nEOF\n" % (dst, content))
def if_not_exists(self, path, command):
self.append("[ -e %s ] || %s" % (path, command))
def if_exists(self, path, command):
self.append("[ -e %s ] && %s" % (path, command))
def if_installed(self, pkg, command):
self.append("%s %s && %s" % (self._pkg_search, pkg, command))
def if_not_installed(self, pkg, command):
self.append("%s %s || %s" % (self._pkg_search, pkg, command))
def chown(self, target, uid, gid):
self.append("chown %s:%s %s" % (uid, gid, target))
def chmod(self, target, mode):
self.append("chmod %s %s" % (mode, target))
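# Minimal usage sketch; assumes a POSIX shell environment, and the echo
# commands and the /tmp marker path below are illustrative placeholders.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    rc, out = execute(['echo', 'hello'], can_fail=False)
    logging.info('execute() returned %s: %s', rc, out)
    runner = ScriptRunner()
    runner.append('echo "scripted step"')
    runner.if_not_exists('/tmp/packstack_marker', 'touch /tmp/packstack_marker')
    rc, out = runner.execute(can_fail=False)
    logging.info('ScriptRunner returned %s: %s', rc, out)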
| apache-2.0 | -8,818,339,688,155,416,000 | 33.707692 | 77 | 0.526596 | false |
hivesolutions/netius | src/netius/clients/mjpg.py | 1 | 4936 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Netius System
# Copyright (c) 2008-2020 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Netius System. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <[email protected]>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import os
import netius
from . import http
class MJPGProtocol(http.HTTPProtocol):
MAGIC_JPEG = b"\xff\xd8\xff\xe0"
""" The magic signature for the JPEG infra-structure, this
sequence of bytes is going to be used to detect new frames
coming from the HTTP based stream """
EOI_JPEG = b"\xff\xd9"
""" The sequence of bytes that indicate the end of the current
image, when these bytes are detected on the stream the message
should be "flushed" to the current output (emit) """
def __init__(self, *args, **kwargs):
http.HTTPProtocol.__init__(self, *args, **kwargs)
self.buffer_l = []
def add_buffer(self, data):
self.buffer_l.append(data)
def get_buffer(self, delete = True):
if not self.buffer_l: return b""
buffer = b"".join(self.buffer_l)
if delete: del self.buffer_l[:]
return buffer
def on_partial(self, data):
http.HTTPProtocol.on_partial(self, data)
# retrieves the reference to the top class that is going to
# be used for the correct parsing of the image
cls = self.__class__
# tries to find the end of image (EOI) indicator in the current
# received data, and in case it's not found add the (partial)
# data to the current buffer, to be latter processed
eoi_index = data.find(cls.EOI_JPEG)
if eoi_index == -1: self.buffer_l.append(data); return
# calculates the size of the end of image (EOI) token so that
# this value will be used for the calculus of the image data
eoi_size = len(cls.EOI_JPEG)
# adds the partial valid data of the current chunk to the buffer
# and then joins the current buffer as the frame data, removing
# the multipart header from it (to become a valid image)
self.buffer_l.append(data[:eoi_index + eoi_size])
frame = b"".join(self.buffer_l)
multipart_index = frame.find(b"\r\n\r\n")
frame = frame[multipart_index + 4:]
# clears the current buffer and adds the remaining part of the
# current chunk, that may be already part of a new image
del self.buffer_l[:]
self.buffer_l.append(data[eoi_index + eoi_size:])
# calls the proper event handler for the new frame data that has
# just been received, triggering the processing of the frame
self.on_frame_mjpg(frame)
def on_frame_mjpg(self, data):
self.trigger("frame", self, data)
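# Illustrative helper (not used by the client above): shows on plain bytes how
# a chunk is split at the JPEG end-of-image marker, mirroring the logic of
# MJPGProtocol.on_partial(); the function name and arguments are placeholders.
def _split_on_eoi_sketch(chunk, buffered = b""):
    eoi = MJPGProtocol.EOI_JPEG
    index = chunk.find(eoi)
    if index == -1: return None, buffered + chunk
    frame = buffered + chunk[:index + len(eoi)]   # complete frame, may still carry the multipart header
    rest = chunk[index + len(eoi):]               # start of the next frame
    return frame, rest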
class MJPGClient(http.HTTPClient):
protocol = MJPGProtocol
if __name__ == "__main__":
index = 0
limit = 30
def on_frame(protocol, data):
global index
index += 1
if index >= limit: return protocol.close()
base_path = netius.conf("IMAGES_PATH", "images")
base_path = os.path.abspath(base_path)
base_path = os.path.normpath(base_path)
if not os.path.exists(base_path): os.makedirs(base_path)
path = os.path.join(base_path, "%08d.jpg" % index)
file = open(path, "wb")
try: file.write(data)
finally: file.close()
print("Saved frame %08d of %d bytes" % (index, len(data)))
def on_finish(protocol):
netius.compat_loop(loop).stop()
url = netius.conf("MJPG_URL", "http://euglena.stanford.edu:20005/?action=stream")
client = MJPGClient()
loop, protocol = client.get(url)
protocol.bind("frame", on_frame)
protocol.bind("finish", on_finish)
loop.run_forever()
loop.close()
else:
__path__ = []
| apache-2.0 | 5,063,929,398,906,030,000 | 32.263889 | 85 | 0.625659 | false |
gameduell/pysupplies | tests/test_params.py | 1 | 3109 | import pytest
from supplies.annotate import delay
from supplies.params import param, Params
__author__ = 'dwae'
class Foo(Params):
@param
def bar(self, val: (1, 42)=23):
return val
@delay
def bla(self):
return ...
class Bar(Foo, Params):
@param
def baz(self, val: str='f00'):
return val
def test_basic():
foo = Foo()
assert foo.bar == 23
foo.bar = 1
assert foo.bar == 1
foo.bar = 42
assert foo.bar == 42
foo = Foo(bar=13)
assert foo.bar == 13
foo.bar = 37
assert foo.bar == 37
bar = Bar()
assert bar.bar == 23
assert bar.baz == 'f00'
bar = Bar(baz='')
assert bar.bar == 23
assert bar.baz == ''
bar = Bar(bar=6)
assert bar.bar == 6
assert bar.baz == 'f00'
bar = Bar(bar=12, baz='foo')
assert bar.bar == 12
assert bar.baz == 'foo'
bar.bar = 2
bar.baz = 'to'
assert bar.bar == 2
assert bar.baz == 'to'
with pytest.raises(TypeError):
Bar(bar=1, nil=None)
def test_export():
bar = Bar(bar=42, baz='foo')
params = bar.params
assert {'bar', 'baz'} == params.names
assert params.bar.name == 'bar'
assert params['baz'].name == 'baz'
assert params['bar'].value == 42
assert params.baz.value == 'foo'
assert params.bar.default == 23
assert params.baz.default == 'f00'
assert 'bar=42' in str(bar)
assert "baz='foo'" in repr(bar)
assert bar.bla is ...
with pytest.raises(KeyError):
params['bla']
with pytest.raises(AttributeError):
params.bla
class Convert(Params):
@param
def a(self, val=1):
return int(val)
@param
def b(self, val=''):
return str(val)
def test_convert():
conv = Convert()
assert conv.a == 1
assert conv.b == ''
conv = Convert(a='13', b=37)
assert conv.a == 13
assert conv.b == '37'
conv.a = '42'
assert conv.a == 42
conv.b = None
assert conv.b == str(None)
class Dependent(Params):
@param
def a(self, val=1):
return val
@param
def b(self, val=None):
if val is None:
return self.a + 1
else:
return val
@param
def c(self, val):
return self.a + val
@param
def d(self, val=3):
return self.a + val
def test_depend():
dep = Dependent()
assert dep.a == 1
assert dep.b == 2
dep.a = 2
assert dep.a == 2
assert dep.b == 3
dep.a = 1
assert dep.a == 1
assert dep.b == 2
dep.b = 4
dep.a = 5
assert dep.a == 5
assert dep.b == 4
dep.c = 3
assert dep.c == 8
dep.a = 3
dep.b = 2
assert dep.c == 6
assert dep.b == 2
del dep.b
assert dep.b == 4
del dep.a
assert dep.b == 2
del dep.c
with pytest.raises(TypeError):
dep.c
assert dep.d == 4
dep.a = 3
assert dep.d == 6
del dep.a
assert dep.d == 4
dep.d = 4
assert dep.d == 5
dep.a = 4
assert dep.d == 8
del dep.d
assert dep.d == 7
del dep.a
assert dep.d == 4
| mit | -5,483,111,285,384,543,000 | 15.537234 | 41 | 0.525893 | false |
onyb/cpp | DSATP/LinkedList/test_CH3_S6_Single_Linked_Lists.py | 1 | 2988 | import unittest
from LinkedList.CH3_S6_Single_Linked_Lists import LinkedList
from LinkedList.CH3_S6_Single_Linked_Lists import Node
from LinkedList.CH3_S6_Single_Linked_Lists import ElementNotFoundException
class TestSingleLinkedList(unittest.TestCase):
def test_node(self):
node_a = Node()
node_a.set_data('foo')
self.assertEqual(node_a.get_data(), 'foo')
node_b = Node()
node_b.set_data('baz')
node_a.set_next(node_b)
self.assertEqual(node_a.get_next(), node_b)
self.assertEqual(node_a.has_next, True)
self.assertEqual(node_b.has_next, False)
def test_linked_list(self):
ll = LinkedList()
node = Node()
node.set_data('foo')
ll.set_head(node)
self.assertEqual(ll.head, node)
self.assertEqual(ll.length, 1)
# Insert at beginning
ll.insert_at_pos('bar', 0)
self.assertEqual(ll.head.get_data(), 'bar')
self.assertEqual(ll.head.get_next().get_data(), 'foo')
self.assertEqual(ll.length, 2)
# Insert at end
ll.insert_at_pos('baz', 2)
self.assertEqual(ll.head.get_next().get_next().get_data(), 'baz')
self.assertEqual(ll.length, 3)
# Insert at position
ll.insert_at_pos('ani', 1)
self.assertEqual(ll.head.get_next().get_next().get_data(), 'ani')
self.assertEqual(ll.length, 4)
# Delete from beginning
ll.delete_from_beginning()
self.assertEqual(ll.head.get_data(), 'foo')
self.assertEqual(ll.length, 3)
# Delete last node
ll.delete_last_node()
self.assertEqual(ll.head.get_next().get_data(), 'ani')
self.assertEqual(ll.head.get_next().get_next(), None)
self.assertEqual(ll.length, 2)
# Delete node
ll.insert_at_pos('anirudha', 2)
ll.insert_at_pos('bose', 3)
self.assertEqual(ll.length, 4)
node = ll.head.get_next().get_next()
ll.delete_node(node)
self.assertEqual(ll.length, 3)
# Delete a non-existent node
node = Node()
node.set_data('pirated')
with self.assertRaises(ElementNotFoundException):
ll.delete_node(node)
self.assertEqual(ll.length, 3)
# Delete value
ll.insert_at_pos('onyb', 2)
self.assertEqual(ll.head.get_next().get_next().get_next().get_data(), 'onyb')
self.assertEqual(ll.length, 4)
ll.delete_value('onyb')
self.assertEqual(ll.head.get_next().get_data(), 'ani')
self.assertEqual(ll.length, 3)
# Delete position
ll.insert_at_pos('onyb', 2)
self.assertEqual(ll.head.get_next().get_next().get_next().get_data(), 'onyb')
self.assertEqual(ll.length, 4)
ll.delete_pos(2)
self.assertEqual(ll.head.get_next().get_data(), 'ani')
self.assertEqual(ll.length, 3)
# Clear linked list
ll.clear()
self.assertEqual(ll.length, 0) | mit | -401,720,435,719,677,000 | 31.846154 | 85 | 0.594712 | false |
jedie/DragonPy | PyDC/PyDC/wave2bitstream.py | 1 | 23203 | #!/usr/bin/env python2
"""
:copyleft: 2013 by Jens Diemer
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
import array
import functools
import itertools
import logging
import math
import struct
import sys
import time
import wave
from PyDC.utils import (
ProcessInfo,
TextLevelMeter,
average,
bits2codepoint,
codepoints2bitstream,
count_sign,
diff_info,
duration2hz,
human_duration,
hz2duration,
iter_steps,
iter_window,
sinus_values_by_hz,
)
try:
import audioop
except ImportError as err:
# e.g. PyPy, see: http://bugs.pypy.org/msg4430
print("Can't use audioop:", err)
audioop = None
log = logging.getLogger("PyDC")
WAVE_READ_SIZE = 16 * 1024 # How many frames should be read from WAVE file at once?
WAV_ARRAY_TYPECODE = {
1: "b", # 8-bit wave file
2: "h", # 16-bit wave file
4: "l", # 32-bit wave file TODO: Test it
}
# Maximum volume value in wave files:
MAX_VALUES = {
1: 255, # 8-bit wave file
2: 32768, # 16-bit wave file
4: 2147483647, # 32-bit wave file
}
HUMAN_SAMPLEWIDTH = {
1: "8-bit",
2: "16-bit",
4: "32-bit",
}
class WaveBase:
def get_typecode(self, samplewidth):
try:
typecode = WAV_ARRAY_TYPECODE[samplewidth]
except KeyError:
raise NotImplementedError(
"Only %s wave files are supported, yet!" % (
", ".join(["%sBit" % (i * 8) for i in list(WAV_ARRAY_TYPECODE.keys())])
)
)
return typecode
def pformat_pos(self):
sec = float(self.wave_pos) / self.framerate / self.samplewidth
return f"{human_duration(sec)} (frame no.: {self.wave_pos})"
def _hz2duration(self, hz):
return hz2duration(hz, framerate=self.framerate)
def _duration2hz(self, duration):
return duration2hz(duration, framerate=self.framerate)
def set_wave_properties(self):
self.framerate = self.wavefile.getframerate() # frames / second
self.samplewidth = self.wavefile.getsampwidth() # 1 for 8-bit, 2 for 16-bit, 4 for 32-bit samples
self.max_value = MAX_VALUES[self.samplewidth]
self.nchannels = self.wavefile.getnchannels() # typically 1 for mono, 2 for stereo
log.info("Framerate: %sHz samplewidth: %i (%sBit, max volume value: %s) channels: %s" % (
self.framerate,
self.samplewidth, self.samplewidth * 8, self.max_value,
self.nchannels,
))
assert self.nchannels == 1, "Only MONO files are supported, yet!"
class Wave2Bitstream(WaveBase):
def __init__(self, wave_filename, cfg):
self.wave_filename = wave_filename
self.cfg = cfg
self.half_sinus = False # in trigger yield the full cycle
self.wave_pos = 0 # Absolute position in the frame stream
assert cfg.END_COUNT > 0 # Sample count that must be pos/neg at once
assert cfg.MID_COUNT > 0 # Sample count that can be around null
print(f"open wave file '{wave_filename}'...")
try:
self.wavefile = wave.open(wave_filename, "rb")
except OSError as err:
msg = f"Error opening {repr(wave_filename)}: {err}"
log.error(msg)
sys.stderr.write(msg)
sys.exit(-1)
self.set_wave_properties()
self.frame_count = self.wavefile.getnframes()
print("Number of audio frames:", self.frame_count)
self.min_volume = int(round(self.max_value * cfg.MIN_VOLUME_RATIO / 100))
print(f"Ignore sample lower than {cfg.MIN_VOLUME_RATIO:.1f}% = {self.min_volume:d}")
self.half_sinus = False # in trigger yield the full cycle
self.frame_no = None
# create the generator chain:
# get frame numer + volume value from the WAVE file
self.wave_values_generator = self.iter_wave_values()
if cfg.AVG_COUNT > 1:
# merge samples to a average sample
log.debug(f"Merge {cfg.AVG_COUNT} audio sample to one average sample")
self.avg_wave_values_generator = self.iter_avg_wave_values(
self.wave_values_generator, cfg.AVG_COUNT
)
# trigger sinus cycle
self.iter_trigger_generator = self.iter_trigger(self.avg_wave_values_generator)
else:
# trigger sinus cycle
self.iter_trigger_generator = self.iter_trigger(self.wave_values_generator)
# duration of a complete sinus cycle
self.iter_duration_generator = self.iter_duration(self.iter_trigger_generator)
# build from sinus cycle duration the bit stream
self.iter_bitstream_generator = self.iter_bitstream(self.iter_duration_generator)
def _print_status(self, process_info):
percent = float(self.wave_pos) / self.frame_count * 100
rest, eta, rate = process_info.update(self.wave_pos)
sys.stdout.write(
f"\r{percent:.1f}% wav pos:{self.pformat_pos()} - eta: {eta} (rate: {rate:d}Frames/sec) ")
sys.stdout.flush()
def _get_statistics(self, max=None):
statistics = {}
iter_duration_generator = self.iter_duration(self.iter_trigger_generator)
for count, duration in enumerate(iter_duration_generator):
try:
statistics[duration] += 1
except KeyError:
statistics[duration] = 1
if max is not None and count >= max:
break
return statistics
def analyze(self):
"""
Example output:
394Hz ( 28 Samples) exist: 1
613Hz ( 18 Samples) exist: 1
788Hz ( 14 Samples) exist: 1
919Hz ( 12 Samples) exist: 329 *********
1002Hz ( 11 Samples) exist: 1704 **********************************************
1103Hz ( 10 Samples) exist: 1256 **********************************
1225Hz ( 9 Samples) exist: 1743 ***********************************************
1378Hz ( 8 Samples) exist: 1
1575Hz ( 7 Samples) exist: 322 *********
1838Hz ( 6 Samples) exist: 1851 **************************************************
2205Hz ( 5 Samples) exist: 1397 **************************************
2756Hz ( 4 Samples) exist: 913 *************************
"""
log.debug("enable half sinus scan")
self.half_sinus = True
statistics = self._get_statistics()
width = 50
max_count = max(statistics.values())
print()
        print("Found these zero crossing timings in the wave file:")
print()
for duration, count in sorted(list(statistics.items()), reverse=True):
hz = duration2hz(duration, self.framerate / 2)
w = int(round(float(width) / max_count * count))
stars = "*" * w
print(f"{hz:>5}Hz ({duration:>5} Samples) exist: {count:>4} {stars}")
print()
print("Notes:")
print(" - Hz values are converted to full sinus cycle duration.")
        print(" - Sample count is from the half sinus cycle.")
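        # Worked example of the duration <-> Hz relation used above, assuming a
        # typical 22050Hz framerate (an assumption, not read from the file): a
        # half sinus cycle of 12 samples maps to 22050 / 2 / 12 ~= 919Hz, 11
        # samples to ~1002Hz and 10 samples to ~1103Hz - the spread shown in
        # the example output in this method's docstring.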
def sync(self, length):
"""
synchronized weave sync trigger
"""
# go in wave stream to the first bit
try:
next(self)
except StopIteration:
print("Error: no bits identified!")
sys.exit(-1)
log.info("First bit is at: %s" % self.pformat_pos())
log.debug("enable half sinus scan")
self.half_sinus = True
# Toggle sync test by consuming one half sinus sample
# self.iter_trigger_generator.next() # Test sync
# get "half sinus cycle" test data
test_durations = itertools.islice(self.iter_duration_generator, length)
# It's a tuple like: [(frame_no, duration)...]
test_durations = list(test_durations)
diff1, diff2 = diff_info(test_durations)
log.debug(f"sync diff info: {diff1:d} vs. {diff2:d}")
if diff1 > diff2:
log.info("\nbit-sync one step.")
next(self.iter_trigger_generator)
log.debug("Synced.")
else:
log.info("\nNo bit-sync needed.")
self.half_sinus = False
log.debug("disable half sinus scan")
def __iter__(self):
return self
def __next__(self):
return next(self.iter_bitstream_generator)
def iter_bitstream(self, iter_duration_generator):
"""
iterate over self.iter_trigger() and
yield the bits
"""
assert self.half_sinus is False # Allways trigger full sinus cycle
# build min/max Hz values
bit_nul_min_hz = self.cfg.BIT_NUL_HZ - self.cfg.HZ_VARIATION
bit_nul_max_hz = self.cfg.BIT_NUL_HZ + self.cfg.HZ_VARIATION
bit_one_min_hz = self.cfg.BIT_ONE_HZ - self.cfg.HZ_VARIATION
bit_one_max_hz = self.cfg.BIT_ONE_HZ + self.cfg.HZ_VARIATION
bit_nul_max_duration = self._hz2duration(bit_nul_min_hz)
bit_nul_min_duration = self._hz2duration(bit_nul_max_hz)
bit_one_max_duration = self._hz2duration(bit_one_min_hz)
bit_one_min_duration = self._hz2duration(bit_one_max_hz)
log.info(f"bit-0 in {bit_nul_min_hz}Hz - {bit_nul_max_hz}Hz (duration: {bit_nul_min_duration}-{bit_nul_max_duration}) | bit-1 in {bit_one_min_hz}Hz - {bit_one_max_hz}Hz (duration: {bit_one_min_duration}-{bit_one_max_duration})")
assert bit_nul_max_hz < bit_one_min_hz, "HZ_VARIATION value is %sHz too high!" % (
((bit_nul_max_hz - bit_one_min_hz) / 2) + 1
)
assert bit_one_max_duration < bit_nul_min_duration, "HZ_VARIATION value is too high!"
# for end statistics
bit_one_count = 0
one_hz_min = sys.maxsize
one_hz_avg = None
one_hz_max = 0
bit_nul_count = 0
nul_hz_min = sys.maxsize
nul_hz_avg = None
nul_hz_max = 0
for duration in iter_duration_generator:
if bit_one_min_duration < duration < bit_one_max_duration:
hz = self._duration2hz(duration)
log.log(5,
f"bit 1 at {self.pformat_pos()} in {duration}Samples = {hz}Hz"
)
yield 1
bit_one_count += 1
if hz < one_hz_min:
one_hz_min = hz
if hz > one_hz_max:
one_hz_max = hz
one_hz_avg = average(one_hz_avg, hz, bit_one_count)
elif bit_nul_min_duration < duration < bit_nul_max_duration:
hz = self._duration2hz(duration)
log.log(5,
f"bit 0 at {self.pformat_pos()} in {duration}Samples = {hz}Hz"
)
yield 0
bit_nul_count += 1
if hz < nul_hz_min:
nul_hz_min = hz
if hz > nul_hz_max:
nul_hz_max = hz
nul_hz_avg = average(nul_hz_avg, hz, bit_nul_count)
else:
hz = self._duration2hz(duration)
log.log(7,
f"Skip signal at {self.pformat_pos()} with {hz}Hz ({duration}Samples) out of frequency range."
)
continue
bit_count = bit_one_count + bit_nul_count
if bit_count == 0:
print("ERROR: No information from wave to generate the bits")
print("trigger volume to high?")
sys.exit(-1)
log.info(f"\n{bit_count:d} Bits: {bit_one_count:d} positive bits and {bit_nul_count:d} negative bits")
if bit_one_count > 0:
log.info("Bit 1: {}Hz - {}Hz avg: {:.1f}Hz variation: {}Hz".format(
one_hz_min, one_hz_max, one_hz_avg, one_hz_max - one_hz_min
))
if bit_nul_count > 0:
log.info("Bit 0: {}Hz - {}Hz avg: {:.1f}Hz variation: {}Hz".format(
nul_hz_min, nul_hz_max, nul_hz_avg, nul_hz_max - nul_hz_min
))
def iter_duration(self, iter_trigger):
"""
yield the duration of two frames in a row.
"""
print()
process_info = ProcessInfo(self.frame_count, use_last_rates=4)
start_time = time.time()
next_status = start_time + 0.25
old_pos = next(iter_trigger)
for pos in iter_trigger:
duration = pos - old_pos
# log.log(5, "Duration: %s" % duration)
yield duration
old_pos = pos
if time.time() > next_status:
next_status = time.time() + 1
self._print_status(process_info)
self._print_status(process_info)
print()
def iter_trigger(self, iter_wave_values):
"""
        yield the frame position of each mid-level (zero) crossing of the wave sinus curve
        (negative-to-positive crossings only, unless half_sinus is set)
"""
window_size = (2 * self.cfg.END_COUNT) + self.cfg.MID_COUNT
# sinus curve goes from negative into positive:
pos_null_transit = [(0, self.cfg.END_COUNT), (self.cfg.END_COUNT, 0)]
# sinus curve goes from positive into negative:
neg_null_transit = [(self.cfg.END_COUNT, 0), (0, self.cfg.END_COUNT)]
if self.cfg.MID_COUNT > 3:
mid_index = int(round(self.cfg.MID_COUNT / 2.0))
else:
mid_index = 0
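        # mid_index picks which sample of the mid window is reported as the crossing
        # position (the middle sample for wider windows, the first one otherwise).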
in_pos = False
for values in iter_window(iter_wave_values, window_size):
# Split the window
previous_values = values[:self.cfg.END_COUNT] # e.g.: 123-----
mid_values = values[self.cfg.END_COUNT:self.cfg.END_COUNT + self.cfg.MID_COUNT] # e.g.: ---45---
next_values = values[-self.cfg.END_COUNT:] # e.g.: -----678
# get only the value and strip the frame_no
# e.g.: (frame_no, value) tuple -> value list
previous_values = [i[1] for i in previous_values]
next_values = [i[1] for i in next_values]
# Count sign from previous and next values
sign_info = [
count_sign(previous_values, 0),
count_sign(next_values, 0)
]
# log.log(5, "sign info: %s" % repr(sign_info))
# yield the mid crossing
if in_pos is False and sign_info == pos_null_transit:
log.log(5, "sinus curve goes from negative into positive")
# log.debug(" %s | %s | %s" % (previous_values, mid_values, next_values))
yield mid_values[mid_index][0]
in_pos = True
elif in_pos and sign_info == neg_null_transit:
if self.half_sinus:
log.log(5, "sinus curve goes from positive into negative")
# log.debug(" %s | %s | %s" % (previous_values, mid_values, next_values))
yield mid_values[mid_index][0]
in_pos = False
def iter_avg_wave_values(self, wave_values_generator, avg_count):
        # tlm must exist even when verbose logging is off, because it is checked below
        tlm = None
        if log.level >= 5:
            tlm = TextLevelMeter(self.max_value, 79)
for value_tuples in iter_steps(wave_values_generator, avg_count):
values = [i[1] for i in value_tuples]
avg_value = int(round(
float(sum(values)) / avg_count
))
if tlm:
msg = tlm.feed(avg_value)
percent = 100.0 / self.max_value * abs(avg_value)
log.log(5,
f"{msg} average {','.join([str(v) for v in values])} samples to: {avg_value} ({percent:.1f}%)"
)
yield (self.wave_pos, avg_value)
def iter_wave_values(self):
"""
        yield frame number + volume value from the WAVE file
"""
typecode = self.get_typecode(self.samplewidth)
if log.level >= 5:
if self.cfg.AVG_COUNT > 1:
# merge samples -> log output in iter_avg_wave_values
tlm = None
else:
tlm = TextLevelMeter(self.max_value, 79)
        # Use only a read size that is a whole multiple of the samplewidth
# Otherwise array.array will raise: ValueError: string length not a multiple of item size
divider = int(round(float(WAVE_READ_SIZE) / self.samplewidth))
read_size = self.samplewidth * divider
if read_size != WAVE_READ_SIZE:
log.info(f"Real use wave read size: {read_size:d} Bytes")
get_wave_block_func = functools.partial(self.wavefile.readframes, read_size)
skip_count = 0
manually_audioop_bias = self.samplewidth == 1 and audioop is None
        for frames in iter(get_wave_block_func, b""):  # readframes() returns bytes, so use a bytes sentinel
if self.samplewidth == 1:
if audioop is None:
log.warning("use audioop.bias() work-a-round for missing audioop.")
else:
# 8 bit samples are unsigned, see:
# http://docs.python.org/2/library/audioop.html#audioop.lin2lin
frames = audioop.bias(frames, 1, 128)
try:
values = array.array(typecode, frames)
except ValueError as err:
# e.g.:
# ValueError: string length not a multiple of item size
# Work-a-round: Skip the last frames of this block
frame_count = len(frames)
divider = int(math.floor(float(frame_count) / self.samplewidth))
new_count = self.samplewidth * divider
frames = frames[:new_count] # skip frames
log.error(
"Can't make array from %s frames: Value error: %s (Skip %i and use %i frames)" % (
frame_count, err, frame_count - new_count, len(frames)
))
values = array.array(typecode, frames)
for value in values:
self.wave_pos += 1 # Absolute position in the frame stream
if manually_audioop_bias:
# audioop.bias can't be used.
# See: http://hg.python.org/cpython/file/482590320549/Modules/audioop.c#l957
value = value % 0xff - 128
# if abs(value) < self.min_volume:
# # log.log(5, "Ignore to lower amplitude")
# skip_count += 1
# continue
yield (self.wave_pos, value)
log.info(f"Skip {skip_count:d} samples that are lower than {self.min_volume:d}")
log.info("Last readed Frame is: %s" % self.pformat_pos())
class Bitstream2Wave(WaveBase):
def __init__(self, destination_filepath, cfg):
self.destination_filepath = destination_filepath
self.cfg = cfg
wave_max_value = MAX_VALUES[self.cfg.SAMPLEWIDTH]
self.used_max_values = int(round(
float(wave_max_value) / 100 * self.cfg.VOLUME_RATIO
))
log.info(
f"Create {HUMAN_SAMPLEWIDTH[self.cfg.SAMPLEWIDTH]} wave file with {self.cfg.FRAMERATE}Hz and {self.used_max_values} max volumen ({self.cfg.VOLUME_RATIO}%)")
self.typecode = self.get_typecode(self.cfg.SAMPLEWIDTH)
self.bit_nul_samples = self.get_samples(self.cfg.BIT_NUL_HZ)
self.bit_one_samples = self.get_samples(self.cfg.BIT_ONE_HZ)
log.info(f"create wave file '{destination_filepath}'...")
try:
self.wavefile = wave.open(destination_filepath, "wb")
except OSError as err:
log.error(f"Error opening {repr(destination_filepath)}: {err}")
sys.exit(-1)
self.wavefile.setnchannels(1) # Mono
self.wavefile.setsampwidth(self.cfg.SAMPLEWIDTH)
self.wavefile.setframerate(self.cfg.FRAMERATE)
self.set_wave_properties()
@property
def wave_pos(self):
pos = self.wavefile._nframeswritten * self.samplewidth
return pos
def pack_values(self, values):
value_length = len(values)
pack_format = f"{value_length:d}{self.typecode}"
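        # e.g. 4 values with typecode "h" -> struct format "4h" (four signed 16-bit samples)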
packed_samples = struct.pack(pack_format, *values)
return packed_samples
def get_samples(self, hz):
values = tuple(
sinus_values_by_hz(self.cfg.FRAMERATE, hz, self.used_max_values)
)
real_hz = float(self.cfg.FRAMERATE) / len(values)
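        # one full sinus cycle spans len(values) samples, so the frequency actually
        # written is framerate / len(values), which may differ slightly from the requested hz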
log.debug(f"Real frequency: {real_hz:.2f}")
return self.pack_values(values)
def write_codepoint(self, codepoints):
written_codepoints = []
bits = []
for bit in codepoints2bitstream(codepoints):
bits.append(bit)
if len(bits) == 8:
written_codepoints.append(bits2codepoint(bits))
bits = []
if bit == 0:
# wavefile.writeframes(self.bit_nul_samples)
self.wavefile.writeframes(self.bit_nul_samples)
elif bit == 1:
# wavefile.writeframes(self.bit_one_samples)
self.wavefile.writeframes(self.bit_one_samples)
else:
raise TypeError
log.debug(f"Written at {self.pformat_pos()}: {','.join([hex(x) for x in written_codepoints])}")
def write_silence(self, sec):
start_pos = self.pformat_pos()
silence = [0x00] * int(round(sec * self.framerate))
packed_samples = self.pack_values(silence)
self.wavefile.writeframes(packed_samples)
log.debug(f"Write {sec}sec. silence {start_pos} - {self.pformat_pos()}")
def close(self):
self.wavefile.close()
log.info(f"Wave file {self.destination_filepath} written ({self.pformat_pos()})")
if __name__ == "__main__":
import doctest
print(doctest.testmod(
verbose=False
# verbose=True
))
# sys.exit()
# test via CLI:
import sys
import subprocess
# subprocess.Popen([sys.executable, "../PyDC_cli.py", "--help"])
# sys.exit()
# subprocess.Popen([sys.executable, "../PyDC_cli.py", "--verbosity=10",
# # "--log_format=%(module)s %(lineno)d: %(message)s",
# "--analyze",
# "../test_files/HelloWorld1 xroar.wav"
# # "../test_files/HelloWorld1 origin.wav"
# ])
# print "\n"*3
# print "="*79
# print "\n"*3
# bas -> wav
subprocess.Popen([sys.executable, "../PyDC_cli.py",
"--verbosity=10",
# "--verbosity=5",
# "--logfile=5",
# "--log_format=%(module)s %(lineno)d: %(message)s",
"../test_files/HelloWorld1.bas", "--dst=../test.wav"
]).wait()
# print "\n"*3
# print "="*79
# print "\n"*3
#
# # wav -> bas
# subprocess.Popen([sys.executable, "../PyDC_cli.py",
# "--verbosity=10",
# # "--verbosity=5",
# # "--logfile=5",
# # "--log_format=%(module)s %(lineno)d: %(message)s",
# "../test.wav", "--dst=../test.bas",
# # "../test_files/HelloWorld1 origin.wav", "--dst=../test_files/HelloWorld1.bas",
# ]).wait()
#
# print "-- END --"
| gpl-3.0 | 9,016,746,850,299,839,000 | 35.425432 | 238 | 0.545274 | false |
andyliuliming/azure-linux-extensions | VMBackup/main/handle.py | 1 | 27941 | #!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import array
import base64
import os
import os.path
import re
import json
import string
import subprocess
import sys
import imp
import time
import shlex
import traceback
import xml.parsers.expat
import datetime
try:
import ConfigParser as ConfigParsers
except ImportError:
import configparser as ConfigParsers
from threading import Thread
from time import sleep
from os.path import join
from mounts import Mounts
from mounts import Mount
from patch import *
from fsfreezer import FsFreezer
from common import CommonVariables
from parameterparser import ParameterParser
from Utils import HandlerUtil
from Utils import SizeCalculation
from Utils import Status
from freezesnapshotter import FreezeSnapshotter
from backuplogger import Backuplogger
from blobwriter import BlobWriter
from taskidentity import TaskIdentity
from MachineIdentity import MachineIdentity
import ExtensionErrorCodeHelper
from PluginHost import PluginHost
from PluginHost import PluginHostResult
#Main function is the only entrence to this extension handler
def main():
global MyPatching,backup_logger,hutil,run_result,run_status,error_msg,freezer,freeze_result,snapshot_info_array,total_used_size,size_calculation_failed
try:
run_result = CommonVariables.success
run_status = 'success'
error_msg = ''
freeze_result = None
snapshot_info_array = None
total_used_size = 0
size_calculation_failed = False
HandlerUtil.waagent.LoggerInit('/dev/console','/dev/stdout')
## HandlerUtil.waagent.Logger.Log((CommonVariables.extension_name) + " started to handle." )
hutil = HandlerUtil.HandlerUtility(HandlerUtil.waagent.Log, HandlerUtil.waagent.Error, CommonVariables.extension_name)
backup_logger = Backuplogger(hutil)
MyPatching = GetMyPatching(backup_logger)
hutil.patching = MyPatching
for a in sys.argv[1:]:
if re.match("^([-/]*)(disable)", a):
disable()
elif re.match("^([-/]*)(uninstall)", a):
uninstall()
elif re.match("^([-/]*)(install)", a):
install()
elif re.match("^([-/]*)(enable)", a):
enable()
elif re.match("^([-/]*)(update)", a):
update()
elif re.match("^([-/]*)(daemon)", a):
daemon()
except Exception as e:
sys.exit(0)
def install():
global hutil
hutil.do_parse_context('Install')
hutil.do_exit(0, 'Install','success','0', 'Install Succeeded')
def timedelta_total_seconds(delta):
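    # timedelta.total_seconds() only exists on Python >= 2.7; fall back to a manual
    # computation (ignoring microseconds) on older interpreters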
if not hasattr(datetime.timedelta, 'total_seconds'):
return delta.days * 86400 + delta.seconds
else:
return delta.total_seconds()
def status_report_to_file(file_report_msg):
global backup_logger,hutil
hutil.write_to_status_file(file_report_msg)
backup_logger.log("file status report message:",True)
backup_logger.log(file_report_msg,True)
def status_report_to_blob(blob_report_msg):
global backup_logger,hutil,para_parser
try:
if(para_parser is not None and para_parser.statusBlobUri is not None and para_parser.statusBlobUri != ""):
blobWriter = BlobWriter(hutil)
if(blob_report_msg is not None):
blobWriter.WriteBlob(blob_report_msg,para_parser.statusBlobUri)
backup_logger.log("blob status report message:",True)
backup_logger.log(blob_report_msg,True)
else:
backup_logger.log("blob_report_msg is none",True)
except Exception as e:
err_msg='cannot write status to the status blob'+traceback.format_exc()
backup_logger.log(err_msg, True, 'Warning')
def get_status_to_report(status, status_code, message, snapshot_info = None):
global MyPatching,backup_logger,hutil,para_parser,total_used_size,size_calculation_failed
blob_report_msg = None
file_report_msg = None
try:
if total_used_size == -1 :
sizeCalculation = SizeCalculation.SizeCalculation(patching = MyPatching , logger = backup_logger)
total_used_size,size_calculation_failed = sizeCalculation.get_total_used_size()
number_of_blobs = len(para_parser.blobs)
maximum_possible_size = number_of_blobs * 1099511627776
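        # 1099511627776 bytes = 1 TiB; the reported total size is capped at 1 TiB per blob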
if(total_used_size>maximum_possible_size):
total_used_size = maximum_possible_size
backup_logger.log("Assertion Check, total size : {0} ,maximum_possible_size : {1}".format(total_used_size,maximum_possible_size),True)
if(para_parser is not None and para_parser.statusBlobUri is not None and para_parser.statusBlobUri != ""):
blob_report_msg, file_report_msg = hutil.do_status_report(operation='Enable',status=status,\
status_code=str(status_code),\
message=message,\
taskId=para_parser.taskId,\
commandStartTimeUTCTicks=para_parser.commandStartTimeUTCTicks,\
snapshot_info=snapshot_info,\
total_size = total_used_size,\
failure_flag = size_calculation_failed)
except Exception as e:
err_msg='cannot get status report parameters , Exception %s, stack trace: %s' % (str(e), traceback.format_exc())
backup_logger.log(err_msg, True, 'Warning')
return blob_report_msg, file_report_msg
def exit_with_commit_log(status,result,error_msg, para_parser):
global backup_logger
backup_logger.log(error_msg, True, 'Error')
if(para_parser is not None and para_parser.logsBlobUri is not None and para_parser.logsBlobUri != ""):
backup_logger.commit(para_parser.logsBlobUri)
blob_report_msg, file_report_msg = get_status_to_report(status, result, error_msg, None)
status_report_to_file(file_report_msg)
status_report_to_blob(blob_report_msg)
sys.exit(0)
def exit_if_same_taskId(taskId):
global backup_logger,hutil,para_parser
trans_report_msg = None
taskIdentity = TaskIdentity()
last_taskId = taskIdentity.stored_identity()
if(taskId == last_taskId):
backup_logger.log("TaskId is same as last, so skip with Processed Status, current:" + str(taskId) + "== last:" + str(last_taskId), True)
status=CommonVariables.status_success
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.SuccessAlreadyProcessedInput)
status_code=CommonVariables.SuccessAlreadyProcessedInput
message='TaskId AlreadyProcessed nothing to do'
try:
if(para_parser is not None):
blob_report_msg, file_report_msg = hutil.do_status_report(operation='Enable',status=status,\
status_code=str(status_code),\
message=message,\
taskId=taskId,\
commandStartTimeUTCTicks=para_parser.commandStartTimeUTCTicks,\
snapshot_info=None)
status_report_to_file(file_report_msg)
except Exception as e:
err_msg='cannot write status to the status file, Exception %s, stack trace: %s' % (str(e), traceback.format_exc())
backup_logger.log(err_msg, True, 'Warning')
sys.exit(0)
def convert_time(utcTicks):
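    # utcTicks are .NET-style ticks (100 ns units since 0001-01-01); dividing by 10 converts them to microseconds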
return datetime.datetime(1, 1, 1) + datetime.timedelta(microseconds = utcTicks / 10)
def freeze_snapshot(timeout):
try:
global hutil,backup_logger,run_result,run_status,error_msg,freezer,freeze_result,para_parser,snapshot_info_array,g_fsfreeze_on
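        # 'doseq' is a flag persisted in the config file, presumably guarding the
        # freeze/snapshot sequence ('2' while a snapshot is in flight)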
if(hutil.get_value_from_configfile('doseq') == '2'):
hutil.set_value_to_configfile('doseq', '1')
if(hutil.get_value_from_configfile('doseq') != '1'):
hutil.set_value_to_configfile('doseq', '2')
freeze_snap_shotter = FreezeSnapshotter(backup_logger, hutil, freezer, g_fsfreeze_on, para_parser)
backup_logger.log("Calling do snapshot method", True, 'Info')
run_result, run_status, snapshot_info_array = freeze_snap_shotter.doFreezeSnapshot()
except Exception as e:
if(hutil.get_value_from_configfile('doseq') == '2'):
hutil.set_value_to_configfile('doseq', '0')
errMsg = 'Failed to do the snapshot with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
backup_logger.log(errMsg, True, 'Error')
run_result = CommonVariables.error
run_status = 'error'
error_msg = 'Enable failed with exception in safe freeze or snapshot '
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error)
#snapshot_done = True
def check_snapshot_array_fail():
global snapshot_info_array, backup_logger
snapshot_array_fail = False
if snapshot_info_array is not None and snapshot_info_array !=[]:
for snapshot_index in range(len(snapshot_info_array)):
if(snapshot_info_array[snapshot_index].isSuccessful == False):
backup_logger.log('T:S snapshot failed at index ' + str(snapshot_index), True)
snapshot_array_fail = True
break
return snapshot_array_fail
def daemon():
global MyPatching,backup_logger,hutil,run_result,run_status,error_msg,freezer,para_parser,snapshot_done,snapshot_info_array,g_fsfreeze_on,total_used_size
#this is using the most recent file timestamp.
hutil.do_parse_context('Executing')
freezer = FsFreezer(patching= MyPatching, logger = backup_logger)
global_error_result = None
# precheck
freeze_called = False
configfile='/etc/azure/vmbackup.conf'
thread_timeout=str(60)
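    # default snapshot thread timeout in seconds; may be overridden by the
    # [SnapshotThread] timeout option in /etc/azure/vmbackup.conf (read below)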
#Adding python version to the telemetry
try:
python_version_info = sys.version_info
python_version = str(sys.version_info[0])+ '.' + str(sys.version_info[1]) + '.' + str(sys.version_info[2])
HandlerUtil.HandlerUtility.add_to_telemetery_data("pythonVersion", python_version)
except Exception as e:
errMsg = 'Failed to do retrieve python version with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
backup_logger.log(errMsg, True, 'Error')
try:
if(freezer.mounts is not None):
hutil.partitioncount = len(freezer.mounts.mounts)
backup_logger.log(" configfile " + str(configfile), True)
config = ConfigParsers.ConfigParser()
config.read(configfile)
if config.has_option('SnapshotThread','timeout'):
thread_timeout= config.get('SnapshotThread','timeout')
except Exception as e:
errMsg='cannot read config file or file not present'
backup_logger.log(errMsg, True, 'Warning')
backup_logger.log("final thread timeout" + thread_timeout, True)
snapshot_info_array = None
try:
# we need to freeze the file system first
backup_logger.log('starting daemon', True)
"""
        protectedSettings is the private configuration passed from PowerShell.
        Watch out: _context_config uses the most recent timestamp, so this is
        only correct if time synchronization is working.
"""
if(hutil.is_prev_in_transition()):
backup_logger.log('retrieving the previous logs for this again inside daemon', True)
backup_logger.set_prev_log()
protected_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('protectedSettings')
public_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('publicSettings')
para_parser = ParameterParser(protected_settings, public_settings)
commandToExecute = para_parser.commandToExecute
#validate all the required parameter here
backup_logger.log(commandToExecute,True)
if(CommonVariables.iaas_install_command in commandToExecute.lower()):
backup_logger.log('install succeed.',True)
run_status = 'success'
error_msg = 'Install Succeeded'
run_result = CommonVariables.success
backup_logger.log(error_msg)
elif(CommonVariables.iaas_vmbackup_command in commandToExecute.lower()):
if(para_parser.backup_metadata is None or para_parser.public_config_obj is None or para_parser.private_config_obj is None):
run_result = CommonVariables.error_parameter
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error_parameter)
run_status = 'error'
error_msg = 'required field empty or not correct'
backup_logger.log(error_msg, True, 'Error')
else:
backup_logger.log('commandToExecute is ' + commandToExecute, True)
"""
                make sure no logging happens while the file system is frozen.
"""
temp_status= 'success'
temp_result=CommonVariables.ExtensionTempTerminalState
temp_msg='Transitioning state in extension'
blob_report_msg, file_report_msg = get_status_to_report(temp_status, temp_result, temp_msg, None)
if(hutil.is_status_file_exists()):
status_report_to_file(file_report_msg)
status_report_to_blob(blob_report_msg)
#partial logging before freeze
if(para_parser is not None and para_parser.logsBlobUri is not None and para_parser.logsBlobUri != ""):
backup_logger.commit_to_blob(para_parser.logsBlobUri)
else:
backup_logger.log("the logs blob uri is not there, so do not upload log.")
backup_logger.log('commandToExecute is ' + commandToExecute, True)
PluginHostObj = PluginHost(logger=backup_logger)
PluginHostErrorCode,dobackup,g_fsfreeze_on = PluginHostObj.pre_check()
doFsConsistentbackup = False
appconsistentBackup = False
if not (PluginHostErrorCode == CommonVariables.FailedPrepostPluginhostConfigParsing or
PluginHostErrorCode == CommonVariables.FailedPrepostPluginConfigParsing or
PluginHostErrorCode == CommonVariables.FailedPrepostPluginhostConfigNotFound or
PluginHostErrorCode == CommonVariables.FailedPrepostPluginhostConfigPermissionError or
PluginHostErrorCode == CommonVariables.FailedPrepostPluginConfigNotFound):
                    backup_logger.log('App Consistent Backup Enabled', True)
HandlerUtil.HandlerUtility.add_to_telemetery_data("isPrePostEnabled", "true")
appconsistentBackup = True
if(PluginHostErrorCode != CommonVariables.PrePost_PluginStatus_Success):
backup_logger.log('Triggering File System Consistent Backup because of error code' + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(PluginHostErrorCode), True)
doFsConsistentbackup = True
preResult = PluginHostResult()
postResult = PluginHostResult()
if not doFsConsistentbackup:
preResult = PluginHostObj.pre_script()
dobackup = preResult.continueBackup
if(g_fsfreeze_on == False and preResult.anyScriptFailed):
dobackup = False
if dobackup:
freeze_snapshot(thread_timeout)
if not doFsConsistentbackup:
postResult = PluginHostObj.post_script()
if not postResult.continueBackup:
dobackup = False
if(g_fsfreeze_on == False and postResult.anyScriptFailed):
dobackup = False
if not dobackup:
if run_result == CommonVariables.success and PluginHostErrorCode != CommonVariables.PrePost_PluginStatus_Success:
run_status = 'error'
run_result = PluginHostErrorCode
hutil.SetExtErrorCode(PluginHostErrorCode)
error_msg = 'Plugin Host Precheck Failed'
error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(hutil.ExtErrorCode)
backup_logger.log(error_msg, True)
if run_result == CommonVariables.success:
pre_plugin_errors = preResult.errors
for error in pre_plugin_errors:
if error.errorCode != CommonVariables.PrePost_PluginStatus_Success:
run_status = 'error'
run_result = error.errorCode
hutil.SetExtErrorCode(error.errorCode)
error_msg = 'PreScript failed for the plugin ' + error.pluginName
error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(hutil.ExtErrorCode)
backup_logger.log(error_msg, True)
break
if run_result == CommonVariables.success:
post_plugin_errors = postResult.errors
for error in post_plugin_errors:
if error.errorCode != CommonVariables.PrePost_PluginStatus_Success:
run_status = 'error'
run_result = error.errorCode
hutil.SetExtErrorCode(error.errorCode)
error_msg = 'PostScript failed for the plugin ' + error.pluginName
error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(hutil.ExtErrorCode)
backup_logger.log(error_msg, True)
break
if appconsistentBackup:
if(PluginHostErrorCode != CommonVariables.PrePost_PluginStatus_Success):
hutil.SetExtErrorCode(PluginHostErrorCode)
pre_plugin_errors = preResult.errors
for error in pre_plugin_errors:
if error.errorCode != CommonVariables.PrePost_PluginStatus_Success:
hutil.SetExtErrorCode(error.errorCode)
post_plugin_errors = postResult.errors
for error in post_plugin_errors:
if error.errorCode != CommonVariables.PrePost_PluginStatus_Success:
hutil.SetExtErrorCode(error.errorCode)
if run_result == CommonVariables.success and not doFsConsistentbackup and not (preResult.anyScriptFailed or postResult.anyScriptFailed):
run_status = 'success'
run_result = CommonVariables.success_appconsistent
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.success_appconsistent)
error_msg = 'Enable Succeeded with App Consistent Snapshot'
backup_logger.log(error_msg, True)
else:
run_status = 'error'
run_result = CommonVariables.error_parameter
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error_parameter)
error_msg = 'command is not correct'
backup_logger.log(error_msg, True, 'Error')
except Exception as e:
errMsg = 'Failed to enable the extension with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
backup_logger.log(errMsg, True, 'Error')
global_error_result = e
"""
    do the final status report here to avoid the complex logic of handling logging while the file system is frozen.
"""
try:
if(global_error_result is not None):
if(hasattr(global_error_result,'errno') and global_error_result.errno == 2):
run_result = CommonVariables.error_12
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error_12)
elif(para_parser is None):
run_result = CommonVariables.error_parameter
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error_parameter)
else:
run_result = CommonVariables.error
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error)
run_status = 'error'
error_msg += ('Enable failed.' + str(global_error_result))
status_report_msg = None
HandlerUtil.HandlerUtility.add_to_telemetery_data("extErrorCode", str(ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.ExtensionErrorCodeNameDict[hutil.ExtErrorCode]))
total_used_size = -1
blob_report_msg, file_report_msg = get_status_to_report(run_status,run_result,error_msg, snapshot_info_array)
if(hutil.is_status_file_exists()):
status_report_to_file(file_report_msg)
status_report_to_blob(blob_report_msg)
except Exception as e:
errMsg = 'Failed to log status in extension'
backup_logger.log(errMsg, True, 'Error')
if(para_parser is not None and para_parser.logsBlobUri is not None and para_parser.logsBlobUri != ""):
backup_logger.commit(para_parser.logsBlobUri)
else:
backup_logger.log("the logs blob uri is not there, so do not upload log.")
backup_logger.commit_to_local()
sys.exit(0)
def uninstall():
hutil.do_parse_context('Uninstall')
hutil.do_exit(0,'Uninstall','success','0', 'Uninstall succeeded')
def disable():
hutil.do_parse_context('Disable')
hutil.do_exit(0,'Disable','success','0', 'Disable Succeeded')
def update():
    hutil.do_parse_context('Update')
hutil.do_exit(0,'Update','success','0', 'Update Succeeded')
def enable():
global backup_logger,hutil,error_msg,para_parser
hutil.do_parse_context('Enable')
try:
backup_logger.log('starting to enable', True)
# handle the restoring scenario.
mi = MachineIdentity()
stored_identity = mi.stored_identity()
if(stored_identity is None):
mi.save_identity()
else:
current_identity = mi.current_identity()
if(current_identity != stored_identity):
current_seq_no = -1
backup_logger.log("machine identity not same, set current_seq_no to " + str(current_seq_no) + " " + str(stored_identity) + " " + str(current_identity), True)
hutil.set_last_seq(current_seq_no)
mi.save_identity()
hutil.exit_if_same_seq()
"""
        protectedSettings is the private configuration passed from PowerShell.
        Watch out: _context_config uses the most recent timestamp, so this is
        only correct if time synchronization is working.
"""
protected_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('protectedSettings')
public_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('publicSettings')
para_parser = ParameterParser(protected_settings, public_settings)
if(bool(public_settings) and not protected_settings): #Protected settings decryption failed case
error_msg = "unable to load certificate"
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedHandlerGuestAgentCertificateNotFound)
temp_result=CommonVariables.FailedHandlerGuestAgentCertificateNotFound
temp_status= 'error'
exit_with_commit_log(temp_status, temp_result,error_msg, para_parser)
if(para_parser.commandStartTimeUTCTicks is not None and para_parser.commandStartTimeUTCTicks != ""):
utcTicksLong = int(para_parser.commandStartTimeUTCTicks)
backup_logger.log('utcTicks in long format' + str(utcTicksLong), True)
commandStartTime = convert_time(utcTicksLong)
utcNow = datetime.datetime.utcnow()
backup_logger.log('command start time is ' + str(commandStartTime) + " and utcNow is " + str(utcNow), True)
timespan = utcNow - commandStartTime
MAX_TIMESPAN = 150 * 60 # in seconds
# handle the machine identity for the restoration scenario.
total_span_in_seconds = timedelta_total_seconds(timespan)
backup_logger.log('timespan is ' + str(timespan) + ' ' + str(total_span_in_seconds))
if(para_parser.taskId is not None and para_parser.taskId != ""):
backup_logger.log('taskId: ' + str(para_parser.taskId), True)
exit_if_same_taskId(para_parser.taskId)
taskIdentity = TaskIdentity()
taskIdentity.save_identity(para_parser.taskId)
hutil.save_seq()
temp_status= 'transitioning'
temp_result=CommonVariables.success
temp_msg='Transitioning state in enable'
blob_report_msg, file_report_msg = get_status_to_report(temp_status, temp_result, temp_msg, None)
file_status_upload_thread=Thread(target=status_report_to_file, args=(file_report_msg,))
file_status_upload_thread.start()
blob_status_upload_thread=Thread(target=status_report_to_blob, args=(blob_report_msg,))
blob_status_upload_thread.start()
if(hutil.is_prev_in_transition()):
backup_logger.log('retrieving the previous logs for this', True)
backup_logger.set_prev_log()
if(para_parser is not None and para_parser.logsBlobUri is not None and para_parser.logsBlobUri != ""):
log_upload_thread=Thread(target=thread_for_log_upload)
log_upload_thread.start()
log_upload_thread.join(60)
file_status_upload_thread.join(30)
blob_status_upload_thread.join(60)
start_daemon();
sys.exit(0)
except Exception as e:
errMsg = 'Failed to call the daemon with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
backup_logger.log(errMsg, True, 'Error')
global_error_result = e
temp_status= 'error'
temp_result=CommonVariables.error
hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error)
error_msg = 'Failed to call the daemon'
exit_with_commit_log(temp_status, temp_result,error_msg, para_parser)
def thread_for_log_upload():
global para_parser,backup_logger
backup_logger.commit(para_parser.logsBlobUri)
def start_daemon():
args = [os.path.join(os.getcwd(), "main/handle.sh"), "daemon"]
backup_logger.log("start_daemon with args: {0}".format(args), True)
#This process will start a new background process by calling
# handle.py -daemon
    #to run the script and will exit itself immediately.
    #Redirect stdout and stderr to /dev/null. Otherwise the daemon process will
    #throw a broken pipe exception when the parent process exits.
devnull = open(os.devnull, 'w')
child = subprocess.Popen(args, stdout=devnull, stderr=devnull)
if __name__ == '__main__' :
main()
| apache-2.0 | 1,799,172,088,873,042,700 | 49.344144 | 206 | 0.642318 | false |
michimussato/pypelyne2 | pypelyne2/src/parser/tests/test_parse_outputs.py | 1 | 1455 | import unittest
import pypelyne2.src.parser.parse_outputs as parse_outputs
import pypelyne2.src.modules.output.output as output
class ParseOutputsTester(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.output_objects = parse_outputs.get_outputs()
def test_get_outputs(self):
""" Test checks if the outputs are output objects """
for output_object in self.output_objects:
self.assertIsInstance(output_object, output.Output)
def test_abbreviation(self):
""" Test checks if the output object has this method """
self.assertTrue('abbreviation' in dir(self.output_objects[0]))
def test_color(self):
""" Test checks if the output object has this method """
self.assertTrue('color' in dir(self.output_objects[0]))
def test_formats(self):
""" Test checks if the output object has this method """
self.assertTrue('_formats' in dir(self.output_objects[0]))
def test_icon(self):
""" Test checks if the output object has this method """
self.assertTrue('icon' in dir(self.output_objects[0]))
def test_output(self):
""" Test checks if the output object has this method """
self.assertTrue('output' in dir(self.output_objects[0]))
def test_output_enable(self):
""" Test checks if the output object has this method """
self.assertTrue('output_enable' in dir(self.output_objects[0]))
| gpl-2.0 | -5,201,670,322,074,896,000 | 36.307692 | 71 | 0.666667 | false |
kinnou02/navitia | source/jormungandr/jormungandr/street_network/tests/streetnetwork_backend_manager_test.py | 1 | 16126 | # Copyright (c) 2001-2016, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import
import pytest
from jormungandr.instance import Instance
from jormungandr.street_network.streetnetwork_backend_manager import StreetNetworkBackendManager
from navitiacommon.models.streetnetwork_backend import StreetNetworkBackend
from jormungandr.street_network.kraken import Kraken
from jormungandr.street_network.valhalla import Valhalla
from jormungandr.exceptions import ConfigException, TechnicalError
import datetime
KRAKEN_CLASS = 'jormungandr.street_network.kraken.Kraken'
VALHALLA_CLASS = 'jormungandr.street_network.valhalla.Valhalla'
ALL_MODES = ['walking', 'bike', 'bss', 'car']
def _init_and_create_backend_without_default(conf):
sn_manager = StreetNetworkBackendManager()
sn_manager._create_street_network_backends("instance", conf)
return sn_manager.get_all_street_networks_legacy("instance")
def kraken_class_test():
kraken_conf = [{'modes': ALL_MODES, 'class': KRAKEN_CLASS}]
services = _init_and_create_backend_without_default(kraken_conf)
assert len(services) == 1
assert isinstance(services[0], Kraken)
def kraken_klass_test():
kraken_conf = [{'modes': ALL_MODES, 'klass': KRAKEN_CLASS}]
services = _init_and_create_backend_without_default(kraken_conf)
assert len(services) == 1
assert isinstance(services[0], Kraken)
def valhalla_class_without_url_test():
with pytest.raises(ValueError) as excinfo:
valhalla_without_url = [{'modes': ALL_MODES, 'class': VALHALLA_CLASS}]
_init_and_create_backend_without_default(valhalla_without_url)
assert 'service_url None is not a valid url' in str(excinfo.value)
def valhalla_class_with_empty_url_test():
with pytest.raises(ValueError) as excinfo:
kraken_conf = [{'modes': ALL_MODES, 'class': VALHALLA_CLASS, 'args': {"service_url": ""}}]
_init_and_create_backend_without_default(kraken_conf)
assert 'service_url is not a valid url' in str(excinfo.value)
def valhalla_class_with_invalid_url_test():
with pytest.raises(ValueError) as excinfo:
kraken_conf = [{'modes': ALL_MODES, 'class': VALHALLA_CLASS, 'args': {"service_url": "bob"}}]
_init_and_create_backend_without_default(kraken_conf)
assert 'service_url bob is not a valid url' in str(excinfo.value)
def valhalla_class_without_costing_options_test():
kraken_conf = [
{'modes': ALL_MODES, 'class': VALHALLA_CLASS, 'args': {"service_url": "http://localhost:8002"}}
]
services = _init_and_create_backend_without_default(kraken_conf)
assert len(services) == 1
assert isinstance(services[0], Valhalla)
def valhalla_class_with_empty_costing_options_test():
kraken_conf = [
{
'modes': ALL_MODES,
'class': VALHALLA_CLASS,
'args': {"service_url": "http://localhost:8002", "costing_options": {}},
}
]
services = _init_and_create_backend_without_default(kraken_conf)
assert len(services) == 1
assert isinstance(services[0], Valhalla)
def valhalla_class_with_url_valid_test():
kraken_conf = [
{
'modes': ALL_MODES,
'class': VALHALLA_CLASS,
'args': {
"service_url": "http://localhost:8002",
"costing_options": {"pedestrian": {"walking_speed": 50.1}},
},
}
]
services = _init_and_create_backend_without_default(kraken_conf)
assert len(services) == 1
assert isinstance(services[0], Valhalla)
def street_network_without_class_test():
with pytest.raises(KeyError) as excinfo:
kraken_conf = [
{
'modes': ['walking'],
'args': {
"service_url": "http://localhost:8002",
"costing_options": {"pedestrian": {"walking_speed": 50.1}},
},
}
]
_init_and_create_backend_without_default(kraken_conf)
assert (
'impossible to build a StreetNetwork, missing mandatory field in configuration: class or klass'
in str(excinfo.value)
)
def valhalla_class_with_class_invalid_test():
with pytest.raises(ConfigException) as excinfo:
kraken_conf = [
{
'class': 'jormungandr',
'modes': ['walking'],
'args': {
"service_url": "http://localhost:8002",
"costing_options": {"pedestrian": {"walking_speed": 50.1}},
},
}
]
_init_and_create_backend_without_default(kraken_conf)
def valhalla_class_with_class_not_exist_test():
with pytest.raises(ConfigException) as excinfo:
kraken_conf = [
{
'class': 'jormungandr.street_network.valhalla.bob',
'modes': ['walking'],
'args': {
"service_url": "http://localhost:8002",
"costing_options": {"pedestrian": {"walking_speed": 50.1}},
},
}
]
_init_and_create_backend_without_default(kraken_conf)
def sn_backends_getter_ok():
sn_backend1 = StreetNetworkBackend(id='kraken')
sn_backend1.klass = 'jormungandr.street_network.tests.StreetNetworkBackendMock'
sn_backend1.args = {'url': 'kraken.url'}
sn_backend1.created_at = datetime.datetime.utcnow()
sn_backend2 = StreetNetworkBackend(id='asgard')
sn_backend2.klass = 'jormungandr.street_network.tests.StreetNetworkBackendMock'
sn_backend2.args = {'url': 'asgard.url'}
return [sn_backend1, sn_backend2]
def sn_backends_getter_update():
sn_backend = StreetNetworkBackend(id='kraken')
sn_backend.klass = 'jormungandr.street_network.tests.StreetNetworkBackendMock'
sn_backend.args = {'url': 'kraken.url.UPDATE'}
sn_backend.updated_at = datetime.datetime.utcnow()
return [sn_backend]
def sn_backends_getter_wrong_class():
sn_backend = StreetNetworkBackend(id='kraken')
sn_backend.klass = 'jormungandr/street_network/tests/StreetNetworkBackendMock'
sn_backend.args = {'url': 'kraken.url'}
return [sn_backend]
def streetnetwork_backend_manager_db_test():
"""
Test that streetnetwork backends are created from db when conditions are met
"""
manager = StreetNetworkBackendManager(sn_backends_getter_ok, -1)
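    # update_interval=-1 presumably forces a refresh from db on every request
    # (contrast with the long-interval case further down)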
# 2 sn_backends defined in db are associated to the coverage
# -> 2 sn_backends created
manager._update_config("instance")
assert not manager._streetnetwork_backends_by_instance_legacy
assert len(manager._streetnetwork_backends) == 2
assert 'kraken' in manager._streetnetwork_backends
assert manager._streetnetwork_backends['kraken'].url == 'kraken.url'
assert 'asgard' in manager._streetnetwork_backends
assert manager._streetnetwork_backends['asgard'].url == 'asgard.url'
manager_update = manager._last_update
assert 'kraken' in manager._streetnetwork_backends_last_update
kraken_update = manager._streetnetwork_backends_last_update['kraken']
# Sn_backend already existing is updated
manager._sn_backends_getter = sn_backends_getter_update
manager._update_config("instance")
assert manager._last_update > manager_update
assert not manager._streetnetwork_backends_by_instance_legacy
assert len(manager._streetnetwork_backends) == 2
assert 'kraken' in manager._streetnetwork_backends
assert manager._streetnetwork_backends['kraken'].url == 'kraken.url.UPDATE'
assert 'kraken' in manager._streetnetwork_backends_last_update
assert manager._streetnetwork_backends_last_update['kraken'] > kraken_update
# Long update interval so sn_backend shouldn't be updated
manager = StreetNetworkBackendManager(sn_backends_getter_ok, 600)
manager._update_config("instance")
assert not manager._streetnetwork_backends_by_instance_legacy
assert len(manager._streetnetwork_backends) == 2
assert 'kraken' in manager._streetnetwork_backends
assert manager._streetnetwork_backends['kraken'].url == 'kraken.url'
manager_update = manager._last_update
manager.sn_backends_getter = sn_backends_getter_update
manager._update_config("instance")
assert manager._last_update == manager_update
assert not manager._streetnetwork_backends_by_instance_legacy
assert len(manager._streetnetwork_backends) == 2
assert 'kraken' in manager._streetnetwork_backends
assert manager._streetnetwork_backends['kraken'].url == 'kraken.url'
def wrong_streetnetwork_backend_test():
"""
Test that streetnetwork backends with wrong parameters aren't created
"""
# Sn_backend has a class wrongly formatted
manager = StreetNetworkBackendManager(sn_backends_getter_wrong_class, -1)
manager._update_config("instance")
assert not manager._streetnetwork_backends_by_instance_legacy
assert not manager._streetnetwork_backends
# No sn_backends available in db
manager._sn_backends_getter = []
manager._update_config("instance")
assert not manager._streetnetwork_backends_by_instance_legacy
assert not manager._streetnetwork_backends
def append_default_street_network_to_config_test():
manager = StreetNetworkBackendManager(sn_backends_getter=None, update_interval=-1)
# Kraken is used for all the modes.
config_full_default = [
{
'args': {
'street_network': {
'args': {'timeout': 10},
'class': 'jormungandr.street_network.Kraken',
'modes': [],
}
},
'class': 'jormungandr.street_network.Taxi',
'modes': ['taxi'],
},
{
'args': {
'street_network': {
'args': {'timeout': 10},
'class': 'jormungandr.street_network.Kraken',
'modes': [],
}
},
'class': 'jormungandr.street_network.Ridesharing',
'modes': ['ridesharing'],
},
{
'args': {'timeout': 10},
'class': 'jormungandr.street_network.Kraken',
'modes': ['car', 'walking', 'bike', 'bss'],
},
]
response = manager._append_default_street_network_to_config(None)
assert response == config_full_default
# Asgard is used for car, Kraken for all the other modes.
config_car_asgard = [{'class': 'jormungandr.street_network.Asgard', 'modes': ['car']}]
response = manager._append_default_street_network_to_config(config_car_asgard)
config_asgard_plus_default = [
{'class': 'jormungandr.street_network.Asgard', 'modes': ['car']},
{
'args': {
'street_network': {
'args': {'timeout': 10},
'class': 'jormungandr.street_network.Kraken',
'modes': [],
}
},
'class': 'jormungandr.street_network.Taxi',
'modes': ['taxi'],
},
{
'args': {
'street_network': {
'args': {'timeout': 10},
'class': 'jormungandr.street_network.Kraken',
'modes': [],
}
},
'class': 'jormungandr.street_network.Ridesharing',
'modes': ['ridesharing'],
},
{
'args': {'timeout': 10},
'class': 'jormungandr.street_network.Kraken',
'modes': ['walking', 'bike', 'bss'],
},
]
assert response == config_asgard_plus_default
# Surf is used for surf, Kraken for all the other modes.
# Surf stay in the config but is not used.
wrong_config = [{'class': 'jormungandr.street_network.Surf', 'modes': ['surf']}]
response = manager._append_default_street_network_to_config(wrong_config)
wrong_plus_default_config = [
{'class': 'jormungandr.street_network.Surf', 'modes': ['surf']},
{
'args': {
'street_network': {
'args': {'timeout': 10},
'class': 'jormungandr.street_network.Kraken',
'modes': [],
}
},
'class': 'jormungandr.street_network.Taxi',
'modes': ['taxi'],
},
{
'args': {
'street_network': {
'args': {'timeout': 10},
'class': 'jormungandr.street_network.Kraken',
'modes': [],
}
},
'class': 'jormungandr.street_network.Ridesharing',
'modes': ['ridesharing'],
},
{
'args': {'timeout': 10},
'class': 'jormungandr.street_network.Kraken',
'modes': ['car', 'walking', 'bike', 'bss'],
},
]
assert response == wrong_plus_default_config
def get_street_network_db_test():
manager = StreetNetworkBackendManager(sn_backends_getter_ok, -1)
sn = manager.get_street_network_db("instance", "kraken")
assert sn is not None
assert sn.timeout == 2
assert sn.url == 'kraken.url'
sn = manager.get_street_network_db("instance", "asgard")
assert sn is not None
assert sn.timeout == 2
assert sn.url == 'asgard.url'
with pytest.raises(TechnicalError) as excinfo:
sn = manager.get_street_network_db("instance", "plopi")
assert (
str(excinfo.value.data['message'])
== 'impossible to find a streetnetwork module for instance instance with configuration plopi'
)
assert 'TechnicalError' == str(excinfo.typename)
class FakeInstance(Instance):
street_network_car = "asgard"
street_network_walking = "asgard"
street_network_bike = "geovelo"
street_network_bss = "kraken"
street_network_taxi = None
street_network_ridesharing = None
def __init__(self):
super(FakeInstance, self).__init__(
context=None,
name="instance",
zmq_socket=None,
street_network_configurations=[],
ridesharing_configurations=None,
realtime_proxies_configuration=[],
zmq_socket_type=None,
autocomplete_type='kraken',
instance_equipment_providers=[],
streetnetwork_backend_manager=None,
)
def get_all_street_networks_db_test():
manager = StreetNetworkBackendManager(sn_backends_getter_ok, -1)
instance = FakeInstance()
all_sn = manager.get_all_street_networks_db(instance)
assert len(all_sn) == 2
# So that Asgard and kraken are always in the same order
all_sn_sorted = sorted(all_sn, key=lambda sn: sn.url)
assert all_sn_sorted[0].url == "asgard.url"
assert sorted(all_sn_sorted[0].modes) == sorted(["walking", "car"])
assert all_sn_sorted[1].url == "kraken.url"
assert all_sn_sorted[1].modes == ["bss"]
| agpl-3.0 | -8,971,987,324,433,375,000 | 35.817352 | 103 | 0.622163 | false |
ddico/odoo | addons/fleet/models/fleet_vehicle_cost.py | 1 | 9422 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import UserError
from dateutil.relativedelta import relativedelta
class FleetVehicleLogContract(models.Model):
_inherit = ['mail.thread', 'mail.activity.mixin']
_name = 'fleet.vehicle.log.contract'
_description = 'Contract information on a vehicle'
_order = 'state desc,expiration_date'
def compute_next_year_date(self, strdate):
oneyear = relativedelta(years=1)
start_date = fields.Date.from_string(strdate)
return fields.Date.to_string(start_date + oneyear)
vehicle_id = fields.Many2one('fleet.vehicle', 'Vehicle', default=1, required=True, help='Vehicle concerned by this log')
cost_subtype_id = fields.Many2one('fleet.service.type', 'Type', help='Cost type purchased with this cost', domain=[('category', '=', 'contract')])
amount = fields.Float('Cost')
date = fields.Date(help='Date when the cost has been executed')
company_id = fields.Many2one('res.company', 'Company', default=lambda self: self.env.company)
currency_id = fields.Many2one('res.currency', related='company_id.currency_id')
name = fields.Char(string='Name', compute='_compute_contract_name', store=True)
active = fields.Boolean(default=True)
user_id = fields.Many2one('res.users', 'Responsible', default=lambda self: self.env.user, index=True)
start_date = fields.Date('Contract Start Date', default=fields.Date.context_today,
help='Date when the coverage of the contract begins')
expiration_date = fields.Date('Contract Expiration Date', default=lambda self:
self.compute_next_year_date(fields.Date.context_today(self)),
help='Date when the coverage of the contract expirates (by default, one year after begin date)')
days_left = fields.Integer(compute='_compute_days_left', string='Warning Date')
insurer_id = fields.Many2one('res.partner', 'Vendor')
purchaser_id = fields.Many2one(related='vehicle_id.driver_id', string='Driver')
ins_ref = fields.Char('Reference', size=64, copy=False)
state = fields.Selection([
('futur', 'Incoming'),
('open', 'In Progress'),
('expired', 'Expired'),
('closed', 'Closed')
], 'Status', default='open', readonly=True,
help='Choose whether the contract is still valid or not',
tracking=True,
copy=False)
notes = fields.Text('Terms and Conditions', help='Write here all supplementary information relative to this contract', copy=False)
cost_generated = fields.Float('Recurring Cost')
cost_frequency = fields.Selection([
('no', 'No'),
('daily', 'Daily'),
('weekly', 'Weekly'),
('monthly', 'Monthly'),
('yearly', 'Yearly')
    ], 'Recurring Cost Frequency', default='monthly', help='Frequency of the recurring cost', required=True)
service_ids = fields.Many2many('fleet.service.type', string="Included Services")
@api.depends('vehicle_id', 'cost_subtype_id')
def _compute_contract_name(self):
for record in self:
name = record.vehicle_id.name
if record.cost_subtype_id.name:
name = record.cost_subtype_id.name + ' ' + name
record.name = name
@api.depends('expiration_date', 'state')
def _compute_days_left(self):
"""return a dict with as value for each contract an integer
if contract is in an open state and is overdue, return 0
if contract is in a closed state, return -1
otherwise return the number of days before the contract expires
"""
for record in self:
if record.expiration_date and record.state in ['open', 'expired']:
today = fields.Date.from_string(fields.Date.today())
renew_date = fields.Date.from_string(record.expiration_date)
diff_time = (renew_date - today).days
record.days_left = diff_time > 0 and diff_time or 0
else:
record.days_left = -1
def write(self, vals):
res = super(FleetVehicleLogContract, self).write(vals)
if vals.get('expiration_date') or vals.get('user_id'):
self.activity_reschedule(['fleet.mail_act_fleet_contract_to_renew'], date_deadline=vals.get('expiration_date'), new_user_id=vals.get('user_id'))
return res
def contract_close(self):
for record in self:
record.state = 'closed'
def contract_draft(self):
for record in self:
record.state = 'futur'
def contract_open(self):
for record in self:
record.state = 'open'
@api.model
def scheduler_manage_contract_expiration(self):
# This method is called by a cron task
# It manages the state of a contract, possibly by posting a message on the vehicle concerned and updating its status
params = self.env['ir.config_parameter'].sudo()
delay_alert_contract = int(params.get_param('hr_fleet.delay_alert_contract', default=30))
date_today = fields.Date.from_string(fields.Date.today())
outdated_days = fields.Date.to_string(date_today + relativedelta(days=+delay_alert_contract))
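        # contracts expiring within the next delay_alert_contract days get a renewal
        # activity scheduled for their responsible user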
nearly_expired_contracts = self.search([('state', '=', 'open'), ('expiration_date', '<', outdated_days)])
for contract in nearly_expired_contracts.filtered(lambda contract: contract.user_id):
contract.activity_schedule(
'fleet.mail_act_fleet_contract_to_renew', contract.expiration_date,
user_id=contract.user_id.id)
        expired_contracts = self.search([('state', 'not in', ['expired', 'closed']), ('expiration_date', '<', fields.Date.today())])
expired_contracts.write({'state': 'expired'})
futur_contracts = self.search([('state', 'not in', ['futur', 'closed']), ('start_date', '>', fields.Date.today())])
futur_contracts.write({'state': 'futur'})
now_running_contracts = self.search([('state', '=', 'futur'), ('start_date', '<=', fields.Date.today())])
now_running_contracts.write({'state': 'open'})
def run_scheduler(self):
self.scheduler_manage_contract_expiration()
class FleetVehicleLogServices(models.Model):
_name = 'fleet.vehicle.log.services'
_inherit = ['mail.thread', 'mail.activity.mixin']
_rec_name = 'service_type_id'
_description = 'Services for vehicles'
@api.model
def default_get(self, default_fields):
res = super(FleetVehicleLogServices, self).default_get(default_fields)
service = self.env.ref('fleet.type_service_service_8', raise_if_not_found=False)
res.update({
'date': fields.Date.context_today(self),
'service_type_id': service.id if service else None,
})
return res
active = fields.Boolean(default=True)
vehicle_id = fields.Many2one('fleet.vehicle', 'Vehicle', default=1, required=True, help='Vehicle concerned by this log')
amount = fields.Float('Cost')
description = fields.Char('Description')
odometer_id = fields.Many2one('fleet.vehicle.odometer', 'Odometer', help='Odometer measure of the vehicle at the moment of this log')
odometer = fields.Float(compute="_get_odometer", inverse='_set_odometer', string='Odometer Value',
help='Odometer measure of the vehicle at the moment of this log')
odometer_unit = fields.Selection(related='vehicle_id.odometer_unit', string="Unit", readonly=True)
date = fields.Date(help='Date when the cost has been executed')
company_id = fields.Many2one('res.company', 'Company', default=lambda self: self.env.company)
currency_id = fields.Many2one('res.currency', related='company_id.currency_id')
purchaser_id = fields.Many2one(related='vehicle_id.driver_id', string="Driver")
inv_ref = fields.Char('Vendor Reference')
vendor_id = fields.Many2one('res.partner', 'Vendor')
notes = fields.Text()
service_type_id = fields.Many2one('fleet.service.type', 'Service Type', required=True)
state = fields.Selection([
('todo', 'To Do'),
('running', 'Running'),
('done', 'Done'),
('cancelled', 'Cancelled'),
], default='todo', string='Stage')
def _get_odometer(self):
self.odometer = 0
for record in self:
if record.odometer_id:
record.odometer = record.odometer_id.value
def _set_odometer(self):
for record in self:
if not record.odometer:
raise UserError(_('Emptying the odometer value of a vehicle is not allowed.'))
odometer = self.env['fleet.vehicle.odometer'].create({
'value': record.odometer,
'date': record.date or fields.Date.context_today(record),
'vehicle_id': record.vehicle_id.id
})
self.odometer_id = odometer
@api.model_create_multi
def create(self, vals_list):
for data in vals_list:
if 'odometer' in data and not data['odometer']:
# if received value for odometer is 0, then remove it from the
# data as it would result to the creation of a
# odometer log with 0, which is to be avoided
del data['odometer']
return super(FleetVehicleLogServices, self).create(vals_list) | agpl-3.0 | 1,315,773,969,023,667,500 | 48.335079 | 156 | 0.641159 | false |
bird-house/bird-feeder | birdfeeder/walker.py | 1 | 5030 | import os
from netCDF4 import Dataset as NCDataset
from dateutil import parser as dateparser
from datetime import datetime
from birdfeeder.utils import humanize_filesize
import logging
logger = logging.getLogger(__name__)
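# coordinate / auxiliary variable names that Dataset._parse() skips when collecting variable metadata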
SPATIAL_VARIABLES = [
'longitude', 'lon',
'latitude', 'lat',
'altitude', 'alt', 'level', 'height',
'rotated_pole',
'rotated_latitude_longitude',
'time']
class Dataset(object):
def __init__(self, filepath, basedir='/'):
self.filepath = filepath
self.path = os.path.sep + os.path.relpath(filepath, basedir)
self.bytes = os.path.getsize(filepath)
self.size = humanize_filesize(self.bytes)
self.name = os.path.basename(filepath)
self.url = 'file://' + filepath
self.content_type = 'application/netcdf'
self.resourcename = filepath
self._last_modified = None
self.attributes = {}
self._parse(filepath)
def __str__(self):
return "attributes={0}".format(self.attributes)
@property
def last_modified(self):
if self._last_modified is None:
mtime = os.path.getmtime(self.filepath)
self._last_modified = datetime.fromtimestamp(mtime).strftime('%Y-%m-%dT%H:%M:%SZ')
return self._last_modified
@property
def variable(self):
return self.attributes.get('variable')
@property
def variable_long_name(self):
return self.attributes.get('variable_long_name')
@property
def cf_standard_name(self):
return self.attributes.get('cf_standard_name')
@property
def units(self):
return self.attributes.get('units')
@property
def comments(self):
return self.attributes.get('comments')
@property
def institute(self):
return self.attributes.get('institute_id')
@property
def experiment(self):
return self.attributes.get('experiment_id')
@property
def project(self):
return self.attributes.get('project_id')
@property
def model(self):
return self.attributes.get('model_id')
@property
def frequency(self):
return self.attributes.get('frequency')
@property
def creation_date(self):
if 'creation_date' in self.attributes:
return self.attributes['creation_date'][0]
else:
return None
def _add_attribute(self, key, value):
        if key not in self.attributes:
self.attributes[key] = []
self.attributes[key].append(value)
def _parse(self, filepath):
filepath = os.path.abspath(filepath)
logger.debug("parse %s", filepath)
try:
ds = NCDataset(filepath, 'r')
# loop over global attributes
for attname in ds.ncattrs():
attvalue = getattr(ds, attname)
if 'date' in attname.lower():
# must format dates in Solr format, if possible
try:
solr_dt = dateparser.parse(attvalue)
self._add_attribute(attname, solr_dt.strftime('%Y-%m-%dT%H:%M:%SZ') )
except:
pass # disregard this attribute
else:
self._add_attribute(attname, attvalue)
# loop over dimensions
for key, dim in ds.dimensions.items():
self._add_attribute('dimension', "%s:%s" % (key, len(dim)) )
# loop over variable attributes
for key, variable in ds.variables.items():
if key.lower() in ds.dimensions:
# skip dimension variables
continue
if '_bnds' in key.lower():
continue
if key.lower() in SPATIAL_VARIABLES:
continue
self._add_attribute('variable', key)
self._add_attribute('variable_long_name', getattr(variable, 'long_name', None) )
cf_standard_name = getattr(variable, 'standard_name', None)
if cf_standard_name is not None:
self._add_attribute('cf_standard_name', getattr(variable, 'standard_name', None) )
self._add_attribute('units', getattr(variable, 'units', None) )
except Exception as e:
logging.error(e)
finally:
try:
ds.close()
except:
pass
def crawl(start_dir):
if not os.path.isdir(start_dir):
raise Exception("Invalid start directory: %s", start_dir)
logger.info('start directory = %s', start_dir)
for directory, subdirs, files in os.walk(start_dir):
# loop over files in this directory
for filename in files:
# only parse .nc files
if filename.endswith('.nc'):
filepath = os.path.join(directory, filename)
yield Dataset(filepath, basedir=start_dir)
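# Example usage of crawl() (illustrative only; the path below is hypothetical):
#   for ds in crawl('/data/netcdf'):
#       print(ds.name, ds.size, ds.variable)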
| apache-2.0 | -8,682,199,717,071,598,000 | 28.763314 | 102 | 0.56501 | false |
dvcolgan/ludumdare27 | game/management/commands/generate_map_pngs.py | 1 | 2676 | from django.core.management.base import BaseCommand, CommandError
from game.models import *
from settings import MIN_COL, MAX_COL, MIN_ROW, MAX_ROW, GRID_SIZE
from PIL import Image
from PIL import ImageDraw
def hex_to_rgb(value):
value = value.lstrip('#')
lv = len(value)
if lv == 1:
v = int(value, 16)*17
return v, v, v
if lv == 3:
return tuple(int(value[i:i+1], 16)*17 for i in range(0, 3))
return tuple(int(value[i:i+lv/3], 16) for i in range(0, lv, lv/3))
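# Illustrative examples (not executed): hex_to_rgb('#f') -> (255, 255, 255),
# hex_to_rgb('#40a') -> (68, 0, 170), hex_to_rgb('#4080ff') -> (64, 128, 255)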
class Command(BaseCommand):
args = ''
help = 'Generate pngs of the board for zooming'
def handle(self, *args, **options):
squares = Square.objects.order_by('row', 'col')
width = (20 + MAX_COL - MIN_COL) * GRID_SIZE
height = (20 + MAX_ROW - MIN_ROW) * GRID_SIZE
im = Image.new('RGB', (width, height), 'black')
#http://effbot.org/imagingbook/imagedraw.htm
draw = ImageDraw.Draw(im)
for square in squares:
if square.owner != None:
fill_color = square.owner.color
else:
fill_color = '#ffffff'
x1 = square.col*GRID_SIZE+width/2
y1 = square.row*GRID_SIZE+height/2
x2 = square.col*GRID_SIZE+GRID_SIZE+width/2
y2 = square.row*GRID_SIZE+GRID_SIZE+height/2
draw.rectangle(((x1, y1), (x2, y2)), fill=fill_color)
for i, unit in enumerate(square.units.all()):
if i == 0:
ax1 = x1 + GRID_SIZE/4 - GRID_SIZE/8
ay1 = y1 + GRID_SIZE/4 - GRID_SIZE/8
ax2 = x1 + GRID_SIZE/4 + GRID_SIZE/8
ay2 = y1 + GRID_SIZE/4 + GRID_SIZE/8
if i == 1:
ax1 = x1 + 3*GRID_SIZE/4 - GRID_SIZE/8
ay1 = y1 + 3*GRID_SIZE/4 - GRID_SIZE/8
ax2 = x1 + GRID_SIZE/4 + GRID_SIZE/8
ay2 = y1 + GRID_SIZE/4 + GRID_SIZE/8
if i == 2:
ax1 = x1 + 3*GRID_SIZE/4 - GRID_SIZE/8
ay1 = y1 + 3*GRID_SIZE/4 - GRID_SIZE/8
ax2 = x1 + 3*GRID_SIZE/4 + GRID_SIZE/8
ay2 = y1 + 3*GRID_SIZE/4 + GRID_SIZE/8
if i == 3:
ax1 = x1 + GRID_SIZE/4 - GRID_SIZE/8
ay1 = y1 + GRID_SIZE/4 - GRID_SIZE/8
ax2 = x1 + 3*GRID_SIZE/4 + GRID_SIZE/8
ay2 = y1 + 3*GRID_SIZE/4 + GRID_SIZE/8
draw.ellipse(((ax1, ay1, ax2, ay2)), outline='#000000', fill=unit.owner.color)
im.save('static/images/minimap.png', 'PNG')
print 'Saved full image'
| mit | -4,220,150,537,329,599,500 | 36.690141 | 94 | 0.501495 | false |
petr-kalinin/progrobot | tools/import_python3.py | 1 | 9519 | #!/usr/bin/python3
from pymongo import MongoClient
import os
import os.path
import re
import bs4
import itertools
from bs4 import BeautifulSoup
import utils
class ReferenceItem:
def __init__(self):
self.name = ""
self.module = ""
self.usage = ""
self.short = ""
self.full = ""
self.fullest = ""
self.href = ""
self.copyright = ""
self.subitems = []
def __str__(self):
return ("name: " + self.name + "\n"
+ "href: " + self.href + "\n"
+ "module: " + str(self.module) + "\n"
+ "usage: " + str(self.usage) + "\n"
+ "short: " + self.short + "\n\n"
#+ "full: " + self.full + "\n\n"
#+ "fullest: " + self.fullest + "\n\n"
+ "subitems: " + str(self.subitems)
+ "copyright: " + self.copyright)
def to_dict(self):
return {"name" : self.name,
"href": self.href,
"module" : self.module,
"usage" : self.usage,
"short" : self.short,
"full" : self.full,
"fullest" : self.fullest,
"subitems" : self.subitems,
"copyright": self.copyright}
def hasclass(tag, classes):
for cl in tag.get("class", []):
if cl in classes:
return True
return False
def create_ref(refs, name, module, base_href):
if not name in refs:
refs[name] = ReferenceItem()
refs[name].name = name
if module:
refs[name].module = "import " + module
refs[name].href = base_href + "#" + name
refs[name].copyright = "ⓒ Python developers, " + refs[name].href
parent = ".".join(name.split(".")[:-1])
if parent != "" and parent[0] == "@":
parent = parent[1:]
if not parent in refs:
refs[parent] = ReferenceItem()
subitem = (name, "")
if not subitem in refs[parent].subitems:
refs[parent].subitems.append(subitem)
def can_be_short(text):
#print("Testing string `" + text + "`")
if re.match("New in version", text):
return False
if re.match("Source code:", text):
return False
return True
def next_tag(tag):
while not tag.next_sibling and tag.parent:
tag = tag.parent
if tag.next_sibling:
return tag.next_sibling
else:
return None
def parse_file(filename, refs):
base_href = "https://docs.python.org/" + filename[2:]
soup = BeautifulSoup(open(filename), 'lxml')
module_a = soup.h1.a
if not "headerlink" in module_a.get("class"):
module = module_a.string
else:
module = None
#print("found module", module)
currentName = module
if currentName:
create_ref(refs, currentName, module, base_href)
tag = soup.h1.next_sibling
while tag is not None:
#print("Tag: `", tag, "`")
if isinstance(tag, bs4.element.Comment):
tag = tag.next_element
continue
if isinstance(tag, bs4.element.NavigableString):
text = tag.strip()
if text != "" and currentName:
if refs[currentName].short == "":
if can_be_short(text):
refs[currentName].short = text
refs[currentName].full += text
tag = tag.next_element
continue
#if currentName:
# print(currentName, tag.name, "`"+refs[currentName].full+"`", "\n\n")
if hasclass(tag, ["sphinxsidebar"]):
break
elif hasclass(tag, ["section", "seealso"]):
currentName = None
tag = tag.next_element
elif hasclass(tag, ['class', 'classmethod', 'method', 'function', 'data', 'exception', 'attribute', 'staticmethod', 'cmdoption']):
currentName = tag.dt.get('id')
usage = "".join(tag.dt.strings).strip()
if currentName and usage[0] == "@":
currentName = "@" + currentName
if currentName:
create_ref(refs, currentName, module, base_href)
refs[currentName].usage = usage[:-1].strip()
tag = tag.dd.next_element
elif tag.name in ('p', 'pre', 'code', 'li', 'dt', 'dd', 'tr', 'td', 'th'):
if (tag.name == 'p'
and len(tag.contents) == 1
and isinstance(tag.contents[0], bs4.element.Tag)
and tag.contents[0].name=="strong"):
currentName = None
if currentName:
if refs[currentName].short == "":
text = "".join(tag.strings)
if can_be_short(text):
refs[currentName].short = "".join(str(x) for x in tag.contents)
refs[currentName].full += str(tag)
tag = next_tag(tag)
if not tag:
break
else:
tag = tag.next_element
return refs
def insert_ref(ref, reference, index):
result = reference.insert_one(ref.to_dict())
#print("insert: ", ref.to_dict())
names = [ref.name]
for name in names:
split_name = name.strip('@ ').split(".")
if len(split_name) > 3:
print(split_name," --- ", ref.name)
for i in range(len(split_name)):
perm = [x.lower() for x in split_name[i:]]
subname = " ".join(sorted(perm))
doc = {
"reference_id" : result.inserted_id,
"name" : subname,
"relevance" : 1-i/len(split_name),
"full_name" : ref.name
}
#print("index: ", doc)
index.insert_one(doc)
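# Illustrative example (not executed): for ref.name == "datetime.datetime.utcnow"
# the loop above stores three index entries, all pointing back at the same
# reference document via reference_id:
#   "datetime datetime utcnow" -> relevance 1.0
#   "datetime utcnow"          -> relevance ~0.67
#   "utcnow"                   -> relevance ~0.33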
def process_file(filename, refs):
print("\n-----------\n" + filename)
print(".", end="", flush=True)
parse_file(filename, refs)
def finalize(refs):
for ref_name, ref in refs.items():
if ref.name == "":
ref.name = ref_name
new_subitems = []
for item in ref.subitems:
new_subitems.append((item[0], utils.first_sentence(refs[item[0]].short)))
ref.subitems = new_subitems
os.chdir("../raw_data/python3/docs.python.org")
client = MongoClient()
client.drop_database("python3")
db = client.python3
reference = db.reference
index = db.index
index.create_index("name")
refs = {}
for directory, subdirs, files in os.walk("."):
for f in files:
process_file(os.path.join(directory, f), refs)
#process_file("3/library/itertools.html", refs)
#process_file("3/library/re.html", refs)
#process_file("3/library/json.html", refs)
#process_file("3/library/pprint.html", refs)
#process_file("3/library/unittest.html", refs)
#process_file("3/library/ctypes.html", refs)
finalize(refs)
#print(refs['datetime.datetime'].subitems)
for ref in refs.values():
if ref.name != "":
#print(ref)
#print("\n")
insert_ref(ref, reference, index)
#------- Testing
def assert_starts_with(text, start):
if not text.startswith(start):
print("Text `" + text + "` does not start with `" + start + "`")
raise AssertionError()
def assert_ends_with(text, end):
    if not text.endswith(end):
        print("Text `" + text + "` does not end with `" + end + "`")
        raise AssertionError()
def find_subitem(ref, subitem):
found = None
for item in ref.subitems:
if item[0] == subitem:
assert not found
found = item
return found
def check_urllib_parse():
assert_starts_with(refs["urllib.parse"].short, "This module")
item = find_subitem(refs["urllib"], "urllib.parse")
assert_starts_with(item[1], "This module")
assert_ends_with(item[1], "“base URL.”")
def check_unittest_mock():
assert_starts_with(refs["unittest.mock"].short, '<a class="reference internal"')
item = find_subitem(refs["unittest"], "unittest.mock")
assert_starts_with(item[1], '<a class="reference internal"')
def check_urllib():
assert_ends_with(refs["urllib"].full, "files</li>")
def check_re():
assert len(refs["re"].subitems) > 0
assert "re.match" in refs
assert refs["re"].subitems[0][0] == "re.compile"
assert_ends_with(refs["re"].subitems[0][1], "described below.")
assert len(refs["re"].subitems[0][1].strip()) > 0
def check_unittest():
assert_ends_with(refs["unittest"].full, "executing the tests.</dd>")
def check_unittest_skip():
assert "@unittest.skip" in refs
assert find_subitem(refs["unittest"], "@unittest.skip")
def check_utcnow():
assert "datetime.datetime.utcnow" in refs
assert find_subitem(refs["datetime.datetime"], "datetime.datetime.utcnow")
def check_pprint():
assert "pprint.pprint" in refs
assert_ends_with(refs["pprint.pprint"].full, "</pre>")
def check_itertools():
assert_ends_with(refs['itertools'].full, 'vector2))</span></code>.</p>')
def check_ctypes():
assert "ctypes.Array._length_" in refs
assert find_subitem(refs["ctypes.Array"], "ctypes.Array._length_")
def check_paragraph_signs():
found = False
for ref in refs:
if "¶" in refs[ref].full:
print("¶ found in ", ref)
found = True
assert not found
check_paragraph_signs()
check_ctypes()
check_itertools()
check_re()
check_pprint()
check_utcnow()
check_urllib_parse()
check_unittest_mock()
check_urllib()
check_unittest()
check_unittest_skip()
| agpl-3.0 | 1,616,040,906,343,496,200 | 30.916107 | 138 | 0.551887 | false |
arunkgupta/gramps | gramps/gen/filters/rules/person/_haslds.py | 1 | 1723 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2008 Jerome Rapinat
# Copyright (C) 2008 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# gen.filters.rules/Person/_HasLDS.py
# $Id$
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....ggettext import gettext as _
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .._hasldsbase import HasLDSBase
#-------------------------------------------------------------------------
#
# HasLDS
#
#-------------------------------------------------------------------------
class HasLDS(HasLDSBase):
"""Rule that checks for a person with a LDS event"""
name = _('People with <count> LDS events')
description = _("Matches people with a certain number of LDS events")
| gpl-2.0 | 813,847,480,302,232,000 | 33.46 | 75 | 0.536274 | false |
vitobasso/audio-ml | src/train_raw.py | 1 | 2372 | __author__ = 'victor'
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import RPropMinusTrainer
from pybrain import FeedForwardNetwork, FullConnection, IdentityConnection, TanhLayer
from datasource import *
# dataset
timeWidth = 5140 # num of samples to input to the net
mixer = MixedStream('piano', 'acapella', timeWidth)
# training
batchsize = 100
epochs = 1000
def build_net(width):
net = FeedForwardNetwork()
# layers
net.addInputModule(TanhLayer(width, name='in'))
net.addOutputModule(TanhLayer(width, name='out'))
net.addModule(TanhLayer(100, name='h1'))
net.addModule(TanhLayer(50, name='h2'))
net.addModule(TanhLayer(100, name='h3'))
# connections
net.addConnection(FullConnection(net['in'], net['h1']))
net.addConnection(FullConnection(net['h1'], net['h2']))
# net.addConnection(FullConnection(net['h1'], net['h3']))
# net.addConnection(FullConnection(net['h1'], net['out']))
net.addConnection(FullConnection(net['h2'], net['h3']))
# net.addConnection(FullConnection(net['h2'], net['out']))
net.addConnection(FullConnection(net['h3'], net['out']))
net.addConnection(IdentityConnection(net['in'], net['out']))
net.sortModules()
return net
def train(mix, target):
print 'preparing to train, netwidth=%d, batchsize=%d, epochs=%d' % (timeWidth, batchsize, epochs)
net = build_net(timeWidth)
trainer = RPropMinusTrainer(net, batchlearning=True, learningrate=0.1, lrdecay=1, momentum=0.03, weightdecay=0.01)
def train_batch(i):
batch = SupervisedDataSet(timeWidth, timeWidth)
begin = i * batchsize
end = begin + batchsize
for i in np.arange(begin, end):
batch.addSample(mix[i], target[i])
trainer.setData(batch)
err = trainer.train()
return err
print 'training...'
plot_cont(train_batch, epochs)
# print 'saving net...'
# err = trainer.train() # train an extra time just to get the final error
# savenet(trainer.module, partlen, err)
return net
def test(net, mix):
print 'testing...'
    result = np.empty(0)  # start empty; np.empty(timeWidth) would prefix the output with uninitialized samples
for i in np.arange(500):
netout = net.activate(mix[i])
result = np.append(result, netout, axis=0)
wavwrite(result, outputfile='output.wav')
net = train(mixer, mixer.stream1)
test(net, mixer) | gpl-2.0 | -8,458,553,163,728,513,000 | 28.296296 | 118 | 0.670742 | false |
NCAR/mizuRoute | manage_externals/test/test_unit_repository_git.py | 1 | 31259 | #!/usr/bin/env python
"""Unit test driver for checkout_externals
Note: this script assume the path to the checkout_externals.py module is
already in the python path.
"""
# pylint: disable=too-many-lines,protected-access
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
import shutil
import unittest
from manic.repository_git import GitRepository
from manic.externals_status import ExternalStatus
from manic.externals_description import ExternalsDescription
from manic.externals_description import ExternalsDescriptionDict
from manic.global_constants import EMPTY_STR
# NOTE(bja, 2017-11) order is important here. origin should be a
# subset of other to trap errors on processing remotes!
GIT_REMOTE_OUTPUT_ORIGIN_UPSTREAM = '''
upstream /path/to/other/repo (fetch)
upstream /path/to/other/repo (push)
other /path/to/local/repo2 (fetch)
other /path/to/local/repo2 (push)
origin /path/to/local/repo (fetch)
origin /path/to/local/repo (push)
'''
class TestGitRepositoryCurrentRef(unittest.TestCase):
"""test the current_ref command on a git repository
"""
def setUp(self):
self._name = 'component'
rdata = {ExternalsDescription.PROTOCOL: 'git',
ExternalsDescription.REPO_URL:
'/path/to/local/repo',
ExternalsDescription.TAG:
'tag1',
}
data = {self._name:
{
ExternalsDescription.REQUIRED: False,
ExternalsDescription.PATH: 'junk',
ExternalsDescription.EXTERNALS: EMPTY_STR,
ExternalsDescription.REPO: rdata,
},
}
model = ExternalsDescriptionDict(data)
repo = model[self._name][ExternalsDescription.REPO]
self._repo = GitRepository('test', repo)
#
# mock methods replacing git system calls
#
@staticmethod
def _git_current_branch(branch_found, branch_name):
"""Return a function that takes the place of
repo._git_current_branch, which returns the given output."""
def my_git_current_branch():
"""mock function that can take the place of repo._git_current_branch"""
return branch_found, branch_name
return my_git_current_branch
@staticmethod
def _git_current_tag(tag_found, tag_name):
"""Return a function that takes the place of
repo._git_current_tag, which returns the given output."""
def my_git_current_tag():
"""mock function that can take the place of repo._git_current_tag"""
return tag_found, tag_name
return my_git_current_tag
@staticmethod
def _git_current_hash(hash_found, hash_name):
"""Return a function that takes the place of
repo._git_current_hash, which returns the given output."""
def my_git_current_hash():
"""mock function that can take the place of repo._git_current_hash"""
return hash_found, hash_name
return my_git_current_hash
# ------------------------------------------------------------------------
# Begin tests
# ------------------------------------------------------------------------
def test_ref_branch(self):
"""Test that we correctly identify we are on a branch
"""
self._repo._git_current_branch = self._git_current_branch(
True, 'feature3')
self._repo._git_current_tag = self._git_current_tag(True, 'foo_tag')
self._repo._git_current_hash = self._git_current_hash(True, 'abc123')
expected = 'feature3'
result = self._repo._current_ref()
self.assertEqual(result, expected)
def test_ref_detached_tag(self):
"""Test that we correctly identify that the ref is detached at a tag
"""
self._repo._git_current_branch = self._git_current_branch(False, '')
self._repo._git_current_tag = self._git_current_tag(True, 'foo_tag')
self._repo._git_current_hash = self._git_current_hash(True, 'abc123')
expected = 'foo_tag'
result = self._repo._current_ref()
self.assertEqual(result, expected)
def test_ref_detached_hash(self):
"""Test that we can identify ref is detached at a hash
"""
self._repo._git_current_branch = self._git_current_branch(False, '')
self._repo._git_current_tag = self._git_current_tag(False, '')
self._repo._git_current_hash = self._git_current_hash(True, 'abc123')
expected = 'abc123'
result = self._repo._current_ref()
self.assertEqual(result, expected)
def test_ref_none(self):
"""Test that we correctly identify that we're not in a git repo.
"""
self._repo._git_current_branch = self._git_current_branch(False, '')
self._repo._git_current_tag = self._git_current_tag(False, '')
self._repo._git_current_hash = self._git_current_hash(False, '')
result = self._repo._current_ref()
self.assertEqual(result, EMPTY_STR)
class TestGitRepositoryCheckSync(unittest.TestCase):
"""Test whether the GitRepository _check_sync_logic functionality is
correct.
Note: there are a lot of combinations of state:
- external description - tag, branch
- working copy
- doesn't exist (not checked out)
- exists, no git info - incorrect protocol, e.g. svn, or tarball?
- exists, git info
- as expected:
- different from expected:
- detached tag,
- detached hash,
- detached branch (compare remote and branch),
- tracking branch (compare remote and branch),
- same remote
- different remote
- untracked branch
Test list:
- doesn't exist
- exists no git info
- num_external * (working copy expected + num_working copy different)
- total tests = 16
"""
# NOTE(bja, 2017-11) pylint complains about long method names, but
# it is hard to differentiate tests without making them more
# cryptic. Also complains about too many public methods, but it
# doesn't really make sense to break this up.
# pylint: disable=invalid-name,too-many-public-methods
TMP_FAKE_DIR = 'fake'
TMP_FAKE_GIT_DIR = os.path.join(TMP_FAKE_DIR, '.git')
def setUp(self):
"""Setup reusable git repository object
"""
self._name = 'component'
rdata = {ExternalsDescription.PROTOCOL: 'git',
ExternalsDescription.REPO_URL:
'/path/to/local/repo',
ExternalsDescription.TAG: 'tag1',
}
data = {self._name:
{
ExternalsDescription.REQUIRED: False,
ExternalsDescription.PATH: self.TMP_FAKE_DIR,
ExternalsDescription.EXTERNALS: EMPTY_STR,
ExternalsDescription.REPO: rdata,
},
}
model = ExternalsDescriptionDict(data)
repo = model[self._name][ExternalsDescription.REPO]
self._repo = GitRepository('test', repo)
# The unit tests here don't care about the result of
# _current_ref, but we replace it here so that we don't need to
# worry about calling a possibly slow and possibly
# error-producing command (since _current_ref calls various git
# functions):
self._repo._current_ref = self._current_ref_empty
self._create_tmp_git_dir()
def tearDown(self):
"""Cleanup tmp stuff on the file system
"""
self._remove_tmp_git_dir()
def _create_tmp_git_dir(self):
"""Create a temporary fake git directory for testing purposes.
"""
if not os.path.exists(self.TMP_FAKE_GIT_DIR):
os.makedirs(self.TMP_FAKE_GIT_DIR)
def _remove_tmp_git_dir(self):
"""Remove the temporary fake git directory
"""
if os.path.exists(self.TMP_FAKE_DIR):
shutil.rmtree(self.TMP_FAKE_DIR)
#
# mock methods replacing git system calls
#
@staticmethod
def _current_ref_empty():
"""Return an empty string.
"""
return EMPTY_STR
@staticmethod
def _git_remote_origin_upstream():
"""Return an info string that is a checkout hash
"""
return GIT_REMOTE_OUTPUT_ORIGIN_UPSTREAM
@staticmethod
def _git_remote_none():
"""Return an info string that is a checkout hash
"""
return EMPTY_STR
@staticmethod
def _git_current_hash(myhash):
"""Return a function that takes the place of repo._git_current_hash,
which returns the given hash
"""
def my_git_current_hash():
"""mock function that can take the place of repo._git_current_hash"""
return 0, myhash
return my_git_current_hash
def _git_revparse_commit(self, expected_ref, mystatus, myhash):
"""Return a function that takes the place of
repo._git_revparse_commit, which returns a tuple:
(mystatus, myhash).
Expects the passed-in ref to equal expected_ref
status = 0 implies success, non-zero implies failure
"""
def my_git_revparse_commit(ref):
"""mock function that can take the place of repo._git_revparse_commit"""
self.assertEqual(expected_ref, ref)
return mystatus, myhash
return my_git_revparse_commit
# ----------------------------------------------------------------
#
# Tests where working copy doesn't exist or is invalid
#
# ----------------------------------------------------------------
def test_sync_dir_not_exist(self):
"""Test that a directory that doesn't exist returns an error status
Note: the Repository classes should be prevented from ever
working on an empty directory by the _Source object.
"""
stat = ExternalStatus()
self._repo._check_sync(stat, 'invalid_directory_name')
self.assertEqual(stat.sync_state, ExternalStatus.STATUS_ERROR)
# check_dir should only modify the sync_state, not clean_state
self.assertEqual(stat.clean_state, ExternalStatus.DEFAULT)
def test_sync_dir_exist_no_git_info(self):
"""Test that a non-existent git repo returns an unknown status
"""
stat = ExternalStatus()
# Now we over-ride the _git_remote_verbose method on the repo to return
# a known value without requiring access to git.
self._repo._git_remote_verbose = self._git_remote_origin_upstream
self._repo._tag = 'tag1'
self._repo._git_current_hash = self._git_current_hash('')
self._repo._git_revparse_commit = self._git_revparse_commit(
'tag1', 1, '')
self._repo._check_sync(stat, self.TMP_FAKE_DIR)
self.assertEqual(stat.sync_state, ExternalStatus.UNKNOWN)
# check_sync should only modify the sync_state, not clean_state
self.assertEqual(stat.clean_state, ExternalStatus.DEFAULT)
# ------------------------------------------------------------------------
#
# Tests where version in configuration file is not a valid reference
#
# ------------------------------------------------------------------------
def test_sync_invalid_reference(self):
"""Test that an invalid reference returns out-of-sync
"""
stat = ExternalStatus()
self._repo._git_remote_verbose = self._git_remote_origin_upstream
self._repo._tag = 'tag1'
self._repo._git_current_hash = self._git_current_hash('abc123')
self._repo._git_revparse_commit = self._git_revparse_commit(
'tag1', 1, '')
self._repo._check_sync_logic(stat, self.TMP_FAKE_DIR)
self.assertEqual(stat.sync_state, ExternalStatus.MODEL_MODIFIED)
# check_sync should only modify the sync_state, not clean_state
self.assertEqual(stat.clean_state, ExternalStatus.DEFAULT)
# ----------------------------------------------------------------
#
# Tests where external description specifies a tag
#
# ----------------------------------------------------------------
def test_sync_tag_on_same_hash(self):
"""Test expect tag on same hash --> status ok
"""
stat = ExternalStatus()
self._repo._git_remote_verbose = self._git_remote_origin_upstream
self._repo._tag = 'tag1'
self._repo._git_current_hash = self._git_current_hash('abc123')
self._repo._git_revparse_commit = self._git_revparse_commit(
'tag1', 0, 'abc123')
self._repo._check_sync_logic(stat, self.TMP_FAKE_DIR)
self.assertEqual(stat.sync_state, ExternalStatus.STATUS_OK)
# check_sync should only modify the sync_state, not clean_state
self.assertEqual(stat.clean_state, ExternalStatus.DEFAULT)
def test_sync_tag_on_different_hash(self):
"""Test expect tag on a different hash --> status modified
"""
stat = ExternalStatus()
self._repo._git_remote_verbose = self._git_remote_origin_upstream
self._repo._tag = 'tag1'
self._repo._git_current_hash = self._git_current_hash('def456')
self._repo._git_revparse_commit = self._git_revparse_commit(
'tag1', 0, 'abc123')
self._repo._check_sync_logic(stat, self.TMP_FAKE_DIR)
self.assertEqual(stat.sync_state, ExternalStatus.MODEL_MODIFIED)
# check_sync should only modify the sync_state, not clean_state
self.assertEqual(stat.clean_state, ExternalStatus.DEFAULT)
# ----------------------------------------------------------------
#
# Tests where external description specifies a hash
#
# ----------------------------------------------------------------
def test_sync_hash_on_same_hash(self):
"""Test expect hash on same hash --> status ok
"""
stat = ExternalStatus()
self._repo._git_remote_verbose = self._git_remote_origin_upstream
self._repo._tag = ''
self._repo._hash = 'abc'
self._repo._git_current_hash = self._git_current_hash('abc123')
self._repo._git_revparse_commit = self._git_revparse_commit(
'abc', 0, 'abc123')
self._repo._check_sync_logic(stat, self.TMP_FAKE_DIR)
self.assertEqual(stat.sync_state, ExternalStatus.STATUS_OK)
# check_sync should only modify the sync_state, not clean_state
self.assertEqual(stat.clean_state, ExternalStatus.DEFAULT)
def test_sync_hash_on_different_hash(self):
"""Test expect hash on a different hash --> status modified
"""
stat = ExternalStatus()
self._repo._git_remote_verbose = self._git_remote_origin_upstream
self._repo._tag = ''
self._repo._hash = 'abc'
self._repo._git_current_hash = self._git_current_hash('def456')
self._repo._git_revparse_commit = self._git_revparse_commit(
'abc', 0, 'abc123')
self._repo._check_sync_logic(stat, self.TMP_FAKE_DIR)
self.assertEqual(stat.sync_state, ExternalStatus.MODEL_MODIFIED)
# check_sync should only modify the sync_state, not clean_state
self.assertEqual(stat.clean_state, ExternalStatus.DEFAULT)
# ----------------------------------------------------------------
#
# Tests where external description specifies a branch
#
# ----------------------------------------------------------------
def test_sync_branch_on_same_hash(self):
"""Test expect branch on same hash --> status ok
"""
stat = ExternalStatus()
self._repo._git_remote_verbose = self._git_remote_origin_upstream
self._repo._branch = 'feature-2'
self._repo._tag = ''
self._repo._git_current_hash = self._git_current_hash('abc123')
self._repo._git_revparse_commit = (
self._git_revparse_commit('origin/feature-2', 0, 'abc123'))
self._repo._check_sync_logic(stat, self.TMP_FAKE_DIR)
self.assertEqual(stat.sync_state, ExternalStatus.STATUS_OK)
# check_sync should only modify the sync_state, not clean_state
self.assertEqual(stat.clean_state, ExternalStatus.DEFAULT)
def test_sync_branch_on_diff_hash(self):
"""Test expect branch on diff hash --> status modified
"""
stat = ExternalStatus()
self._repo._git_remote_verbose = self._git_remote_origin_upstream
self._repo._branch = 'feature-2'
self._repo._tag = ''
self._repo._git_current_hash = self._git_current_hash('abc123')
self._repo._git_revparse_commit = (
self._git_revparse_commit('origin/feature-2', 0, 'def456'))
self._repo._check_sync_logic(stat, self.TMP_FAKE_DIR)
self.assertEqual(stat.sync_state, ExternalStatus.MODEL_MODIFIED)
# check_sync should only modify the sync_state, not clean_state
self.assertEqual(stat.clean_state, ExternalStatus.DEFAULT)
def test_sync_branch_diff_remote(self):
"""Test _determine_remote_name with a different remote
"""
stat = ExternalStatus()
self._repo._git_remote_verbose = self._git_remote_origin_upstream
self._repo._branch = 'feature-2'
self._repo._tag = ''
self._repo._url = '/path/to/other/repo'
self._repo._git_current_hash = self._git_current_hash('abc123')
self._repo._git_revparse_commit = (
self._git_revparse_commit('upstream/feature-2', 0, 'def456'))
self._repo._check_sync_logic(stat, self.TMP_FAKE_DIR)
# The test passes if _git_revparse_commit is called with the
# expected argument
def test_sync_branch_diff_remote2(self):
"""Test _determine_remote_name with a different remote
"""
stat = ExternalStatus()
self._repo._git_remote_verbose = self._git_remote_origin_upstream
self._repo._branch = 'feature-2'
self._repo._tag = ''
self._repo._url = '/path/to/local/repo2'
self._repo._git_current_hash = self._git_current_hash('abc123')
self._repo._git_revparse_commit = (
self._git_revparse_commit('other/feature-2', 0, 'def789'))
self._repo._check_sync_logic(stat, self.TMP_FAKE_DIR)
# The test passes if _git_revparse_commit is called with the
# expected argument
def test_sync_branch_on_unknown_remote(self):
"""Test expect branch, but remote is unknown --> status modified
"""
stat = ExternalStatus()
self._repo._git_remote_verbose = self._git_remote_origin_upstream
self._repo._branch = 'feature-2'
self._repo._tag = ''
self._repo._url = '/path/to/unknown/repo'
self._repo._git_current_hash = self._git_current_hash('abc123')
self._repo._git_revparse_commit = (
self._git_revparse_commit('unknown_remote/feature-2', 1, ''))
self._repo._check_sync_logic(stat, self.TMP_FAKE_DIR)
self.assertEqual(stat.sync_state, ExternalStatus.MODEL_MODIFIED)
# check_sync should only modify the sync_state, not clean_state
self.assertEqual(stat.clean_state, ExternalStatus.DEFAULT)
def test_sync_branch_on_untracked_local(self):
"""Test expect branch, on untracked branch in local repo --> status ok
Setting the externals description to '.' indicates that the
user only wants to consider the current local repo state
without fetching from remotes. This is required to preserve
the current branch of a repository during an update.
"""
stat = ExternalStatus()
self._repo._git_remote_verbose = self._git_remote_origin_upstream
self._repo._branch = 'feature3'
self._repo._tag = ''
self._repo._url = '.'
self._repo._git_current_hash = self._git_current_hash('abc123')
self._repo._git_revparse_commit = (
self._git_revparse_commit('feature3', 0, 'abc123'))
self._repo._check_sync_logic(stat, self.TMP_FAKE_DIR)
self.assertEqual(stat.sync_state, ExternalStatus.STATUS_OK)
# check_sync should only modify the sync_state, not clean_state
self.assertEqual(stat.clean_state, ExternalStatus.DEFAULT)
class TestGitStatusPorcelain(unittest.TestCase):
"""Test parsing of output from git status --porcelain=v1 -z
"""
# pylint: disable=C0103
GIT_STATUS_PORCELAIN_V1_ALL = (
r' D INSTALL\0MM Makefile\0M README.md\0R cmakelists.txt\0'
r'CMakeLists.txt\0D commit-message-template.txt\0A stuff.txt\0'
r'?? junk.txt')
GIT_STATUS_PORCELAIN_CLEAN = r''
def test_porcelain_status_dirty(self):
"""Verify that git status output is considered dirty when there are
listed files.
"""
git_output = self.GIT_STATUS_PORCELAIN_V1_ALL
is_dirty = GitRepository._status_v1z_is_dirty(git_output)
self.assertTrue(is_dirty)
def test_porcelain_status_clean(self):
"""Verify that git status output is considered clean when there are no
listed files.
"""
git_output = self.GIT_STATUS_PORCELAIN_CLEAN
is_dirty = GitRepository._status_v1z_is_dirty(git_output)
self.assertFalse(is_dirty)
class TestGitCreateRemoteName(unittest.TestCase):
"""Test the create_remote_name method on the GitRepository class
"""
def setUp(self):
"""Common infrastructure for testing _create_remote_name
"""
self._rdata = {ExternalsDescription.PROTOCOL: 'git',
ExternalsDescription.REPO_URL:
'empty',
ExternalsDescription.TAG:
'very_useful_tag',
ExternalsDescription.BRANCH: EMPTY_STR,
ExternalsDescription.HASH: EMPTY_STR, }
self._repo = GitRepository('test', self._rdata)
def test_remote_git_proto(self):
"""Test remote with git protocol
"""
self._repo._url = '[email protected]:very_nice_org/useful_repo'
remote_name = self._repo._create_remote_name()
self.assertEqual(remote_name, 'very_nice_org_useful_repo')
def test_remote_https_proto(self):
"""Test remote with git protocol
"""
self._repo._url = 'https://www.github.com/very_nice_org/useful_repo'
remote_name = self._repo._create_remote_name()
self.assertEqual(remote_name, 'very_nice_org_useful_repo')
def test_remote_local_abs(self):
"""Test remote with git protocol
"""
self._repo._url = '/path/to/local/repositories/useful_repo'
remote_name = self._repo._create_remote_name()
self.assertEqual(remote_name, 'repositories_useful_repo')
def test_remote_local_rel(self):
"""Test remote with git protocol
"""
os.environ['TEST_VAR'] = '/my/path/to/repos'
self._repo._url = '${TEST_VAR}/../../useful_repo'
remote_name = self._repo._create_remote_name()
self.assertEqual(remote_name, 'path_useful_repo')
del os.environ['TEST_VAR']
class TestVerifyTag(unittest.TestCase):
"""Test logic verifying that a tag exists and is unique
"""
def setUp(self):
"""Setup reusable git repository object
"""
self._name = 'component'
rdata = {ExternalsDescription.PROTOCOL: 'git',
ExternalsDescription.REPO_URL:
'/path/to/local/repo',
ExternalsDescription.TAG: 'tag1',
}
data = {self._name:
{
ExternalsDescription.REQUIRED: False,
ExternalsDescription.PATH: 'tmp',
ExternalsDescription.EXTERNALS: EMPTY_STR,
ExternalsDescription.REPO: rdata,
},
}
model = ExternalsDescriptionDict(data)
repo = model[self._name][ExternalsDescription.REPO]
self._repo = GitRepository('test', repo)
@staticmethod
def _shell_true(url, remote=None):
_ = url
_ = remote
return 0
@staticmethod
def _shell_false(url, remote=None):
_ = url
_ = remote
return 1
@staticmethod
def _mock_function_true(ref):
_ = ref
return (TestValidRef._shell_true, '97ebc0e0deadc0de')
@staticmethod
def _mock_function_false(ref):
_ = ref
return (TestValidRef._shell_false, '97ebc0e0deadc0de')
def test_tag_not_tag_branch_commit(self):
"""Verify a non-tag returns false
"""
self._repo._git_showref_tag = self._shell_false
self._repo._git_showref_branch = self._shell_false
self._repo._git_lsremote_branch = self._shell_false
self._repo._git_revparse_commit = self._mock_function_false
self._repo._tag = 'something'
remote_name = 'origin'
received, _ = self._repo._is_unique_tag(self._repo._tag, remote_name)
self.assertFalse(received)
def test_tag_not_tag(self):
"""Verify a non-tag, untracked remote returns false
"""
self._repo._git_showref_tag = self._shell_false
self._repo._git_showref_branch = self._shell_true
self._repo._git_lsremote_branch = self._shell_true
self._repo._git_revparse_commit = self._mock_function_false
self._repo._tag = 'tag1'
remote_name = 'origin'
received, _ = self._repo._is_unique_tag(self._repo._tag, remote_name)
self.assertFalse(received)
def test_tag_indeterminant(self):
"""Verify an indeterminant tag/branch returns false
"""
self._repo._git_showref_tag = self._shell_true
self._repo._git_showref_branch = self._shell_true
self._repo._git_lsremote_branch = self._shell_true
self._repo._git_revparse_commit = self._mock_function_true
self._repo._tag = 'something'
remote_name = 'origin'
received, _ = self._repo._is_unique_tag(self._repo._tag, remote_name)
self.assertFalse(received)
def test_tag_is_unique(self):
"""Verify a unique tag match returns true
"""
self._repo._git_showref_tag = self._shell_true
self._repo._git_showref_branch = self._shell_false
self._repo._git_lsremote_branch = self._shell_false
self._repo._git_revparse_commit = self._mock_function_true
self._repo._tag = 'tag1'
remote_name = 'origin'
received, _ = self._repo._is_unique_tag(self._repo._tag, remote_name)
self.assertTrue(received)
def test_tag_is_not_hash(self):
"""Verify a commit hash is not classified as a tag
"""
self._repo._git_showref_tag = self._shell_false
self._repo._git_showref_branch = self._shell_false
self._repo._git_lsremote_branch = self._shell_false
self._repo._git_revparse_commit = self._mock_function_true
self._repo._tag = '97ebc0e0'
remote_name = 'origin'
received, _ = self._repo._is_unique_tag(self._repo._tag, remote_name)
self.assertFalse(received)
def test_hash_is_commit(self):
"""Verify a commit hash is not classified as a tag
"""
self._repo._git_showref_tag = self._shell_false
self._repo._git_showref_branch = self._shell_false
self._repo._git_lsremote_branch = self._shell_false
self._repo._git_revparse_commit = self._mock_function_true
self._repo._tag = '97ebc0e0'
remote_name = 'origin'
received, _ = self._repo._is_unique_tag(self._repo._tag, remote_name)
self.assertFalse(received)
class TestValidRef(unittest.TestCase):
"""Test logic verifying that a reference is a valid tag, branch or sha1
"""
def setUp(self):
"""Setup reusable git repository object
"""
self._name = 'component'
rdata = {ExternalsDescription.PROTOCOL: 'git',
ExternalsDescription.REPO_URL:
'/path/to/local/repo',
ExternalsDescription.TAG: 'tag1',
}
data = {self._name:
{
ExternalsDescription.REQUIRED: False,
ExternalsDescription.PATH: 'tmp',
ExternalsDescription.EXTERNALS: EMPTY_STR,
ExternalsDescription.REPO: rdata,
},
}
model = ExternalsDescriptionDict(data)
repo = model[self._name][ExternalsDescription.REPO]
self._repo = GitRepository('test', repo)
@staticmethod
def _shell_true(url, remote=None):
_ = url
_ = remote
return 0
@staticmethod
def _shell_false(url, remote=None):
_ = url
_ = remote
return 1
@staticmethod
def _mock_function_false(ref):
_ = ref
return (TestValidRef._shell_false, '')
@staticmethod
def _mock_function_true(ref):
_ = ref
return (TestValidRef._shell_true, '')
def test_valid_ref_is_invalid(self):
"""Verify an invalid reference raises an exception
"""
self._repo._git_showref_tag = self._shell_false
self._repo._git_showref_branch = self._shell_false
self._repo._git_lsremote_branch = self._shell_false
self._repo._git_revparse_commit = self._mock_function_false
self._repo._tag = 'invalid_ref'
with self.assertRaises(RuntimeError):
self._repo._check_for_valid_ref(self._repo._tag)
def test_valid_tag(self):
"""Verify a valid tag return true
"""
self._repo._git_showref_tag = self._shell_true
self._repo._git_showref_branch = self._shell_false
self._repo._git_lsremote_branch = self._shell_false
self._repo._git_revparse_commit = self._mock_function_true
self._repo._tag = 'tag1'
received = self._repo._check_for_valid_ref(self._repo._tag)
self.assertTrue(received)
def test_valid_branch(self):
"""Verify a valid tag return true
"""
self._repo._git_showref_tag = self._shell_false
self._repo._git_showref_branch = self._shell_true
self._repo._git_lsremote_branch = self._shell_false
self._repo._git_revparse_commit = self._mock_function_true
self._repo._tag = 'tag1'
received = self._repo._check_for_valid_ref(self._repo._tag)
self.assertTrue(received)
def test_valid_hash(self):
"""Verify a valid hash return true
"""
def _mock_revparse_commit(ref):
_ = ref
return (0, '56cc0b539426eb26810af9e')
self._repo._git_showref_tag = self._shell_false
self._repo._git_showref_branch = self._shell_false
self._repo._git_lsremote_branch = self._shell_false
self._repo._git_revparse_commit = _mock_revparse_commit
self._repo._hash = '56cc0b5394'
received = self._repo._check_for_valid_ref(self._repo._hash)
self.assertTrue(received)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 9,038,250,738,399,458,000 | 37.73482 | 84 | 0.595572 | false |
brefsdal/sherpa | sherpa/estmethods/__init__.py | 1 | 43921 | #_PYTHON_INSERT_SAO_COPYRIGHT_HERE_(2007)_
#_PYTHON_INSERT_GPL_LICENSE_HERE_
import numpy
_ = numpy.seterr(invalid='ignore')
from sherpa.utils import NoNewAttributesAfterInit, print_fields, Knuth_close, is_iterable, list_to_open_interval, mysgn, quad_coef, apache_muller, bisection, demuller, zeroin, OutOfBoundErr, func_counter, _multi, _ncpus
import logging
import sherpa.estmethods._est_funcs
from itertools import izip
try:
import multiprocessing
except:
pass
__all__ = ('EstNewMin', 'Covariance', 'Confidence',
'Projection', 'est_success', 'est_failure', 'est_hardmin',
'est_hardmax', 'est_hardminmax', 'est_newmin', 'est_maxiter',
'est_hitnan')
est_success = 0
est_failure = 1
est_hardmin = 2
est_hardmax = 3
est_hardminmax = 4
est_newmin = 5
est_maxiter = 6
est_hitnan = 7
# For every method listed here, we have the same goal: derive confidence
# limits for thawed parameters. Thawed parameters are allowed to vary
# during a fit; when a model has been fit to data, then the current
# parameter values are presumably the best-fit values. Think of the
# best-fit values as being at the lowest point in a valley in parameter
# space--any step away, in any direction, means a worse fit (i.e., the
# value of the fit statistic is greater). Confidence limits tell you
# how well constrained those best fit values are; i.e., are we in a deep,
# narrow valley in parameter space? If so, we can be confident the limits
# are small. But if the valley is shallow and broad, then the confidence
# limits will also be very broad.
#
# Every method is passed the same information:
# the current values of all thawed parameters;
# the soft limits of all thawed parameters;
# the hard limits of all thawed parameters;
#     the list of parameters for which we actually want confidence
# limits (this can be a subset of all thawed parameters)
# a reference to the statistic function;
# a reference to the fitting function.
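#
# A minimal sketch (illustrative only; nothing below is executed and the
# names my_stat/my_fit are hypothetical) of the callback contract assumed
# by the compute() implementations further down:
#
#     def my_stat(pars):
#         ...   # my_stat(pars)[0] must be the statistic value
#
#     def my_fit(stat_cb, pars, parmins, parmaxes):
#         ...   # result[1] holds the best-fit parameters, result[2] the statistic
#
#     Covariance().compute(my_stat, my_fit, pars, parmins, parmaxes,
#                          parhardmins, parhardmaxes, limit_parnums,
#                          freeze_par, thaw_par, report_progress, get_par_name)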
#class EstMethodError(SherpaError):
# "Reached an error while computing parameter confidence limits"
# pass
#
#class EstHardMin(EstMethodError):
# "Reached a parameter hard minimum"
# pass
#
#class EstHardMax(EstMethodError):
# "Reached a parameter hard maximum"
# pass
#
class EstNewMin(Exception):
"Reached a new minimum fit statistic"
pass
#
#class EstMaxIter(EstMethodError):
# "Reached maxmimum iterations in scaling function"
# pass
#
#class EstNaN(EstMethodError):
# "Reached a NaN during computation"
# pass
class EstMethod(NoNewAttributesAfterInit):
# defined pre-instantiation for pickling
config = {'sigma' : 1,
'eps' : 0.01,
'maxiters' : 200,
'soft_limits' : False}
def __init__(self, name, estfunc):
self._estfunc = estfunc
self.name = name
# config should be defined pre-instantiation for pickling
# however, for some unknown reason membership in self.__dict__
# requires declaration in __init__()
self.config = self.config.copy()
NoNewAttributesAfterInit.__init__(self)
def __getattr__(self, name):
if name in self.__dict__.get('config', ()):
return self.config[name]
raise AttributeError("'%s' object has no attribute '%s'" %
(type(self).__name__, name))
def __setattr__(self, name, val):
if name in self.__dict__.get('config', ()):
self.config[name] = val
else:
NoNewAttributesAfterInit.__setattr__(self, name, val)
def __repr__(self):
return ("<%s error-estimation method instance '%s'>" %
(type(self).__name__, self.name))
def __str__(self):
# Put name first always
keylist = self.config.keys()
keylist = ['name'] + keylist
full_config = {'name' : self.name}
full_config.update(self.config)
return print_fields(keylist, full_config)
def __setstate__(self, state):
self.__dict__.update(state)
# obtain config values from object class
self.__dict__['config'] = getattr(self.__class__(),'config',{})
# update new config dict with user defined from old
self.__dict__['config'].update(state.get('config',{}))
def compute(self, statfunc, fitfunc, pars,
parmins, parmaxes, parhardmins,
parhardmaxes, limit_parnums, freeze_par, thaw_par,
report_progress, get_par_name,
statargs=(), statkwargs={}):
def stat_cb(pars):
return statfunc(pars)[0]
def fit_cb(scb, pars, parmins, parmaxes, i):
# parameter i is a no-op usually
return fitfunc(scb, pars, parmins, parmaxes)[2]
# remin means reminimize -- *generally* not done (only
# proj needs to reminimize).
#
# covar still needs to pass a reminimize flag to
# get_one_sided_interval; a value less than zero is
# interpreted as "never reminimize", so pass that value here.
remin = -1.0
tol = -1.0
return self._estfunc(pars, parmins, parmaxes, parhardmins,
parhardmaxes, self.sigma, self.eps,
tol,
self.maxiters, remin, limit_parnums,
stat_cb, fit_cb, report_progress)
class Covariance(EstMethod):
def __init__(self, name='covariance'):
EstMethod.__init__(self, name, covariance)
class Confidence(EstMethod):
# defined pre-instantiation for pickling
_added_config = {'remin': 0.01,
'fast': False,
'parallel': True,
'numcores' : _ncpus,
'maxfits' : 5,
'max_rstat' : 3,
'tol' : 0.2,
'verbose' : False,
'openinterval': False }
def __init__(self, name='confidence'):
EstMethod.__init__(self, name, confidence)
# Update EstMethod.config dict with Confidence specifics
self.config.update(self._added_config)
def compute(self, statfunc, fitfunc, pars,
parmins, parmaxes, parhardmins,
parhardmaxes, limit_parnums, freeze_par, thaw_par,
report_progress, get_par_name,
statargs=(), statkwargs={}):
def stat_cb(pars):
return statfunc(pars)[0]
def fit_cb(pars, parmins, parmaxes, i):
# freeze model parameter i
(current_pars,
current_parmins,
current_parmaxes) = freeze_par(pars, parmins, parmaxes, i)
fit_pars = fitfunc(statfunc, current_pars,
current_parmins,
current_parmaxes)[1]
# If stat is not chi-squared, and fit method is
# lmdif, need to recalculate stat at end, just
# like in sherpa/sherpa/fit.py:fit()
stat = statfunc(fit_pars)[0]
#stat = fitfunc(scb, pars, parmins, parmaxes)[2]
# thaw model parameter i
thaw_par(i)
return stat
#
# convert stat call back to have the same signature as fit call back
#
def stat_cb_extra_args( fcn ):
def stat_cb_wrapper( x, *args ):
return fcn( x )
return stat_cb_wrapper
statcb = stat_cb_extra_args( stat_cb )
if 1 == len( pars ):
fitcb = statcb
else:
fitcb = fit_cb
return self._estfunc(pars, parmins, parmaxes, parhardmins,
parhardmaxes, self.sigma, self.eps,
self.tol, self.maxiters, self.remin,
self.verbose, limit_parnums,
statcb, fitcb, report_progress, get_par_name,
self.parallel, self.numcores, self.openinterval)
class Projection(EstMethod):
# defined pre-instantiation for pickling
_added_config = {'remin': 0.01,
'fast': True,
'parallel':True,
'numcores' : _ncpus,
'maxfits' : 5,
'max_rstat' : 3,
'tol' : 0.2}
def __init__(self, name='projection'):
EstMethod.__init__(self, name, projection)
# Update EstMethod.config dict with Projection specifics
self.config.update(self._added_config)
def compute(self, statfunc, fitfunc, pars,
parmins, parmaxes, parhardmins,
parhardmaxes, limit_parnums, freeze_par, thaw_par,
report_progress, get_par_name,
statargs=(), statkwargs={}):
def stat_cb(pars):
return statfunc(pars)[0]
def fit_cb(pars, parmins, parmaxes, i):
# freeze model parameter i
(current_pars,
current_parmins,
current_parmaxes) = freeze_par(pars, parmins, parmaxes, i)
fit_pars = fitfunc(statfunc, current_pars,
current_parmins,
current_parmaxes)[1]
# If stat is not chi-squared, and fit method is
# lmdif, need to recalculate stat at end, just
# like in sherpa/sherpa/fit.py:fit()
stat = statfunc(fit_pars)[0]
#stat = fitfunc(scb, pars, parmins, parmaxes)[2]
# thaw model parameter i
thaw_par(i)
return stat
return self._estfunc(pars, parmins, parmaxes, parhardmins,
parhardmaxes, self.sigma, self.eps,
self.tol,
self.maxiters, self.remin, limit_parnums,
stat_cb, fit_cb, report_progress, get_par_name,
self.parallel, self.numcores)
def covariance(pars, parmins, parmaxes, parhardmins, parhardmaxes, sigma, eps,
tol, maxiters, remin, limit_parnums, stat_cb, fit_cb,
report_progress):
# Do nothing with tol
# Do nothing with report_progress (generally fast enough we don't
# need to report back per-parameter progress)
# Even though we only want limits on certain parameters, we have to
# compute the matrix for *all* thawed parameters. So we will do that,
# and then pick the parameters of interest out of the result.
try:
info = _est_funcs.info_matrix(pars, parmins, parmaxes, parhardmins,
parhardmaxes, sigma, eps, maxiters,
remin, stat_cb)
except EstNewMin:
# catch the EstNewMin exception and attach the modified
# parameter values to the exception obj. These modified
# parvals determine the new lower statistic.
raise EstNewMin(pars)
except:
raise
# Invert matrix, take its square root and multiply by sigma to get
# parameter uncertainties; parameter uncertainties are the
# diagonal elements of the matrix.
# Use simpler matrix inversion function from numpy. If that
# doesn't work, assume it's an ill-conditioned or singular matrix,
# and call pinv from numpy -- pinv will call the SVD function to
# invert the matrix. But call pinv *only* when inv is shown not
# to work in a particular case -- use inv by default!
# The reason for this is that pinv can give back very strange
# results, when you don't *need* to use pinv, and it *also*
# happens that the ratio between smallest and largest diagonal
# elements approaches the machine precision for the data type.
# The result is that errors that come from the largest diagonal
# element are ludicrously small; you can't have a parameter value
# of order 1.0, and an error of order 10^-30, for example. The
# simpler inv function for inverting matrices does not appear to
# have the same issue.
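    # In short (illustrative summary): err_i = sigma * sqrt( (info^-1)[i, i] ),
    # which is what the diag computation below evaluates for each thawed parameter.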
invfunc = numpy.linalg.inv
inv_info = None
try:
inv_info = invfunc(info)
except numpy.linalg.linalg.LinAlgError:
# catch the SVD exception and exit gracefully
inv_info = numpy.zeros_like(info)
inv_info[:] = numpy.nan
except:
# Compatibility with pre-0.9.8 numpy
if hasattr(numpy.linalg, 'pinv'):
invfunc = numpy.linalg.pinv
else:
invfunc = numpy.linalg.generalized_inverse
try:
inv_info = invfunc(info)
except numpy.linalg.linalg.LinAlgError:
# catch the SVD exception and exit gracefully
inv_info = numpy.zeros_like(info)
inv_info[:] = numpy.nan
diag = (sigma * numpy.sqrt(inv_info)).diagonal()
# limit_parnums lists the indices of the array pars, that
# correspond to the parameters of interest. We will pick out
# the diagonal elements corresponding to entries in limits_parnums,
# and return only those bounds to the user.
upper_bounds = []
lower_bounds = []
error_flags = []
for num in limit_parnums:
eflag = est_success
ubound = diag[num]
lbound = -diag[num]
if (pars[num] + ubound < parhardmaxes[num]):
pass
else:
ubound = numpy.nan
eflag = est_hardmax
if (pars[num] + lbound > parhardmins[num]):
pass
else:
lbound = numpy.nan
if (eflag == est_hardmax):
eflag = est_hardminmax
else:
eflag = est_hardmin
upper_bounds.append(ubound)
lower_bounds.append(lbound)
error_flags.append(eflag)
return (numpy.array(lower_bounds), numpy.array(upper_bounds),
numpy.array(error_flags), 0, inv_info)
def projection(pars, parmins, parmaxes, parhardmins, parhardmaxes, sigma, eps,
tol, maxiters, remin, limit_parnums, stat_cb, fit_cb,
report_progress, get_par_name, do_parallel, numcores):
i = 0 # Iterate through parameters
# to be searched on
numsearched = len(limit_parnums) # Number of parameters to be
# searched on (*not* number
# of thawed parameters, just
# number we are searching on
# (i.e., len(limit_parnums))
lower_limits = numpy.array([]) # Lower limits for parameters
# searched on
upper_limits = numpy.array([]) # Upper limits for parameters
# searched on
eflags = numpy.array([], numpy.int) # Fail status after search for
# each parameter
nfits = 0 # Total number of fits
# _est_funcs.projection can be called on any subset of the thawed
# parameters. So we made a change here to call _est_funcs.projection
# once per parameter number listed in limit_parnums, instead of
# calling _est_funcs.projection once, with the original limit_parnums
# array. This way, we can report pack progress after the confidence
# limit search is completed for each parameter, without descending
# into the C++ code.
#
# It does mean we have to take apart the tuple returned by each call
# to _est_funcs.projection; take the data we've pulled out, and
# upon exiting the while loop, constructing a new tuple to return.
# SMD 03/17/2009
# Keep references to numpy.append, _est_funcs.projection, because
# we call these functions every time through the loop.
append = numpy.append
proj_func = _est_funcs.projection
def func(i, singleparnum, lock=None):
try:
singlebounds = proj_func(pars, parmins, parmaxes,
parhardmins, parhardmaxes,
sigma, eps, tol, maxiters,
remin, [singleparnum], stat_cb,
fit_cb)
except EstNewMin:
# catch the EstNewMin exception and attach the modified
# parameter values to the exception obj. These modified
# parvals determine the new lower statistic.
raise EstNewMin(pars)
except:
raise
if lock is not None:
lock.acquire()
report_progress(singleparnum, singlebounds[0], singlebounds[1])
if lock is not None:
lock.release()
return (singlebounds[0][0], singlebounds[1][0], singlebounds[2][0],
singlebounds[3], None)
if len(limit_parnums) < 2 or not _multi or numcores < 2:
do_parallel = False
if not do_parallel:
append = numpy.append
lower_limits = numpy.array([])
upper_limits = numpy.array([])
eflags = numpy.array([], numpy.int)
nfits = 0
for i in range(len(limit_parnums)):
singlebounds = func(i, limit_parnums[i])
lower_limits = append(lower_limits, singlebounds[0])
upper_limits = append(upper_limits, singlebounds[1])
eflags = append(eflags, singlebounds[2])
nfits = nfits + singlebounds[3]
return (lower_limits, upper_limits, eflags, nfits, None)
return parallel_est(func, limit_parnums, pars, numcores)
#################################confidence###################################
class ConfArgs( object ):
"""The class ConfArgs is responsible for the arguments to the fit
call back function."""
def __init__( self, xpars, smin, smax, hmin, hmax, target_stat ):
self.ith_par = 0
self.xpars = numpy.array( xpars, copy=True )
self.slimit = ( numpy.array( smin, copy=True ),
numpy.array( smax, copy=True ) )
self.hlimit = ( numpy.array( hmin, copy=True ),
numpy.array( hmax, copy=True ) )
self.target_stat = target_stat
def __call__( self ):
return ( self.ith_par, self.xpars, self.slimit, self.hlimit,
self.target_stat )
def __str__( self ):
a2s = numpy.array2string
msg = ''
msg += '# smin = ' + a2s(self.slimit[0],precision=6) + '\n'
msg += '# smax = ' + a2s(self.slimit[1],precision=6) + '\n'
msg += '# hmin = ' + a2s(self.hlimit[0],precision=6) + '\n'
msg += '# hmax = ' + a2s(self.hlimit[1],precision=6) + '\n#\n'
msg += '# Note: for the intermediate steps, the notation:\n'
msg += ' par.name -/+: f( x ) = stat\n'
msg += '# ==> `stat` is the statistic when parameter `par.name` is frozen at `x`\n'
        msg += '# while searching for the `lower/upper` confidence level, respectively.\n#'
return msg
def __rep__( self ):
return ( "<%s ConfArgs method instance'%s'>" %
(type(self).__name__, self.name) )
def get_par( self ):
"""return the current (worked on) par"""
return self.xpars[ self.ith_par ]
def get_hlimit( self, dir ):
""" return the current (worked on) hard limit"""
return self.hlimit[ dir ][ self.ith_par ]
def get_slimit( self, dir ):
""" return the current (worked on) soft limit"""
return self.slimit[ dir ][ self.ith_par ]
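# Illustrative sketch (not part of the original module): ConfArgs bundles the
# per-parameter state that the translated fit callback needs.  Assuming plain
# numpy arrays for the limits, typical use looks like:
#
#     myargs = ConfArgs(xpars, smin, smax, hmin, hmax, target_stat)
#     myargs.ith_par = 2                       # parameter being probed
#     par0 = myargs.get_par()                  # its current best-fit value
#     hlo, hhi = myargs.get_hlimit(0), myargs.get_hlimit(1)
#
# get_slimit()/get_hlimit() take the direction index (0 = lower, 1 = upper),
# matching ConfBracket.neg_pos below.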
class ConfBlog( object ):
def __init__( self, blogger, prefix, verbose, lock, debug=False ):
self.blogger = blogger
self.prefix = prefix
self.verbose = verbose
self.lock = lock
self.debug = debug
def __str__( self ):
return 'ConfBlog::__str__( )'
def __rep__( self ):
return 'ConfBlog::__rep__( )'
class ConfBracket( object ):
"""The class ConfBracket is reponsible for bracketing the root within
the interval (a,b) where f(a)*f(b) < 0.0"""
neg_pos = ( -1, 1 )
class Limit( object ):
def __init__( self, limit ):
self.limit = limit
class LowerLimit( Limit ):
def __init__( self, limit ):
ConfBracket.Limit.__init__( self, limit )
def __str__( self ):
str = 'LowerLimit: limit=%e' % self.limit
return str
def is_beyond_limit( self, x ):
if x < self.limit:
return True
else:
return False
class UpperLimit( Limit ):
def __init__( self, limit ):
ConfBracket.Limit.__init__( self, limit )
def __str__( self ):
str = 'UpperLimit: limit=%e' % self.limit
return str
def is_beyond_limit( self, x ):
if x > self.limit:
return True
else:
return False
def __init__( self, myargs, trial_points ):
self.myargs = myargs
self.trial_points = trial_points
self.fcn = None
    def __repr__( self ):
        return ("<%s bracket error-estimation method instance>" %
                type(self).__name__)
def __call__( self, dir, iter, step_size, open_interval, maxiters, tol,
bloginfo ):
#
        # The search ends in one of three ways: 1) a root has been found
        # (ConfRootZero), 2) an interval confining the root has been found
        # (ConfRootBracket), or 3) there is no chance of a root
        # (ConfRootNone), i.e. points up to/beyond the hard limit were tried
        # and no root was found.
#
find = trace_fcn( self.find, bloginfo )
return find( dir, iter, step_size, open_interval, maxiters, tol,
bloginfo )
def find( self, dir, iter, step_size, open_interval, maxiters, tol,
bloginfo, base=2.0 ):
assert self.fcn != None, 'callback func has not been set'
hlimit = [ ConfBracket.LowerLimit( self.myargs.get_hlimit( dir ) ),
ConfBracket.UpperLimit( self.myargs.get_hlimit( dir ) ) ]
slimit = [ ConfBracket.LowerLimit( self.myargs.get_slimit( dir ) ),
ConfBracket.UpperLimit( self.myargs.get_slimit( dir ) ) ]
xxx = self.trial_points[ 0 ]
fff = self.trial_points[ 1 ]
conf_step = ConfStep( xxx, fff )
mymaxiters = maxiters
if mymaxiters > 16:
mymaxiters = 16
plateau = 0
max_plateau_iter = 5
try:
for iter in range( mymaxiters ):
if 0 == iter:
x = conf_step.covar( dir, iter, step_size, base )
elif 1 == iter:
x = conf_step.secant( dir, iter, step_size, base )
#x = conf_step.covar( dir, iter, step_size, base )
else:
x = conf_step.quad( dir, iter, step_size, base, bloginfo )
if x is None or numpy.isnan( x ):
return ConfRootNone( )
# Make sure x is not beyond the **hard** limit
if hlimit[ dir ].is_beyond_limit( x ):
x = hlimit[ dir ].limit
f = self.fcn( x, self.myargs( ) )
#print 'find(): beyond hard limit: f(%.14e)=%.14e' % (x,f)
if abs( f ) <= tol:
return ConfRootZero( x )
if f >= 0.0:
return ConfRootBracket( self.fcn, self.trial_points,
open_interval )
else:
return ConfRootNone( )
elif slimit[ dir ].is_beyond_limit( x ):
f = self.fcn( x, self.myargs( ) )
#print 'find(): beyond soft limit: f(%.14e)=%.14e' % (x,f)
if abs( f ) <= tol:
return ConfRootZero( x )
if f >= 0.0:
return ConfRootBracket( self.fcn, self.trial_points,
open_interval )
elif f < fff[ -2 ]:
# if the fit beyond the soft limit is a better fit
# then the confidence for the parameter does not exist
return ConfRootNone( )
else:
f = self.fcn( x, self.myargs( ) )
#print 'find(): f(%.14e)=%.14e' % (x,f)
if abs( f ) <= tol:
return ConfRootZero( x )
elif f >= 0.0:
return ConfRootBracket( self.fcn, self.trial_points,
open_interval )
if Knuth_close( fff[-2], fff[-1], 1.0e-6 ):
plateau += 1
if plateau > max_plateau_iter:
#print 'find( %d ): plateau = %d', (iter,plateau)
return ConfRootNone( None )
#else:
# if plateau > 0:
# plateau -= 1
return ConfRootNone( None )
except OutOfBoundErr:
return ConfRootNone( )
class ConfRootNone( object ):
"""The base class for the root of the confidence interval"""
def __init__( self, root=None ):
"""If self.root == None, then
1) points up to the hard limits were tried and it was not possible
to bracketed the solution.
2) a parameter beyond the soft limit has been tried and the new stat
was found to be **less** then the initial minimum"""
self.root = root
def __call__( self, tol, bloginfo ):
return self.root
def __str__( self ):
        return 'No possible root exists'
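# Illustrative note (not part of the original source): the three ConfRoot*
# classes are used polymorphically by ConfBracket.find().  Whatever object it
# returns is then called with (tol, bloginfo), e.g.
#
#     root  = bracket(dir, iter, step, open_interval, maxiters, eps, myblog)
#     bound = root(eps, myblog)   # None, a scalar root, or an open interval
#
# ConfRootNone yields None, ConfRootZero yields the root already found during
# bracketing, and ConfRootBracket runs zeroin() on the bracketed interval.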
class ConfRootBracket( ConfRootNone ):
"""The class contains the bracket where the confidence root has
been bracketed, ie where f(a)*f(b) < 0"""
def __init__( self, fcn, trial_points, open_interval ):
ConfRootNone.__init__( self, None )
self.fcn = fcn
self.trial_points = trial_points
self.open_interval = open_interval
def __call__( self, tol, bloginfo ):
def warn_user_about_open_interval( list ):
if bloginfo.lock is not None:
bloginfo.lock.acquire()
if 0 == bloginfo.verbose:
prefix = '%s ' % bloginfo.prefix.lstrip()
else:
prefix = '%s ' % bloginfo.prefix
interval = list_to_open_interval( list )
bloginfo.blogger.info( prefix +
'WARNING: The confidence level lies within '
+ interval )
if bloginfo.lock is not None:
bloginfo.lock.release()
return
xxx = self.trial_points[ 0 ]
fff = self.trial_points[ 1 ]
if mysgn( fff[ -2 ] ) == mysgn( fff[ -1 ] ):
self.root = None
return None
myzeroin = trace_fcn( zeroin, bloginfo )
answer = myzeroin( self.fcn, xxx[ -2 ], xxx[ -1 ], fa=fff[ -2 ],
fb=fff[ -1 ], maxfev=32, tol=tol )
if abs( answer[0][1] ) > tol:
xafa = answer[ 1 ][ 0 ]
xa = xafa[ 0 ]
fa = xafa[ 1 ]
xbfb = answer[ 1 ][ 1 ]
xb = xbfb[ 0 ]
fb = xbfb[ 1 ]
if mysgn( fa ) != mysgn( fb ):
if False == self.open_interval:
warn_user_about_open_interval( [ xa, xb ] )
return ( xa + xb ) / 2.0
else:
if xa < xb:
return ( xa, xb )
else:
return ( xb, xa )
else:
return None
self.root = answer[ 0 ][ 0 ]
return self.root
def __str__( self ):
str = 'root is within the interval ( f(%e)=%e, f(%e)=%e )' \
% ( self.trial_points[ 0 ][ -2 ], self.trial_points[ 1 ][ -2 ],
self.trial_points[ 0 ][ -1 ], self.trial_points[ 1 ][ -1 ], )
return str
class ConfRootZero( ConfRootNone ):
"""The class with the root/zero of the confidence interval"""
def __init__( self, root ):
ConfRootNone.__init__( self, root )
def __str__( self ):
str = 'root = %e' % self.root
return str
class ConfStep( object ):
def __init__( self, xtrial, ftrial ):
self.xtrial = xtrial
self.ftrial = ftrial
def covar( self, dir, iter, stepsize, base ):
return self.xtrial[ -1 ] + \
ConfBracket.neg_pos[ dir ] * pow( base, iter ) * stepsize
#return self.xtrial[ 0 ] + \
# ConfBracket.neg_pos[ dir ] * pow( base, iter ) * stepsize
def Halley( self, coeffs, x, maxfev=8, tol=1.0e-3 ):
for nfev in range( maxfev ):
ax = coeffs[ 0 ] * x
fval = ( ax + coeffs[ 1 ] ) * x + coeffs[ 2 ]
if abs( fval ) <= tol:
return [x, fval]
fdif = 2.0 * ax + coeffs[ 1 ]
fdif2 = 2.0
numer = 2.0 * fval * fdif
denom = 2.0 * fdif * fdif - fval * fdif2
x -= numer / denom
nfev += 1
return [x, fval]
def is_same_dir( self, dir, current_pos, proposed_pos ):
delta = proposed_pos - current_pos
return mysgn( delta ) == ConfBracket.neg_pos[ dir ]
def quad( self, dir, iter, step_size, base, bloginfo ):
coeffs = quad_coef( self.xtrial[ -3: ], self.ftrial[ -3: ] )
delta = ConfBracket.neg_pos[ dir ]
delta *= abs( self.xtrial[ -1 ] - self.xtrial[ -2 ] )
lastx = self.xtrial[ -1 ]
mroot = demuller( numpy.poly1d( coeffs ), lastx + delta,
lastx + 2 * delta, lastx + 3 * delta,
tol=1.0e-2 )
xroot = mroot[ 0 ][ 0 ]
if xroot is None or numpy.isnan( xroot ):
return self.covar( dir, iter, step_size, base )
try:
Halley = trace_fcn( self.Halley, bloginfo )
[xroot, froot] = Halley( coeffs, xroot, tol=1.0e-3 )
except ZeroDivisionError:
xroot = None
if (None != xroot and False == numpy.isnan( xroot )) and \
self.is_same_dir( dir, self.xtrial[ -1 ], xroot ):
return xroot
else:
return self.covar( dir, iter, step_size, base )
def secant( self, dir, iter, step_size, base ):
xb = self.xtrial[ -2 ]
fb = self.ftrial[ -2 ]
xa = self.xtrial[ -1 ]
fa = self.ftrial[ -1 ]
if abs( fb ) > abs( fa ) or 0.0 == fa:
return self.covar( dir, iter, step_size, base )
s = fb / fa
p = ( xa - xb ) * s
if 1.0 == s:
return self.covar( dir, iter, step_size, base )
q = 1.0 - s
x = xb - p / q
if self.is_same_dir( dir, xa, x ):
return x
else:
return self.covar( dir, iter, step_size, base )
def trace_fcn( fcn, bloginfo ):
if False == bloginfo.debug:
return fcn
from itertools import chain
def echo( *args, **kwargs ):
        '''compact but more detail than the debugger'''
name = fcn.__name__
str = '%s%s(%s)' % (bloginfo.prefix,fcn.__name__, ", ".join(map(repr, chain(args, kwargs.values()))))
bloginfo.blogger.info( str )
return fcn( *args, **kwargs )
def debugger( *args, **kwargs ):
str = '%s%s( ' % ( bloginfo.prefix, fcn.__name__ )
if len( args ) > 1:
str += args[ 0 ].__str__( )
for arg in args[ 1: ]:
str = '%s, %s' % ( str, arg )
for key in kwargs.iterkeys( ):
value = kwargs[ key ]
str = '%s, %s=%s' % ( str, key, value )
val = fcn( *args, **kwargs )
str += ' ) %s ' % val
bloginfo.blogger.info( str )
return val
return debugger
def confidence(pars, parmins, parmaxes, parhardmins, parhardmaxes, sigma, eps,
tol, maxiters, remin, verbose, limit_parnums, stat_cb,
fit_cb, report_progress, get_par_name, do_parallel, numcores,
open_interval):
def get_prefix( index, name, minus_plus ):
'''To print the prefix/indent when verbose is on'''
prefix = [[],[]]
blank = 3 * index * ' '
for dir in range( 2 ):
prefix[ dir ] = blank + name + ' ' + minus_plus[ dir ] + ':'
return prefix
def get_delta_root( arg, dir, par_at_min ):
my_neg_pos = ConfBracket.neg_pos[ dir ]
if is_iterable( arg ):
return arg
#return map( lambda x: my_neg_pos * abs( x - par_at_min ), arg )
elif None != arg:
arg -= par_at_min
return my_neg_pos * abs( arg )
else:
return arg
def get_step_size( error_scales, upper_scales, index, par ):
if 0 != error_scales[ index ]:
# if covar error is NaN then set it to fraction of the par value.
ith_covar_err = 0.0625 * abs( par )
else:
ith_covar_err = abs( upper_scales[ index ] )
if 0.0 == ith_covar_err:
# just in case covar and/or par is 0
ith_covar_err = 1.0e-6
return ith_covar_err
def monitor_func( fcn, history ):
def myfunc( x, *args ):
fval = fcn( x, *args )
history[ 0 ].append( x )
history[ 1 ].append( fval )
return fval
return myfunc
def print_status( myblog, verbose, prefix, answer, lock ):
if lock is not None:
lock.acquire()
if 0 == verbose:
msg = '%s\t' % prefix.lstrip()
else:
msg = '%s\t' % prefix
if is_iterable( answer ):
msg += list_to_open_interval( answer )
elif answer is None:
msg += '-----'
else:
msg += '%g' % answer
myblog( msg )
if lock is not None:
lock.release()
#
# Work in the translated coordinate. Hence the 'errors/confidence'
# are the zeros/roots in the translated coordinate system.
#
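    # Worked restatement (illustrative): with
    #
    #     target_stat = orig_min_stat + sigma**2
    #
    # the wrapper below evaluates
    #
    #     translated_stat(x) = fit_stat(pars with pars[ith_par] = x) - target_stat
    #
    # so translated_stat equals -delta_stat at the best fit (see the first
    # trial point pushed in func() below) and the confidence bound is simply
    # the zero/root of translated_stat in each direction.
    #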
def translated_fit_cb( fcn, myargs ):
def translated_fit_cb_wrapper( x, *args ):
hlimit = myargs.hlimit
slimit = myargs.slimit
hmin = hlimit[ 0 ]
hmax = hlimit[ 1 ]
xpars = myargs.xpars
ith_par = myargs.ith_par
# The parameter must be within the hard limits
if x < hmin[ ith_par ] or x > hmax[ ith_par ]:
raise OutOfBoundErr
smin = slimit[ 0 ]
smax = slimit[ 1 ]
orig_ith_xpar = xpars[ ith_par ]
xpars[ ith_par ] = x
translated_stat = fcn( xpars, smin, smax, ith_par ) - myargs.target_stat
xpars[ ith_par ] = orig_ith_xpar
return translated_stat
return translated_fit_cb_wrapper
def verbose_fitcb( fcn, bloginfo ):
if 0 == bloginfo.verbose:
return fcn
def verbose_fcn( x, *args ):
fval = fcn( x, *args )
str = '%s f( %e ) =' % ( bloginfo.prefix, x )
if fval is None:
str = '%s None' % str
else:
str = '%s %e' % ( str, fval )
bloginfo.blogger.info( str )
return fval
return verbose_fcn
sherpablog = logging.getLogger( 'sherpa' ) # where to print progress report
# Get minimum fit statistic, and calculate target statistic value
orig_min_stat = stat_cb(pars)
delta_stat = sigma * sigma
target_stat = orig_min_stat + delta_stat
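    # For example (standard likelihood/chi-square intervals, quoted here only
    # as an illustration): sigma=1 gives delta_stat=1 and sigma~1.645 gives
    # delta_stat~2.71, the usual 68%/90% single-parameter criteria.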
lower_scales = None
upper_scales = None
error_scales = None
nfits = 0
results = None
try:
(lower_scales, upper_scales, error_scales, nfits,
results) = covariance( pars, parmins, parmaxes, parhardmins,
parhardmaxes, 1.0, eps, tol, maxiters,
remin, limit_parnums, stat_cb,
fit_cb, report_progress )
except EstNewMin, e:
raise e
except:
error_scales = numpy.array( len( pars ) * [ est_hardminmax ] )
debug = False # for internal use only
myargs = ConfArgs( pars, parmins, parmaxes, parhardmins, parhardmaxes,
target_stat )
if 0 != verbose:
msg = '#\n# f' + numpy.array2string(numpy.asarray(pars),precision=6)
msg += ' = %e\n' % orig_min_stat
msg += '# sigma = %e\n' % sigma
msg += '# target_stat = %e\n' % target_stat
msg += '# tol = %e\n' % eps
msg += '%s' % myargs
sherpablog.info( msg )
dict = { }
def func( counter, singleparnum, lock=None ):
# nfev contains the number of times it was fitted
nfev, counter_cb = func_counter( fit_cb )
#
# These are the bounds to be returned by this method
#
conf_int = [ [], [] ]
error_flags = []
#
# If the user has requested a specific parameter to be
# calculated then 'ith_par' represents the index of the
# free parameter to deal with.
#
myargs.ith_par = singleparnum
fitcb = translated_fit_cb( counter_cb, myargs )
par_name = get_par_name( myargs.ith_par )
ith_covar_err = get_step_size( error_scales, upper_scales, counter,
pars[ myargs.ith_par ] )
trial_points = [ [ ], [ ] ]
fitcb = monitor_func( fitcb, trial_points )
bracket = ConfBracket( myargs, trial_points )
# the parameter name is set, may as well get the prefix
prefix = get_prefix( counter, par_name, ['-', '+' ] )
myfitcb = [ verbose_fitcb( fitcb,
ConfBlog(sherpablog,prefix[0],verbose,lock) ),
verbose_fitcb( fitcb,
ConfBlog(sherpablog,prefix[1],verbose,lock) ) ]
for dir in range( 2 ):
#
# trial_points stores the history of the points for the
# parameter which has been evaluated in order to locate
# the root. Note the first point is 'given' since the info
# of the minimum is crucial to the search.
#
bracket.trial_points[0].append( pars[ myargs.ith_par ] )
bracket.trial_points[1].append( - delta_stat )
myblog = ConfBlog( sherpablog, prefix[ dir ], verbose, lock,
debug )
# have to set the callback func otherwise disaster.
bracket.fcn = myfitcb[ dir ]
root = bracket( dir, iter, ith_covar_err, open_interval, maxiters,
eps, myblog )
myzero = root( eps, myblog )
delta_zero = get_delta_root( myzero, dir, pars[ myargs.ith_par ] )
conf_int[ dir ].append( delta_zero )
status_prefix = get_prefix( counter, par_name, ['lower bound',
'upper bound' ] )
print_status( myblog.blogger.info, verbose, status_prefix[ dir ],
delta_zero, lock )
error_flags.append( est_success )
#
        # include the minimum point to separate the -/+ interval
#
dict[ par_name ] = trial_points
return ( conf_int[ 0 ][0], conf_int[ 1 ][0], error_flags[0],
nfev[0], None )
if len(limit_parnums) < 2 or not _multi or numcores < 2:
do_parallel = False
if not do_parallel:
lower_limits = []
upper_limits = []
eflags = []
nfits = 0
for i in range(len(limit_parnums)):
lower_limit, upper_limit, flags, nfit, extra = func(i, limit_parnums[i])
lower_limits.append(lower_limit)
upper_limits.append(upper_limit)
eflags.append(flags)
nfits += nfit
return (lower_limits, upper_limits, eflags, nfits, None)
return parallel_est(func, limit_parnums, pars, numcores)
#################################confidence###################################
def parallel_est(estfunc, limit_parnums, pars, numcores=_ncpus):
tasks = []
def worker(out_q, err_q, parids, parnums, parvals, lock):
results = []
for parid, singleparnum in izip(parids, parnums):
try:
result = estfunc(parid, singleparnum, lock)
results.append( (parid, result) )
except EstNewMin:
                # catch the EstNewMin exception and put the exception
                # class and the modified parameter values on the error queue.
                # These modified parvals determine the new lower statistic.
                # The exception class will be instantiated and re-raised with
                # the parameter values attached; C++/Python exceptions are not
                # picklable for use in the queue.
err_q.put( EstNewMin(parvals) )
return
except Exception, e:
#err_q.put( e.__class__() )
err_q.put(e)
return
out_q.put(results)
# The multiprocessing manager provides references to process-safe
# shared objects like Queue and Lock
manager = multiprocessing.Manager()
out_q = manager.Queue()
err_q = manager.Queue()
lock = manager.Lock()
size = len(limit_parnums)
parids = numpy.arange(size)
    # if len(limit_parnums) is less than numcores, only use that many
    # processes
if size < numcores:
numcores = size
# group limit_parnums into numcores-worth of chunks
limit_parnums = numpy.array_split(limit_parnums, numcores)
parids = numpy.array_split(parids, numcores)
tasks = [multiprocessing.Process(target=worker,
args=(out_q, err_q, parid, parnum, pars, lock))
for parid, parnum in izip(parids, limit_parnums)]
return run_tasks(tasks, out_q, err_q, size)
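# Illustrative example (not part of the original code) of how the chunking
# above behaves, assuming 5 requested parameters on 2 cores:
#
#     numpy.array_split(numpy.arange(5), 2)  ->  [array([0, 1, 2]), array([3, 4])]
#
# Each worker therefore gets a contiguous block of parameter indices, and
# run_tasks() below restores the original ordering via the parid result slots.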
def run_tasks(tasks, out_q, err_q, size):
die = (lambda tasks : [task.terminate() for task in tasks
if task.exitcode is None])
try:
for task in tasks:
task.start()
for task in tasks:
task.join()
except KeyboardInterrupt, e:
# kill all slave processes on ctrl-C
die(tasks)
raise e
if not err_q.empty():
die(tasks)
raise err_q.get()
lower_limits = size*[None]
upper_limits = size*[None]
eflags = size*[None]
nfits = 0
while not out_q.empty():
for parid, singlebounds in out_q.get():
# Have to guarantee that the tuple returned by projection
# is always (array, array, array, int) for this to work.
lower_limits[parid] = singlebounds[0]
upper_limits[parid] = singlebounds[1]
eflags[parid] = singlebounds[2]
nfits += singlebounds[3]
return (lower_limits, upper_limits, eflags, nfits, None)
| gpl-2.0 | 2,263,206,828,706,974,500 | 34.88317 | 219 | 0.526787 | false |
olivierverdier/sfepy | sfepy/postprocess/dataset_manager.py | 1 | 9486 | """
Code to help with managing a TVTK data set in Pythonic ways.
"""
# Author: Prabhu Ramachandran <[email protected]>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
from enthought.traits.api import (HasTraits, Instance, Array, Str,
Property, Dict)
from enthought.tvtk.api import tvtk
from enthought.tvtk.array_handler import array2vtk
######################################################################
# Utility functions.
######################################################################
def get_array_type(arr):
"""Returns if the array is a scalar ('scalars'), vector
('vectors') or tensor ('tensors'). It looks at the number of
components to decide. If it has a wierd number of components it
returns the empty string.
"""
n = arr.number_of_components
ret = {1: 'scalars', 3: 'vectors', 4: 'scalars', 9:'tensors'}
return ret.get(n) or ''
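# For example (illustrative only, derived from the mapping above):
#
#     get_array_type(arr_with_3_components)      -> 'vectors'
#     get_array_type(arr_with_9_components)      -> 'tensors'
#     get_array_type(arr_with_1_or_4_components) -> 'scalars'
#     get_array_type(arr_with_2_components)      -> ''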
def get_attribute_list(data):
""" Gets scalar, vector and tensor information from the given data
(either cell or point data).
"""
attr = {'scalars':[], 'vectors':[], 'tensors':[]}
if data is not None:
n = data.number_of_arrays
for i in range(n):
name = data.get_array_name(i)
t = get_array_type(data.get_array(i))
if len(t) > 0 and name is not None:
attr[t].extend([name])
def _mk_first(lst, value):
"""Makes the specified `value` the first item in `lst`."""
lst.remove(value)
lst.insert(0, value)
attr1 = attr.copy()
for a in attr:
v = getattr(data, a)
if v is not None:
name = v.name
if name is not None:
try:
_mk_first(attr[a], v.name)
except ValueError:
# Sometimes we have a multi-component scalar.
attr1[a].insert(0, name)
return attr1
def get_all_attributes(obj):
"""Gets the scalar, vector and tensor attributes that are
available in the given VTK data object.
"""
point_attr = get_attribute_list(obj.point_data)
cell_attr = get_attribute_list(obj.cell_data)
return point_attr, cell_attr
################################################################################
# `DatasetManager` class.
################################################################################
class DatasetManager(HasTraits):
# The TVTK dataset we manage.
dataset = Instance(tvtk.DataSet)
# Our output, this is the dataset modified by us with different
# active arrays.
output = Property(Instance(tvtk.DataSet))
# The point scalars for the dataset. You may manipulate the arrays
# in-place. However adding new keys in this dict will not set the
# data in the `dataset` for that you must explicitly call
# `add_array`.
point_scalars = Dict(Str, Array)
# Point vectors.
point_vectors = Dict(Str, Array)
# Point tensors.
point_tensors = Dict(Str, Array)
# The cell scalars for the dataset.
cell_scalars = Dict(Str, Array)
cell_vectors = Dict(Str, Array)
cell_tensors = Dict(Str, Array)
# This filter allows us to change the attributes of the data
# object and will ensure that the pipeline is properly taken care
# of. Directly setting the array in the VTK object will not do
# this.
_assign_attribute = Instance(tvtk.AssignAttribute, args=(),
allow_none=False)
######################################################################
# Public interface.
######################################################################
def add_array(self, array, name, category='point'):
"""
        Add an array to the dataset under the specified category ('point' or
'cell').
"""
assert len(array.shape) <= 2, "Only 2D arrays can be added."
data = getattr(self.dataset, '%s_data'%category)
if len(array.shape) == 2:
assert array.shape[1] in [1, 3, 4, 9], \
"Only Nxm arrays where (m in [1,3,4,9]) are supported"
va = tvtk.to_tvtk(array2vtk(array))
va.name = name
data.add_array(va)
mapping = {1:'scalars', 3: 'vectors', 4: 'scalars',
9: 'tensors'}
dict = getattr(self, '%s_%s'%(category,
mapping[array.shape[1]]))
dict[name] = array
else:
va = tvtk.to_tvtk(array2vtk(array))
va.name = name
data.add_array(va)
dict = getattr(self, '%s_scalars'%(category))
dict[name] = array
def remove_array(self, name, category='point'):
"""Remove an array by its name and optional category (point and
cell). Returns the removed array.
"""
type = self._find_array(name, category)
data = getattr(self.dataset, '%s_data'%category)
data.remove_array(name)
d = getattr(self, '%s_%s'%(category, type))
return d.pop(name)
def rename_array(self, name1, name2, category='point'):
"""Rename a particular array from `name1` to `name2`.
"""
type = self._find_array(name1, category)
data = getattr(self.dataset, '%s_data'%category)
arr = data.get_array(name1)
arr.name = name2
d = getattr(self, '%s_%s'%(category, type))
d[name2] = d.pop(name1)
def activate(self, name, category='point'):
"""Make the specified array the active one.
"""
type = self._find_array(name, category)
self._activate_data_array(type, category, name)
def update(self):
"""Update the dataset when the arrays are changed.
"""
self.dataset.modified()
self._assign_attribute.update()
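    # Illustrative usage sketch (assumptions: `mesh` is any tvtk.DataSet and
    # `temps` is a numpy array with one value per point; the names are made
    # up and this is not part of the original API docs):
    #
    #     dm = DatasetManager(dataset=mesh)
    #     dm.add_array(temps, 'temperature', category='point')
    #     dm.activate('temperature', category='point')  # make it the active scalars
    #     dm.update()                                   # push changes down the pipeline
    #     viewer_input = dm.output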
######################################################################
# Non-public interface.
######################################################################
def _dataset_changed(self, value):
self._setup_data()
self._assign_attribute.input = value
def _get_output(self):
return self._assign_attribute.output
def _setup_data(self):
"""Updates the arrays from what is available in the input data.
"""
input = self.dataset
pnt_attr, cell_attr = get_all_attributes(input)
self._setup_data_arrays(cell_attr, 'cell')
self._setup_data_arrays(pnt_attr, 'point')
def _setup_data_arrays(self, attributes, d_type):
"""Given the dict of the attributes from the
`get_all_attributes` function and the data type (point/cell)
data this will setup the object and the data.
"""
attrs = ['scalars', 'vectors', 'tensors']
aa = self._assign_attribute
input = self.dataset
data = getattr(input, '%s_data'%d_type)
for attr in attrs:
values = attributes[attr]
# Get the arrays from VTK, create numpy arrays and setup our
# traits.
arrays = {}
for name in values:
va = data.get_array(name)
npa = va.to_array()
# Now test if changes to the numpy array are reflected
# in the VTK array, if they are we are set, else we
# have to set the VTK array back to the numpy array.
if len(npa.shape) > 1:
old = npa[0,0]
npa[0][0] = old - 1
if abs(va[0][0] - npa[0,0]) > 1e-8:
va.from_array(npa)
npa[0][0] = old
else:
old = npa[0]
npa[0] = old - 1
if abs(va[0] - npa[0]) > 1e-8:
va.from_array(npa)
npa[0] = old
arrays[name] = npa
setattr(self, '%s_%s'%(d_type, attr), arrays)
def _activate_data_array(self, data_type, category, name):
"""Activate (or deactivate) a particular array.
Given the nature of the data (scalars, vectors etc.) and the
type of data (cell or points) it activates the array given by
its name.
        Parameters
        ----------
data_type: one of 'scalars', 'vectors', 'tensors'
category: one of 'cell', 'point'.
name: string of array name to activate.
"""
input = self.dataset
data = None
data = getattr(input, category + '_data')
method = getattr(data, 'set_active_%s'%data_type)
if len(name) == 0:
# If the value is empty then we deactivate that attribute.
method(None)
else:
aa = self._assign_attribute
method(name)
aa.assign(name, data_type.upper(), category.upper() +'_DATA')
aa.update()
def _find_array(self, name, category='point'):
"""Return information on which kind of attribute contains the
specified named array in a particular category."""
types = ['scalars', 'vectors', 'tensors']
for type in types:
attr = '%s_%s'%(category, type)
d = getattr(self, attr)
if name in d.keys():
return type
raise KeyError('No %s array named %s available in dataset'
%(category, name))
| bsd-3-clause | -8,936,205,743,073,525,000 | 35.767442 | 81 | 0.519713 | false |
iproduct/course-social-robotics | 11-dnn-keras/venv/Lib/site-packages/matplotlib/sphinxext/mathmpl.py | 1 | 3759 | import hashlib
from pathlib import Path
from docutils import nodes
from docutils.parsers.rst import Directive, directives
import sphinx
import matplotlib as mpl
from matplotlib import cbook
from matplotlib.mathtext import MathTextParser
mathtext_parser = MathTextParser("Bitmap")
# Define LaTeX math node:
class latex_math(nodes.General, nodes.Element):
pass
def fontset_choice(arg):
return directives.choice(arg, MathTextParser._font_type_mapping)
def math_role(role, rawtext, text, lineno, inliner,
options={}, content=[]):
i = rawtext.find('`')
latex = rawtext[i+1:-1]
node = latex_math(rawtext)
node['latex'] = latex
node['fontset'] = options.get('fontset', 'cm')
return [node], []
math_role.options = {'fontset': fontset_choice}
class MathDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {'fontset': fontset_choice}
def run(self):
latex = ''.join(self.content)
node = latex_math(self.block_text)
node['latex'] = latex
node['fontset'] = self.options.get('fontset', 'cm')
return [node]
# This uses mathtext to render the expression
def latex2png(latex, filename, fontset='cm'):
latex = "$%s$" % latex
with mpl.rc_context({'mathtext.fontset': fontset}):
if Path(filename).exists():
depth = mathtext_parser.get_depth(latex, dpi=100)
else:
try:
depth = mathtext_parser.to_png(filename, latex, dpi=100)
except Exception:
cbook._warn_external(
f"Could not render math expression {latex}")
depth = 0
return depth
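# Illustrative reST usage of the role/directive registered in setup() below
# (example markup only, not part of this module):
#
#     Inline math: :mathmpl:`\alpha > \beta`
#
#     .. mathmpl::
#        :fontset: stix
#
#        \int_0^\infty e^{-x^2}\,dx = \frac{\sqrt{\pi}}{2}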
# LaTeX to HTML translation stuff:
def latex2html(node, source):
inline = isinstance(node.parent, nodes.TextElement)
latex = node['latex']
fontset = node['fontset']
name = 'math-{}'.format(
hashlib.md5((latex + fontset).encode()).hexdigest()[-10:])
destdir = Path(setup.app.builder.outdir, '_images', 'mathmpl')
destdir.mkdir(parents=True, exist_ok=True)
dest = destdir / f'{name}.png'
depth = latex2png(latex, dest, fontset)
if inline:
cls = ''
else:
cls = 'class="center" '
if inline and depth != 0:
style = 'style="position: relative; bottom: -%dpx"' % (depth + 1)
else:
style = ''
return (f'<img src="{setup.app.builder.imgpath}/mathmpl/{name}.png"'
f' {cls}{style}/>')
def setup(app):
setup.app = app
# Add visit/depart methods to HTML-Translator:
def visit_latex_math_html(self, node):
source = self.document.attributes['source']
self.body.append(latex2html(node, source))
def depart_latex_math_html(self, node):
pass
# Add visit/depart methods to LaTeX-Translator:
def visit_latex_math_latex(self, node):
inline = isinstance(node.parent, nodes.TextElement)
if inline:
self.body.append('$%s$' % node['latex'])
else:
self.body.extend(['\\begin{equation}',
node['latex'],
'\\end{equation}'])
def depart_latex_math_latex(self, node):
pass
app.add_node(latex_math,
html=(visit_latex_math_html, depart_latex_math_html),
latex=(visit_latex_math_latex, depart_latex_math_latex))
app.add_role('mathmpl', math_role)
app.add_directive('mathmpl', MathDirective)
if sphinx.version_info < (1, 8):
app.add_role('math', math_role)
app.add_directive('math', MathDirective)
metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
return metadata
| gpl-2.0 | 8,906,122,994,853,540,000 | 28.833333 | 73 | 0.611865 | false |
mitodl/bootcamp-ecommerce | profiles/utils_test.py | 1 | 2363 | """User utils tests"""
import pytest
from profiles.utils import ensure_active_user, is_duplicate_username_error, usernameify
@pytest.mark.parametrize(
"full_name,email,expected_username",
[
[" John Doe ", None, "john-doe"],
["Tabby Tabberson", None, "tabby-tabberson"],
["Àccèntèd Ñame, Ësq.", None, "àccèntèd-ñame-ësq"],
["-Dashy_St._Underscores-", None, "dashy-st-underscores"],
["Repeated-----Chars___Jr.", None, "repeated-chars-jr"],
["Numbers123 !$!@ McStrange!!##^", None, "numbers-mcstrange"],
["Кирил Френков", None, "кирил-френков"],
["年號", None, "年號"],
["abcdefghijklmnopqrstuvwxyz", None, "abcdefghijklmnopqrst"],
["ai bi cı dI eİ fI", None, "ai-bi-ci-di-ei-fi"],
["", "[email protected]", "someemail"],
],
)
def test_usernameify(mocker, full_name, email, expected_username):
"""usernameify should turn a user's name into a username, or use the email if necessary"""
# Change the username max length to 20 for test data simplicity's sake
temp_username_max_len = 20
mocker.patch("profiles.utils.USERNAME_MAX_LEN", temp_username_max_len)
patched_log_error = mocker.patch("profiles.utils.log.error")
assert usernameify(full_name, email=email) == expected_username
assert patched_log_error.called == bool(email and not full_name)
def test_usernameify_fail():
"""usernameify should raise an exception if the full name and email both fail to produce a username"""
with pytest.raises(ValueError):
assert usernameify("!!!", email="[email protected]")
@pytest.mark.parametrize(
"exception_text,expected_value",
[
["DETAILS: (username)=(ABCDEFG) already exists", True],
["DETAILS: (email)=(ABCDEFG) already exists", False],
],
)
def test_is_duplicate_username_error(exception_text, expected_value):
"""
is_duplicate_username_error should return True if the exception text provided indicates a duplicate username error
"""
assert is_duplicate_username_error(exception_text) is expected_value
def test_ensure_active_user(user):
"""
    Test that ensure_active_user activates an inactive user
"""
user.is_active = False
user.save()
assert not user.is_active
ensure_active_user(user)
assert user.is_active
| bsd-3-clause | -5,640,492,194,927,244,000 | 35.809524 | 118 | 0.660198 | false |
akshayms/eho | eho/server/storage/models.py | 1 | 8401 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from uuid import uuid4
from eho.server.storage.storage import DB
class NodeTemplate(DB.Model):
__tablename__ = 'NodeTemplate'
id = DB.Column(DB.String(36), primary_key=True)
name = DB.Column(DB.String(80), unique=True, nullable=False)
node_type_id = DB.Column(DB.String(36), DB.ForeignKey('NodeType.id'),
nullable=False)
tenant_id = DB.Column(DB.String(36), nullable=False) # is it needed?
flavor_id = DB.Column(DB.String(36), nullable=False)
node_template_configs = DB.relationship('NodeTemplateConfig',
cascade="all,delete",
backref='node_template')
cluster_node_counts = DB.relationship('ClusterNodeCount',
cascade="all,delete",
backref='node_template')
nodes = DB.relationship('Node', cascade="all,delete",
backref='node_template')
def __init__(self, name, node_type_id, tenant_id, flavor_id):
self.id = uuid4().hex
self.name = name
self.node_type_id = node_type_id
self.tenant_id = tenant_id
self.flavor_id = flavor_id
def __repr__(self):
return '<NodeTemplate %s / %s>' % (self.name, self.node_type_id)
class Cluster(DB.Model):
__tablename__ = 'Cluster'
id = DB.Column(DB.String(36), primary_key=True)
name = DB.Column(DB.String(80), unique=True, nullable=False)
base_image_id = DB.Column(DB.String(36), nullable=False)
status = DB.Column(DB.String(80))
tenant_id = DB.Column(DB.String(36), nullable=False)
nodes = DB.relationship('Node', cascade="all,delete", backref='cluster')
service_urls = DB.relationship('ServiceUrl', cascade="all,delete",
backref='cluster')
node_counts = DB.relationship('ClusterNodeCount', cascade="all,delete",
backref='cluster')
# node_templates: [(node_template_id, count), ...]
def __init__(self, name, base_image_id, tenant_id, status=None):
self.id = uuid4().hex
self.name = name
self.base_image_id = base_image_id
if not status:
status = 'Starting'
self.status = status
self.tenant_id = tenant_id
def __repr__(self):
return '<Cluster %s / %s>' % (self.name, self.status)
NODE_TYPE_NODE_PROCESS = DB.Table('NodeType_NodeProcess', DB.metadata,
DB.Column('node_type_id', DB.String(36),
DB.ForeignKey('NodeType.id')),
DB.Column('node_process_id', DB.String(36),
DB.ForeignKey('NodeProcess.id')))
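# Illustrative note (not part of the original module; the process names below
# are made-up examples): the association table above backs a plain
# many-to-many between NodeType.processes and NodeProcess.node_types, so,
# assuming DB is a Flask-SQLAlchemy handle with an active session,
#
#     jt = NodeType('JT+NN')
#     jt.processes.extend([NodeProcess('job_tracker'), NodeProcess('name_node')])
#     DB.session.add(jt)
#
# records the links through NodeType_NodeProcess automatically on commit.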
class NodeType(DB.Model):
__tablename__ = 'NodeType'
id = DB.Column(DB.String(36), primary_key=True)
name = DB.Column(DB.String(80), unique=True, nullable=False)
processes = DB.relationship('NodeProcess',
cascade="all,delete",
secondary=NODE_TYPE_NODE_PROCESS,
backref='node_types')
node_templates = DB.relationship('NodeTemplate', cascade="all,delete",
backref='node_type')
def __init__(self, name):
self.id = uuid4().hex
self.name = name
def __repr__(self):
return '<NodeType %s>' % self.name
class NodeProcess(DB.Model):
__tablename__ = 'NodeProcess'
id = DB.Column(DB.String(36), primary_key=True)
name = DB.Column(DB.String(80), unique=True, nullable=False)
node_process_properties = DB.relationship('NodeProcessProperty',
cascade="all,delete",
backref='node_process')
def __init__(self, name):
self.id = uuid4().hex
self.name = name
def __repr__(self):
return '<NodeProcess %s>' % self.name
class NodeProcessProperty(DB.Model):
__tablename__ = 'NodeProcessProperty'
__table_args__ = (
DB.UniqueConstraint('node_process_id', 'name'),
)
id = DB.Column(DB.String(36), primary_key=True)
node_process_id = DB.Column(DB.String(36), DB.ForeignKey('NodeProcess.id'))
name = DB.Column(DB.String(80), nullable=False)
required = DB.Column(DB.Boolean, nullable=False)
default = DB.Column(DB.String(36))
node_template_configs = DB.relationship('NodeTemplateConfig',
cascade="all,delete",
backref='node_process_property')
def __init__(self, node_process_id, name, required=True, default=None):
self.id = uuid4().hex
self.node_process_id = node_process_id
self.name = name
self.required = required
self.default = default
def __repr__(self):
return '<NodeProcessProperty %s>' % self.name
class NodeTemplateConfig(DB.Model):
__tablename__ = 'NodeTemplateConfig'
__table_args__ = (
DB.UniqueConstraint('node_template_id', 'node_process_property_id'),
)
id = DB.Column(DB.String(36), primary_key=True)
node_template_id = DB.Column(
DB.String(36),
DB.ForeignKey('NodeTemplate.id'))
node_process_property_id = DB.Column(
DB.String(36),
DB.ForeignKey('NodeProcessProperty.id'))
value = DB.Column(DB.String(36))
def __init__(self, node_template_id, node_process_property_id, value):
self.id = uuid4().hex
self.node_template_id = node_template_id
self.node_process_property_id = node_process_property_id
self.value = value
def __repr__(self):
return '<NodeTemplateConfig %s.%s / %s>' \
% (self.node_template_id, self.node_process_property_id,
self.value)
class ClusterNodeCount(DB.Model):
__tablename__ = 'ClusterNodeCount'
__table_args__ = (
DB.UniqueConstraint('cluster_id', 'node_template_id'),
)
id = DB.Column(DB.String(36), primary_key=True)
cluster_id = DB.Column(DB.String(36), DB.ForeignKey('Cluster.id'))
node_template_id = DB.Column(DB.String(36),
DB.ForeignKey('NodeTemplate.id'))
count = DB.Column(DB.Integer, nullable=False)
def __init__(self, cluster_id, node_template_id, count):
self.id = uuid4().hex
self.cluster_id = cluster_id
self.node_template_id = node_template_id
self.count = count
def __repr__(self):
return '<ClusterNodeCount %s / %s>' \
% (self.node_template_id, self.count)
class Node(DB.Model):
__tablename__ = 'Node'
# do we need own id?
vm_id = DB.Column(DB.String(36), primary_key=True)
cluster_id = DB.Column(DB.String(36), DB.ForeignKey('Cluster.id'))
node_template_id = DB.Column(DB.String(36),
DB.ForeignKey('NodeTemplate.id'))
def __init__(self, vm_id, cluster_id, node_template_id):
self.vm_id = vm_id
self.cluster_id = cluster_id
self.node_template_id = node_template_id
def __repr__(self):
return '<Node based on %s>' % self.node_template.name
class ServiceUrl(DB.Model):
__tablename__ = 'ServiceUrl'
id = DB.Column(DB.String(36), primary_key=True)
cluster_id = DB.Column(DB.String(36), DB.ForeignKey('Cluster.id'))
name = DB.Column(DB.String(80))
url = DB.Column(DB.String(80), nullable=False)
def __init__(self, cluster_id, name, url):
self.id = uuid4().hex
self.cluster_id = cluster_id
self.name = name
self.url = url
def __repr__(self):
return '<ServiceUrl %s / %s>' % (self.name, self.url)
| apache-2.0 | 8,855,471,069,699,424,000 | 34.901709 | 79 | 0.578145 | false |
VJftw/invoke-tools | invoke_tools/lxc/docker.py | 1 | 5987 | """
invoke_tools.lxc.docker
"""
from __future__ import print_function
import os
import json
import getpass
class Docker:
"""
Docker
"""
@staticmethod
def __print_line(line):
"""
"""
try:
line = line.decode('utf-8')
except:
print("Could not decode line")
return
try:
line = json.loads(line)
if "stream" in line:
line = line["stream"]
pass
elif "status" in line:
o = line["status"]
if "progress" in line:
o += "{0}".format(line["progress"])
if "id" in line:
o = "{0} {1}".format(line["id"], o)
if line["status"] == "Download complete" or "Pulling" in line["status"] or "Digest" in line["status"] or "Image" in line["status"] or "image" in line["status"]:
line = " {0}{1}\n".format(o, " " * 80)
else:
line = " {0}\r".format(o)
except:
pass
print(line, end="", flush=True)
@staticmethod
def pull(cli, image):
print("#")
print("# Pulling Docker image: {0}".format(image))
print("#")
for line in cli.pull(image, stream=True):
Docker.__print_line(line)
@staticmethod
def build(cli, dockerfile, tag):
print("#")
print("# Building Docker image from '{0}' with tag '{1}'".format(dockerfile, tag))
print("#")
for line in cli.build(
dockerfile=dockerfile,
pull=True,
path=".",
rm=True,
tag=tag):
Docker.__print_line(line)
print()
@staticmethod
def execute(cli, container_id, cmd):
print("#")
print("# Executing on {1}: {0}".format(cmd, container_id))
print("#")
exec_container = cli.exec_create(
container=container_id,
cmd=cmd
# user='root' if CI else 'app'
)
for line in cli.exec_start(
exec_id=exec_container.get('Id'),
stream=True):
Docker.__print_line(line)
print()
inspect = cli.exec_inspect(exec_container.get('Id'))
exit_code = inspect.get('ExitCode')
if exit_code != 0:
cli.stop(container_id)
cli.remove_container(container_id)
raise Exception("Exit Code: {0}\n{1}".format(exit_code, inspect))
@staticmethod
def clean(cli, objs):
print("#")
print("# Cleaning files & directories: {0}".format(objs))
print("#")
cli.pull("alpine:latest")
container = cli.create_container(
image='alpine:latest',
volumes=[
'{0}:/app'.format(os.getcwd())
],
working_dir='/app',
host_config=cli.create_host_config(binds=[
'{0}:/app'.format(os.getcwd())
]),
command='/bin/sh -c "rm -rf {0}"'.format(" ".join(objs))
)
response = cli.start(container=container.get('Id'))
cli.wait(container=container.get('Id'), timeout=600)
print(response)
cli.remove_container(container.get('Id'))
print()
@staticmethod
def push(cli, tags):
"""
"""
for tag in tags:
print("#")
print("# Pushing {0} to Registry".format(tag))
print("#")
for line in cli.push(tag, stream=True):
Docker.__print_line(line)
print()
@staticmethod
def login(cli):
"""
"""
if os.getenv('DOCKER_EMAIL') and os.getenv('DOCKER_USERNAME') and os.getenv('DOCKER_PASSWORD'):
email = os.getenv('DOCKER_EMAIL')
username = os.getenv('DOCKER_USERNAME')
password = os.getenv('DOCKER_PASSWORD')
else:
email = input('Docker email:')
username = input('Docker username:')
password = getpass.getpass('Docker password:')
cli.login(
username=username,
email=email,
password=password,
registry='https://index.docker.io/v1/'
)
print()
return cli, username
@staticmethod
def run(
cli,
tag,
command,
volumes=None,
working_dir="",
environment=None,
links=None,
detach=False,
privileged=False):
"""
"""
if environment is None:
environment = {}
if volumes is None:
volumes = []
print("#")
print("# Running on {1}: {0}".format(command, tag))
print("#")
params = dict()
params['image'] = tag
params['command'] = command
if len(volumes) > 0:
params['volumes'] = volumes
params['host_config'] = cli.create_host_config(binds=volumes, links=links, privileged=privileged)
if working_dir != "":
params['working_dir'] = working_dir
if environment:
params['environment'] = environment
if links:
params['host_config'] = cli.create_host_config(binds=volumes, links=links, privileged=privileged)
if privileged:
params['host_config'] = cli.create_host_config(binds=volumes, links=links, privileged=privileged)
container = cli.create_container(**params)
cli.start(container.get('Id'))
if detach:
return container
for line in cli.attach(container=container.get('Id'), stream=True, logs=True):
Docker.__print_line(line)
exit_code = cli.wait(container=container.get('Id'))
cli.remove_container(container.get('Id'))
if exit_code != 0:
raise Exception("Exit Code: {0}".format(exit_code))
| mit | 3,646,104,151,735,492,000 | 28.063107 | 176 | 0.496743 | false |
msoula/cosmicbox | board/cosmicbox/cosmicboxfs/lib/psogen.py | 1 | 4151 | #!/usr/bin/python
# Modified ShoutBox Library
# enables further modifications for the ShoutBox
# Run without arguments to generate the static HTML file
# Run the following to enter a new line from command line
# psogen.py input Anonymous default "Text"
import os, re
import messages, broadcast
datafilename = os.environ["SHOUTBOX_CHATFILE"]
htmlfilename = os.environ["SHOUTBOX_GEN_HTMLFILE"]
style=("name { font-weight: bold; font-family:Tahoma } "
"data { font-family: Tahoma } "
"data.def { color: #000000 } "
"data.blue { color: #0000FF } "
"data.green { color: #008000 } "
"data.orange { color: #FF8040 } "
"data.red { color: #FF0000 }")
try:
raw_dest = os.environ["SHOUTBOX_BROADCAST_DESTINATIONS"]
finished_dest = re.sub ( '#' , '"' , raw_dest )
broadcast_destination = eval ( finished_dest )
except KeyError:
broadcast_destination = False
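# Illustrative note (the '#' -> '"' substitution is taken from the code above;
# the exact destination layout depends on the separate broadcast module, so
# the value shown is only a guess): the environment variable uses '#' as a
# stand-in for '"', e.g.
#
#     SHOUTBOX_BROADCAST_DESTINATIONS='[(#192.168.0.255#, 5000)]'
#
# is rewritten to '[("192.168.0.255", 5000)]' before being eval'd into a
# Python object.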
#--------------
# Generates Shoutbox-HTML-Frame ...
# Imports:
# content - String containing preformatted data
#--------------
def generate_html(content):
htmlstring = "<html><head><meta http-equiv='cache-control' content='no-cache'><meta name='GENERATOR' content='PyShoutOut'><title>Shout-Out Data</title>"
htmlstring += "<style type='text/css'>" + style + "</style></head><body>"
htmlstring += content
htmlstring += "</body></html>"
return htmlstring
#--------------
# Generates HTML Data based on given content and write it to static html file
# Imports:
# content - String containing preformatted data
#--------------
def generate_html_into_file(content):
htmlstring = generate_html ( content )
htmlfile = open( htmlfilename , 'w' )
htmlfile.write( htmlstring )
htmlfile.close()
#--------------
# Generates HTML Data based on datafilename's content
#--------------
def generate_html_from_file():
old = read_data_file()
generate_html_into_file( old )
#--------------
# Generates and Displays generated HTML
#--------------
def generate_html_to_display_from_file():
old = read_data_file()
htmlstring = generate_html ( old )
print htmlstring
#--------------
# Reads the data file named by datafilename
#--------------
def read_data_file():
datafile = open(datafilename, 'r')
old = datafile.read()
datafile.close()
return old
#--------------
# Function for saving new Shoutbox content and regenerating the static HTML file -- usually called by the HTML form
#--------------
def process_form( name , indata , color ):
content = save_input( name , indata , color )
if broadcast_destination == False:
generate_html_into_file ( content )
#--------------
# Actually saves SB content to the datafile
#--------------
def save_input( name , indata , color ):
content = prepare_line ( name, indata, color )
if broadcast_destination != False:
return writeToNetwork( content , broadcast_destination )
else:
return writeToDisk ( content )
def writeToNetwork ( content , broadcast_destination ):
message = messages.shoutbox_message()
message.set(content)
casting = broadcast.broadcast( )
casting.setDestination(broadcast_destination)
casting.set( message.get_message() )
casting.send()
return None
def writeToDisk ( content ):
old = read_data_file()
finalcontent = content + old
datafile = open(datafilename, 'r+')
datafile.write(finalcontent)
datafile.close()
return finalcontent
def prepare_line ( name, indata, color ):
datapass = re.sub("<", "<", indata)
data = re.sub(">", ">", datapass)
content = "<name>" + name + ":</name> <data class='" + color + "'>" + data + "</data><br>\n"
return content
#--------------
# Testing or Generating static HTML File
#--------------
if __name__ == "__main__":
import sys
if sys.argv.count("input") >= 1 :
save_input( sys.argv[2] , sys.argv[3] , sys.argv[4] )
generate_html_to_display_from_file()
print "Entered Text."
generate_html_from_file ()
print "Generated HTML-Shoutbox File."
| gpl-2.0 | -128,107,883,562,125,100 | 28.65 | 158 | 0.611419 | false |