testFileC]
for filesetFile in testFilesetC.files:
assert filesetFile in goldenFiles, \
"ERROR: Unknown file in fileset"
goldenFiles.remove(filesetFile)
assert len(goldenFiles) == 0, \
"ERROR: Fileset is missing files"
myThread.transaction.rollback()
testFilesetB.loadData()
testFilesetC.loadData()
assert len(testFilesetB.files) == 0, \
"ERROR: Fileset B has too many files"
assert len(testFilesetC.files) == 0, \
"ERROR: Fileset C has too many files"
testFilesetA.delete()
testFileA.delete()
testFileB.delete()
testFileC.delete()
def testMarkOpen(self):
"""
_testMarkOpen_
Test that setting the openness of a fileset in the constructor works as
well as changing it with the markOpen() method.
"""
testFilesetA = Fileset(name="TestFileset1", is_open=False)
testFilesetA.create()
testFilesetB = Fileset(name="TestFileset2", is_open=True)
testFilesetB.create()
testFilesetC = Fileset(name=testFilesetA.name)
testFilesetC.load()
testFilesetD = Fileset(name=testFilesetB.name)
testFilesetD.load()
assert testFilesetC.open is False, \
"ERROR: FilesetC should be closed."
assert testFilesetD.open is True, \
"ERROR: FilesetD should be open."
testFilesetA.markOpen(True)
testFilesetB.markOpen(False)
testFilesetE = Fileset(name=testFilesetA.name)
testFilesetE.load()
testFilesetF = Fileset(name=testFilesetB.name)
testFilesetF.load()
assert testFilesetE.open is True, \
"ERROR: FilesetE should be open."
assert testFilesetF.open is False, \
"ERROR: FilesetF should be closed."
myThread = threading.currentThread()
daoFactory = DAOFactory(package="WMCore.WMBS", logger=myThread.logger,
dbinterface=myThread.dbi)
openFilesetDAO = daoFactory(classname="Fileset.ListOpen")
openFilesetNames = openFilesetDAO.execute()
assert len(openFilesetNames) == 1, \
"ERROR: Too many open filesets."
assert "TestFileset1" in openFilesetNames, \
"ERROR: Wrong fileset listed as open."
return
def testFilesetClosing(self):
"""
_testFilesetClosing_
Verify the proper operation of the closable fileset DAO object. A
fileset is closable if:
- All of the subscriptions that feed it have completed processing all
files in their input fileset
- All of the jobs for feeder subscriptions have completed
- The fileset that feeds the subscription is closed
- The workflow for the subscription is fully injected.
"""
testOutputFileset1 = Fileset(name="TestOutputFileset1")
testOutputFileset1.create()
testOutputFileset2 = Fileset(name="TestOutputFileset2")
testOutputFileset2.create()
testOutputFileset3 = Fileset(name="TestOutputFileset3")
testOutputFileset3.create()
testOutputFileset4 = Fileset(name="TestOutputFileset4")
testOutputFileset4.create()
testMergedOutputFileset1 = Fileset(name="TestMergedOutputFileset1")
testMergedOutputFileset1.create()
testMergedOutputFileset2 = Fileset(name="TestMergedOutputFileset2")
testMergedOutputFileset2.create()
testMergedOutputFileset3 = Fileset(name="TestMergedOutputFileset3")
testMergedOutputFileset3.create()
testMergedOutputFileset4 = Fileset(name="TestMergedOutputFileset4")
testMergedOutputFileset4.create()
testFilesetOpen = Fileset(name="TestFilesetOpen", is_open=True)
testFilesetOpen.create()
testFileA = File(lfn="/this/is/a/lfnA", size=1024,
events=20, checksums={'cksum': 3})
testFileB = File(lfn="/this/is/a/lfnB", size=1024,
events=20, checksums={'cksum': 3})
testFilesetOpen.addFile(testFileA)
testFilesetOpen.addFile(testFileB)
testFilesetOpen.commit()
testFilesetClosed = Fileset(name="TestFilesetClosed", is_open=False)
testFilesetClosed.create()
testFileC = File(lfn="/this/is/a/lfnC", size=1024,
events=20, checksums={'cksum': 3})
testFileD = File(lfn="/this/is/a/lfnD", size=1024,
events=20, checksums={'cksum': 3})
testFilesetClosed.addFile(testFileC)
testFilesetClosed.addFile(testFileD)
testFilesetClosed.commit()
testWorkflow1 = Workflow(spec="spec1.xml", owner="Steve",
name="wf001", task="sometask")
testWorkflow1.create()
testWorkflow1.addOutput("out1", testOutputFileset1, testMergedOutputFileset1)
testWorkflow1.addOutput("out2", testOutputFileset2, testMergedOutputFileset2)
testWorkflow2 = Workflow(spec="spec2.xml", owner="Steve",
name="wf002", task="sometask")
testWorkflow2.create()
testWorkflow2.addOutput("out3", testOutputFileset3, testMergedOutputFileset3)
testWorkflow3 = Workflow(spec="spec4.xml", owner="Steve",
name="wf004", task="sometask")
testWorkflow3.create()
testWorkflow3.addOutput("out4", testOutputFileset4, testMergedOutputFileset4)
testSubscription1 = Subscription(fileset=testFilesetClosed,
workflow=testWorkflow1)
testSubscription1.create()
testSubscription1.completeFiles([testFileC, testFileD])
testSubscription2 = Subscription(fileset=testFilesetOpen,
workflow=testWorkflow2)
testSubscription2.create()
testSubscription2.completeFiles([testFileA, testFileB])
testSubscription3 = Subscription(fileset=testFilesetClosed,
workflow=testWorkflow3)
testSubscription3.create()
testJobGroup = JobGroup(subscription=testSubscription1)
testJobGroup.create()
testJob = Job(name="TestJob1")
testJob.create(testJobGroup)
testJob["state"] = "executing"
myThread = threading.currentThread()
daoFactory = DAOFactory(package="WMCore.WMBS", logger=myThread.logger,
dbinterface=myThread.dbi)
changeStateDAO = daoFactory(classname="Jobs.ChangeState")
changeStateDAO.execute(jobs=[testJob])
closableFilesetDAO = daoFactory(classname="Fileset.ListClosable")
closableFilesets = closableFilesetDAO.execute()
assert len(closableFilesets) == 0, \
"Error: There should be no closable filesets."
testJob["state"] = "cleanout"
changeStateDAO.execute(jobs=[testJob])
closableFilesets = closableFilesetDAO.execute()
assert len(closableFilesets) == 0, \
"Error: There should be no closable filesets."
injected = daoFactory(classname="Workflow.MarkInjectedWorkflows")
injected.execute(names=["wf001", "wf002", "wf003"], injected=True)
closableFilesets = closableFilesetDAO.execute()
goldenFilesets = ["TestOutputFileset1", "TestOutputFileset2"]
for closableFileset in closableFilesets:
newFileset = Fileset(id=closableFileset)
newFileset.load()
assert newFileset.name in goldenFilesets, \
"Error: Unknown closable fileset"
goldenFilesets.remove(newFileset.name)
assert len(goldenFilesets) == 0, \
"Error: Filesets are missing"
return
def testFilesetClosing2(self):
"""
_testFilesetClosing2_
Verify that fileset closing works correctly in the case where multiple
subscriptions feed into a single fileset.
"""
testOutputFileset1 = Fileset(name="TestOutputFileset1")
testOutputFileset1.create()
testOutputFileset2 = Fileset(name="TestOutputFileset2")
testOutputFileset2.create()
testMergedOutputFileset1 = Fileset(name="TestMergedOutputFileset1")
testMergedOutputFileset1.create()
testMergedOutputFileset2 = Fileset(name="TestMergedOutputFileset2")
testMergedOutputFileset2.create()
testFilesetOpen = Fileset(name="TestFilesetOpen", is_open=True)
testFilesetOpen.create()
testFileA = File(lfn="/this/is/a/lfnA", size=1024,
events=20, checksums={'cksum': 3})
testFileB = File(lfn="/this/is/a/lfnB", size=1024,
events=20, checksums={'cksum': 3})
testFilesetOpen.addFile(testFileA)
testFilesetOpen.addFile(testFileB)
testFilesetOpen.commit()
testFilesetClosed1 = Fileset(name="TestFilesetClosed1", is_open=False)
testFilesetClosed1.create()
testFileC = File(lfn="/this/is/a/lfnC", size=1024,
events=20, checksums={'cksum': 3})
testFileD = File(lfn="/this/is/a/lfnD", size=1024,
events=20, checksums={'cksum': 3})
testFilesetClosed1.addFile(testFileC)
testFilesetClosed1.addFile(testFileD)
testFilesetClosed1.commit()
testFilesetClosed2 = Fileset(name="TestFilesetClosed2", is_open=False)
testFilesetClosed2.create()
testFileE = File(lfn="/this/is/a/lfnE", size=1024,
events=20, checksums={'cksum': 3})
testFileF = File(lfn="/this/is/a/lfnF", size=1024,
events=20, checksums={'cksum': 3})
testFilesetClosed2.addFile(testFileE)
testFilesetClosed2.addFile(testFileF)
testFilesetClosed2.commit()
testFilesetClosed3 = Fileset(name="TestFilesetClosed3", is_open=False)
testFilesetClosed3.create()
testFileG = File(lfn="/this/is/a/lfnG", size=1024,
events=20, checksums={'cksum': 3})
testFileH = File(lfn="/this/is/a/lfnH", size=1024,
events=20, checksums={'cksum': 3})
testFilesetClosed3.addFile(testFileG)
testFilesetClosed3.addFile(testFileH)
testFilesetClosed3.commit()
testWorkflow1 = Workflow(spec="spec1.xml", owner="Steve",
name="wf001", task="sometask")
testWorkflow1.create()
testWorkflow1.addOutput("out1", testOutputFileset1, testMergedOutputFileset1)
testWorkflow2 = Workflow(spec="spec2.xml", owner="Steve",
name="wf002", task="sometask")
testWorkflow2.create()
testWorkflow2.addOutput("out2", testOutputFileset2, testMergedOutputFileset2)
testSubscription1 = Subscription(fileset=testFilesetOpen,
workflow=testWorkflow1)
testSubscription1.create()
testSubscription1.completeFiles([testFileA, testFileB])
testSubscription2 = Subscription(fileset=testFilesetClosed1,
workflow=testWorkflow1)
testSubscription2.create()
testSubscription3 = Subscription(fileset=testFilesetClosed2,
workflow=testWorkflow2)
testSubscription3.create()
testSubscription3.completeFiles([testFileE, testFileF])
testSubscription4 = Subscription(fileset=testFilesetClosed3,
workflow=testWorkflow2)
testSubscription4.create()
testSubscription4.completeFiles([testFileG, testFileH])
myThread = threading.currentThread()
daoFactory = DAOFactory(package="WMCore.WMBS", logger=myThread.logger,
dbinterface=myThread.dbi)
injected = daoFactory(classname="Workflow.MarkInjectedWorkflows")
injected.execute(names=["wf001", "wf002"], injected=True)
closableFilesetDAO = daoFactory(classname="Fileset.ListClosable")
closableFilesets = closableFilesetDAO.execute()
goldenFilesets = ["TestOutputFileset2"]
for closableFileset in closableFilesets:
newFileset = Fileset(id=closableFileset)
newFileset.load()
assert newFileset.name in goldenFilesets, \
"Error: Unknown closable fileset"
goldenFilesets.remove(newFileset.name)
assert len(goldenFilesets) == 0, \
"Error: Filesets are missing"
return
def testFilesetClosing3(self):
"""
_testFilesetClosing3_
Verify that fileset closing works correctly in the case where multiple
subscriptions feed into a single fileset and accounts for running jobs
correctly.
"""
testOutputFileset1 = Fileset(name="TestOutputFileset1")
testOutputFileset1.create()
testOutputFileset2 = Fileset(name="TestOutputFileset2")
testOutputFileset2.create()
testMergedOutputFileset1 = Fileset(name="TestMergedOutputFileset1")
testMergedOutputFileset1.create()
testMergedOutputFileset2 = Fileset(name="TestMergedOutputFileset2")
testMergedOutputFileset2.create()
testFilesetOpen = Fileset(name="TestFilesetOpen", is_open=False)
testFilesetOpen.create()
testFileA = File(lfn="/this/is/a/lfnA", size=1024,
events=20, checksums={'cksum': 3})
testFileB = File(lfn="/this/is/a/lfnB", size=1024,
events=20, checksums={'cksum': 3})
testFilesetOpen.addFile(testFileA)
testFilesetOpen.addFile(testFileB)
testFilesetOpen.commit()
testFilesetClosed1 = Fileset(name="TestFilesetClosed1", is_open=False)
testFilesetClosed1.create()
testFileC = File(lfn="/this/is/a/lfnC", size=1024,
events=20, checksums={'cksum': 3})
testFileD = File(lfn="/this/is/a/lfnD", size=1024,
events=20, checksums={'cksum': 3})
testFilesetClosed1.addFile(testFileC)
testFilesetClosed1.addFile(testFileD)
testFilesetClosed1.commit()
testFilesetClosed2 = Fileset(name="TestFilesetClosed2", is_open=False)
testFilesetClosed2.create()
testFileE = File(lfn="/this/is/a/lfnE", size=1024,
events=20, checksums={'cksum': 3})
testFileF = File(lfn="/this/is/a/lfnF", size=1024,
events=20, checksums={'cksum': 3})
testFilesetClosed2.addFile(testFileE)
testFilesetClosed2.addFile(testFileF)
testFilesetClosed2.commit()
testFilesetClosed3 = Fileset(name="TestFilesetClosed3", is_open=False)
testFilesetClosed3.create()
testFileG = File(lfn="/this/is/a/lfnG", size=1024,
events=20, checksums={'cksum': 3})
testFileH = File(lfn="/this/is/a/lfnH", size=1024,
events=20, checksums={'cksum': 3})
testFilesetClosed3.addFile(testFileG)
testFilesetClosed3.addFile(testFileH)
testFilesetClosed3.commit()
testWorkflow1 = Workflow(spec="spec1.xml", owner="Steve",
name="wf001", task="sometask")
testWorkflow1.create()
testWorkflow1.addOutput("out1", testOutputFileset1, testMergedOutputFileset1)
testWorkflow2 = Workflow(spec="spec2.xml", owner="Steve",
name="wf002", task="sometask")
testWorkflow2.create()
testWorkflow2.addOutput("out2", testOutputFileset2, testMergedOutputFileset2)
testSubscription1 = Subscription(fileset=testFilesetOpen,
workflow=testWorkflow1)
testSubscription1.create()
testSubscription1.completeFiles([testFileA, testFileB])
testSubscription2 = Subscription(fileset=testFilesetClosed1,
workflow=testWorkflow1)
testSubscription2.create()
testJobGroup = JobGroup(subscription=testSubscription2)
testJobGroup.create()
testJob = Job(name="TestJob1")
testJob.create(testJobGroup)
testSubscription3 = Subscription(fileset=testFilesetClosed2,
workflow=testWorkflow2)
testSubscription3.create()
testSubscription3.completeFiles([testFileE, testFileF])
testSubscription4 = Subscription(fileset=testFilesetClosed3,
workflow=testWorkflow2)
testSubscription4.create()
testSubscription4.completeFiles([testFileG, testFileH])
myThread = threading.currentThread()
daoFactory = DAOFactory(package="WMCore.WMBS", logger=myThread.logger,
dbinterface=myThread.dbi)
injected = daoFactory(classname="Workflow.MarkInjectedWorkflows")
injected.execute(names=["wf001", "wf002"], injected=True)
closableFilesetDAO = daoFactory(classname="Fileset.ListClosable")
closableFilesets = closableFilesetDAO.execute()
goldenFilesets = ["TestOutputFileset2"]
for closableFileset in closableFilesets:
newFileset = Fileset(id=closableFileset)
newFileset.load()
assert newFileset.name in goldenFilesets, \
"Error: Unknown closable fileset"
goldenFilesets.remove(newFileset.name)
assert len(goldenFilesets) == 0, \
"Error: Filesets are missing"
return
def testFilesetClosing4(self):
"""
_testFilesetClosing4_
Verify that fileset closing works correctly when a workflow completely
fails out and does not produce any files.
"""
testOutputFileset1 = Fileset(name="TestOutputFileset1")
testOutputFileset1.create()
testOutputFileset2 = Fileset(name="TestOutputFileset2")
testOutputFileset2.create()
testOutputFileset3 = Fileset(name="TestOutputFileset3")
testOutputFileset3.create()
testMergedOutputFileset1 = Fileset(name="TestMergedOutputFileset1")
testMergedOutputFileset1.create()
testMergedOutputFileset2 = Fileset(name="TestMergedOutputFileset2")
testMergedOutputFileset2.create()
testMergedOutputFileset3 = Fileset(name="TestMergedOutputFileset3")
testMergedOutputFileset3.create()
testOutputFileset1.markOpen(False)
testOutputFileset2.markOpen(True)
testOutputFileset3.markOpen(True)
testInputFileset = Fileset(name="TestInputFileset")
testInputFileset.create()
testInputFileset.markOpen(False)
testWorkflow1 = Workflow(spec="spec1.xml", owner="Steve",
name="wf001", task="sometask")
testWorkflow1.create()
testWorkflow1.addOutput("out1", testOutputFileset1, testMergedOutputFileset1)
testWorkflow2 = Workflow(spec="spec2.xml", owner="Steve",
name="wf002", task="sometask")
testWorkflow2.create()
testWorkflow2.addOutput("out2", testOutputFileset2, testMergedOutputFileset2)
testWorkflow3 = Workflow(spec="spec3.xml", owner="Steve",
name="wf003", task="sometask")
testWorkflow3.create()
testWorkflow3.addOutput("out3", testOutputFileset3, testMergedOutputFileset3)
testSubscription1 = Subscription(fileset=testInputFileset,
workflow=testWorkflow1)
testSubscription1.create()
testSubscription2 = Subscription(fileset=testOutputFileset1,
workflow=testWorkflow2)
testSubscription2.create()
testSubscription3 = Subscription(fileset=testOutputFileset2,
workflow=testWorkflow3)
testSubscription3.create()
myThread = threading.currentThread()
daoFactory = DAOFactory(package="WMCore.WMBS", logger=myThread.logger,
dbinterface=myThread.dbi)
injected = daoFactory(classname="Workflow.MarkInjectedWorkflows")
injected.execute(names=["wf001", "wf002", "wf003"], injected=True)
closableFilesetDAO = daoFactory(classname="Fileset.ListClosable")
closableFilesets = closableFilesetDAO.execute()
assert len(closableFilesets) == 1, \
"Error: Wrong number of closable filesets"
assert closableFilesets[0] == testOutputFileset2.id, \
"Error: Wrong fileset is marked as closable."
return
def testFilesetClosing5(self):
"""
_testFilesetClosing5_
Verify that fileset closing works in the case where one cleanup
subscription is used to clean up files from all the other merge
subscriptions in the request.
"""
inputFileset = Fileset(name="InputFileset")
inputFileset.create()
inputFileset.markOpen(False)
cleanupFileset = Fileset(name="CleanupFileset")
cleanupFileset.create()
cleanupFileset.markOpen(True)
testOutputFileset1 = Fileset(name="TestOutputFileset1")
testOutputFileset1.create()
testOutputFileset1.markOpen(True)
testOutputFileset2 = Fileset(name="TestOutputFileset2")
testOutputFileset2.create()
testOutputFileset2.markOpen(True)
testOutputFileset3 = Fileset(name="TestOutputFileset3")
testOutputFileset3.create()
testOutputFileset3.markOpen(True)
cleanupWorkflow = Workflow(spec="spec1.xml", owner="Steve",
name="wf001", task="cleanup")
cleanupWorkflow.create()
testWorkflow1 = Workflow(spec="spec1.xml", owner="Steve",
name="wf001", task="sometask1")
testWorkflow1.create()
testWorkflow1.addOutput("out1", testOutputFileset1)
testWorkflow1.addOutput("out1", cleanupFileset)
testWorkflow2 = Workflow(spec="spec1.xml", owner="Steve",
name="wf001", task="sometask2")
testWorkflow2.create()
testWorkflow2.addOutput("out1", testOutputFileset2)
testWorkflow2.addOutput("out1", cleanupFileset)
testWorkflow3 = Workflow(spec="spec1.xml", owner="Steve",
name="wf001", task="sometask3")
testWorkflow3.create()
testWorkflow3.addOutput("out1", testOutputFileset3)
testWorkflow3.addOutput("out1", cleanupFileset)
cleanupSubscription = Subscription(fileset=cleanupFileset,
workflow=cleanupWorkflow)
cleanupSubscription.create()
testSubscription1 = Subscription(fileset=inputFileset,
workflow=testWorkflow1)
testSubscription1.create()
testSubscription2 = Subscription(fileset=testOutputFileset1,
workflow=testWorkflow2)
testSubscription2.create()
testSubscription3 = Subscription(fileset=testOutputFileset2,
workflow=testWorkflow3)
testSubscription3.create()
testFileA = File(lfn="/this/is/a/lfnA", size=1024,
events=20, checksums={'cksum': 3},
locations=set(["T2_CH_CERN"]))
testFileA.addRun(Run(1, *[45]))
testFileA.create()
inputFileset.addFile(testFileA)
inputFileset.commit()
testJobGroupA = JobGroup(subscription=testSubscription1)
testJobGroupA.create()
testJobA = Job(name="TestJobA", files=[testFileA])
testJobA.create(testJobGroupA)
testJobA["state"] = "executing"
myThread = threading.currentThread()
daoFactory = DAOFactory(package="WMCore.WMBS", logger=myThread.logger,
dbinterface=myThread.dbi)
injected = daoFactory(classname="Workflow.MarkInjectedWorkflows")
injected.execute(names=["wf001"], injected=True)
changeStateDAO = daoFactory(classname="Jobs.ChangeState")
changeStateDAO.execute(jobs=[testJobA])
closableFilesetDAO = daoFactory(classname="Fileset.ListClosable")
closableFilesets = closableFilesetDAO.execute()
self.assertEqual(len(closableFilesets), 0,
"Error: There should be no closable filesets.")
testSubscription1.completeFiles(testFileA)
testJobA["state"] = "cleanout"
changeStateDAO.execute(jobs=[testJobA])
testFileB = File(lfn="/this/is/a/lfnB", size=1024,
events=20, checksums={'cksum': 3},
locations=set(["T2_CH_CERN"]))
testFileB.addRun(Run(1, *[45]))
testFileB.create()
testOutputFileset1.addFile(testFileB)
testOutputFileset1.commit()
cleanupFileset.addFile(testFileB)
cleanupFileset.commit()
load balancers must use ipv4 .
:rtype: dict
:return: {
'LoadBalancers': [
{
'LoadBalancerArn': 'string',
'DNSName': 'string',
'CanonicalHostedZoneId': 'string',
'CreatedTime': datetime(2015, 1, 1),
'LoadBalancerName': 'string',
'Scheme': 'internet-facing'|'internal',
'VpcId': 'string',
'State': {
'Code': 'active'|'provisioning'|'failed',
'Reason': 'string'
},
'Type': 'application',
'AvailabilityZones': [
{
'ZoneName': 'string',
'SubnetId': 'string'
},
],
'SecurityGroups': [
'string',
],
'IpAddressType': 'ipv4'|'dualstack'
},
]
}
:returns:
(string) --
"""
pass
def create_rule(ListenerArn=None, Conditions=None, Priority=None, Actions=None):
"""
Creates a rule for the specified listener.
Each rule can have one action and one condition. Rules are evaluated in priority order, from the lowest value to the highest value. When the condition for a rule is met, the specified action is taken. If no conditions are met, the default action for the default rule is taken. For more information, see Listener Rules in the Application Load Balancers Guide .
To view your current rules, use DescribeRules . To update a rule, use ModifyRule . To set the priorities of your rules, use SetRulePriorities . To delete a rule, use DeleteRule .
See also: AWS API Documentation
Examples
This example creates a rule that forwards requests to the specified target group if the URL contains the specified pattern (for example, /img/*).
Expected Output:
:example: response = client.create_rule(
ListenerArn='string',
Conditions=[
{
'Field': 'string',
'Values': [
'string',
]
},
],
Priority=123,
Actions=[
{
'Type': 'forward',
'TargetGroupArn': 'string'
},
]
)
:type ListenerArn: string
:param ListenerArn: [REQUIRED]
The Amazon Resource Name (ARN) of the listener.
:type Conditions: list
:param Conditions: [REQUIRED]
A condition. Each condition specifies a field name and a single value.
If the field name is host-header , you can specify a single host name (for example, my.example.com). A host name is case insensitive, can be up to 128 characters in length, and can contain any of the following characters. Note that you can include up to three wildcard characters.
A-Z, a-z, 0-9
- .
* (matches 0 or more characters)
? (matches exactly 1 character)
If the field name is path-pattern , you can specify a single path pattern. A path pattern is case sensitive, can be up to 128 characters in length, and can contain any of the following characters. Note that you can include up to three wildcard characters.
A-Z, a-z, 0-9
_ - . $ / ~ " ' @ : +
& (using &amp;)
* (matches 0 or more characters)
? (matches exactly 1 character)
(dict) --Information about a condition for a rule.
Field (string) --The name of the field. The possible values are host-header and path-pattern .
Values (list) --The condition value.
If the field name is host-header , you can specify a single host name (for example, my.example.com). A host name is case insensitive, can be up to 128 characters in length, and can contain any of the following characters. Note that you can include up to three wildcard characters.
A-Z, a-z, 0-9
- .
* (matches 0 or more characters)
? (matches exactly 1 character)
If the field name is path-pattern , you can specify a single path pattern (for example, /img/*). A path pattern is case sensitive, can be up to 128 characters in length, and can contain any of the following characters. Note that you can include up to three wildcard characters.
A-Z, a-z, 0-9
_ - . $ / ~ " ' @ : +
& (using &amp;)
* (matches 0 or more characters)
? (matches exactly 1 character)
(string) --
:type Priority: integer
:param Priority: [REQUIRED]
The priority for the rule. A listener can't have multiple rules with the same priority.
:type Actions: list
:param Actions: [REQUIRED]
An action. Each action has the type forward and specifies a target group.
(dict) --Information about an action.
Type (string) -- [REQUIRED]The type of action.
TargetGroupArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the target group.
:rtype: dict
:return: {
'Rules': [
{
'RuleArn': 'string',
'Priority': 'string',
'Conditions': [
{
'Field': 'string',
'Values': [
'string',
]
},
],
'Actions': [
{
'Type': 'forward',
'TargetGroupArn': 'string'
},
],
'IsDefault': True|False
},
]
}
:returns:
A-Z, a-z, 0-9
- .
* (matches 0 or more characters)
? (matches exactly 1 character)
"""
pass
def create_target_group(Name=None, Protocol=None, Port=None, VpcId=None, HealthCheckProtocol=None, HealthCheckPort=None, HealthCheckPath=None, HealthCheckIntervalSeconds=None, HealthCheckTimeoutSeconds=None, HealthyThresholdCount=None, UnhealthyThresholdCount=None, Matcher=None):
"""
Creates a target group.
To register targets with the target group, use RegisterTargets . To update the health check settings for the target group, use ModifyTargetGroup . To monitor the health of targets in the target group, use DescribeTargetHealth .
To route traffic to the targets in a target group, specify the target group in an action using CreateListener or CreateRule .
To delete a target group, use DeleteTargetGroup .
For more information, see Target Groups for Your Application Load Balancers in the Application Load Balancers Guide .
See also: AWS API Documentation
Examples
This example creates a target group that you can use to route traffic to targets using HTTP on port 80. This target group uses the default health check configuration.
Expected Output:
:example: response = client.create_target_group(
Name='string',
Protocol='HTTP'|'HTTPS',
Port=123,
VpcId='string',
HealthCheckProtocol='HTTP'|'HTTPS',
HealthCheckPort='string',
HealthCheckPath='string',
HealthCheckIntervalSeconds=123,
HealthCheckTimeoutSeconds=123,
HealthyThresholdCount=123,
UnhealthyThresholdCount=123,
Matcher={
'HttpCode': 'string'
}
)
:type Name: string
:param Name: [REQUIRED]
The name of the target group.
This name must be unique per region per account, can have a maximum of 32 characters, must contain only alphanumeric characters or hyphens, and must not begin or end with a hyphen.
:type Protocol: string
:param Protocol: [REQUIRED]
The protocol to use for routing traffic to the targets.
:type Port: integer
:param Port: [REQUIRED]
The port on which the targets receive traffic. This port is used unless you specify a port override when registering the target.
:type VpcId: string
:param VpcId: [REQUIRED]
The identifier of the virtual private cloud (VPC).
:type HealthCheckProtocol: string
:param HealthCheckProtocol: The protocol the load balancer uses when performing health checks on targets. The default is the HTTP protocol.
:type HealthCheckPort: string
:param HealthCheckPort: The port the load balancer uses when performing health checks on targets. The default is traffic-port , which indicates the port on which each target receives traffic from the load balancer.
:type HealthCheckPath: string
:param HealthCheckPath: The ping path that is the destination on the targets for health checks. The default is /.
:type HealthCheckIntervalSeconds: integer
:param HealthCheckIntervalSeconds: The approximate amount of time, in seconds, between health checks of an individual target. The default is 30 seconds.
:type HealthCheckTimeoutSeconds: integer
:param HealthCheckTimeoutSeconds: The amount of time, in seconds, during which no response from a target means a failed health check. The default is 5 seconds.
:type HealthyThresholdCount: integer
:param HealthyThresholdCount: The number of consecutive health checks successes required before considering an unhealthy target healthy. The default is 5.
:type UnhealthyThresholdCount: integer
:param UnhealthyThresholdCount: The number of consecutive health check failures required before considering a target unhealthy. The default is 2.
:type Matcher: dict
:param Matcher: The HTTP codes to use when checking for a successful response from a target. The default is 200.
HttpCode (string) -- [REQUIRED]The HTTP codes. You can specify values between 200 and 499. The default value is 200. You can specify multiple values (for example, '200,202') or a range of values (for example, '200-299').
:rtype: dict
:return: {
'TargetGroups': [
{
'TargetGroupArn': 'string',
'TargetGroupName': 'string',
'Protocol': 'HTTP'|'HTTPS',
'Port': 123,
'VpcId': 'string',
'HealthCheckProtocol': 'HTTP'|'HTTPS',
'HealthCheckPort': 'string',
'HealthCheckIntervalSeconds': 123,
'HealthCheckTimeoutSeconds': 123,
'HealthyThresholdCount': 123,
'UnhealthyThresholdCount': 123,
'HealthCheckPath': 'string',
'Matcher': {
'HttpCode': 'string'
},
'LoadBalancerArns': [
'string',
]
},
]
}
:returns:
(string) --
"""
pass
def delete_listener(ListenerArn=None):
"""
Deletes the specified listener.
Alternatively, your listener is deleted when you delete the load balancer it is attached to using DeleteLoadBalancer .
#DEM.py - part of the PennyHow/PyTrx repository
#PyTrx (c) by <NAME>, <NAME>, <NAME>
#
#PyTrx is licensed under a MIT License.
#
#You should have received a copy of the license along with this
#work. If not, see <https://choosealicense.com/licenses/mit/>.
"""
The DEM module contains functionality for handling DEM data and implementing
this data into the :class:`PyTrx.CamEnv.CamEnv` object class.
"""
#Import packages
import numpy as np
import scipy.io as sio
import gdal
import math
from scipy import interpolate
from gdalconst import GA_ReadOnly
import struct
from scipy.interpolate import RectBivariateSpline
#------------------------------------------------------------------------------
class ExplicitRaster(object):
"""A class to represent a numeric Raster with explicit XY cell referencing
in each grid cell.
:param X: X data
:type X: arr
:param Y: Y data
:type Y: arr
:param Z: Z data
:type Z: arr
:param nodata: Condition for NaN data values, default to 'nan'
:type nodata: int, optional
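Example (an illustrative sketch using small made-up arrays)::

    import numpy as np
    X, Y = np.meshgrid(np.arange(0.0, 3.0), np.arange(10.0, 13.0))
    Z = np.ones(X.shape)
    dem = ExplicitRaster(X, Y, Z)
    dem.getShape()    # (3, 3)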
"""
#Basic constructor method
def __init__(self, X, Y, Z, nodata=float('nan')):
'''Explicit Raster initialisation.'''
#Check XYZ data is all the same size
if not (X.shape==Y.shape and X.shape==Z.shape):
print('Raster data and/or co-ordinate arrays are differently sized')
print('X-shape ' + str(X.shape))
print('Y-shape ' + str(Y.shape))
print('Z-shape ' + str(Z.shape))
return
#Define class attributes
self._data=np.array([X,Y,Z])
self._nodata=nodata
self._extents=[X[0][0]-0.5*(X[0][1]-X[0][0]),X[-1][-1]+0.5*(X[-1][-1]-
X[-1][-2]),Y[0][0]-0.5*(Y[1][0]-Y[0][0]),Y[-1][-1]+0.5*
(Y[-1][-1]-Y[-2][-1])]
def getData(self,dim=None):
"""Return DEM data. XYZ dimensions can be individually called with the
dim input variable (integer: 0, 1, or 2).
:param dim: Dimension to retrieve (0, 1, or 2), default to None
:type dim: int
:returns: DEM dimension as array
:rtype: arr
"""
#Return all DEM data if no dimension is specified
if dim==None:
return self._data
#Return specific DEM dimension
elif (dim==0 or dim==1 or dim==2):
return self._data[dim]
#Return None if no DEM data present
else:
return None
def getZ(self):
"""Return height (Z) data of DEM.
:returns: DEM Z values
:rtype: arr
"""
return self.getData(2)
def getZcoord(self, x, y):
"""Return height (Z) at a given XY coordinate in DEM.
:param x: X coordinate
:type x: int
:param y: Y coordinate
:type y: int
:returns: DEM Z value for given coordinate
:rtype: int
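Example (illustrative; ``dem`` is an :class:`ExplicitRaster` whose extent
covers the queried coordinate)::

    z = dem.getZcoord(451000.0, 8754000.0)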
"""
rowcoords = self.getData(0)[0,:]
colcoords = self.getData(1)[:,0]
demz = self.getZ()
xcoord = (np.abs(rowcoords-x)).argmin()
ycoord = (np.abs(colcoords-y)).argmin()
return demz[ycoord,xcoord]
def getShape(self):
"""Return the shape of the DEM data array.
:returns: DEM shape
:rtype: arr
"""
return self._data[0].shape
def getRows(self):
"""Return the number of rows in the DEM data array.
:returns: DEM row count
:rtype: int
"""
return self._data[0].shape[0]
def getCols(self):
"""Return the number of columns in the DEM data array.
:returns: DEM column count
:rtype: int
"""
return self._data[0].shape[1]
def getNoData(self):
"""Return fill value for no data in DEM array.
:returns: DEM nan fill value
:rtype: int
"""
return self._nodata
def getExtent(self):
"""Return DEM extent.
:returns: DEM extent
:rtype: list
"""
return self._extents
def subset(self,cmin,cmax,rmin,rmax):
"""Return a specified subset of the DEM array.
:param cmin: Column minimum extent
:type cmin: int
:param cmax: Column maximum extent
:type cmax: int
:param rmin: Row minimum extent
:type rmin: int
:param rmax: Row maximum extent
:type rmax: int
:returns: Subset of DEM
:rtype: :class:`PyTrx.DEM.ExplicitRaster`
"""
#Find minimum extent value
cmin=int(max(0,cmin))
rmin=int(max(0,rmin))
#Find maximum extent value
cmax=int(min(self._data[0].shape[1],cmax))
rmax=int(min(self._data[0].shape[0],rmax))
#Extract XYZ subset
X=self._data[0][rmin:rmax,cmin:cmax]
Y=self._data[1][rmin:rmax,cmin:cmax]
Z=self._data[2][rmin:rmax,cmin:cmax]
#Construct new XYZ array
return ExplicitRaster(X,Y,Z)
def densify(self, densefac=2):
"""Function to densify the DEM array by a given densification factor.
The array is multiplied by the given densification factor and then
subsequently values are interpolated using the SciPy function
RectBivariateSpline. The densification factor is set to 2 by default,
meaning that the size of the DEM array is doubled.
:param densefac: Densification factor
:type densefac: int
:returns: Densified DEM
:rtype: :class:`PyTrx.DEM.ExplicitRaster`
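Example (illustrative; ``dem`` is a 3 x 3 :class:`ExplicitRaster`, so a
densification factor of 2 gives (3-1)*2+1 = 5 points per axis)::

    dense = dem.densify(densefac=2)
    dense.getShape()    # (5, 5)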
"""
#Get XYZ dem data
x=self._data[0,0,:]
y=self._data[1,:,0]
z=np.transpose(self._data[2])
#Multiply the size of the xy arrays by the densification factor
nx=((x.size-1)*densefac)+1
ny=((y.size-1)*densefac)+1
#Define new array data spacing
xd = np.linspace(x[0], x[-1], nx)
yd = np.linspace(y[0], y[-1], ny)
#Create mesh grid
yv,xv = np.meshgrid(yd,xd)
#Interpolate
f=RectBivariateSpline(x, y, z, bbox=[None, None, None, None],
kx=1, ky=1, s=0)
#Create empty array for Z data
zv=np.zeros((nx,ny))
#Reshape XYZ arrays
xv=np.reshape(xv,(nx*ny))
yv=np.reshape(yv,(nx*ny))
zv=np.reshape(zv,(nx*ny))
#Populate empty Z array
for i in range(xv.size):
zv[i]=f(xv[i],yv[i])
#Transpose arrays for compatibility
xv=np.transpose(np.reshape(xv,(nx,ny)))
yv=np.transpose(np.reshape(yv,(nx,ny)))
zv=np.transpose(np.reshape(zv,(nx,ny)))
#Construct new XYZ array
return ExplicitRaster(xv,yv,zv)
def reportDEM(self):
"""Self reporter for DEM class object. Returns the number of rows and
columns in the array, how NaN values in the array are filled, and the
data extent coordinates.
"""
print('\nDEM object reporting:\n')
print('Data has ' + str(self.getRows()) + ' rows by ' +
str(self.getCols()) + ' columns')
print('No data item is: ' + str(self.getNoData()))
print('Data Extent Coordinates are [xmin,xmax,ymin,ymax]: ' +
str(self.getExtent()))
def load_DEM(demfile):
"""Function for loading DEM data from different file types, which is
automatically detected. Recognised file types: .mat and .tif.
:param demfile: DEM filepath
:type demfile: str
:returns: A DEM object
:rtype: :class:`PyTrx.DEM.ExplicitRaster`
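Example (illustrative; assumes a GeoTIFF named ``dem.tif`` is available)::

    dem = load_DEM('dem.tif')
    if dem is not None:
        dem.reportDEM()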
"""
#Determine file type based on filename suffix
suffix=demfile.split('.')[-1].upper()
#MAT file import if detected
if suffix==("MAT"):
return DEM_FromMat(demfile)
#TIF file import if detected
elif suffix==("TIF") or suffix==("TIFF"):
return DEM_FromTiff(demfile)
#No DEM data passed if file type is not recognised
else:
print('DEM format (suffix) not supported')
print('DEM file: ' + str(demfile) + ' not read')
return None
def DEM_FromMat(matfile):
"""Function for loading a DEM array from a Matlab (.mat) file containing
separate X, Y, Z matrices.
:param matfile: DEM .mat filepath
:type matfile: str
:returns: A DEM object
:rtype: :class:`PyTrx.DEM.ExplicitRaster`
"""
#Load Matlab file and XYZ matrices as arrays
mat = sio.loadmat(matfile)
X=np.ascontiguousarray(mat['X'])
Y=np.ascontiguousarray(mat['Y'])
Z=np.ascontiguousarray(mat['Z'])
#Flip array if not compatible
if Y[0][0]>Y[-1][0]:
print('\nFlipping input DEM')
X = np.flipud(X)
Y = np.flipud(Y)
Z = np.flipud(Z)
#Construct DEM array
dem=ExplicitRaster(X,Y,Z)
return dem
def DEM_FromTiff(tiffFile):
"""Function for loading a DEM array from a .tiff file containing
raster-formatted data. The tiff data importing is handled by GDAL.
:param tiffFile: DEM .tif filepath
:type tiffFile: str
:returns: A DEM object
:rtype: :class:`PyTrx.DEM.ExplicitRaster`
"""
#Open tiff file with GDAL
dataset = gdal.Open(tiffFile, GA_ReadOnly)
#Define columns and rows in raster
cols = dataset.RasterXSize
rows = dataset.RasterYSize
#Transform raster and define origins for populating
geotransform = dataset.GetGeoTransform()
originX = geotransform[0]
originY = geotransform[3]
pixelWidth = geotransform[1]
pixelHeight = geotransform[5]
#Get Z data from raster
band = dataset.GetRasterBand(1)
scanline = band.ReadRaster( 0, 0, band.XSize, band.YSize,band.XSize,
band.YSize, band.DataType)
value = struct.unpack('f' * band.XSize *band.YSize, scanline)
Z=np.array(value).reshape(rows,cols)
#Create empty arrays for XY data
X=np.zeros((rows,cols))
Y=np.zeros((rows,cols))
#Populate empty arrays from origins
originX=originX+(pixelWidth*0.5)
originY=originY+(pixelWidth*0.5)
for i in range(rows):
for j in range(cols):
X[i,j]=(j*pixelWidth)+originX
Y[i,j]=(i*pixelHeight)+originY
#Flip array if not compatible
if Y[0,0]>Y[-1,0]:
X=np.flipud(X)
Y=np.flipud(Y)
Z=np.flipud(Z)
#Construct DEM array
dem=ExplicitRaster(X,Y,Z)
return dem
def voxelviewshed(dem, viewpoint):
"""Calculate a viewshed over a DEM from a given viewpoint in the DEM scene.
This function is based on the viewshed function (voxelviewshed.m) available
in ImGRAFT. The ImGRAFT voxelviewshed.m script is available at:
http://github.com/grinsted/ImGRAFT/blob/master/voxelviewshed.m
:param dem: A DEM object
:type dem: :class:`PyTrx.DEM.ExplicitRaster`
:param viewpoint: 3-element vector specifying the viewpoint
:type viewpoint: list
:returns: Boolean visibility matrix (which is the same size as dem)
:rtype: arr
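Example (illustrative; ``dem`` is an :class:`ExplicitRaster` and the
viewpoint is a hypothetical [x, y, z] camera location inside the DEM)::

    vis = voxelviewshed(dem, [451000.0, 8754000.0, 300.0])
    # vis has the same shape as the DEM grid; True marks visible cells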
"""
#Get XYZ arrays
X=dem.getData(0)
Y=dem.getData(1)
Z=dem.getData(2)
#Get array shape
sz=Z.shape
#Get grid spacing
dx=abs(X[1,1]-X[0,0])
dy=abs(Y[1,1]-Y[0,0])
#Linearise the grid
X=np.reshape(X,X.shape[0]*X.shape[1],order='F')
Y=np.reshape(Y,Y.shape[0]*Y.shape[1],order='F')
Z=np.reshape(Z,Z.shape[0]*Z.shape[1],order='F')
#Define viewpoint in DEM grid space
X=(X-viewpoint[0])/dx
Y=(Y-viewpoint[1])/dy
Z=Z-viewpoint[2]
#Create empty array
d=np.zeros(len(X))
#Populate array
for i in range(len(X)):
if (np.isnan(X[i]) or np.isnan(Y[i]) or np.isnan(Z[i])):
d[i]=float('NaN')
else:
d[i]=np.sqrt(X[i]*X[i]+Y[i]*Y[i]+Z[i]*Z[i])
#Pythagoras' theorem
#ImGRAFT/Matlab equiv: x=(atan2(Y,X)+pi)/(pi*2); (MAT)
dint=np.round(np.sqrt(X*X+Y*Y))
#Create empty array
x=np.empty(X.shape[0])
#Populate array
for i in range(X.shape[0]):
x[i]=(math.atan2(Y[i],X[i])+math.pi)/(math.pi*2)
y=Z/d
#Round values and sort array
#ImGRAFT/Matlab equiv: [~,ix]=sortrows([round(sqrt(X.^2+Y.^2)) x]); (MAT)
ix=np.lexsort((x,dint)).tolist()
#Return a boolean of all array values that are not zero
#ImGRAFT/Matlab equiv: loopix=find(diff(x(ix))<0); (MAT)
loopix=np.nonzero(np.diff(x[ix])<0)[0]
#Create boolean array of 1's
import sys
import time
import tensorflow as tf
import numpy as np
from data_utils import SeqBatcher, Batcher
from cnn import CNN
from bilstm import BiLSTM
from bilstm_char import BiLSTMChar
from cnn_char import CNNChar
import eval_f1 as evaluation
import json
import tf_utils
from os import listdir
import os
import logging
from utils import make_sure_path_exists
FLAGS = tf.app.flags.FLAGS
def main(argv):
print("CUDA_VISIBLE_DEVICES=", os.environ.get('CUDA_VISIBLE_DEVICES', 0))
train_dir = FLAGS.train_dir
dev_dir = FLAGS.dev_dir
maps_dir = FLAGS.maps_dir
logger = init_logger()
logger.info(' '.join(sys.argv) + '\n')
if FLAGS.evaluate_only:
if FLAGS.load_dir == '':
FLAGS.load_dir = FLAGS.model_dir
if FLAGS.load_dir == '':
logger.error('Must supply load_dir in evaluation mode')
sys.exit(1)
if train_dir == '':
logger.error('Must supply input data directory generated from tsv_to_tfrecords.py')
sys.exit(1)
logger.info('\n'.join(sorted(["%s : %s" % (str(k), str(v)) for k, v in FLAGS.__dict__['__flags'].items()])))
with open(maps_dir + '/label.txt', 'r') as f:
labels_str_id_map = {l.split('\t')[0]: int(l.split('\t')[1].strip()) for l in f.readlines()}
labels_id_str_map = {i: s for s, i in labels_str_id_map.items()}
labels_size = len(labels_id_str_map)
with open(maps_dir + '/token.txt', 'r') as f:
vocab_str_id_map = {l.split('\t')[0]: int(l.split('\t')[1].strip()) for l in f.readlines()}
vocab_id_str_map = {i: s for s, i in vocab_str_id_map.items()}
vocab_size = len(vocab_id_str_map)
with open(maps_dir + '/shape.txt', 'r') as f:
shape_str_id_map = {l.split('\t')[0]: int(l.split('\t')[1].strip()) for l in f.readlines()}
shape_id_str_map = {i: s for s, i in shape_str_id_map.items()}
shape_domain_size = len(shape_id_str_map)
with open(maps_dir + '/char.txt', 'r') as f:
char_str_id_map = {l.split('\t')[0]: int(l.split('\t')[1].strip()) for l in f.readlines()}
char_id_str_map = {i: s for s, i in char_str_id_map.items()}
char_domain_size = len(char_id_str_map)
# with open(maps_dir + '/sizes.txt', 'r') as f:
# num_train_examples = int(f.readline()[:-1])
logger.info("num classes: %d" % labels_size)
size_files = [maps_dir + "/" + fname for fname in listdir(maps_dir) if fname.find("sizes") != -1]
num_train_examples = 0
num_tokens = 0
for size_file in size_files:
logger.info(size_file)
with open(size_file, 'r') as f:
num_train_examples += int(f.readline()[:-1])
num_tokens += int(f.readline()[:-1])
logger.info("num train examples: %d" % num_train_examples)
logger.info("num train tokens: %d" % num_tokens)
dev_top_dir = '/'.join(dev_dir.split("/")[:-2]) if dev_dir.find("*") != -1 else dev_dir
logger.info(dev_top_dir)
dev_size_files = [dev_top_dir + "/" + fname for fname in listdir(dev_top_dir) if fname.find("sizes") != -1]
num_dev_examples = 0
num_dev_tokens = 0
for size_file in dev_size_files:
logger.info(size_file)
with open(size_file, 'r') as f:
num_dev_examples += int(f.readline()[:-1])
num_dev_tokens += int(f.readline()[:-1])
logger.info("num dev examples: %d" % num_dev_examples)
logger.info("num dev tokens: %d" % num_dev_tokens)
# with open(dev_dir + '/sizes.txt', 'r') as f:
# num_dev_examples = int(f.readline()[:-1])
type_set = {}
type_int_int_map = {}
outside_set = ["O", "<PAD>", "<S>", "</S>", "<ZERO>"]
for label, id in labels_str_id_map.items():
label_type = label if label in outside_set else label[2:]
if label_type not in type_set:
type_set[label_type] = len(type_set)
type_int_int_map[id] = type_set[label_type]
logger.info(type_set) # All NER types
# load embeddings, if given; initialize in range [-.01, .01]
embeddings_shape = (vocab_size - 1, FLAGS.embed_dim)
embeddings = tf_utils.embedding_values(embeddings_shape, old=False)
used_words = set()
if FLAGS.embeddings != '':
with open(FLAGS.embeddings, 'r') as f:
for line in f.readlines():
split_line = line.strip().split(" ")
if len(split_line) != FLAGS.embed_dim + 1:
continue
word = split_line[0]
embedding = split_line[1:]
if word in vocab_str_id_map:
used_words.add(word)
# shift by -1 because we are going to add a 0 constant vector for the padding later
embeddings[vocab_str_id_map[word] - 1] = list(map(float, embedding))
embeddings_used = len(used_words)
logger.info("Loaded %d/%d embeddings (%2.2f%% coverage)" % (
embeddings_used, vocab_size, embeddings_used / vocab_size * 100))
layers_map = sorted(json.loads(FLAGS.layers.replace("'", '"')).items()) if FLAGS.model == 'cnn' else None
pad_width = int(layers_map[0][1]['width'] / 2) if layers_map is not None else 1
with tf.Graph().as_default():
train_batcher = Batcher(train_dir, FLAGS.batch_size) if FLAGS.memmap_train else SeqBatcher(train_dir,
FLAGS.batch_size)
dev_batch_size = FLAGS.batch_size # num_dev_examples
dev_batcher = SeqBatcher(dev_dir, dev_batch_size, num_buckets=0, num_epochs=1)
if FLAGS.ontonotes:
domain_dev_batchers = {domain: SeqBatcher(dev_dir.replace('*', domain),
dev_batch_size, num_buckets=0, num_epochs=1)
for domain in ['bc', 'nw', 'bn', 'wb', 'mz', 'tc']}
train_eval_batch_size = FLAGS.batch_size
train_eval_batcher = SeqBatcher(train_dir, train_eval_batch_size, num_buckets=0, num_epochs=1)
char_embedding_model = BiLSTMChar(char_domain_size, FLAGS.char_dim, int(FLAGS.char_tok_dim / 2)) \
if FLAGS.char_dim > 0 and FLAGS.char_model == "lstm" else \
(CNNChar(char_domain_size, FLAGS.char_dim, FLAGS.char_tok_dim, layers_map[0][1]['width'])
if FLAGS.char_dim > 0 and FLAGS.char_model == "cnn" else None)
char_embeddings = char_embedding_model.outputs if char_embedding_model is not None else None
if FLAGS.model == 'cnn':
model = CNN(
num_classes=labels_size,
vocab_size=vocab_size,
shape_domain_size=shape_domain_size,
char_domain_size=char_domain_size,
char_size=FLAGS.char_tok_dim,
embedding_size=FLAGS.embed_dim,
shape_size=FLAGS.shape_dim,
nonlinearity=FLAGS.nonlinearity,
layers_map=layers_map,
viterbi=FLAGS.viterbi,
projection=FLAGS.projection,
loss=FLAGS.loss,
margin=FLAGS.margin,
repeats=FLAGS.block_repeats,
share_repeats=FLAGS.share_repeats,
char_embeddings=char_embeddings,
embeddings=embeddings)
elif FLAGS.model == "bilstm":
model = BiLSTM(
num_classes=labels_size,
vocab_size=vocab_size,
shape_domain_size=shape_domain_size,
char_domain_size=char_domain_size,
char_size=FLAGS.char_dim,
embedding_size=FLAGS.embed_dim,
shape_size=FLAGS.shape_dim,
nonlinearity=FLAGS.nonlinearity,
viterbi=FLAGS.viterbi,
hidden_dim=FLAGS.lstm_dim,
char_embeddings=char_embeddings,
embeddings=embeddings)
else:
logger.info(FLAGS.model + ' is not a valid model type')
sys.exit(1)
# Define Training procedure
global_step = tf.Variable(0, name='global_step', trainable=False)
optimizer = tf.train.AdamOptimizer(learning_rate=model.lr, beta1=FLAGS.beta1, beta2=FLAGS.beta2,
epsilon=FLAGS.epsilon, name="optimizer")
model_vars = tf.global_variables()
logger.info("model vars: %d" % len(model_vars))
logger.info(map(lambda v: v.name, model_vars))
# todo put in func
total_parameters = 0
for variable in tf.trainable_variables():
# shape is an array of tf.Dimension
shape = variable.get_shape()
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.value
total_parameters += variable_parameters
logger.info("Total trainable parameters: %d" % (total_parameters))
if FLAGS.clip_norm > 0:
grads, _ = tf.clip_by_global_norm(tf.gradients(model.loss, model_vars), FLAGS.clip_norm)
train_op = optimizer.apply_gradients(zip(grads, model_vars), global_step=global_step)
else:
train_op = optimizer.minimize(model.loss, global_step=global_step, var_list=model_vars)
tf.global_variables_initializer()
opt_vars = [optimizer.get_slot(s, n) for n in optimizer.get_slot_names() for s in model_vars if
optimizer.get_slot(s, n) is not None]
model_vars += opt_vars
if FLAGS.load_dir:
reader = tf.train.NewCheckpointReader(FLAGS.load_dir + "/model.tf")
saved_var_map = reader.get_variable_to_shape_map()
intersect_vars = [k for k in tf.global_variables() if
k.name.split(':')[0] in saved_var_map and k.get_shape() == saved_var_map[
k.name.split(':')[0]]]
leftovers = [k for k in tf.global_variables() if
k.name.split(':')[0] not in saved_var_map or k.get_shape() != saved_var_map[
k.name.split(':')[0]]]
logger.warning("WARNING: Loading pretrained model, but not loading: " + ' '.join(
list(map(lambda v: v.name, leftovers))))
loader = tf.train.Saver(var_list=intersect_vars)
else:
loader = tf.train.Saver(var_list=model_vars)
saver = tf.train.Saver(var_list=model_vars)
sv = tf.train.Supervisor(logdir=FLAGS.model_dir if FLAGS.model_dir != '' else None,
global_step=global_step,
saver=None,
save_model_secs=0,
save_summaries_secs=0)
training_start_time = time.time()
with sv.managed_session(FLAGS.master, config=tf.ConfigProto(allow_soft_placement=True)) as sess:
def run_evaluation(eval_batches, output=None, extra_text=""):
predictions = []
for b, (eval_label_batch, eval_token_batch, eval_shape_batch, eval_char_batch, eval_seq_len_batch,
eval_tok_len_batch, eval_mask_batch) in enumerate(eval_batches):
batch_size, batch_seq_len = eval_token_batch.shape
char_lens = np.sum(eval_tok_len_batch, axis=1)
max_char_len = np.max(eval_tok_len_batch)
eval_padded_char_batch = np.zeros((batch_size, max_char_len * batch_seq_len))
for b in range(batch_size):
char_indices = [item for sublist in [range(i * max_char_len, i * max_char_len + d) for i, d in
enumerate(eval_tok_len_batch[b])] for item in sublist]
eval_padded_char_batch[b, char_indices] = eval_char_batch[b][:char_lens[b]]
char_embedding_feeds = {} if FLAGS.char_dim == 0 else {
char_embedding_model.input_chars: eval_padded_char_batch,
char_embedding_model.batch_size: batch_size,
char_embedding_model.max_seq_len: batch_seq_len,
char_embedding_model.token_lengths: eval_tok_len_batch,
char_embedding_model.max_tok_len: max_char_len
}
basic_feeds = {
model.input_x1: eval_token_batch,
model.input_x2: eval_shape_batch,
model.input_y: eval_label_batch,
model.input_mask: eval_mask_batch,
model.max_seq_len: batch_seq_len,
model.batch_size: batch_size,
model.sequence_lengths: eval_seq_len_batch
}
basic_feeds.update(char_embedding_feeds)
total_feeds = basic_feeds.copy()
if FLAGS.viterbi:
preds, transition_params = sess.run([model.predictions, model.transition_params],
feed_dict=total_feeds)
viterbi_repad = np.empty((batch_size, batch_seq_len))
for batch_idx, (unary_scores, sequence_lens) in enumerate(zip(preds, eval_seq_len_batch)):
viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(unary_scores, transition_params)
viterbi_repad[batch_idx] = viterbi_sequence
predictions.append(viterbi_repad)
else:
preds, scores = sess.run([model.predictions, model.unflat_scores], feed_dict=total_feeds)
predictions.append(preds)
if output is not None:
evaluation.output_predicted_to_file(
(FLAGS.model_dir if FLAGS.model_dir != '' else FLAGS.load_dir) + "/" + output + ".txt",
eval_batches,
predictions, labels_id_str_map,
vocab_id_str_map, pad_width)
# print evaluation
precision, recall, f1_micro = evaluation.segment_eval(eval_batches, predictions, labels_id_str_map,
vocab_id_str_map,
pad_width=pad_width, start_end=FLAGS.start_end,
logger=logger,
extra_text="Segment evaluation %s:" % extra_text)
return f1_micro, precision
threads = tf.train.start_queue_runners(sess=sess)
log_every = int(max(100, num_train_examples / 5))
if FLAGS.load_dir != '':
logger.info("Deserializing model: " + FLAGS.load_dir + "/model.tf")
loader.restore(sess, FLAGS.load_dir + "/model.tf")
def get_dev_batches(seq_batcher):
batches = []
# load all the dev batches into memory
done = False
while not done:
try:
dev_batch = sess.run(seq_batcher.next_batch_op)
dev_label_batch, dev_token_batch, dev_shape_batch, dev_char_batch, dev_seq_len_batch, dev_tok_len_batch = dev_batch
mask_batch = np.zeros(dev_token_batch.shape)
actual_seq_lens = np.add(np.sum(dev_seq_len_batch, axis=1),
(2 if FLAGS.start_end else 1) * pad_width * (
(dev_seq_len_batch != 0).sum(axis=1) + (
0 if FLAGS.start_end else 1)))
for i, seq_len in enumerate(actual_seq_lens):
mask_batch[i, :seq_len] = 1
batches.append((dev_label_batch, dev_token_batch, dev_shape_batch, dev_char_batch,
dev_seq_len_batch, dev_tok_len_batch, mask_batch))
except:
done = True
return batches
dev_batches = get_dev_batches(dev_batcher)
train_batches = []
if FLAGS.train_eval:
# load all the train batches into memory
done = False
while not done:
try:
train_batch = sess.run(train_eval_batcher.next_batch_op)
train_label_batch, train_token_batch, train_shape_batch, train_char_batch, train_seq_len_batch, train_tok_len_batch = train_batch
mask_batch = np.zeros(train_token_batch.shape)
actual_seq_lens = np.add(np.sum(train_seq_len_batch, axis=1),
(2 if FLAGS.start_end else 1) * pad_width * (
(train_seq_len_batch != 0).sum(axis=1) + (
0 if FLAGS.start_end else 1)))
for i, seq_len in enumerate(actual_seq_lens):
mask_batch[i, :seq_len] = 1
train_batches.append((train_label_batch, train_token_batch, train_shape_batch, train_char_batch,
train_seq_len_batch, train_tok_len_batch, mask_batch))
except Exception as e:
done = True
if FLAGS.memmap_train:
train_batcher.load_and_bucket_data(sess)
def train(max_epochs, best_score, model_hidden_drop, model_input_drop, until_convergence, max_lower=6,
min_iters=20):
logger.info("Training on %d sentences (%d examples)" % (num_train_examples, num_train_examples))
start_time = time.time()
train_batcher._step = 1.0
converged = False
examples = 0
log_every_running = log_every
epoch_loss = 0.0
num_lower = 0
training_iteration = 0
(other, distance) pairs for all the
known objects at distance less than or equal to near_distance from obj,
except obj itself.
Notice that it includes the ones colliding with obj.
obj is not required to be a known object
If the game logic wants the list ordered by ascending distances, use
ranked_objs_near instead.
"""
raise NotImplementedError(msg_abstract)
def ranked_objs_near(self, obj, near_distance):
"""
Same as objs_near_wdistance but the list is ordered in increasing distance
obj is not required to be a known object
"""
raise NotImplementedError(msg_abstract)
def iter_all_collisions(self):
"""
Iterator that exposes all collisions between known objects.
At each step it will yield a pair (obj, other).
If (obj1, obj2) is seen when consuming the iterator, then (obj2, obj1)
will not be seen.
In other words, 'obj1 collides with obj2' means (obj1, obj2) or
(obj2, obj1) will appear in the iterator output but not both.
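Example (illustrative; ``manager`` is any concrete CollisionManager and
``resolve`` a hypothetical game callback)::

    for obj, other in manager.iter_all_collisions():
        resolve(obj, other)    # each unordered pair is yielded only once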
"""
def knows(self, obj):
"""Returns True if obj was added to the collision manager, false otherwise
Used for debug and testing.
"""
raise NotImplementedError(msg_abstract)
def known_objs(self):
"""Returns a set with all the objects known by the CollisionManager
Used for debug and testing.
"""
raise NotImplementedError(msg_abstract)
def objs_touching_point(self, x, y):
"""Returns a container with known objects touching point (x, y)
Useful for mouse pick
"""
raise NotImplementedError(msg_abstract)
def objs_into_box(self, minx, maxx, miny, maxy):
"""Returns a container with know objects that fully fits into the axis
aligned rectangle defined by params
Useful for elastic box selection
"""
raise NotImplementedError(msg_abstract)
# Cshape implementations #################################################
class CircleShape(Cshape):
"""
Implements the Cshape interface that uses discs as geometric shape.
Distance is the euclidean distance.
Look at Cshape for other class and methods documentation.
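Example (illustrative; ``eu`` is the euclid module this file relies on)::

    a = CircleShape(eu.Vector2(0.0, 0.0), 5.0)
    b = CircleShape(eu.Vector2(8.0, 0.0), 5.0)
    a.overlaps(b)    # True: centers are 8 apart, radii sum to 10
    a.distance(b)    # 0.0: the discs overlap, so the gap is clamped to zero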
"""
def __init__(self, center, r):
"""
:Parameters:
`center` : euclid.Vector2
rectangle center
`r` : float
disc radius
"""
self.center = center
self.r = r
def overlaps(self, other):
if isinstance(other, CircleShape):
return circle_overlaps_circle(self, other)
elif isinstance(other, AARectShape):
return aa_rect_overlaps_circle(other, self)
raise NotImplementedError(
"Collision between CircleShape and {0} is not implemented".format(other.__class__.__name__))
def distance(self, other):
if isinstance(other, CircleShape):
return circle_distance_circle(self, other)
elif isinstance(other, AARectShape):
return aa_rect_distance_circle(other, self)
raise NotImplementedError(
"Distance between CircleShape and {0} is not implemented".format(other.__class__.__name__))
def near_than(self, other, near_distance):
return self.distance(other) <= near_distance
def touches_point(self, x, y):
return abs(self.center - (x, y)) <= self.r
def fits_in_box(self, packed_box):
r = self.r
return (((packed_box[0] + r) <= self.center[0] <= (packed_box[1] - r)) and
((packed_box[2] + r) <= self.center[1] <= (packed_box[3] - r)))
def minmax(self):
r = self.r
return (self.center[0] - r, self.center[0] + r,
self.center[1] - r, self.center[1] + r)
def copy(self):
return CircleShape(eu.Vector2(*self.center), self.r)
class AARectShape(Cshape):
"""
Implements the Cshape interface that uses rectangles with sides
parallel to the coordinate axis as geometric shape.
Distance is not the euclidean distance but the rectangular or max-min
distance: max(|cx0 - cx1| - rx0 - rx1, |cy0 - cy1| - ry0 - ry1), clamped
to zero, where (cxi, cyi) are the rect centers and (rxi, ryi) the half sizes.
Good if actors don't rotate.
Look at Cshape for other class and methods documentation.
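Example (illustrative; ``eu`` is the euclid module this file relies on)::

    r1 = AARectShape(eu.Vector2(0.0, 0.0), 2.0, 1.0)
    r2 = AARectShape(eu.Vector2(5.0, 0.0), 2.0, 1.0)
    r1.overlaps(r2)    # False: |0 - 5| = 5 is not < 2 + 2
    r1.distance(r2)    # 1.0 = max(5 - 2 - 2, 0 - 1 - 1), clamped at zero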
"""
def __init__(self, center, half_width, half_height):
"""
:Parameters:
`center` : euclid.Vector2
rectangle center
`half_width` : float
half width of rectangle
`half_height` : float
half height of rectangle
"""
self.center = center
self.rx = half_width
self.ry = half_height
def overlaps(self, other):
if isinstance(other, AARectShape):
return aa_rect_overlaps_aa_rect(self, other)
elif isinstance(other, CircleShape):
return aa_rect_overlaps_circle(self, other)
raise NotImplementedError(
"Collision between AARectShape and {0} is not implemented".format(other.__class__.__name__))
def distance(self, other):
if isinstance(other, AARectShape):
return aa_rect_distance_aa_rect(self, other)
elif isinstance(other, CircleShape):
return aa_rect_distance_circle(self, other)
raise NotImplementedError(
"Distance between AARectShape and {0} is not implemented".format(other.__class__.__name__))
def near_than(self, other, near_distance):
return self.distance(other) <= near_distance
def touches_point(self, x, y):
return (abs(self.center[0] - x) < self.rx and
abs(self.center[1] - y) < self.ry)
def fits_in_box(self, packed_box):
return ((packed_box[0] + self.rx <= self.center[0] <= packed_box[1] - self.rx) and
(packed_box[2] + self.ry <= self.center[1] <= packed_box[3] - self.ry))
def minmax(self):
return (self.center[0] - self.rx, self.center[0] + self.rx,
self.center[1] - self.ry, self.center[1] + self.ry)
def copy(self):
return AARectShape(eu.Vector2(*self.center), self.rx, self.ry)
def clamp(value, minimum, maximum):
return max(min(value, maximum), minimum)
def aa_rect_overlaps_aa_rect(aa_rect, other):
"""
Tells if two axis aligned rectangles overlap.
    The rects must have members 'center', 'rx', 'ry', where the latter two are
    the rect half_width and half_height.
"""
return abs(aa_rect.center[0] - other.center[0]) < aa_rect.rx + other.rx and \
abs(aa_rect.center[1] - other.center[1]) < aa_rect.ry + other.ry
def circle_overlaps_circle(circle, other):
"""
Tells if two circles overlap.
    The circles must have members 'center' and 'r', where the latter is the radius.
"""
return (circle.center - other.center).magnitude_squared() < (circle.r + other.r) ** 2
def aa_rect_overlaps_circle(aa_rect, circle):
"""
Tells if an axis aligned rectangle and a circle overlap.
    The rect must have members 'center', 'rx', 'ry', where the latter two are
    the rect half_width and half_height.
    The circle must have members 'center' and 'r', where the latter is the radius.
"""
d = circle.center - aa_rect.center
# Point in the rect nearest to circle center.
d_clamped = eu.Vector2(clamp(d.x, -aa_rect.rx, aa_rect.rx),
clamp(d.y, -aa_rect.ry, aa_rect.ry))
return (d - d_clamped).magnitude_squared() < circle.r ** 2
def circle_distance_circle(circle, other):
"""
    Gives the distance between two circles.
    The circles must have members 'center' and 'r', where the latter is the radius.
"""
d = abs(circle.center - other.center) - circle.r - other.r
if d < 0.0:
d = 0.0
return d
def aa_rect_distance_circle(aa_rect, circle):
"""
    Gives the distance between an axis-aligned rectangle and a circle.
    The rect must have members 'center', 'rx', 'ry', where the latter two are
    the rect half_width and half_height.
    The circle must have members 'center' and 'r', where the latter is the radius.
"""
d = circle.center - aa_rect.center
# Point in the rect nearest to circle center.
d_clamped = eu.Vector2(clamp(d.x, -aa_rect.rx, aa_rect.rx),
clamp(d.y, -aa_rect.ry, aa_rect.ry))
d = abs(d - d_clamped) - circle.r
if d < 0.0:
d = 0.0
return d
def aa_rect_distance_aa_rect(aa_rect, other):
"""
    Gives the distance between two axis-aligned rectangles.
    The rects must have members 'center', 'rx', 'ry', where the latter two are
    the rect half_width and half_height.
"""
d = max((abs(aa_rect.center[0] - other.center[0]) - aa_rect.rx - other.rx,
abs(aa_rect.center[1] - other.center[1]) - aa_rect.ry - other.ry))
if d < 0.0:
d = 0.0
return d
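# A tiny worked example helps follow the shape classes and helpers above. This is a
# minimal sketch and not part of the original module; it only assumes the `eu`
# (euclid) import already used by CircleShape.copy / AARectShape.copy.
def _collision_geometry_demo():
    """Hypothetical demo: overlap and distance queries between the two shape types."""
    circle = CircleShape(eu.Vector2(0.0, 0.0), 1.0)
    rect = AARectShape(eu.Vector2(1.5, 0.0), half_width=1.0, half_height=1.0)
    # The rect spans x in [0.5, 2.5]; the circle reaches x = 1.0, so they overlap.
    assert circle.overlaps(rect)
    far_circle = CircleShape(eu.Vector2(5.0, 0.0), 1.0)
    # Nearest rect point to (5, 0) is (2.5, 0); gap = 2.5 - 1.0 (the radius) = 1.5.
    assert abs(rect.distance(far_circle) - 1.5) < 1e-6
    # minmax() returns the axis-aligned bounding box as (minx, maxx, miny, maxy).
    assert circle.minmax() == (-1.0, 1.0, -1.0, 1.0)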
# CollisionManager implementations #######################################
class CollisionManagerBruteForce(CollisionManager):
"""
    Implements the CollisionManager interface with the simplest code possible.
    Intended for reference and debugging; it has very poor performance.
Look at CollisionManager for other class and methods documentation.
"""
def __init__(self):
self.objs = set()
def add(self, obj):
# ? use weakref ? python 2.7 has weakset
self.objs.add(obj)
def remove_tricky(self, obj):
self.objs.remove(obj)
def clear(self):
self.objs.clear()
def they_collide(self, obj1, obj2):
return obj1.cshape.overlaps(obj2.cshape)
def objs_colliding(self, obj):
f_overlaps = obj.cshape.overlaps
return [other for other in self.objs if
(other is not obj) and f_overlaps(other.cshape)]
def iter_colliding(self, obj):
f_overlaps = obj.cshape.overlaps
for other in self.objs:
if other is not obj and f_overlaps(other.cshape):
yield other
def any_near(self, obj, near_distance):
f_near_than = obj.cshape.near_than
for other in self.objs:
if other is not obj and f_near_than(other.cshape, near_distance):
return other
return None
def objs_near(self, obj, near_distance):
f_near_than = obj.cshape.near_than
return [other for other in self.objs if
(other is not obj) and f_near_than(other.cshape, near_distance)]
def objs_near_wdistance(self, obj, near_distance):
f_distance = obj.cshape.distance
res = []
for other in self.objs:
if other is obj:
continue
d = f_distance(other.cshape)
if d <= near_distance:
res.append((other, d))
return res
# def objs_near_wdistance(self, obj, near_distance):
# # alternative version, needs python 2.5+
# f_distance = obj.cshape.distance
# def f(other):
# return other, f_distance(other.cshape)
# import itertools as it
# return [(other, d) for other,d in it.imap(f, self.objs) if
# (other is not obj) and
# (d <= near_distance)]
def ranked_objs_near(self, obj, near_distance):
tmp = self.objs_near_wdistance(obj, near_distance)
tmp.sort(key=op.itemgetter(1))
return tmp
def iter_all_collisions(self):
# O(n**2)
for i, obj in enumerate(self.objs):
f_overlaps = obj.cshape.overlaps
for j, other in enumerate(self.objs):
if j >= i:
break
if f_overlaps(other.cshape):
yield (obj, other)
def knows(self, obj):
return obj in self.objs
def known_objs(self):
return self.objs
def objs_touching_point(self, x, y):
touching = set()
for obj in self.objs:
if obj.cshape.touches_point(x, y):
touching.add(obj)
return touching
def objs_into_box(self, minx, maxx, miny, maxy):
into = set()
packed_box = minx, maxx, miny, maxy
for obj in self.objs:
if obj.cshape.fits_in_box(packed_box):
into.add(obj)
return into
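# Hypothetical usage sketch for the brute-force manager (not part of the original
# module): any object exposing a `cshape` attribute can be registered and queried.
# Assumes the `eu` (euclid) import used elsewhere in this file.
def _brute_force_manager_demo():
    class _Ball(object):
        def __init__(self, name, x, y, r):
            self.name = name
            self.cshape = CircleShape(eu.Vector2(x, y), r)
    a = _Ball('a', 0.0, 0.0, 1.0)
    b = _Ball('b', 1.0, 0.0, 1.0)
    c = _Ball('c', 10.0, 0.0, 1.0)
    manager = CollisionManagerBruteForce()
    for ball in (a, b, c):
        manager.add(ball)
    assert manager.they_collide(a, b)        # centers 1.0 apart, radii sum to 2.0
    assert manager.objs_colliding(a) == [b]  # c is too far away to collide
    near = manager.objs_near_wdistance(c, near_distance=20.0)
    assert sorted(other.name for other, d in near) == ['a', 'b']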
class CollisionManagerGrid(CollisionManager):
"""
Implements the CollisionManager interface based on the scheme
    known as spatial hashing.
    """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
class opts(object):
def __init__(self):
self.parser = argparse.ArgumentParser()
# basic experiment setting
self.parser.add_argument('--task', default='gnn_mot', help='mot')
self.parser.add_argument('--exp_id', default='default')
self.parser.add_argument('--test', action='store_true')
self.parser.add_argument('--load_model', default='',
help='path to pretrained model')
self.parser.add_argument('--resume', action='store_true',
                                 help='resume an experiment. '
                                      'Reloads the optimizer parameters and '
                                      'sets load_model to model_last.pth '
                                      'in the exp dir if load_model is empty.')
# system
self.parser.add_argument('--gpus', default='0, 1',
help='-1 for CPU, use comma for multiple gpus')
self.parser.add_argument('--num_workers', type=int, default=8,
help='dataloader threads. 0 for single-thread.')
self.parser.add_argument('--not_cuda_benchmark', action='store_true',
help='disable when the input size is not fixed.')
self.parser.add_argument('--seed', type=int, default=317,
help='random seed') # from CornerNet
self.parser.add_argument('--port', type=str, default='8899',
help='port to run distributed training')
# log
self.parser.add_argument('--print_iter', type=int, default=0,
help='disable progress bar and print to screen.')
self.parser.add_argument('--hide_data_time', action='store_true',
help='not display time during training.')
self.parser.add_argument('--save_all', action='store_true',
help='save model to disk every 5 epochs.')
self.parser.add_argument('--metric', default='loss',
help='main metric to save best model')
self.parser.add_argument('--vis_thresh', type=float, default=0.5,
help='visualization threshold.')
# model
self.parser.add_argument('--arch', default='dla_34',
help='model architecture. Currently tested'
'resdcn_34 | resdcn_50 | resfpndcn_34 |'
'dla_34 | hrnet_32')
self.parser.add_argument('--head_conv', type=int, default=-1,
help='conv layer channels for output head'
'0 for no conv layer'
'-1 for default setting: '
'256 for resnets and 256 for dla.')
self.parser.add_argument('--down_ratio', type=int, default=4,
help='output stride. Currently only supports 4.')
self.parser.add_argument('--num_gnn_layers', type=int, default=1, help='number of gnn layers')
self.parser.add_argument('--gnn_type', type=str, default='GraphConv', help='type of gnn layers')
self.parser.add_argument('-n', '--nodes', default=1, type=int, help='number of node machines to train on')
self.parser.add_argument('--nr', default=0, type=int, help='ranking within the nodes')
self.parser.add_argument('--rank_offset', default=0, type=int,
                                 help='offset the ranking so that multiple experiments do not conflict')
self.parser.add_argument('--omit_gnn', type=int, default=0, help='whether to omit GNN during model forward')
        self.parser.add_argument('--use_residual', type=int, default=0, help='whether to use residual connections in the GNN')
self.parser.add_argument('--return_pre_gnn_layer_outputs', type=int, default=0,
help='whether to return previous gnn layer outputs (i.e. before the last layer of GNN)')
self.parser.add_argument('--heads_share_params', type=int, default=0,
help='whether to share a same set of params for heads at each gnn layer')
self.parser.add_argument('--load_distributed_model', type=int, default=0,
help='whether the pretrained model is a distributed or not')
# input
self.parser.add_argument('--input_res', type=int, default=-1,
help='input height and width. -1 for default from '
                                      'dataset. Will be overridden by input_h | input_w')
self.parser.add_argument('--input_h', type=int, default=-1,
help='input height. -1 for default from dataset.')
self.parser.add_argument('--input_w', type=int, default=-1,
help='input width. -1 for default from dataset.')
self.parser.add_argument('--crop_size', type=tuple, default=(96, 32),
help="the size of the crops from the previous frame")
self.parser.add_argument('--default_backbone_feature_resolution', type=tuple, default=[152, 272],
help='the default output resolution of dla34 backbone')
self.parser.add_argument('--save_some_time', type=int, default=0,
help='whether to just create a small dataset to save time for debugging')
# train
self.parser.add_argument('--lr', type=float, default=1e-4,
help='learning rate for batch size 32.')
self.parser.add_argument('--lr_step', type=str, default='20,27',
help='drop learning rate by 10.')
self.parser.add_argument('--num_epochs', type=int, default=30,
help='total training epochs.')
self.parser.add_argument('--batch_size', type=int, default=12,
help='batch size')
self.parser.add_argument('--master_batch_size', type=int, default=-1,
help='batch size on the master gpu.')
self.parser.add_argument('--num_iters', type=int, default=-1,
help='default: #samples / batch_size.')
self.parser.add_argument('--val_intervals', type=int, default=5,
help='number of epochs to run validation.')
self.parser.add_argument('--trainval', action='store_true',
help='include validation in training and '
'test on test set')
self.parser.add_argument('--graph_type', type=str, default='global', choices=['global', 'local'],
help='the type of graph to construct')
self.parser.add_argument('--launch_distributed', type=int, default=1,
help='whether to launch distributed for training, or single-thread for debugging')
self.parser.add_argument('--trainable_modules', nargs='+',
default=['base', 'dla_up', 'ida_up', 'hm', 'wh', 'id', 'reg', 'hm_0', 'wh_0', 'id_0', 'reg_0', 'gnn_pool', 'gnn'],
help='parts of the network modules to train')
self.parser.add_argument('--module_lrs', nargs='+', type=float,
default=[],
help='network module learning rates')
self.parser.add_argument('--load_modules', nargs='+',
default=['base', 'dla_up', 'ida_up', 'hm', 'wh', 'id', 'reg', 'hm_0', 'wh_0', 'id_0', 'reg_0', 'gnn_pool', 'gnn'],
help='parts of the network modules to load')
self.parser.add_argument('--copy_head_weights', type=int, default=1,
help='whether to copy the head weights across heads')
self.parser.add_argument('--freeze_bn', type=int, default=0, help='whether to freeze batch norm')
self.parser.add_argument('--use_roi_align', type=int, default=0, help='whether to use roi_align to get crop features')
self.parser.add_argument('--edge_regression', type=int, default=0, help='whether to use edge regression')
self.parser.add_argument('--motion_model', type=int, default=0, help='whether to use motion model')
# test
self.parser.add_argument('--K', type=int, default=128,
help='max number of output objects.')
self.parser.add_argument('--p_K', type=int, default=None,
help='max number of previous frame objects.')
self.parser.add_argument('--not_prefetch_test', action='store_true',
                                 help='not use parallel data pre-processing.')
self.parser.add_argument('--fix_res', action='store_true',
help='fix testing resolution or keep '
'the original resolution')
self.parser.add_argument('--keep_res', action='store_true',
help='keep the original resolution'
' during validation.')
self.parser.add_argument('--inference_gnn_output_layer', type=int, default=-1,
help='choose the gnn layer output for inference')
# Visualization
self.parser.add_argument('--viz_attention', type=int, default=0,
help='whether to visualize graph attention')
self.parser.add_argument('--vis_attn_frame', type=int, default=2, help='# frame to visualize')
self.parser.add_argument('--viz_heatmap_radius', type=int, default=100, help='heatmap radius to visualize')
self.parser.add_argument('--vis_attn_thres', type=float, default=6.3e-4, help='attn threshold to visualize')
# tracking
self.parser.add_argument('--test_mot16', default=False, help='test mot16')
self.parser.add_argument('--val_mot15', default=False, help='val mot15')
self.parser.add_argument('--test_mot15', default=False, help='test mot15')
self.parser.add_argument('--val_mot16', default=False, help='val mot16 or mot15')
self.parser.add_argument('--test_mot17', default=False, help='test mot17')
self.parser.add_argument('--val_mot17', default=False, help='val mot17')
self.parser.add_argument('--val_mot20', default=False, help='val mot20')
self.parser.add_argument('--test_mot20', default=False, help='test mot20')
# self.parser.add_argument('--conf_thres', type=float, default=0.6, help='confidence thresh for tracking')
self.parser.add_argument('--det_thres', type=float, default=0.3, help='confidence thresh for detection')
self.parser.add_argument('--nms_thres', type=float, default=0.4, help='iou thresh for nms')
self.parser.add_argument('--track_buffer', type=int, default=30, help='tracking buffer')
self.parser.add_argument('--min-box-area', type=float, default=200, help='filter out tiny boxes')
self.parser.add_argument('--input-video', type=str, default='../videos/MOT16-03.mp4',
help='path to the input video')
self.parser.add_argument('--output-format', type=str, default='video', help='video or text')
self.parser.add_argument('--output-root', type=str, default='../results', help='expected output root path')
self.parser.add_argument('--load_prev_from_det', type=int, default=0,
help='whether to load prev images directly from the provided det')
self.parser.add_argument('--use_letter_box', type=int, default=0,
help='whether to use letter box transform on prev crops')
self.parser.add_argument('--save_images', type=int, default=0, help='whether to save image visualizations')
self.parser.add_argument('--save_videos', type=int, default=0, help='whether to save video visualizations')
self.parser.add_argument('--exp_name', type=str, help='experiment name')
self.parser.add_argument('--eval_from_file_only', type=int, default=0,
help='whether to eval directly from saved result file')
self.parser.add_argument('--eval_result_dir', type=str,
help='when eval directly from file, this is the saved results')
self.parser.add_argument('--visualize_gt', type=int, default=0)
self.parser.add_argument('--visualize_compare', type=int, default=0)
self.parser.add_argument('--compare_seq', type=str, default=None)
self.parser.add_argument('--result_dir_1', type=str, default=None)
self.parser.add_argument('--result_dir_2', type=str, default=None)
self.parser.add_argument('--compile_images_only', type=int, default=0)
# mot
self.parser.add_argument('--data_cfg', type=str,
default='../src/lib/cfg/data.json',
help='load data from cfg')
self.parser.add_argument('--data_dir', type=str, default='./data')
# loss
self.parser.add_argument('--mse_loss', action='store_true',
help='use mse loss or focal loss to train '
'keypoint heatmaps.')
self.parser.add_argument('--reg_loss', default='l1',
help='regression loss: sl1 | l1 | l2')
self.parser.add_argument('--hm_weight', type=float, default=1,
help='loss weight for keypoint heatmaps.')
self.parser.add_argument('--off_weight', type=float, default=1,
help='loss weight for keypoint local offsets.')
self.parser.add_argument('--wh_weight', type=float, default=0.1,
help='loss weight for bounding box size.')
self.parser.add_argument('--id_loss', default='ce',
help='reid loss: ce | triplet')
self.parser.add_argument('--id_weight', type=float, default=1,
help='loss weight for id')
self.parser.add_argument('--edge_reg_weight', type=float, default=1,
help='loss weight for edge regression')
self.parser.add_argument('--reid_dim', type=int, default=512,
help='feature dim for reid')
self.parser.add_argument('--norm_wh', action='store_true',
help='L1(\hat(y) / y, 1) or L1(\hat(y), y)')
self.parser.add_argument('--dense_wh', action='store_true',
help='apply weighted regression near center or '
'just apply regression on center point.')
self.parser.add_argument('--cat_spec_wh', action='store_true',
help='category specific bounding box size.')
self.parser.add_argument('--not_reg_offset', action='store_true',
help='not regress local offset.')
def parse(self, args=''):
if args == '':
opt = self.parser.parse_args()
else:
opt = self.parser.parse_args(args)
opt.gpus_str = opt.gpus
opt.gpus = [int(gpu) for gpu in opt.gpus.split(',')]
opt.world_size = len(opt.gpus) * opt.nodes
opt.lr_step = [int(i) for i in opt.lr_step.split(',')]
opt.fix_res = not opt.keep_res
print('Fix size testing.' if opt.fix_res else 'Keep resolution testing.')
opt.reg_offset = not opt.not_reg_offset
if opt.head_conv == -1: # init default head_conv
opt.head_conv = 256 if 'dla' in opt.arch else 256
opt.pad = 31
opt.num_stacks = 1
if opt.trainval:
opt.val_intervals = 100000000
if opt.master_batch_size == -1:
opt.master_batch_size = opt.batch_size // len(opt.gpus)
rest_batch_size = (opt.batch_size - opt.master_batch_size)
opt.chunk_sizes = [opt.master_batch_size]
for i in range(len(opt.gpus) - 1):
slave_chunk_size = rest_batch_size // (len(opt.gpus) - 1)
if i < rest_batch_size % (len(opt.gpus) - 1):
slave_chunk_size += 1
opt.chunk_sizes.append(slave_chunk_size)
if opt.task == 'gnn_mot':
print('training chunk_sizes:', [opt.batch_size for _ in opt.gpus])
else:
print('training chunk_sizes:', opt.chunk_sizes)
opt.root_dir = os.path.join(os.path.dirname(__file__), '..', '..')
opt.exp_dir = os.path.join(opt.root_dir, 'exp', opt.task)
opt.save_dir = os.path.join(opt.exp_dir, opt.exp_id)
opt.debug_dir = os.path.join(opt.save_dir, 'debug')
print('The output will be saved to ', opt.save_dir)
if opt.resume and opt.load_model == '':
model_path = opt.save_dir[:-4] if opt.save_dir.endswith('TEST') \
else opt.save_dir
opt.load_model = os.path.join(model_path, 'model_last.pth')
return opt
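    # Worked example of the chunk-size arithmetic above (illustrative only):
    # with --batch_size 12 and --gpus 0,1, master_batch_size defaults to
    # 12 // 2 = 6, rest_batch_size = 6, and the single remaining gpu gets
    # 6 // 1 = 6, so chunk_sizes = [6, 6]. With --batch_size 13 the remainder
    # goes to the first non-master gpu: chunk_sizes = [6, 7].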
def update_dataset_info_and_set_heads(self, opt, dataset):
input_h, input_w = dataset.default_resolution
opt.mean, opt.std = dataset.mean, dataset.std
opt.num_classes = dataset.num_classes
# input_h(w): opt.input_h overrides opt.input_res overrides dataset default
input_h = opt.input_res if opt.input_res > 0 else input_h
input_w = opt.input_res if opt.input_res > 0 else input_w
opt.input_h = opt.input_h if opt.input_h > 0 else input_h
opt.input_w = opt.input_w if opt.input_w > 0 else input_w
opt.output_h = opt.input_h // opt.down_ratio
opt.output_w = opt.input_w // opt.down_ratio
opt.input_res = max(opt.input_h, opt.input_w)
opt.output_res = max(opt.output_h, opt.output_w)
if opt.task == 'mot' or opt.task == 'gnn_mot':
opt.heads = {'hm': opt.num_classes,
'wh': 2 if not opt.cat_spec_wh else 2 * opt.num_classes,
'id': opt.reid_dim}
if opt.reg_offset:
opt.heads.update({'reg': 2})
opt.nID = dataset.nID
opt.img_size = (1088, 608)
else:
assert 0, 'task not defined!'
print('heads', opt.heads)
return opt
def init(self, args=''):
pixel_value_offset = image_mean
image = tf.math.subtract(image, pixel_value_offset)
image = tf.math.divide(image, pixel_value_scale, name=scope)
# image = math_ops.div(image, pixel_value_scale, name=scope)
return image
def upsample_simple(images, shape_out, up, numClasses):
filter_up = tf.constant(1.0, shape=[up, up, numClasses, numClasses])
return tf.nn.conv2d_transpose(images, filter_up,
output_shape=shape_out,
strides=[1, up, up, 1])
# </editor-fold>
# <editor-fold desc="Loss Utilities (softmax_cross_entropy_with_logits_v2, sparse_softmax_cross_entropy_with_logits, sigmoid_cross_entropy_with_logits, l2_loss, nce_loss)">
def softmax_cross_entropy_with_logits_v2(labels, logits, axis=None, name=None, dim=None):
return tf.nn.softmax_cross_entropy_with_logits_v2(labels, logits, axis=axis, name=name, dim=dim)
def sparse_softmax_cross_entropy_with_logits(labels=None, logits=None, name=None, _sentinel=None):
return tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name=name, _sentinel=_sentinel)
def sigmoid_cross_entropy_with_logits(labels=None, logits=None, name=None, _sentinel=None):
return tf.nn.sigmoid_cross_entropy_with_logits(_sentinel=_sentinel, labels=labels, logits=logits, name=name)
def l2_loss(t, name=None):
return tf.nn.l2_loss(t, name=name)
def nce_loss(weights, biases, labels, inputs, num_sampled, num_classes,
num_true=1,
sampled_values=None,
remove_accidental_hits=False,
partition_strategy="mod",
name="nce_loss"):
return tf.nn.nce_loss(weights,
biases,
labels,
inputs,
num_sampled,
num_classes,
num_true=num_true,
sampled_values=sampled_values,
remove_accidental_hits=remove_accidental_hits,
partition_strategy=partition_strategy,
name=name)
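# Minimal usage sketch for the thin loss wrappers above (not part of the original
# file). It only assumes TensorFlow is importable as `tf`, as elsewhere in this
# module, and shows the expected shapes/semantics of two of the wrappers.
def _loss_wrappers_demo():
    logits = tf.constant([[2.0, -1.0, 0.5]])
    targets = tf.constant([[1.0, 0.0, 1.0]])
    # Element-wise sigmoid cross entropy, same shape as `logits`.
    pixel_loss = sigmoid_cross_entropy_with_logits(labels=targets, logits=logits)
    # Scalar weight-decay term: 0.5 * (3**2 + 4**2) = 12.5.
    weight_decay = l2_loss(tf.constant([3.0, 4.0]))
    return pixel_loss, weight_decay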
# </editor-fold>
# <editor-fold desc="Utilities (conv1d_op, conv2d_op)">
def conv1d_op(value=None,
filters=None,
stride=None,
padding=None,
use_cudnn_on_gpu=None,
data_format=None,
name=None,
input=None, # pylint: disable=redefined-builtin
dilations=None):
return tf.nn.conv1d(value=value,
filters=filters,
stride=stride,
padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format,
name=name,
input=input,
dilations=dilations)
def conv2d_op(input,
filter=None,
strides=None,
padding=None,
use_cudnn_on_gpu=True,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
return tf.compat.v1.nn.conv2d(input,
filter=filter,
strides=strides,
padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format,
dilations=dilations,
name=name)
# </editor-fold>
# <editor-fold desc="Backup (old stuff we do not want to delete yet)">
def conv2d_bn_lrn_drop(inputs,
kernel_shape,
is_training,
strides=None,
activation=relu,
use_bn=False,
renorm=False,
use_mvn=False,
use_lrn=False,
keep_prob=1.0,
dropout_maps=False,
initOpt=0,
biasInit=0.1,
padding='SAME',
name="conv2d"):
"""Adds a 2-D convolutional layer given 4-D `inputs` and `kernel` with optional BatchNorm, LocalResponseNorm and Dropout.
Args:
scope_or_name: `string` or `VariableScope`, the scope to open.
inputs: `4-D Tensor`, it is assumed that `inputs` is shaped `[batch_size, Y, X, Z]`.
kernel: `4-D Tensor`, [kernel_height, kernel_width, in_channels, out_channels] kernel.
bias: `1-D Tensor`, [out_channels] bias.
strides: list of `ints`, length 4, the stride of the sliding window for each dimension of `inputs`.
activation: activation function to be used (default: `relu`).
use_bn: `bool`, whether or not to include batch normalization in the layer.
is_training: `bool`, whether or not the layer is in training mode. This is only used if `use_bn` == True.
use_lrn: `bool`, whether or not to include local response normalization in the layer.
keep_prob: `double`, dropout keep prob.
dropout_maps: `bool`, If true whole maps are dropped or not, otherwise single elements.
padding: `string` from 'SAME', 'VALID'. The type of padding algorithm used in the convolution.
Returns:
`4-D Tensor`, has the same type `inputs`.
"""
with tf.compat.v1.variable_scope(name):
if strides is None:
strides = [1, 1, 1, 1]
stddev = 5e-2
if initOpt == 0:
stddev = np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2] + kernel_shape[3]))
if initOpt == 1:
stddev = 5e-2
if initOpt == 2:
stddev = min(np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2])), 5e-2)
initializer = tf.random_normal_initializer(stddev=stddev)
if initOpt < 0:
initializer = tf.random.truncated_normal_initializer(0.0, -initOpt)
kernel = tf.compat.v1.get_variable("weights", kernel_shape,
initializer=initializer)
conv = conv2d_op(inputs, kernel, strides, padding=padding, name='conv')
bias = tf.compat.v1.get_variable("biases", kernel_shape[3],
initializer=tf.constant_initializer(value=biasInit))
outputs = tf.nn.bias_add(conv, bias, name='preActivation')
if use_bn:
print("WARNING BATCH NORM is deprcated")
raise AttributeError
# outputs = batch_norm(outputs, training=training, scale=True, fused=True, renorm=renorm,
# scope="batchNorm")
if use_mvn:
outputs = feat_norm(outputs, kernel_shape[3])
if activation:
outputs = activation(outputs, name='activation')
if use_lrn:
outputs = tf.nn.local_response_normalization(outputs, name='localResponseNorm')
if is_training:
if dropout_maps:
conv_shape = tf.shape(outputs)
n_shape = tf.stack([conv_shape[0], 1, 1, conv_shape[3]])
outputs = dropout(outputs, keep_prob=keep_prob, noise_shape=n_shape)
else:
outputs = dropout(outputs, keep_prob=keep_prob, is_training=is_training)
return outputs
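# Hypothetical usage sketch for conv2d_bn_lrn_drop (not part of the original file).
# It assumes the TF1-style graph mode used throughout this module, NHWC inputs and a
# plain Python bool for `is_training`.
def _conv_block_demo(images, is_training):
    # images: float32 tensor shaped [batch, height, width, 3]
    # 3x3 convolution, 3 -> 32 channels, ReLU, 20% dropout while training.
    return conv2d_bn_lrn_drop(images,
                              kernel_shape=[3, 3, 3, 32],
                              is_training=is_training,
                              activation=relu,
                              keep_prob=0.8,
                              name="conv_demo")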
# def sep_conv2d_bn_lrn_drop(scope_or_name,
# inputs,
# kernel_shape,
# depth_multiplier,
# training,
# strides=None,
# activation=relu,
# use_bn=False,
# renorm=False,
# use_mvn=False,
# use_lrn=False,
# keep_prob=1.0,
# dropout_maps=False,
# initOpt=0,
# biasInit=0.1,
# padding='SAME'):
# if strides is None:
# strides = [1, 1, 1, 1]
# with tf.compat.v1.variable_scope(scope_or_name):
# if initOpt == 0:
# stddev1 = np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2] + 1))
# stddev2 = np.sqrt(2.0 / (kernel_shape[2] + kernel_shape[3]))
# if initOpt == 1:
# stddev1 = 5e-2
# stddev2 = 5e-2
# if initOpt == 2:
# stddev1 = min(np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2])), 5e-2)
# stddev2 = min(np.sqrt(2.0 / (kernel_shape[2])), 5e-2)
# kernel1 = tf.compat.v1.get_variable("weights_sep", [kernel_shape[0], kernel_shape[1], kernel_shape[2], depth_multiplier],
# initializer=tf.random_normal_initializer(stddev=stddev1))
# kernel2 = tf.compat.v1.get_variable("weights_1x1", [1, 1, depth_multiplier*kernel_shape[2], kernel_shape[3]],
# initializer=tf.random_normal_initializer(stddev=stddev2))
#
# conv = tf.nn.separable_conv2d(inputs, depthwise_filter=kernel1, pointwise_filter=kernel2, strides=strides,
# padding=padding, name="sep_conv")
# bias = tf.compat.v1.get_variable("biases", kernel_shape[3],
# initializer=tf.constant_initializer(value=biasInit))
# outputs = tf.nn.bias_add(conv, bias, name='preActivation')
# if use_bn:
# outputs = batch_norm(outputs, training=training, scale=True, fused=True, renorm=renorm,
# scope="batchNorm")
# if use_mvn:
# outputs = feat_norm(outputs, kernel_shape[3])
# if activation:
# outputs = activation(outputs, name='activation')
# if use_lrn:
# outputs = tf.nn.local_response_normalization(outputs, name='localResponseNorm')
# if training:
# if dropout_maps:
# conv_shape = tf.shape(outputs)
# n_shape = tf.stack([conv_shape[0], 1, 1, conv_shape[3]])
# outputs = dropout(outputs, keep_prob=keep_prob, noise_shape=n_shape)
# else:
# outputs = dropout(outputs, keep_prob=keep_prob)
# return outputs
# def dil_conv2d_bn_lrn_drop(scope_or_name,
# inputs,
# kernel_shape,
# rate,
# training,
# activation=relu,
# use_bn=False,
# use_mvn=False,
# use_lrn=True,
# keep_prob=1.0,
# dropout_maps=False,
# initOpt=0, padding="SAME"):
# """Adds a 2-D convolutional layer given 4-D `inputs` and `kernel` with optional BatchNorm, LocalResponseNorm and Dropout.
#
# Args:
# scope_or_name: `string` or `VariableScope`, the scope to open.
# inputs: `4-D Tensor`, it is assumed that `inputs` is shaped `[batch_size, Y, X, Z]`.
# kernel: `4-D Tensor`, [kernel_height, kernel_width, in_channels, out_channels] kernel.
# bias: `1-D Tensor`, [out_channels] bias.
# rate: `int`, Dilation factor.
# activation: activation function to be used (default: `relu`).
# use_bn: `bool`, whether or not to include batch normalization in the layer.
# training: `bool`, whether or not the layer is in training mode. This is only used if `use_bn` == True.
# use_lrn: `bool`, whether or not to include local response normalization in the layer.
# keep_prob: `double`, dropout keep prob.
# dropout_maps: `bool`, If true whole maps are dropped or not, otherwise single elements.
# padding: `string` from 'SAME', 'VALID'. The type of padding algorithm used in the convolution.
#
# Returns:
# `4-D Tensor`, has the same type `inputs`.
# """
# with tf.compat.v1.variable_scope(scope_or_name):
# if initOpt == 0:
# stddev = np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2] + kernel_shape[3]))
# if initOpt == 1:
# stddev = 5e-2
# if initOpt == 2:
# stddev = min(np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2])), 5e-2)
# kernel = tf.compat.v1.get_variable("weights", kernel_shape,
# initializer=tf.random_normal_initializer(stddev=stddev))
# conv = tf.nn.atrous_conv2d(inputs, kernel, rate=rate, padding=padding)
# bias = tf.compat.v1.get_variable("bias", kernel_shape[3],
# initializer=tf.constant_initializer(value=0.1))
# outputs = tf.nn.bias_add(conv, bias, name='preActivation')
# if use_bn:
# outputs = batch_norm(outputs, training=training, scale=True, fused=True, scope="batchNorm")
# if use_mvn:
# outputs = feat_norm(outputs, kernel_shape[3])
# if activation:
# outputs = activation(outputs, name='activation')
# if use_lrn:
# outputs = tf.nn.local_response_normalization(outputs, name='localResponseNorm')
# if training:
# if dropout_maps:
# conv_shape = tf.shape(outputs)
# n_shape = tf.stack([conv_shape[0], 1, 1, conv_shape[3]])
# outputs = dropout(outputs, keep_prob=keep_prob, noise_shape=n_shape)
# else:
# outputs = dropout(outputs, keep_prob=keep_prob)
# return outputs
# def deconv2d_bn_lrn_drop(scope_or_name, inputs, kernel_shape, out_shape, training, subS=2, activation=relu,
# use_bn=False,
# use_mvn=False,
# use_lrn=False,
# keep_prob=1.0,
# dropout_maps=False,
# initOpt=0):
# with tf.compat.v1.variable_scope(scope_or_name):
# if initOpt == 0:
# stddev = np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2] + kernel_shape[3]))
# if initOpt == 1:
# stddev = 5e-2
# if initOpt == 2:
# stddev = min(np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2])), 5e-2)
# kernel = tf.compat.v1.get_variable("weights", kernel_shape,
# initializer=tf.random_normal_initializer(stddev=stddev))
# bias = tf.compat.v1.get_variable("bias", kernel_shape[2],
# initializer=tf.constant_initializer(value=0.1))
# conv = tf.nn.conv2d_transpose(inputs, kernel, out_shape, strides=[1, subS, subS, 1], padding='SAME',
# name='conv')
# outputs = tf.nn.bias_add(conv, bias, name='preActivation')
# if use_bn:
# outputs = batch_norm(outputs, training=training, scale=True, fused=True, scope="batchNorm")
# if use_mvn:
# outputs = feat_norm(outputs, kernel_shape[3])
# if activation:
# outputs = activation(outputs, name='activation')
# if use_lrn:
# outputs = tf.nn.local_response_normalization(outputs, name='localResponseNorm')
# if training:
# if dropout_maps:
# conv_shape = tf.shape(outputs)
# n_shape = tf.stack([conv_shape[0], 1, 1, conv_shape[3]])
# outputs = dropout(outputs, keep_prob=keep_prob, noise_shape=n_shape)
# else:
# outputs = dropout(outputs, keep_prob=keep_prob)
# return outputs
# def cublstm_fix(scope_or_name,
# inputs,
# seq_length,
# n_hidden,
# training,
# use_bn=False,
# use_gpu=True, ):
# with tf.compat.v1.variable_scope(scope_or_name):
# if use_gpu:
# # forward direction
# with tf.compat.v1.variable_scope("culstm_forward"):
# culstm_fw = CudnnLSTM(num_layers=1, num_units=n_hidden, direction="unidirectional",
# dtype=tf.float32)
# culstm_fw.build(inputs.get_shape())
# outputs_fw, _ = culstm_fw(inputs, training=True)
# # culstm_fw = tf.keras.layers.CuDNNLSTM(units=n_hidden, return_sequences=True)
# # culstm_fw.build(inputs.get_shape())
# # outputs_fw = culstm_fw(inputs, training=training)
# # backward direction
# with tf.compat.v1.variable_scope("culstm_backward"):
# culstm_bw = CudnnLSTM(num_layers=1, num_units=n_hidden, direction="unidirectional",
# dtype=tf.float32)
# culstm_bw.build(inputs.get_shape())
# reverse_inputs = tf.reverse_sequence(inputs, seq_length, batch_axis=1, seq_axis=0)
# outputs_bw, _ = culstm_bw(reverse_inputs, training=True)
# outputs_bw = tf.reverse_sequence(outputs_bw, seq_length, batch_axis=1, seq_axis=0)
# # culstm_bw = tf.keras.layers.CuDNNLSTM(units=n_hidden, return_sequences=True)
# # culstm_bw.build(inputs.get_shape())
# # reverse_inputs = tf.reverse_sequence(inputs, seq_length, batch_axis=1, seq_axis=0)
# # outputs_bw = culstm_bw(reverse_inputs, training=training)
# # outputs_bw = tf.reverse_sequence(outputs_bw, seq_length, batch_axis=1, seq_axis=0)
# # concat
# outputs = tf.concat([outputs_fw, outputs_bw], axis=2)
#
# else:
# single_cell = lambda: CudnnCompatibleLSTMCell(n_hidden, reuse=tf.compat.v1.get_variable_scope().reuse)
# # forward direction
# with tf.compat.v1.variable_scope("culstm_forward"):
# cell_fw = MultiRNNCell([single_cell() for _ in range(1)])
# outputs_fw, _ = rnn(cell_fw, inputs, dtype=tf.float32, time_major=True)
# # backward direction
# with tf.compat.v1.variable_scope("culstm_backward"):
# cell_bw = MultiRNNCell([single_cell() for _ in range(1)])
# reverse_inputs = tf.reverse_sequence(inputs, seq_length, batch_axis=1, seq_axis=0)
# outputs_bw, _ = rnn(cell_bw, reverse_inputs, dtype=tf.float32, time_major=True)
# outputs_bw = tf.reverse_sequence(outputs_bw, seq_length, batch_axis=1, seq_axis=0)
# # concat
# outputs = tf.concat([outputs_fw, outputs_bw], axis=2)
# # forward direction
# # with tf.compat.v1.variable_scope("culstm_forward"):
# # culstm_fw = tf.keras.layers.LSTM(units=n_hidden,activation='tanh',recurrent_activation='sigmoid', return_sequences=True)
# # culstm_fw.build(inputs.get_shape())
# # outputs_fw = culstm_fw(inputs, training=training)
# # # backward direction
# # with tf.compat.v1.variable_scope("culstm_backward"):
# # culstm_bw = tf.keras.layers.LSTM(units=n_hidden,activation='tanh',recurrent_activation='sigmoid', return_sequences=True)
# # culstm_bw.build(inputs.get_shape())
# # reverse_inputs = tf.reverse_sequence(inputs, seq_length, batch_axis=1, seq_axis=0)
# # outputs_bw = culstm_bw(reverse_inputs, training=training)
# # outputs_bw = tf.reverse_sequence(outputs_bw, seq_length, batch_axis=1, seq_axis=0)
# # # | |
"""
TODO: replaces previous versions 161110
Plots labeling accuracy for various learning and propagation methods.
Since graph creation takes most of the time, especially for large graphs, graphs are saved to files and later loaded again.
First version: Nov 10, 2016
This version: Jan 26, 2020
"""
import numpy as np
import datetime
import random
import sys
sys.path.append('./../sslh')
from fileInteraction import save_csv_record
from utils import (from_dictionary_beliefs,
create_parameterized_H,
replace_fraction_of_rows,
to_centering_beliefs,
eps_convergence_linbp_parameterized,
matrix_difference,
matrix_difference_classwise,
introduce_errors,
showfig)
from estimation import (estimateH,
estimateH_baseline_serial,
estimateH_baseline_parallel)
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.ticker import LogLocator
import pandas as pd
pd.set_option('display.max_columns', None) # show all columns from pandas
pd.options.mode.chained_assignment = None # default='warn'
from graphGenerator import planted_distribution_model_H
from inference import linBP_symmetric_parameterized, beliefPropagation
# import seaborn.apionly as sns # importing without activating it. For color palette
# # -- Determine path to data *irrespective* of where the file is run from
# from os.path import abspath, dirname, join
# from inspect import getfile, currentframe
# current_path = dirname(abspath(getfile(currentframe())))
# figure_directory = join(current_path, 'figs')
# data_directory = join(current_path, 'data')
#
#
#
# def run(choice, variant, create_data=False, add_data=False, create_graph=False,
# create_fig=True, show_plot=True, show_pdf=True, shorten_length=False, show_arrows=True):
# """main parameterized method to produce all figures.
# Can be run from external jupyther notebook or method to produce all figures in PDF
# """
#
# # -- Setup
# CHOICE = choice # determines the CSV data file to use
# VARIANT = variant # determines the variant of how the figures are plotted
# CREATE_DATA = create_data # starts new CSV file and stores experimental timing results
# ADD_DATA = add_data # adds data to existing file
# CREATE_GRAPH = create_graph # creates the actual graph for experiments (stores W and X in CSV files)
# -- Determine path to data *irrespective* of where the file is run from
from os.path import abspath, dirname, join
from inspect import getfile, currentframe
current_path = dirname(abspath(getfile(currentframe())))
figure_directory = join(current_path, 'figs')
data_directory = join(current_path, 'datacache')
def run(choice, variant, create_data=False, add_data=False, show_plot=False, create_pdf=False, show_pdf=False, show_fig=True):
"""main parameterized method to produce all figures.
All relevant choice and parameters are encoded in this method.
Calling methods just choose the CHOICE and VARIANT
Can be run from external jupyther notebook or method to produce all figures in PDF
"""
# -- Setup
# 305, 315, 213, 108
CHOICE = choice
VARIANT = variant
CREATE_DATA = create_data
ADD_DATA = add_data
SHOW_FIG = show_fig
STD_FILL = True
SHORTEN_LENGTH = False
SHOW_PDF = show_pdf
SHOW_PLOT = show_plot
CREATE_PDF = create_pdf
SHOW_TITLE = True # show parameters in title of plot
LABEL_FONTSIZE = 16 # size of number labels in figure
csv_filename = 'Fig_End-to-End_accuracy_{}.csv'.format(CHOICE)
filename = 'Fig_End-to-End_accuracy_{}-{}'.format(CHOICE, VARIANT) # PDF filename includes CHOICE and VARIANT
header = ['currenttime',
'option',
'f',
'accuracy']
if CREATE_DATA:
save_csv_record(join(data_directory, csv_filename), header, append=False)
# -- Default Graph parameters
rep_DifferentGraphs = 10 # iterations on different graphs
rep_SameGraph = 10 # iterations on same graph
initial_h0 = None # initial vector to start finding optimal H
distribution = 'powerlaw'
exponent = -0.3
length = 5
variant = 1
EC = True # Non-backtracking for learning
ymin = 0.3
ymax = 1
xmin = 0.001
xmax = 1
xtick_lab = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
xtick_labels = ['1e-5', '0.01\%', '0.1\%', '1\%', '10\%', '100\%']
ytick_lab = np.arange(0, 1.1, 0.1)
f_vec = [0.9 * pow(0.1, 1 / 5) ** x for x in range(21)]
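    # f_vec spaces the labeled fraction geometrically: 0.9 * 10**(-x/5) for x = 0..20,
    # i.e. from 90% labeled nodes down to roughly 9e-05 in 21 log-spaced steps.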
k = 3
a = 1 # this value was erroneously set to 5 previously!!! TODO: fix everywhere else
err = 0
avoidNeighbors = False
convergencePercentage_W = None
stratified = True
labels = ['*']*10
clip_on_vec = [True] * 10
draw_std_vec = range(10)
numberOfSplits = 1
linestyle_vec = ['dashed'] + ['solid'] * 10
linewidth_vec = [5, 4, 3, 3, 3, 3] + [3]*10
marker_vec = [None, None, 'o', 'x', 'o', '^'] + [None]*10
markersize_vec = [0, 0, 4, 8, 6, 6] + [6]*10
propagation_method_vec = ['Lin'] * 10
constraint_vec = [False]*15
alpha0 = np.array([a, 1., 1.])
facecolor_vec = ["#4C72B0", "#55A868", "#C44E52", "#8172B2", "#CCB974", "#64B5CD"]
# SEABORN_PALETTES = dict(
# deep=["#4C72B0", "#55A868", "#C44E52",
# "#8172B2", "#CCB974", "#64B5CD"],
# muted=["#4878CF", "#6ACC65", "#D65F5F",
# "#B47CC7", "#C4AD66", "#77BEDB"],
# pastel=["#92C6FF", "#97F0AA", "#FF9F9A",
# "#D0BBFF", "#FFFEA3", "#B0E0E6"],
# bright=["#003FFF", "#03ED3A", "#E8000B",
# "#8A2BE2", "#FFC400", "#00D7FF"],
# dark=["#001C7F", "#017517", "#8C0900",
# "#7600A1", "#B8860B", "#006374"],
# colorblind=["#0072B2", "#009E73", "#D55E00",
# "#CC79A7", "#F0E442", "#56B4E9"]
# )
# facecolors = ['darkorange', 'blue', 'black']
# facecolors = ['#6495ED', '#F08080', 'black']
# facecolors = ['#66c2a5', '#fc8d62', '#8da0cb']
# C = (sns.color_palette("colorblind", 4))
# facecolor_vec = [C[0], C[2], C[1], C[3]]
# facecolor_vec = ["#0072B2", "#D55E00", "#009E73", "#CC79A7",]
# -- Options with propagation variants
if CHOICE == 101:
n = 10000
h = 8
d = 25
option_vec = ['opt1', 'opt2', 'opt3']
learning_method_vec = ['GT'] * 2 + ['DHE']
weight_vec = [None] * 1 + [1] * 1 + [100] * 1
alpha_vec = [0] * 3
beta_vec = [0] * 1 + [1] * 2
gamma_vec = [0] * 3
s_vec = [0.5] + [3] * 2
numMaxIt_vec = [10] + [4]*2
randomize_vec = [False] * 2 + [True]
xmin = 0.0001
ymin = 0.6
ymax = 1
facecolor_vec = ['black', "#C44E52", "#4C72B0", "#8172B2", "#55A868", "#CCB974", "#64B5CD"]
linestyle_vec = ['dashed'] + ['solid'] * 10
labels = ['LinBP w/GT', 'CP w/GT', 'CP w/DCEr', 'BP']
linewidth_vec = [5, 5, 3, ]
marker_vec = [None, 'o', 'x']
markersize_vec = [0, 8, 8]
elif CHOICE == 111:
n = 10000
h = 3
d = 25
option_vec = ['opt1', 'opt2', 'opt3']
learning_method_vec = ['GT'] * 2 + ['DHE']
weight_vec = [None] * 1 + [1] * 1 + [100] * 1
alpha_vec = [0] * 3
beta_vec = [0] * 1 + [1] * 2
gamma_vec = [0] * 3
s_vec = [0.5] + [3] * 2
numMaxIt_vec = [10] + [4] * 2
randomize_vec = [False] * 2 + [True]
xmin = 0.0001
ymin = 0.6
ymax = 1
facecolor_vec = ['black', "#C44E52", "#4C72B0", "#8172B2", "#55A868", "#CCB974", "#64B5CD"]
linestyle_vec = ['dashed'] + ['solid'] * 10
labels = ['LinBP w/GT', 'CP w/GT', 'CP w/DCEr', 'BP']
linewidth_vec = [5, 5, 3]
marker_vec = [None, 'o', 'x']
markersize_vec = [0, 8, 8]
# BP options
elif CHOICE == 301: ## 101 with BP
n = 10000
h = 8
d = 25
option_vec = ['opt1', 'opt2', 'opt3', 'opt4']
learning_method_vec = ['GT'] * 2 + ['DHE'] + ['GT']
weight_vec = [None] * 1 + [1] * 1 + [100] * 1 + [None]
propagation_method_vec = ['Lin'] * 3 + ['BP']
alpha_vec = [0] * 3 + [None]
beta_vec = [0] * 1 + [1] * 2 + [None]
gamma_vec = [0] * 3 + [None]
s_vec = [0.5] + [3] * 2 + [0.1]
numMaxIt_vec = [10] + [4]*2 + [10]
randomize_vec = [False] * 2 + [True] + [False]
xmin = 0.0001
ymin = 0.6
ymax = 1
facecolor_vec = ['black', "#C44E52", "#4C72B0", "#8172B2", "#55A868", "#CCB974", "#64B5CD"]
linestyle_vec = ['dashed'] + ['solid'] * 10
labels = ['LinBP w/GT', 'CP w/GT', 'CP w/DCEr', 'BP']
linewidth_vec = [5, 5, 3, 3]
marker_vec = [None, 'o', 'x', '^']
markersize_vec = [0, 8, 8, 8]
elif CHOICE == 303: ### like 311 BP, but with fewer iterations
n = 10000
h = 8
d = 25
option_vec = ['opt1', 'opt2', 'opt3', 'opt4', 'opt5']
learning_method_vec = ['GT'] * 2 + ['DHE'] + ['GT']*2
weight_vec = [None, 1, 100, None, None]
propagation_method_vec = ['Lin'] * 3 + ['BP'] * 2
alpha_vec = [0] * 3 + [None]*2
beta_vec = [0] * 1 + [1] * 2 + [None]*2
gamma_vec = [0] * 3 + [None]*2
s_vec = [0.5] + [3] * 2 + [None]*2
numMaxIt_vec = [10] + [4] * 2 + [4] + [20]
randomize_vec = [False] * 2 + [True] + [False]*2
xmin = 0.0001
ymin = 0.3
ymax = 1
xmax = 0.002
facecolor_vec = ['black', "#C44E52", "#4C72B0", "#8172B2", "#55A868", "#CCB974", "#64B5CD"]
linestyle_vec = ['dashed'] + ['solid'] * 10
        labels = ['LinBP w/GT', 'CP w/GT', 'CP w/DCEr', 'BP']
# This file was generated automatically by generate_protocols.py
from nintendo.nex import notification, rmc, common, streams
import logging
logger = logging.getLogger(__name__)
class RankingOrderCalc:
STANDARD = 0
ORDINAL = 1
class RankingMode:
GLOBAL = 0
GLOBAL_AROUND_SELF = 1
SELF = 4
class RankingStatFlags:
RANKING_COUNT = 1
TOTAL_SCORE = 2
LOWEST_SCORE = 4
HIGHEST_SCORE = 8
AVERAGE_SCORE = 16
ALL = 31
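# The stat flags above are single bits, so they can be OR-ed together when requesting
# statistics, e.g. RankingStatFlags.RANKING_COUNT | RankingStatFlags.TOTAL_SCORE (= 3),
# or RankingStatFlags.ALL (= 31) to request every statistic at once.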
class RankingOrderParam(common.Structure):
def __init__(self):
super().__init__()
self.order_calc = 0
self.group_index = 255
self.group_num = 0
self.time_scope = 2
self.offset = None
self.count = None
def check_required(self, settings):
for field in ['offset', 'count']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
def load(self, stream):
self.order_calc = stream.u8()
self.group_index = stream.u8()
self.group_num = stream.u8()
self.time_scope = stream.u8()
self.offset = stream.u32()
self.count = stream.u8()
def save(self, stream):
self.check_required(stream.settings)
stream.u8(self.order_calc)
stream.u8(self.group_index)
stream.u8(self.group_num)
stream.u8(self.time_scope)
stream.u32(self.offset)
stream.u8(self.count)
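# Minimal sketch (not part of the generated file) showing how a structure such as
# RankingOrderParam is filled in and serialized; `settings` is assumed to be the NEX
# settings object normally taken from a connected client.
def _ranking_order_param_demo(settings):
    param = RankingOrderParam()
    param.order_calc = RankingOrderCalc.ORDINAL
    param.offset = 0   # required: check_required raises ValueError if left None
    param.count = 10   # required as well
    stream = streams.StreamOut(settings)
    param.save(stream)  # validates required fields, then writes them to the stream
    return stream.get()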
class RankingRankData(common.Structure):
def __init__(self):
super().__init__()
self.pid = None
self.unique_id = None
self.rank = None
self.category = None
self.score = None
self.groups = None
self.param = None
self.common_data = None
self.update_time = None
def check_required(self, settings):
for field in ['pid', 'unique_id', 'rank', 'category', 'score', 'groups', 'param', 'common_data']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
if settings["nex.version"] >= 40000:
for field in ['update_time']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
def load(self, stream):
self.pid = stream.pid()
self.unique_id = stream.u64()
self.rank = stream.u32()
self.category = stream.u32()
self.score = stream.u32()
self.groups = stream.list(stream.u8)
self.param = stream.u64()
self.common_data = stream.buffer()
if stream.settings["nex.version"] >= 40000:
self.update_time = stream.datetime()
def save(self, stream):
self.check_required(stream.settings)
stream.pid(self.pid)
stream.u64(self.unique_id)
stream.u32(self.rank)
stream.u32(self.category)
stream.u32(self.score)
stream.list(self.groups, stream.u8)
stream.u64(self.param)
stream.buffer(self.common_data)
if stream.settings["nex.version"] >= 40000:
stream.datetime(self.update_time)
class RankingResult(common.Structure):
def __init__(self):
super().__init__()
self.data = None
self.total = None
self.since_time = None
def check_required(self, settings):
for field in ['data', 'total', 'since_time']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
def load(self, stream):
self.data = stream.list(RankingRankData)
self.total = stream.u32()
self.since_time = stream.datetime()
def save(self, stream):
self.check_required(stream.settings)
stream.list(self.data, stream.add)
stream.u32(self.total)
stream.datetime(self.since_time)
class RankingCachedResult(RankingResult):
def __init__(self):
super().__init__()
self.created_time = None
self.expired_time = None
self.max_length = None
def check_required(self, settings):
for field in ['created_time', 'expired_time', 'max_length']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
def load(self, stream):
self.created_time = stream.datetime()
self.expired_time = stream.datetime()
self.max_length = stream.u8()
def save(self, stream):
self.check_required(stream.settings)
stream.datetime(self.created_time)
stream.datetime(self.expired_time)
stream.u8(self.max_length)
common.DataHolder.register(RankingCachedResult, "RankingCachedResult")
class RankingStats(common.Structure):
def __init__(self):
super().__init__()
self.stats = None
def check_required(self, settings):
for field in ['stats']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
def load(self, stream):
self.stats = stream.list(stream.double)
def save(self, stream):
self.check_required(stream.settings)
stream.list(self.stats, stream.double)
class RankingScoreData(common.Structure):
def __init__(self):
super().__init__()
self.category = None
self.score = None
self.order = None
self.update_mode = None
self.groups = None
self.param = None
def check_required(self, settings):
for field in ['category', 'score', 'order', 'update_mode', 'groups', 'param']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
def load(self, stream):
self.category = stream.u32()
self.score = stream.u32()
self.order = stream.u8()
self.update_mode = stream.u8()
self.groups = stream.list(stream.u8)
self.param = stream.u64()
def save(self, stream):
self.check_required(stream.settings)
stream.u32(self.category)
stream.u32(self.score)
stream.u8(self.order)
stream.u8(self.update_mode)
stream.list(self.groups, stream.u8)
stream.u64(self.param)
class RankingChangeAttributesParam(common.Structure):
def __init__(self):
super().__init__()
self.flags = None
self.groups = None
self.param = None
def check_required(self, settings):
for field in ['flags', 'groups', 'param']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
def load(self, stream):
self.flags = stream.u8()
self.groups = stream.list(stream.u8)
self.param = stream.u64()
def save(self, stream):
self.check_required(stream.settings)
stream.u8(self.flags)
stream.list(self.groups, stream.u8)
stream.u64(self.param)
class RankingProtocol:
METHOD_UPLOAD_SCORE = 1
METHOD_DELETE_SCORE = 2
METHOD_DELETE_ALL_SCORES = 3
METHOD_UPLOAD_COMMON_DATA = 4
METHOD_DELETE_COMMON_DATA = 5
METHOD_GET_COMMON_DATA = 6
METHOD_CHANGE_ATTRIBUTES = 7
METHOD_CHANGE_ALL_ATTRIBUTES = 8
METHOD_GET_RANKING = 9
METHOD_GET_APPROX_ORDER = 10
METHOD_GET_STATS = 11
METHOD_GET_RANKING_BY_PID_LIST = 12
METHOD_GET_RANKING_BY_UNIQUE_ID_LIST = 13
METHOD_GET_CACHED_TOPX_RANKING = 14
METHOD_GET_CACHED_TOPX_RANKINGS = 15
PROTOCOL_ID = 0x70
class RankingClient(RankingProtocol):
def __init__(self, client):
self.settings = client.settings
self.client = client
async def upload_score(self, score_data, unique_id):
logger.info("RankingClient.upload_score()")
#--- request ---
stream = streams.StreamOut(self.settings)
stream.add(score_data)
stream.u64(unique_id)
data = await self.client.request(self.PROTOCOL_ID, self.METHOD_UPLOAD_SCORE, stream.get())
#--- response ---
stream = streams.StreamIn(data, self.settings)
if not stream.eof():
raise ValueError("Response is bigger than expected (got %i bytes, but only %i were read)" %(stream.size(), stream.tell()))
logger.info("RankingClient.upload_score -> done")
async def delete_score(self, category, unique_id):
logger.info("RankingClient.delete_score()")
#--- request ---
stream = streams.StreamOut(self.settings)
stream.u32(category)
stream.u64(unique_id)
data = await self.client.request(self.PROTOCOL_ID, self.METHOD_DELETE_SCORE, stream.get())
#--- response ---
stream = streams.StreamIn(data, self.settings)
if not stream.eof():
raise ValueError("Response is bigger than expected (got %i bytes, but only %i were read)" %(stream.size(), stream.tell()))
logger.info("RankingClient.delete_score -> done")
async def delete_all_scores(self, unique_id):
logger.info("RankingClient.delete_all_scores()")
#--- request ---
stream = streams.StreamOut(self.settings)
stream.u64(unique_id)
data = await self.client.request(self.PROTOCOL_ID, self.METHOD_DELETE_ALL_SCORES, stream.get())
#--- response ---
stream = streams.StreamIn(data, self.settings)
if not stream.eof():
raise ValueError("Response is bigger than expected (got %i bytes, but only %i were read)" %(stream.size(), stream.tell()))
logger.info("RankingClient.delete_all_scores -> done")
async def upload_common_data(self, common_data, unique_id):
logger.info("RankingClient.upload_common_data()")
#--- request ---
stream = streams.StreamOut(self.settings)
stream.buffer(common_data)
stream.u64(unique_id)
data = await self.client.request(self.PROTOCOL_ID, self.METHOD_UPLOAD_COMMON_DATA, stream.get())
#--- response ---
stream = streams.StreamIn(data, self.settings)
if not stream.eof():
raise ValueError("Response is bigger than expected (got %i bytes, but only %i were read)" %(stream.size(), stream.tell()))
logger.info("RankingClient.upload_common_data -> done")
async def delete_common_data(self, unique_id):
logger.info("RankingClient.delete_common_data()")
#--- request ---
stream = streams.StreamOut(self.settings)
stream.u64(unique_id)
data = await self.client.request(self.PROTOCOL_ID, self.METHOD_DELETE_COMMON_DATA, stream.get())
#--- response ---
stream = streams.StreamIn(data, self.settings)
if not stream.eof():
raise ValueError("Response is bigger than expected (got %i bytes, but only %i were read)" %(stream.size(), stream.tell()))
logger.info("RankingClient.delete_common_data -> done")
async def get_common_data(self, unique_id):
logger.info("RankingClient.get_common_data()")
#--- request ---
stream = streams.StreamOut(self.settings)
stream.u64(unique_id)
data = await self.client.request(self.PROTOCOL_ID, self.METHOD_GET_COMMON_DATA, stream.get())
#--- response ---
stream = streams.StreamIn(data, self.settings)
data = stream.buffer()
if not stream.eof():
raise ValueError("Response is bigger than expected (got %i bytes, but only %i were read)" %(stream.size(), stream.tell()))
logger.info("RankingClient.get_common_data -> done")
return data
async def change_attributes(self, category, param, unique_id):
logger.info("RankingClient.change_attributes()")
#--- request ---
stream = streams.StreamOut(self.settings)
stream.u32(category)
stream.add(param)
stream.u64(unique_id)
data = await self.client.request(self.PROTOCOL_ID, self.METHOD_CHANGE_ATTRIBUTES, stream.get())
#--- response ---
stream = streams.StreamIn(data, self.settings)
if not stream.eof():
raise ValueError("Response is bigger than expected (got %i bytes, but only %i were read)" %(stream.size(), stream.tell()))
logger.info("RankingClient.change_attributes -> done")
async def change_all_attributes(self, param, unique_id):
logger.info("RankingClient.change_all_attributes()")
#--- request ---
stream = streams.StreamOut(self.settings)
stream.add(param)
stream.u64(unique_id)
data = await self.client.request(self.PROTOCOL_ID, self.METHOD_CHANGE_ALL_ATTRIBUTES, stream.get())
#--- response ---
stream = streams.StreamIn(data, self.settings)
if not stream.eof():
raise ValueError("Response is bigger than expected (got %i bytes, but only %i were read)" %(stream.size(), stream.tell()))
logger.info("RankingClient.change_all_attributes -> done")
async def get_ranking(self, mode, category, order, unique_id, pid):
logger.info("RankingClient.get_ranking()")
#--- request ---
stream = streams.StreamOut(self.settings)
stream.u8(mode)
stream.u32(category)
stream.add(order)
stream.u64(unique_id)
stream.pid(pid)
data = await self.client.request(self.PROTOCOL_ID, self.METHOD_GET_RANKING, stream.get())
#--- response ---
stream = streams.StreamIn(data, self.settings)
result = stream.extract(RankingResult)
if not stream.eof():
raise ValueError("Response is bigger than expected (got %i bytes, but only %i were read)" %(stream.size(), stream.tell()))
logger.info("RankingClient.get_ranking -> done")
return result
async def get_approx_order(self, category, order, score, unique_id, pid):
logger.info("RankingClient.get_approx_order()")
#--- request ---
stream = streams.StreamOut(self.settings)
stream.u32(category)
stream.add(order)
stream.u32(score)
stream.u64(unique_id)
stream.pid(pid)
data = await self.client.request(self.PROTOCOL_ID, self.METHOD_GET_APPROX_ORDER, stream.get())
#--- response ---
stream = streams.StreamIn(data, self.settings)
order = stream.u32()
if not stream.eof():
raise ValueError("Response is bigger than expected (got %i bytes, but only %i were read)" %(stream.size(), stream.tell()))
logger.info("RankingClient.get_approx_order -> done")
return order
async def get_stats(self, category, order, flags):
logger.info("RankingClient.get_stats()")
#--- request ---
stream = streams.StreamOut(self.settings)
stream.u32(category)
stream.add(order)
stream.u32(flags)
data = await self.client.request(self.PROTOCOL_ID, self.METHOD_GET_STATS, stream.get())
#--- response ---
stream = streams.StreamIn(data, self.settings)
stats = stream.extract(RankingStats)
if not stream.eof():
raise ValueError("Response is bigger than expected (got %i bytes, but only %i were read)" %(stream.size(), stream.tell()))
logger.info("RankingClient.get_stats -> done")
return stats
async def get_ranking_by_pid_list(self, pids, mode, category, order, unique_id):
logger.info("RankingClient.get_ranking_by_pid_list()")
#--- request ---
stream = streams.StreamOut(self.settings)
stream.list(pids, stream.pid)
stream.u8(mode)
stream.u32(category)
stream.add(order)
stream.u64(unique_id)
data = await self.client.request(self.PROTOCOL_ID, self.METHOD_GET_RANKING_BY_PID_LIST, stream.get())
#--- response ---
stream = streams.StreamIn(data, self.settings)
result = stream.extract(RankingResult)
if not stream.eof():
raise ValueError("Response is bigger than expected (got %i bytes, but only %i were read)" %(stream.size(), stream.tell()))
logger.info("RankingClient.get_ranking_by_pid_list -> done")
return result
async def get_ranking_by_unique_id_list(self, ids, mode, category, order, unique_id):
logger.info("RankingClient.get_ranking_by_unique_id_list()")
#--- request ---
stream = streams.StreamOut(self.settings)
stream.list(ids, stream.u64)
stream.u8(mode)
stream.u32(category)
stream.add(order)
stream.u64(unique_id)
data = await self.client.request(self.PROTOCOL_ID, self.METHOD_GET_RANKING_BY_UNIQUE_ID_LIST, stream.get())
#--- response ---
stream = streams.StreamIn(data, self.settings)
result = stream.extract(RankingResult)
if not stream.eof():
raise ValueError("Response is bigger than expected (got %i bytes, but only %i were read)" %(stream.size(), stream.tell()))
logger.info("RankingClient.get_ranking_by_unique_id_list -> done")
return result
async def get_cached_topx_ranking(self, category, order):
logger.info("RankingClient.get_cached_topx_ranking()")
#--- request ---
stream = streams.StreamOut(self.settings)
stream.u32(category)
stream.add(order)
data = await self.client.request(self.PROTOCOL_ID, self.METHOD_GET_CACHED_TOPX_RANKING, stream.get())
#--- response ---
stream = streams.StreamIn(data, self.settings)
result = stream.extract(RankingCachedResult)
if not stream.eof():
raise ValueError("Response is bigger than expected (got %i bytes, but only %i were read)" %(stream.size(), stream.tell()))
logger.info("RankingClient.get_cached_topx_ranking -> done")
return result
async def get_cached_topx_rankings(self, categories, order):
logger.info("RankingClient.get_cached_topx_rankings()")
#--- request ---
stream = streams.StreamOut(self.settings)
stream.list(categories, stream.u32)
stream.list(order, stream.add)
data = await self.client.request(self.PROTOCOL_ID, self.METHOD_GET_CACHED_TOPX_RANKINGS, stream.get())
#--- response ---
stream = streams.StreamIn(data, self.settings)
results = stream.list(RankingCachedResult)
if not stream.eof():
raise ValueError("Response is bigger than expected (got %i bytes, but only %i were read)" %(stream.size(), stream.tell()))
logger.info("RankingClient.get_cached_topx_rankings -> done")
return results
class RankingServer(RankingProtocol):
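# Server-side counterpart of the client above: handle() looks up the incoming
# method id in self.methods and dispatches to the matching handle_* coroutine,
# which decodes the request stream, awaits the user-implemented method, and
# (where applicable) encodes the return value into the output stream.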
def __init__(self):
self.methods = {
self.METHOD_UPLOAD_SCORE: self.handle_upload_score,
self.METHOD_DELETE_SCORE: self.handle_delete_score,
self.METHOD_DELETE_ALL_SCORES: self.handle_delete_all_scores,
self.METHOD_UPLOAD_COMMON_DATA: self.handle_upload_common_data,
self.METHOD_DELETE_COMMON_DATA: self.handle_delete_common_data,
self.METHOD_GET_COMMON_DATA: self.handle_get_common_data,
self.METHOD_CHANGE_ATTRIBUTES: self.handle_change_attributes,
self.METHOD_CHANGE_ALL_ATTRIBUTES: self.handle_change_all_attributes,
self.METHOD_GET_RANKING: self.handle_get_ranking,
self.METHOD_GET_APPROX_ORDER: self.handle_get_approx_order,
self.METHOD_GET_STATS: self.handle_get_stats,
self.METHOD_GET_RANKING_BY_PID_LIST: self.handle_get_ranking_by_pid_list,
self.METHOD_GET_RANKING_BY_UNIQUE_ID_LIST: self.handle_get_ranking_by_unique_id_list,
self.METHOD_GET_CACHED_TOPX_RANKING: self.handle_get_cached_topx_ranking,
self.METHOD_GET_CACHED_TOPX_RANKINGS: self.handle_get_cached_topx_rankings,
}
async def handle(self, client, method_id, input, output):
if method_id in self.methods:
await self.methods[method_id](client, input, output)
else:
logger.warning("Unknown method called on RankingServer: %i", method_id)
raise common.RMCError("Core::NotImplemented")
async def handle_upload_score(self, client, input, output):
logger.info("RankingServer.upload_score()")
#--- request ---
score_data = input.extract(RankingScoreData)
unique_id = input.u64()
await self.upload_score(client, score_data, unique_id)
async def handle_delete_score(self, client, input, output):
logger.info("RankingServer.delete_score()")
#--- request ---
category = input.u32()
unique_id = input.u64()
await self.delete_score(client, category, unique_id)
async def handle_delete_all_scores(self, client, input, output):
logger.info("RankingServer.delete_all_scores()")
#--- request ---
unique_id = input.u64()
await self.delete_all_scores(client, unique_id)
async def handle_upload_common_data(self, client, input, output):
logger.info("RankingServer.upload_common_data()")
#--- request ---
common_data = input.buffer()
unique_id = input.u64()
await self.upload_common_data(client, common_data, unique_id)
async def handle_delete_common_data(self, client, input, output):
logger.info("RankingServer.delete_common_data()")
#--- request ---
unique_id = input.u64()
await self.delete_common_data(client, unique_id)
async def handle_get_common_data(self, client, input, output):
logger.info("RankingServer.get_common_data()")
#--- request ---
unique_id = input.u64()
response = await self.get_common_data(client, unique_id)
#--- response ---
if not isinstance(response, bytes):
raise RuntimeError("Expected bytes, got %s" %response.__class__.__name__)
output.buffer(response)
async def handle_change_attributes(self, client, input, output):
logger.info("RankingServer.change_attributes()")
#--- request ---
category = input.u32()
param = input.extract(RankingChangeAttributesParam)
unique_id = input.u64()
await self.change_attributes(client, category, param, unique_id)
async def handle_change_all_attributes(self, client, input, output):
logger.info("RankingServer.change_all_attributes()")
#--- request ---
param = input.extract(RankingChangeAttributesParam)
unique_id = input.u64()
await self.change_all_attributes(client, param, unique_id)
async def handle_get_ranking(self, client, input, output):
logger.info("RankingServer.get_ranking()")
#--- request ---
mode = input.u8()
category = input.u32()
order = input.extract(RankingOrderParam)
unique_id = input.u64()
pid = input.pid()
response = await self.get_ranking(client, mode, category, order, unique_id, pid)
#--- response ---
if not isinstance(response, RankingResult):
raise RuntimeError("Expected RankingResult, got %s" %response.__class__.__name__)
output.add(response)
async def handle_get_approx_order(self, client, input, output):
logger.info("RankingServer.get_approx_order()")
#--- request ---
category = input.u32()
order = input.extract(RankingOrderParam)
score = input.u32()
unique_id = input.u64()
pid = input.pid()
response = await self.get_approx_order(client, category, order, score, unique_id, pid)
#--- response ---
if not isinstance(response, int):
raise RuntimeError("Expected int, got %s" %response.__class__.__name__)
output.u32(response)
async def handle_get_stats(self, client, input, output):
logger.info("RankingServer.get_stats()")
#--- request ---
category = input.u32()
order = input.extract(RankingOrderParam)
flags = input.u32()
response = await self.get_stats(client, category, order, flags)
#--- response ---
if not isinstance(response, RankingStats):
raise RuntimeError("Expected RankingStats, got %s" %response.__class__.__name__)
output.add(response)
async def handle_get_ranking_by_pid_list(self, client, input, output):
logger.info("RankingServer.get_ranking_by_pid_list()")
#--- request ---
pids = input.list(input.pid)
mode = input.u8()
category = input.u32()
order = input.extract(RankingOrderParam)
unique_id = input.u64()
of this class.
"""
return '%s:%s' % (exp_id, state_name)
@classmethod
def get_state_reference_for_question(cls, question_id):
"""Generate the state_reference for the state in the question.
Args:
question_id: str. The id of the question.
Returns:
str. The state_reference for a new instance of this class.
"""
return question_id
@classmethod
def get_instance_id(cls, entity_type, state_reference):
"""Generates the id for the newly created model instance.
Args:
entity_type: str. The type of entity i.e. ENTITY_TYPE_EXPLORATION
or ENTITY_TYPE_QUESTION which are declared in feconf.py.
state_reference: str. The reference to the state for which model
instance is being created. For exploration state it will be of
the form 'exp_id:state_name', and for question it will be of
the form 'question_id'.
Returns:
instance_id: str. The generated id of the instance.
"""
instance_id = (
'%s:%s' % (entity_type, state_reference))
return instance_id
@classmethod
def create_model_instance(
cls, entity_type, state_reference, interaction_id,
learner_answer_info_list, learner_answer_info_schema_version,
accumulated_answer_info_json_size_bytes):
"""Creates a new LearnerAnswerDetailsModel for the given entity type
then writes it to the datastore.
Args:
entity_type: str. The type of entity i.e. ENTITY_TYPE_EXPLORATION
or ENTITY_TYPE_QUESTION which are declared in feconf.py.
state_reference: str. The reference to the state for which model
instance is being created. For exploration state it will be of
the form 'exp_id:state_name', and for question it will be of
the form 'question_id'.
interaction_id: str. The ID of the interaction for which the
answer details are received.
learner_answer_info_list: list(LearnerAnswerInfo). The list of
LearnerAnswerInfo objects in dict format, which is defined in
the stats_domain.
learner_answer_info_schema_version: int. The version of
LearnerAnswerInfo dict, which is currently supported by
Oppia.
accumulated_answer_info_json_size_bytes: int. The size of the
learner_answer_info_list in bytes.
"""
instance_id = cls.get_instance_id(entity_type, state_reference)
answer_details_instance = cls(
id=instance_id,
entity_type=entity_type,
state_reference=state_reference,
interaction_id=interaction_id,
learner_answer_info_list=learner_answer_info_list,
learner_answer_info_schema_version=(
learner_answer_info_schema_version),
accumulated_answer_info_json_size_bytes=(
accumulated_answer_info_json_size_bytes))
answer_details_instance.put()
@classmethod
def get_model_instance(
cls, entity_type, state_reference):
"""Returns the model instance related to the entity type and
state reference.
Args:
entity_type: str. The type of entity i.e. ENTITY_TYPE_EXPLORATION
or ENTITY_TYPE_QUESTION which are declared in feconf.py.
state_reference: str. The reference to a state, for which the model
is to be fetched. For an exploration state it will be of the form
'exp_id:state_name', and for question state it will be of the
form 'question_id'.
Returns:
LearnerAnswerDetailsModel or None. The answer details model
associated with the given entity type and state reference or
None if the instance is not found. Doesn't include deleted
entries.
"""
instance_id = cls.get_instance_id(entity_type, state_reference)
model_instance = cls.get(instance_id, strict=False)
if model_instance:
return model_instance
return None
@staticmethod
def get_export_policy():
"""Model does not contain user data."""
return base_models.EXPORT_POLICY.NOT_APPLICABLE
class ExplorationAnnotationsModel(base_models.BaseMapReduceBatchResultsModel):
"""Batch model for storing MapReduce calculation output for
exploration-level statistics.
This model is keyed using a custom ID of the format
{[EXPLORATION_ID]:[EXPLORATION_VERSION]}.
"""
# ID of exploration.
exploration_id = ndb.StringProperty(indexed=True)
# Version of exploration.
version = ndb.StringProperty(indexed=False)
# Number of students who started the exploration.
num_starts = ndb.IntegerProperty(indexed=False)
# Number of students who have completed the exploration.
num_completions = ndb.IntegerProperty(indexed=False)
# Keyed by state name that describes the numbers of hits for each state
# {state_name: {'first_entry_count': ...,
# 'total_entry_count': ...,
# 'no_answer_count': ...}}
state_hit_counts = ndb.JsonProperty(indexed=False)
@staticmethod
def get_deletion_policy():
"""ExplorationAnnotationsModels are aggregated and anonymized, and
cannot be tied back to an individual user.
"""
return base_models.DELETION_POLICY.NOT_APPLICABLE
@staticmethod
def get_user_id_migration_policy():
"""ExplorationAnnotationsModel doesn't have any field with user ID."""
return base_models.USER_ID_MIGRATION_POLICY.NOT_APPLICABLE
@classmethod
def get_entity_id(cls, exploration_id, exploration_version):
"""Gets entity_id for a batch model based on given exploration state.
Args:
exploration_id: str. ID of the exploration currently being played.
exploration_version: int. Version of the exploration currently
being played.
Returns:
str. Returns entity_id for a new instance of this class.
"""
return '%s:%s' % (exploration_id, exploration_version)
@classmethod
def create(
cls, exp_id, version, num_starts, num_completions,
state_hit_counts):
"""Creates a new ExplorationAnnotationsModel and
then writes it to the datastore.
Args:
exp_id: str. ID of the exploration currently being played.
version: int. Version of exploration.
num_starts: int. Number of students who started the exploration.
num_completions: int. Number of students who have completed
the exploration.
state_hit_counts: dict. Describes the number of hits
for each state.
"""
entity_id = cls.get_entity_id(exp_id, version)
cls(
id=entity_id,
exploration_id=exp_id,
version=version,
num_starts=num_starts,
num_completions=num_completions,
state_hit_counts=state_hit_counts).put()
@classmethod
def get_versions(cls, exploration_id):
"""This function returns a list containing versions of
ExplorationAnnotationsModel for a specific exploration_id.
Args:
exploration_id: str. ID of the exploration currently being played.
Returns:
list(int). List of versions corresponding to annotation models
with given exp_id.
"""
return [
annotations.version for annotations in cls.get_all().filter(
cls.exploration_id == exploration_id
).fetch(feconf.DEFAULT_QUERY_LIMIT)]
@staticmethod
def get_export_policy():
"""Model does not contain user data."""
return base_models.EXPORT_POLICY.NOT_APPLICABLE
class StateAnswersModel(base_models.BaseModel):
"""Store all answers of a state. This model encapsulates a sharded storage
system for answers. Multiple entries in the model may contain answers for
the same state. The initial entry has a shard ID of 0 and contains
information about how many shards exist for this state. All other meta
information is duplicated across all shards, since they are immutable or are
local to that shard.
This model is keyed using a custom ID of the format
{[EXPLORATION_ID]:[EXPLORATION_VERSION]:[STATE_NAME]:[SHARD_ID]}.
"""
# This provides about 124k of padding for the other properties and entity
# storage overhead (since the max entity size is 1MB). The meta data can
# get close to 50k or exceed it, so plenty of padding is left to avoid
# risking overflowing an entity.
_MAX_ANSWER_LIST_BYTE_SIZE = 900000
# Explicitly store exploration ID, exploration version and state name
# so we can easily do queries on them.
exploration_id = ndb.StringProperty(indexed=True, required=True)
exploration_version = ndb.IntegerProperty(indexed=True, required=True)
state_name = ndb.StringProperty(indexed=True, required=True)
# Which shard this corresponds to in the list of shards. If this is 0 it
# represents the master shard which includes the shard_count. All other
# shards look similar to the master shard except they do not populate
# shard_count.
shard_id = ndb.IntegerProperty(indexed=True, required=True)
# Store interaction type to know which calculations should be performed.
interaction_id = ndb.StringProperty(indexed=True, required=True)
# Store how many extra shards are associated with this state. This is only
# present when shard_id is 0. This starts at 0 (the main shard is not
# counted).
shard_count = ndb.IntegerProperty(indexed=True, required=False)
# The total number of bytes needed to store all of the answers in the
# submitted_answer_list, minus any overhead of the property itself. This
# value is found by summing the JSON sizes of all answer dicts stored inside
# submitted_answer_list.
# pylint: disable=invalid-name
accumulated_answer_json_size_bytes = ndb.IntegerProperty(
indexed=False, required=False, default=0)
# pylint: enable=invalid-name
# List of answer dicts, each of which is stored as JSON blob. The content
# of answer dicts is specified in core.domain.stats_domain.StateAnswers.
# NOTE: The answers stored in submitted_answers_list must be sorted
# according to the chronological order of their submission otherwise
# TopNUnresolvedAnswersByFrequency calculation in
# InteractionAnswerSummariesAggregator will output invalid results.
submitted_answer_list = ndb.JsonProperty(repeated=True, indexed=False)
# The version of the submitted_answer_list currently supported by Oppia. If
# the internal JSON structure of submitted_answer_list changes,
# CURRENT_SCHEMA_VERSION in this class needs to be incremented.
schema_version = ndb.IntegerProperty(
indexed=True, default=feconf.CURRENT_STATE_ANSWERS_SCHEMA_VERSION)
@staticmethod
def get_deletion_policy():
"""StateAnswersModels are aggregated and anonymized, and cannot be tied
back to an individual user.
"""
return base_models.DELETION_POLICY.NOT_APPLICABLE
@staticmethod
def get_user_id_migration_policy():
"""StateAnswersModel doesn't have any field with user ID."""
return base_models.USER_ID_MIGRATION_POLICY.NOT_APPLICABLE
@classmethod
def _get_model(
cls, exploration_id, exploration_version, state_name, shard_id):
"""Gets model instance based on given exploration state and shard_id.
Args:
exploration_id: str. The exploration ID.
exploration_version: int. The version of the exploration to
fetch answers for.
state_name: str. The name of the state to fetch answers for.
shard_id: int. The ID of the shard to fetch answers for.
Returns:
StateAnswersModel. The model associated with the specified
exploration state and shard ID, or None if no answers
have been submitted corresponding to this state.
"""
entity_id = cls._get_entity_id(
exploration_id, exploration_version, state_name, shard_id)
return cls.get(entity_id, strict=False)
@classmethod
def get_master_model(cls, exploration_id, exploration_version, state_name):
"""Retrieves the master model associated with the specific exploration
state. Returns None if no answers have yet been submitted to the
specified exploration state.
Args:
exploration_id: str. The exploration ID.
exploration_version: int. The version of the exploration
import itertools
import os
import sys
import pandas
from geopandas import GeoDataFrame, sjoin
from pandas import DataFrame
from shapely.geometry import Polygon
import numpy as np
import math
from gtfspy.util import wgs84_width, wgs84_height, df_to_utm_gdf, ut_to_utc_datetime, makedirs, wgs84_distance
import random
import matplotlib.pyplot as plt
def split_data_frame_list(df, target_column, separator=None):
""" df = dataframe to split,
target_column = the column containing the values to split
separator = the symbol used to perform the split
returns: a dataframe with each entry for the target column separated, with each element moved into a new row.
The values in the other columns are duplicated across the newly divided rows.
"""
row_accumulator = []
def split_list_to_rows(row, separate_by=None):
if separate_by:
split_row = row[target_column].split(separate_by)
else:
split_row = row[target_column]
for s in split_row:
new_row = row.to_dict()
new_row[target_column] = s
row_accumulator.append(new_row)
df.apply(split_list_to_rows, axis=1, args=(separator, ))
new_df = pandas.DataFrame(row_accumulator)
return new_df
def get_custom_spatial_bounds(distance, lat, lon):
height = wgs84_height(distance)
width = wgs84_width(distance, lat)
return {'lon_min': lon-width, 'lon_max': lon+width, 'lat_min': lat-height, 'lat_max': lat+height}
def create_grid_tesselation(xmin, ymin, xmax, ymax, width, height, random_seed=None):
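# Builds a rectangular grid of width x height cells that covers the bounding
# box (xmin, ymin, xmax, ymax). The grid origin is shifted by a random offset
# (reproducible via random_seed) so repeated tesselations are not aligned.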
random.seed(a=random_seed)
r_width = random.randint(0, width)
r_height = random.randint(0, height)
rows = int(np.ceil((ymax - ymin + r_height) / height))
cols = int(np.ceil((xmax - xmin + r_width) / width))
x_left_origin = xmin - r_width
x_right_origin = x_left_origin + width
y_top_origin = ymax + r_height
y_bottom_origin = y_top_origin - height
polygons = []
for i in range(cols):
y_top = y_top_origin
y_bottom = y_bottom_origin
for j in range(rows):
polygons.append(Polygon(
[(x_left_origin, y_top), (x_right_origin, y_top), (x_right_origin, y_bottom),
(x_left_origin, y_bottom)]))
y_top = y_top - height
y_bottom = y_bottom - height
x_left_origin = x_left_origin + width
x_right_origin = x_right_origin + width
return polygons
def stop_sample(gtfs, sample_size=None, sample_fraction=None, tesselation_distance=1000, random_seed=1, **kwargs):
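# Draws a spatially stratified sample of stops: the stops are binned into a
# random grid tesselation, each cell gets a proportional share of the sample
# (floor of sample_size * cell_count / total), and the remaining slots are
# handed to the cells with the largest remainders before sampling per cell.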
stops, crs_utm = df_to_utm_gdf(gtfs.stops())
total_n_stops = len(stops.index)
assert sample_size or sample_fraction
if sample_fraction:
assert 0 < sample_fraction <= 1
if sample_size:
sample_size = max(sample_size, total_n_stops * sample_fraction)
else:
sample_size = total_n_stops * sample_fraction
sample_size = math.ceil(sample_size)
print("Using sample size:", sample_size)
polygons = create_grid_tesselation(*stops.total_bounds, height=tesselation_distance, width=tesselation_distance,
random_seed=random_seed)
grid = GeoDataFrame({'geometry': polygons}, crs=crs_utm)
grid["id"] = grid.index
stops = sjoin(stops, grid, how="left", op='within')
stops_grouped = stops.groupby(["id"])
stops_grouped = stops_grouped.agg({'stop_I': 'count'}, axis=1)
stops_grouped = stops_grouped.reset_index()
sample_sizes = []
for i in stops_grouped.itertuples():
(div, mod) = divmod(sample_size * i.stop_I, total_n_stops)
sample_sizes.append({"id": int(i.id), "div": div, "mod": mod})
to_allocate = sample_size - sum([x["div"] for x in sample_sizes])
sample_sizes = sorted(sample_sizes, key=lambda k: k['mod'], reverse=True)
sample_sizes = [{"id": x["id"], "div": x["div"] + 1, "mod": x['mod']} if i < to_allocate else
{"id": x["id"], "div": x["div"], "mod": x['mod']} for i, x in enumerate(sample_sizes)]
stops = stops.sort_values("stop_I")
sample = GeoDataFrame()
for row in sample_sizes:
if row["div"] > 0:
sample = sample.append(stops.loc[stops.id == row["id"]].sample(n=row["div"], random_state=random_seed))
import matplotlib.pyplot as plt
"""
plt.figure()
ax = grid.plot(facecolor="none", edgecolor='black', lw=0.7)
ax = stops.plot(ax=ax, column="id")
ax = sample.plot(ax=ax, color="red")
plt.show()
"""
return sample["stop_I"].tolist()
def split_into_equal_length_parts(array, n_splits):
# Taken from:
# http://stackoverflow.com/questions/2130016/splitting-a-list-of-arbitrary-size-into-only-roughly-n-equal-parts
# Pretty nice solution.
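# Worked example: split_into_equal_length_parts(list(range(7)), 3)
# -> [[0, 1, 2], [3, 4], [5, 6]] (the first len(a) % n parts get one extra item).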
a = array
n = n_splits
k, m = divmod(len(a), n)
lists = [a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n)]
assert(lists[0][0] == array[0])
assert(lists[-1][-1] == array[-1])
return lists
def round_sigfigs(num, sig_figs):
"""Round to specified number of sigfigs.
source: http://code.activestate.com/recipes/578114-round-number-to-specified-number-of-significant-di/
"""
if num != 0:
return round(num, -int(math.floor(math.log10(abs(num))) - (sig_figs - 1)))
else:
return 0 # Can't take the log of 0
def split_by_char_closest_to_middle(text, delimiter=" ", filler="\n"):
n = len(text)/2
words = text.split(delimiter)
candidate_len = 0
prev_len = 0
for word in words:
candidate_len += len(word)
if candidate_len > n:
if n - prev_len < candidate_len - n:
split_len = prev_len
break
else:
split_len = candidate_len
break
prev_len = candidate_len
candidate_len += 1
char_list = list(text)
char_list[split_len] = filler
text = "".join(char_list)
text = text.replace(delimiter, " ")
return text
def apply_suffix(d, suffix, filler="_"):
d = {k+filler+suffix: v for k, v in d.items()}
return d
def check_day_start(gtfs, desired_weekday):
"""
Assuming a weekly extract, gets the unix time (ut) of the start of the desired weekday
:param gtfs: GTFS object
:param desired_weekday: int, weekday index (Monday == 0)
:return: unix time of the start of the desired weekday
"""
day_start_add = 24 * 3600
day_start, _ = gtfs.get_day_start_ut_span()
tz = gtfs.get_timezone_pytz()
print("original weekday:", ut_to_utc_datetime(day_start, tz).weekday())
weekday = ut_to_utc_datetime(day_start, tz).weekday()
day_start += day_start_add * (9-weekday if weekday > desired_weekday else 2-weekday)
print("day start:", day_start)
print("day start weekday:", ut_to_utc_datetime(day_start, tz).weekday())
return day_start
def subtract_dataframes(df1, df2, suffixes=("_x", "_y"), drop_cols=False, **kwargs):
"""
Merges the dataframes and subtracts the matching columns
:param df1: pandas DataFrame
:param df2: pandas DataFrame
:param suffixes:
:param drop_cols:
:param kwargs:
:return:
"""
cols1 = list(df1)
cols2 = list(df2)
common_cols = [col for col in cols1 if col in cols2]
kwargs["right_index"] = kwargs.get("right_index", True)
kwargs["left_index"] = kwargs.get("left_index", True)
diff_suffix = kwargs.get("diff_suffix", "|diff")
df = df1.merge(df2, suffixes=suffixes, **kwargs)
for col in common_cols:
df[col+diff_suffix] = df[col+suffixes[0]] - df[col+suffixes[1]]
if drop_cols:
df.drop([col+suffixes[0], col+suffixes[1]], inplace=True, axis=1)
return df
def get_differences_between_dataframes(dfs):
"""
Subtracts the difference of all combinations of dataframes. Dataframes are matched by index.
Columns with similar name are subtracted.
:param dfs: dict, {name str: df pandas.DataFrame }
:return: dfs_to_return list of pandas.DataFrame, names_to_return list of strings
"""
pairs = itertools.combinations(dfs.items(), 2)
dfs_to_return = []
names_to_return = []
for (name1, df1), (name2, df2) in pairs:
suffixes = ("|" + name1, "|" + name2)
df = subtract_dataframes(df1, df2, suffixes=suffixes)
df = df.reset_index()
dfs_to_return.append(df)
names_to_return.append((name1, name2))
return dfs_to_return, names_to_return
def drop_nans(df):
init_len = len(df.index)
df = df.replace([np.inf, -np.inf], np.nan).dropna()
len_after_clean = len(df.index)
print("WARNING! Of {init} rows, {removed} rows with missing data were removed"
.format(init=init_len, removed=init_len - len_after_clean))
return df
def flatten_2d_array(array):
return [item for sublist in array for item in sublist]
def stops_within_buffer(input_geometry, gtfs, buffer_m=0):
"""
Returns the stops that are within the given buffer of a given geometry
:param input_geometry: GeoDataFrame or shapely
:param gtfs: GTFS
:param buffer_m: int
:return:
"""
stops_gdf, crs = df_to_utm_gdf(gtfs.stops())
len_stops_init = len(stops_gdf.index)
if isinstance(input_geometry, GeoDataFrame):
buffer_gdf = input_geometry
elif isinstance(input_geometry, DataFrame):
buffer_gdf, crs = df_to_utm_gdf(input_geometry)
else:
raise NotImplementedError
buffer_gdf = buffer_gdf.copy()
buffer_gdf["geometry"] = buffer_gdf["geometry"].buffer(buffer_m)
stops_gdf = sjoin(stops_gdf, buffer_gdf, how='inner')
len_stops_final = len(stops_gdf.index)
print("filetered from {init} to {final} stops".format(init=len_stops_init, final=len_stops_final))
return stops_gdf
def filter_stops_spatially(df, gtfs, cols, buffer=None, geometry=None):
"""
filters a dataframe spatially based on stops
:param buffer: int or list
:param df: DataFrame
:param gtfs: GTFS
:param cols: name of the stop column or list
:param geometry: GeoDataFrame or list
:return: DataFrame
"""
if not isinstance(cols, list):
cols = [cols]
if not isinstance(buffer, list):
buffer = [buffer]
if not isinstance(geometry, list):
geometry = [geometry]
assert len(cols) == len(buffer) == len(geometry)
for col_arg, buffer_m, gdf in zip(cols, buffer, geometry):
if buffer_m and gdf is not None:
stops = stops_within_buffer(gdf, gtfs, buffer_m=buffer_m)
else:
stops = stops_within_buffer(gdf, gtfs)
df = df.loc[df[col_arg].isin(stops["stop_I"])].copy()
return df
def tidy_value(v, ft=3):
if abs(v) <= 10 ** (-1 * ft) or abs(v) >= 10 ** ft:
return format(v, '.2E')
else:
return format(v, ".3f")
def tidy_label(label, capitalize=False):
if capitalize:
label = label.capitalize()
label = label.replace("_", " ")
return label
def find_df_value(df, key_col, key, value_col):
print(key)
return df.loc[df[key_col] == key, value_col].iloc[0]
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
def save_fig(func):
def inner(*args, **kwargs):
if kwargs.get("plot_separately", False):
fig = plt.figure(figsize=kwargs.get("figsize", [9, 5]))
fig.add_subplot(111, projection=kwargs.get("projection", None))
func(*args)
#plt.tight_layout()
# plt.show()
fig_format = kwargs.get("fig_format", "png")
folder = kwargs.get("folder", "")
fname = kwargs.get("fname", "")
plotname = kwargs.get("plotname", "")
fname = fname + plotname + "." + fig_format
plt.tight_layout()
plt.savefig(os.path.join(makedirs(folder), fname), bbox_inches='tight', format=fig_format, dpi=300)
else:
ax = func(*args,
if mit_param(k):
return res
if kwargs.get('d'):
return float(res)
return res
except ZeroDivisionError:
print("agla: Division durch Null (Krümmung)")
return
print("agla: nur einen Parameterwert angeben")
return
else:
if len(wert) != 1:
print("agla: einen Punkt in der Ebene angeben")
return
gl = self.args[0]
p = wert[0]
if not (isinstance(p, Vektor) and p.dim == 2):
print('agla: einen Punkt der Kurve angeben')
return
x, y = Symbol('x'), Symbol('y')
if gl.subs({x:p.x, y:p.y}).lhs != 0:
print('agla: einen Punkt der Kurve angeben')
return
try:
r = abs(einfach(1 / self.kruemm(p)))
except ZeroDivisionError:
print('agla: Division durch Null (Krümmung)')
return
if mit_param(r):
return einfach(r)
if kwargs.get('d'):
return float(r)
return einfach(r)
krRadius = kr_radius
def stueck(self, *bereich, **kwargs):
"""Kurvenstück / Änderung des Parameterbereiches"""
if kwargs.get('h'):
print("\nStück einer Kurve\n")
print("Im Raum R^3 und in der Ebene R^2 (Parameterform,")
print("Funktionsgleichung bzw. Polarkoordinaten):\n")
print("Aufruf kurve . stück( par_unt, par_ob )\n")
print(" kurve Kurve")
print(" par_unt untere und obere Bereichsgrenzen")
print(" par_ob des Kurvenparameters\n")
return
if self.dim == 2 and self._typ == 'imp':
print('agla: nicht verfügbar (implizite Gleichung)')
return
bereich = sympify(bereich)
if not (isinstance(bereich, Tuple) and len(bereich) == 2):
print("agla: untere und obere Bereichsgrenzen angeben")
return
if not (is_zahl(bereich[0]) and is_zahl(bereich[1])):
print("agla: zwei Zahlenwerte angeben")
return
return Kurve(self.pkt(), (self.par, bereich[0], bereich[1]))
def kruemm(self, *wert, **kwargs):
"""Krümmung"""
if kwargs.get('h'):
print("\nKrümmung der Kurve\n")
print("Im Raum R^3 und in der Ebene R^2 (Parameterform,")
print("Funktionsgleichung bzw. Polarkoordinaten):\n")
print("Aufruf kurve . krümm( /[ wert ] )\n")
print(" kurve Kurve")
print(" wert Wert des Kurvenparameters\n")
print("Rückgabe bei Angabe eines Parameterwertes:")
print(" Krümmung im zugehörigen Kurvenpunkt")
print(" bei leerer Argumentliste oder freiem Bezeichner:")
print(" Krümmung im allgemeinen Kurvenpunkt\n")
print("In der Ebene R^2 (für Kurven, die mittels impliziter Gleichung")
print("erzeugt wurden):\n")
print("Aufruf kurve . krümm( punkt )\n")
print(" punkt Punkt der Kurve\n")
print("Zusatz d=1 Dezimaldarstellung\n")
return
if self.dim == 3 or (self.dim == 2 and self._typ != 'imp'):
par = self.par
pkt = self.pkt(par)
p1 = pkt.diff(par)
p2 = p1.diff(par)
if self.dim == 3:
try:
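# Space curve: kappa = |r' x r''| / |r'|**3; the numerator is obtained via
# the Lagrange identity |r' x r''|**2 = (r'.r')(r''.r'') - (r'.r'')**2.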
k = einfach( ( p1.sp(p1) * p2.sp(p2) - (p1.sp(p2))**2 )
/ (p1.sp(p1))**3 )
from sympy import sqrt # to avoid an error
k = sqrt(k)
except ZeroDivisionError:
return
elif self.dim == 2 and self._typ != 'imp':
try:
k = determinante(Vektor(p1.x, p2.x), Vektor(p1.y, p2.y)) / \
(p1.x**2 + p1.y**2)**Rational(3, 2)
except ZeroDivisionError:
return
if self.dim == 3 or (self.dim == 2 and self._typ != 'imp'):
if not wert:
if mit_param(k):
return einfach(k)
if kwargs.get('d'):
return float(k)
return einfach(k)
if len(wert) == 1:
pw = sympify(wert[0])
if not is_zahl(pw):
print("agla: einen Zahlenwert angeben")
return
try:
pw = nsimplify(pw)
except RecursionError:
pass
res = k.subs(par, pw)
if mit_param(res):
return einfach(res)
if kwargs.get('d'):
return float(res)
return einfach(res)
print("agla: nur einen Parameterwert angeben")
return
else:
if len(wert) != 1:
print("agla: einen Punkt der Kurve angeben")
return
gl = self.args[0]
p = wert[0]
if not (isinstance(p, Vektor) and p.dim == 2):
print('agla: einen Punkt in der Ebene angeben')
return
x, y = Symbol('x'), Symbol('y')
if gl.subs({x:p.x, y:p.y}).lhs != 0:
print('agla: einen Punkt der Kurve angeben')
return
Fx, Fy = gl.lhs.diff(x), gl.lhs.diff(y)
zahl = (int, Integer, float, Float, Rational)
Fxx = 0 if isinstance(Fx, zahl) else Fx.diff(x)
Fyy = 0 if isinstance(Fy, zahl) else Fy.diff(y)
Fxy = 0 if isinstance(Fx, zahl) else Fx.diff(y)
Fyx = 0 if isinstance(Fy, zahl) else Fy.diff(x)
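# Implicit curve F(x, y) = 0: the curvature is the determinant of the
# bordered Hessian [[Fxx, Fxy, Fx], [Fyx, Fyy, Fy], [Fx, Fy, 0]] divided by
# (Fx**2 + Fy**2)**(3/2) (up to the sign convention used here).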
d = determinante(Vektor(Fxx, Fyx, Fx), Vektor(Fxy, Fyy, Fy), \
Vektor(Fx, Fy, 0))
try:
k = d / (Fx**2 + Fy**2)**Rational(3, 2)
if not isinstance(k, zahl):
k = k.subs({x:p.x, y:p.y})
except ZeroDivisionError:
print('agla: Division durch Null')
return
if mit_param(k):
return einfach(k)
if kwargs.get('d'):
return float(k)
return einfach(k)
def wind(self, *wert, **kwargs):
"""Windung, Torsion"""
if self.dim != 3:
print('agla: nur im Raum R^3 definiert')
return
if kwargs.get('h'):
print("\nWindung / Torsion der Kurve\n")
print("Im Raum R^3:\n")
print("Aufruf kurve . wind( /[ wert ] )\n")
print(" kurve Kurve")
print(" wert Wert des Kurvenparameters\n")
print("Rückgabe bei Angabe eines Parameterwertes:")
print(" Windung im zugehörigen Kurvenpunkt")
print(" bei leerer Argumentliste oder freiem Bezeichner:")
print(" Windung im allgemeinen Kurvenpunkt\n")
print("Zusatz d=1 Dezimaldarstellung\n")
return
par = self.par
pkt = self.pkt(par)
p1 = pkt.diff(par)
p2 = p1.diff(par)
p3 = p2.diff(par)
k = self.kruemm(par)
if k != 0:
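# Torsion: tau = (r' x r'') . r''' / |r' x r''|**2, with
# |r' x r''|**2 = kappa**2 * |r'|**6, hence the 1/k**2 factor below.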
w = einfach( 1/k**2 * ( p1.vp(p2).sp(p3) ) / (p1.sp(p1))**3 )
else:
print("agla: Division durch Null (Krümmung)")
return
if not wert:
if mit_param(w):
return w
if kwargs.get('d'):
return float(w)
return w
if len(wert) == 1:
pw = sympify(wert[0])
if not is_zahl(pw):
print("agla: einen Zahlenwert angeben")
return
try:
pw = nsimplify(pw)
except RecursionError:
pass
res = w.subs(par, pw)
if mit_param(res):
return res
if kwargs.get('d'):
return float(res)
return res
print("agla: nur einen Parameterwert angeben")
return
tors = wind
def par_wert(self, *args, **kwargs):
"""Parameterwert eines Kurvenpunktes"""
if mit_param(self):
print('agla: nicht implementiert (Parameter)')
return
if self.dim == 2 and self._typ == 'imp':
print('agla: nicht verfügbar (implizite Gleichung)')
return
if kwargs.get('h'):
print("\nParameterwert eines Punktes der Kurve\n")
print("Im Raum R^3 und in der Ebene R^2 (Parameterform,")
print("Funktionsgleichung bzw. Polarkoordinaten):\n")
print("Aufruf kurve . par_wert( punkt, start )\n")
print(" kurve Kurve")
print(" punkt Punkt")
print(" start Startwert des nummerischen")
print(" Verfahrens\n")
print("Der Parameterwert wird über die Minimierung des Abstandes")
print("des Punktes zu einem Kurvenpunkt gesucht; es wird 'nsolve'")
print("verwendet (siehe SymPy-Dokumentation)\n")
print("Zusatz d=1 - Dezimaldarstellung")
print(" g=1 - Grafik der Abstandsfunktion")
print(" (Abstand des gegebenen Punktes zu den Kur-")
print(" venpunkten)\n")
return
if len(args) != 2:
print("agla: einen Punkt und einen Startwert angeben")
return
punkt, start = args
start = sympify(start)
if not (isinstance(punkt, Vektor) and punkt.dim == self.dim and \
is_zahl(start)):
if self.dim == 3:
print("agla: einen Punkt im Raum und einen Startwert angeben")
else:
print("agla: einen Punkt in der Ebene und einen " + \
"Startwert angeben")
return
from numpy import abs
start = float(start)
if kwargs.get('g'):
import numpy as np
from numpy import (pi, sqrt, sin, cos, tan, exp, log, sinh, cosh,
tanh, arcsin, arccos, arctan, arcsinh, arccosh, arctanh)
ln = log
import matplotlib.pyplot as plt
print("\nAbstandsfunktion")
fig = plt.figure(figsize=(4, 3))
plt.axes().set_aspect('equal')
ax = fig.add_subplot(1, 1, 1)
t = Symbol('t')
aa = str(punkt.abstand(self.pkt(t))**2)
t = np.arange(float(self.ber[0]), float(self.ber[1]), 1/200.0)
y = sqrt(abs(eval(aa)))
for tick in ax.xaxis.get_major_ticks():
tick.label1.set_fontsize(9)
tick.label1.set_fontname('Times New Roman')
for tick in ax.yaxis.get_major_ticks():
tick.label1.set_fontsize(9)
tick.label1.set_fontname('Times New Roman')
for pos in ('top', 'bottom', 'right', 'left'):
ax.spines[pos].set_linewidth(0.5)
plt.plot(t, y)
plt.show()
return
t = Symbol('t')
f = lambda t: punkt.abstand(self.pkt(t))**2
f1 = lambda t: punkt.abstand(self.pkt(t)).evalf()
d = kwargs.get('d')
try:
res = nsolve(f(t).diff(t), start)
except:
print("agla: mit einem anderen Startwert versuchen\n" + \
" der Punkt ist eventuell kein Kurvenpunkt")
return
if abs(f1(res)) < 10**(-6):
try:
res = nsimplify(res)
if d:
return float(res)
return res
except RecursionError:
pass
if d:
return float(res)
return res
else:
print("agla: mit einem anderen Startwert versuchen\n" + \
" der Punkt ist eventuell kein Kurvenpunkt")
parWert = par_wert
def schnitt(self, *args, **kwargs):
"""Schnitt mit einer anderen Kurve"""
if self.dim == 2 and self._typ == 'imp':
print('agla: nicht implementiert (implizite Gleichung)')
return
if mit_param(self):
print('agla: nicht implementiert (Parameter)')
return
if kwargs.get('h'):
print("\nParameterwerte eines Schnittpunktes mit einer anderen")
print("parametrisierten Kurve\n")
print("Im Raum R^3 und in der Ebene R^2 (Parameterform,")
print("Funktionsgleichung bzw. Polarkoordinaten):\n")
print("Aufruf kurve . schnitt( kurve1, start1, start2 )\n")
print(" kurve Kurve")
print(" start Startwert des nummerischen Verfahrens\n")
print("Rückgabe ( Parameterwert für die gegebene Kurve,")
print(" Parameterwert für die andere Kurve )\n")
print("Die beiden Startwerte für die Kurven sind so genau wie möglich")
print("anzugeben; die Parameterwerte werden über die Minimierung des")
print("Abstandes der Kurvenpunkte zueinander gesucht; es wird 'nsolve'")
print("verwendet (siehe SymPy-Dokumentation)\n")
return
if len(args) != 3:
print("agla: drei Argumente angeben")
return
kurve, start1, start2 = args
if isinstance(kurve, Kurve) and mit_param(kurve):
print("agla: nicht implementiert(Parameter)")
return
start1 = sympify(start1)
start2 = sympify(start2)
if not (isinstance(kurve, Kurve) and kurve.dim == self.dim
and is_zahl(start1) and is_zahl(start2)):
if self.dim ==
# Repository: davidt/rbtools
from __future__ import print_function, unicode_literals
import fnmatch
import logging
import marshal
import os
import re
import six
import socket
import stat
import string
import subprocess
from rbtools.clients import SCMClient, RepositoryInfo
from rbtools.clients.errors import (AmendError,
EmptyChangeError,
InvalidRevisionSpecError,
SCMError,
TooManyRevisionsError)
from rbtools.utils.checks import check_gnu_diff, check_install
from rbtools.utils.filesystem import make_empty_files, make_tempfile
from rbtools.utils.process import die, execute
class P4Wrapper(object):
"""A wrapper around p4 commands.
All calls out to p4 go through an instance of this class. It keeps a
separation between all the standard SCMClient logic and any parsing
and handling of p4 invocation and results.
"""
KEYVAL_RE = re.compile('^([^:]+): (.+)$')
COUNTERS_RE = re.compile('^([^ ]+) = (.+)$')
def __init__(self, options):
self.options = options
def is_supported(self):
return check_install(['p4', 'help'])
def counters(self):
lines = self.run_p4(['counters'], split_lines=True)
return self._parse_keyval_lines(lines, self.COUNTERS_RE)
def change(self, changenum, marshalled=True, password=None):
return self.run_p4(['change', '-o', str(changenum)],
password=password, ignore_errors=True,
none_on_ignored_error=True,
marshalled=marshalled)
def modify_change(self, new_change_spec):
"""new_change_spec must contain the changelist number."""
return self.run_p4(['change', '-i'], input_string=new_change_spec)
def files(self, path):
return self.run_p4(['files', path], marshalled=True)
def filelog(self, path):
return self.run_p4(['filelog', path], marshalled=True)
def fstat(self, depot_path, fields=[]):
args = ['fstat']
if fields:
args += ['-T', ','.join(fields)]
args.append(depot_path)
lines = self.run_p4(args, split_lines=True)
stat_info = {}
for line in lines:
line = line.strip()
if line.startswith('... '):
parts = line.split(' ', 2)
stat_info[parts[1]] = parts[2]
return stat_info
def info(self):
lines = self.run_p4(['info'],
ignore_errors=True,
split_lines=True)
return self._parse_keyval_lines(lines)
def opened(self, changenum):
return self.run_p4(['opened', '-c', str(changenum)],
marshalled=True)
def print_file(self, depot_path, out_file=None):
cmd = ['print']
if out_file:
cmd += ['-o', out_file]
cmd += ['-q', depot_path]
return self.run_p4(cmd)
def where(self, depot_path):
return self.run_p4(['where', depot_path], marshalled=True)
def run_p4(self, p4_args, marshalled=False, password=None,
ignore_errors=False, input_string=None, *args, **kwargs):
"""Invoke p4.
In the current implementation, the arguments 'marshalled' and
'input_string' cannot be used together, i.e. this command doesn't
allow inputting and outputting at the same time.
"""
cmd = ['p4']
if marshalled:
cmd += ['-G']
if getattr(self.options, 'p4_client', None):
cmd += ['-c', self.options.p4_client]
if getattr(self.options, 'p4_port', None):
cmd += ['-p', self.options.p4_port]
if getattr(self.options, 'p4_passwd', None):
cmd += ['-P', self.options.p4_passwd]
cmd += p4_args
if password is not None:
cmd += ['-P', password]
if marshalled:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
result = []
has_error = False
while 1:
try:
data = marshal.load(p.stdout)
except EOFError:
break
else:
result.append(data)
if data.get('code', None) == 'error':
has_error = True
rc = p.wait()
if not ignore_errors and (rc or has_error):
for record in result:
if 'data' in record:
print(record['data'])
raise SCMError('Failed to execute command: %s\n' % cmd)
return result
elif input_string is not None:
p = subprocess.Popen(cmd, stdin=subprocess.PIPE)
p.communicate(input_string) # Send input, wait, set returncode
if not ignore_errors and p.returncode:
raise SCMError('Failed to execute command: %s\n' % cmd)
return None
else:
result = execute(cmd, ignore_errors=ignore_errors, *args, **kwargs)
return result
def _parse_keyval_lines(self, lines, regex=KEYVAL_RE):
keyvals = {}
for line in lines:
m = regex.match(line)
if m:
key = m.groups()[0]
value = m.groups()[1]
keyvals[key] = value.strip()
return keyvals
class PerforceClient(SCMClient):
"""
A wrapper around the p4 Perforce tool that fetches repository information
and generates compatible diffs.
"""
name = 'Perforce'
can_amend_commit = True
supports_diff_exclude_patterns = True
supports_diff_extra_args = True
supports_patch_revert = True
DATE_RE = re.compile(br'(\w+)\s+(\w+)\s+(\d+)\s+(\d\d:\d\d:\d\d)\s+'
br'(\d\d\d\d)')
ENCODED_COUNTER_URL_RE = re.compile(r'reviewboard.url\.(\S+)')
REVISION_CURRENT_SYNC = '--rbtools-current-sync'
REVISION_PENDING_CLN_PREFIX = '--rbtools-pending-cln:'
REVISION_DEFAULT_CLN = 'default'
ADDED_FILES_RE = re.compile(r'^==== //depot/(\S+)#\d+ ==A== \S+ ====$',
re.M)
DELETED_FILES_RE = re.compile(r'^==== //depot/(\S+)#\d+ ==D== \S+ ====$',
re.M)
def __init__(self, p4_class=P4Wrapper, **kwargs):
super(PerforceClient, self).__init__(**kwargs)
self.p4 = p4_class(self.options)
def get_repository_info(self):
if not self.p4.is_supported():
logging.debug('Unable to execute "p4 help": skipping Perforce')
return None
p4_info = self.p4.info()
# For the repository path, we first prefer p4 brokers, then the
# upstream p4 server. If neither of those are found, just return None.
repository_path = (p4_info.get('Broker address') or
p4_info.get('Server address'))
if repository_path is None:
return None
client_root = p4_info.get('Client root')
if client_root is None:
return None
norm_cwd = os.path.normcase(os.path.realpath(os.getcwd()) +
os.path.sep)
norm_client_root = os.path.normcase(os.path.realpath(client_root) +
os.path.sep)
# Don't accept the repository if the current directory is outside the
# root of the Perforce client.
if not norm_cwd.startswith(norm_client_root):
return None
try:
parts = repository_path.split(':')
hostname = None
if len(parts) == 3 and parts[0] == 'ssl':
hostname = parts[1]
port = parts[2]
elif len(parts) == 2:
hostname, port = parts
if not hostname:
die('Path %s is not a valid Perforce P4PORT' % repository_path)
info = socket.gethostbyaddr(hostname)
# Build the list of repository paths we want to try to look up.
servers = [hostname]
if info[0] != hostname:
servers.append(info[0])
# If aliases exist for hostname, create a list of alias:port
# strings for repository_path.
if info[1]:
servers += info[1]
repository_path = ['%s:%s' % (server, port)
for server in servers]
# If there's only one repository path found, then we don't
# need to do a more expensive lookup of all registered
# paths. We can look up just this path directly.
if len(repository_path) == 1:
repository_path = repository_path[0]
except (socket.gaierror, socket.herror):
pass
server_version = p4_info.get('Server version', None)
if not server_version:
return None
m = re.search(r'[^ ]*/([0-9]+)\.([0-9]+)/[0-9]+ .*$',
server_version, re.M)
if m:
self.p4d_version = int(m.group(1)), int(m.group(2))
else:
# Gracefully bail if we don't get a match
return None
# Now that we know it's Perforce, make sure we have GNU diff
# installed, and error out if we don't.
check_gnu_diff()
return RepositoryInfo(path=repository_path, supports_changesets=True)
def parse_revision_spec(self, revisions=[]):
"""Parses the given revision spec.
The 'revisions' argument is a list of revisions as specified by the
user. Items in the list do not necessarily represent a single revision,
since the user can use SCM-native syntaxes such as "r1..r2" or "r1:r2".
SCMTool-specific overrides of this method are expected to deal with
such syntaxes.
This will return a dictionary with the following keys:
'base': A revision to use as the base of the resulting diff.
'tip': A revision to use as the tip of the resulting diff.
These will be used to generate the diffs to upload to Review Board (or
print). The diff for review will include the changes in (base, tip].
If zero revisions are passed in, this will return the 'default'
changelist.
If a single revision is passed in, this will return the parent of that
revision for 'base' and the passed-in revision for 'tip'. The result
may have special internal revisions or prefixes based on whether the
changeset is submitted, pending, or shelved.
If two revisions are passed in, they need to both be submitted
changesets.
"""
n_revs = len(revisions)
if n_revs == 0:
return {
'base': self.REVISION_CURRENT_SYNC,
'tip': (self.REVISION_PENDING_CLN_PREFIX +
self.REVISION_DEFAULT_CLN)
}
elif n_revs == 1:
# A single specified CLN can be any of submitted, pending, or
# shelved. These are stored with special prefixes and/or names
# because the way that we get the contents of the files changes
# based on which of these is in effect.
status = self._get_changelist_status(revisions[0])
# Both pending and shelved changes are treated as "pending",
# through the same code path. This is because the documentation for
# 'p4 change' tells a filthy lie, saying that shelved changes will
# have their status listed as shelved. In fact, when you shelve
# changes, it sticks the data up on the server, but leaves your
# working copy intact, and the change is still marked as pending.
# Even after reverting the working copy, the change won't have its
# status as "shelved". That said, there's perhaps a way that it
# could (perhaps from other clients?), so it's still handled in
# this conditional.
#
# The diff routine will first look for opened files in the client,
# and if that fails, it will then do the diff against the shelved
# copy.
if status in ('pending', 'shelved'):
return {
'base': self.REVISION_CURRENT_SYNC,
'tip': self.REVISION_PENDING_CLN_PREFIX + revisions[0],
}
elif status == 'submitted':
try:
cln = int(revisions[0])
return {
'base': str(cln - 1),
'tip': str(cln),
}
except ValueError:
raise InvalidRevisionSpecError(
'%s does not appear to be a valid changelist' %
revisions[0])
else:
raise InvalidRevisionSpecError(
'%s does not appear to be a valid changelist' %
revisions[0])
elif n_revs == 2:
result = {}
# The base revision must be a submitted CLN
status = self._get_changelist_status(revisions[0])
if
test_failure=SHARED["netfac_r_ok"].id)
# re-create deleted netfac
r_data = self.assert_create(self.db_org_admin, "netfac", data)
# re-delete
self.assert_delete(self.db_org_admin, "netfac",
test_success=SHARED["netfac_id"])
##########################################################################
def test_org_admin_002_POST_PUT_DELETE_poc(self):
data = self.make_data_poc(net_id=SHARED["net_rw_ok"].id)
r_data = self.assert_create(
self.db_org_admin,
"poc",
data,
test_failures={
"invalid": {
"net_id": ""
},
"perms": {
# set network to one the user doesn't have perms to
"net_id": SHARED["net_r_ok"].id
},
"status": {
"net_id": SHARED["net_rw_pending"].id
}
})
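# The test_failures dict used throughout these tests exercises failure
# scenarios via field overrides: "invalid" supplies bad values, "perms"
# points at objects owned by an org the user cannot write to, and "status"
# references parent objects that are still pending.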
SHARED["poc_id"] = r_data.get("id")
self.assert_update(self.db_org_admin, "poc", SHARED["poc_id"],
{"role": "Sales"}, test_failures={
"invalid": {
"role": "NOPE"
},
"perms": {
"net_id": SHARED["net_r_ok"].id
}
})
self.assert_delete(self.db_org_admin, "poc",
test_success=SHARED["poc_id"],
test_failure=SHARED["poc_r_ok_users"].id)
##########################################################################
def test_org_admin_002_POST_PUT_DELETE_ixlan(self):
data = self.make_data_ixlan(ix_id=SHARED["ix_rw_ok"].id)
r_data = self.assert_create(
self.db_org_admin, "ixlan", data, test_failures={
"invalid": {
"ix_id": ""
},
"perms": {
"ix_id": SHARED["ix_r_ok"].id
},
"status": {
"ix_id": SHARED["ix_rw_pending"].id
}
})
SHARED["ixlan_id"] = r_data["id"]
self.assert_update(self.db_org_admin, "ixlan", SHARED["ixlan_id"],
{"name": self.make_name("Test")}, test_failures={
"invalid": {
"mtu": "NEEDS TO BE INT"
},
"perms": {
"ix_id": SHARED["ix_r_ok"].id
}
})
self.assert_delete(self.db_org_admin, "ixlan",
test_success=SHARED["ixlan_id"],
test_failure=SHARED["ixlan_r_ok"].id)
##########################################################################
def test_org_admin_002_POST_PUT_DELETE_ixpfx(self):
data = self.make_data_ixpfx(ixlan_id=SHARED["ixlan_rw_ok"].id,
prefix="192.168.3.11/25")
r_data = self.assert_create(
self.db_org_admin, "ixpfx", data, test_failures={
"invalid": {
"prefix": "127.0.0.0/8"
},
"perms": {
"prefix": "192.168.127.12/24",
"ixlan_id": SHARED["ixlan_r_ok"].id
},
"status": {
"prefix": "192.168.127.12/24",
"ixlan_id": SHARED["ixlan_rw_pending"].id
}
})
SHARED["ixpfx_id"] = r_data["id"]
#self.assert_create(self.db_org_admin, "ixpfx", data, test_failures={
# "invalid": {
# "prefix": "206.126.236.0/25"
# },
#}, test_success=False)
self.assert_update(self.db_org_admin, "ixpfx", SHARED["ixpfx_id"],
{"prefix": "192.168.3.11/24"}, test_failures={
"invalid": {
"prefix": "NEEDS TO BE VALID PREFIX"
},
"perms": {
"ixlan_id": SHARED["ixlan_r_ok"].id
}
})
self.assert_delete(self.db_org_admin, "ixpfx",
test_success=SHARED["ixpfx_id"],
test_failure=SHARED["ixpfx_r_ok"].id)
# re-create deleted ixpfx
r_data = self.assert_create(self.db_org_admin, "ixpfx", data)
# re-delete
self.assert_delete(self.db_org_admin, "ixpfx",
test_success=SHARED["ixpfx_id"])
# re-creating a deleted ixpfx that we dont have write permissions do
# should fail
pfx = IXLanPrefix.objects.create(ixlan=SHARED["ixlan_r_ok"],
prefix=u"192.168.127.12/24",
protocol="IPv4")
pfx.delete()
data.update(prefix="192.168.127.12/24")
r_data = self.assert_create(self.db_org_admin, "ixpfx", data,
test_failures={"invalid": {
}}, test_success=False)
# make sure protocols are validated
r_data = self.assert_create(self.db_org_admin, "ixpfx", data,
test_failures={
"invalid": {
"prefix": "192.168.3.11/24",
"protocol": "IPv6"
},
}, test_success=False)
##########################################################################
def test_org_admin_002_POST_PUT_DELETE_netixlan(self):
data = self.make_data_netixlan(net_id=SHARED["net_rw_ok"].id,
ixlan_id=SHARED["ixlan_rw_ok"].id)
r_data = self.assert_create(
self.db_org_admin,
"netixlan",
data,
test_failures={
"invalid": {
"ipaddr4": u"a b c"
},
"perms": {
# set network to one the user doesn't have perms to
"ipaddr4": self.get_ip4(),
"ipaddr6": self.get_ip6(),
"net_id": SHARED["net_r_ok"].id
}
})
SHARED["netixlan_id"] = r_data.get("id")
self.assert_update(self.db_org_admin, "netixlan",
SHARED["netixlan_id"], {"speed": 2000},
test_failures={
"invalid": {
"ipaddr4": "NEEDS TO BE VALID IP"
},
"perms": {
"net_id": SHARED["net_r_ok"].id
}
})
self.assert_delete(self.db_org_admin, "netixlan",
test_success=SHARED["netixlan_id"],
test_failure=SHARED["netixlan_r_ok"].id)
##########################################################################
def test_org_admin_002_POST_PUT_DELETE_ixfac(self):
data = {
"fac_id": SHARED["fac_rw2_ok"].id,
"ix_id": SHARED["ix_rw2_ok"].id
}
r_data = self.assert_create(
self.db_org_admin,
"ixfac",
data,
test_failures={
"invalid": {
"ix_id": ""
},
"perms": {
# set network to one the user doesn't have perms to
"ix_id": SHARED["ix_r_ok"].id
},
"status": {
"fac_id": SHARED["fac_rw2_pending"].id,
"ix_id": SHARED["ix_rw2_pending"].id
}
})
SHARED["ixfac_id"] = r_data.get("id")
self.assert_update(self.db_org_admin, "ixfac", SHARED["ixfac_id"],
{"fac_id": SHARED["fac_r2_ok"].id}, test_failures={
"invalid": {
"fac_id": ""
},
"perms": {
"ix_id": SHARED["ix_r_ok"].id
}
})
self.assert_delete(self.db_org_admin, "ixfac",
test_success=SHARED["ixfac_id"],
test_failure=SHARED["ixfac_r_ok"].id)
##########################################################################
def test_org_admin_003_PUT_org(self):
self.assert_update(self.db_org_admin, "org", SHARED["org_rw_ok"].id,
{"name": self.make_name("Test")}, test_failures={
"invalid": {
"name": ""
},
"perms": {
"id": SHARED["org_r_ok"].id
}
})
##########################################################################
def test_zz_org_admin_004_DELETE_org(self):
self.assert_delete(self.db_org_admin, "org",
test_success=SHARED["org_rw_ok"].id,
test_failure=SHARED["org_r_ok"].id)
##########################################################################
# GUEST TESTS
##########################################################################
def test_guest_001_GET_org(self):
self.assert_get_handleref(self.db_guest, "org", SHARED["org_r_ok"].id)
##########################################################################
def test_guest_001_GET_net(self):
data = self.assert_get_handleref(self.db_guest, "net",
SHARED["net_r_ok"].id)
for poc in data.get("poc_set"):
self.assertEqual(poc["visible"], "Public")
##########################################################################
def __test_guest_001_GET_asn(self):
"""
ASN endpoint is currently disabled
"""
return
self.assert_get_handleref(self.db_guest, "asn", SHARED["net_r_ok"].asn)
with self.assertRaises(InvalidRequestException) as inst:
self.assert_get_handleref(self.db_guest, "asn",
"%s[" % SHARED["net_r_ok"].asn)
##########################################################################
def test_guest_001_GET_ix(self):
self.assert_get_handleref(self.db_guest, "ix", SHARED["ix_r_ok"].id)
##########################################################################
def test_guest_001_GET_fac(self):
self.assert_get_handleref(self.db_guest, "fac", SHARED["fac_r_ok"].id)
##########################################################################
def test_guest_001_GET_poc_private(self):
self.assert_get_forbidden(self.db_guest, "poc",
SHARED["poc_r_ok_private"].id)
##########################################################################
def test_guest_001_GET_poc_users(self):
self.assert_get_forbidden(self.db_guest, "poc",
SHARED["poc_r_ok_users"].id)
##########################################################################
def test_guest_001_GET_poc_public(self):
self.assert_get_handleref(self.db_guest, "poc",
SHARED["poc_r_ok_public"].id)
##########################################################################
def test_guest_001_GET_nefac(self):
self.assert_get_handleref(self.db_guest, "netfac",
SHARED["netfac_r_ok"].id)
##########################################################################
def test_guest_001_GET_netixlan(self):
self.assert_get_handleref(self.db_guest, "netixlan",
SHARED["netixlan_r_ok"].id)
##########################################################################
def test_guest_001_GET_ixfac(self):
self.assert_get_handleref(self.db_guest, "ixfac",
SHARED["ixfac_r_ok"].id)
##########################################################################
def test_guest_001_GET_ixlan(self):
self.assert_get_handleref(self.db_guest, "ixlan",
SHARED["ixlan_r_ok"].id)
##########################################################################
def test_guest_001_GET_ixpfx(self):
self.assert_get_handleref(self.db_guest, "ixpfx",
SHARED["ixpfx_r_ok"].id)
##########################################################################
def test_guest_001_GET_list_404(self):
for tag in REFTAG_MAP:
with self.assertRaises(NotFoundException) as inst:
data = self.db_guest.all(tag, limit=1, id=99999999)
if tag == "net":
with self.assertRaises(NotFoundException) as inst:
data = self.db_guest.all(tag, limit=1, asn=99999999999)
for tag in REFTAG_MAP:
if tag == "poc":
data = self.db_guest.all(tag, id=SHARED["poc_r_ok_public"].id)
else:
data = self.db_guest.all(tag, id=SHARED["%s_r_ok" % tag].id)
self.assertEqual(len(data), 1)
self.assert_handleref_integrity(data[0])
##########################################################################
def test_guest_005_list_all(self):
data = self.db_guest.all("org")
self.assertGreater(len(data), 1)
self.assert_handleref_integrity(data[0])
self.assert_data_integrity(data[0], "org")
##########################################################################
def test_guest_005_list_all_tags(self):
for tag in REFTAG_MAP:
if tag == "poc":
continue
data = self.db_guest.all(tag, limit=10)
self.assertLess(len(data), 11)
self.assert_handleref_integrity(data[0])
data = self.db_guest.all("poc", limit=10, visible="Public")
self.assertLess(len(data), 11)
self.assert_handleref_integrity(data[0])
##########################################################################
def test_org_admin_005_list(self):
for tag in REFTAG_MAP:
data = self.db_org_admin.all(tag, limit=10)
self.assertLess(len(data), 11)
self.assert_handleref_integrity(data[0])
for row in data:
self.assertEqual(row["status"], "ok")
##########################################################################
def test_guest_005_fields_filter(self):
data = self.db_guest.all("org", limit=10, fields=",".join(
["name", "status"]))
self.assertGreater(len(data), 0)
for row in data:
self.assertEqual(sorted(row.keys()), sorted([u"name", u"status"]))
data = self.db_guest.get("org", 1, fields=",".join(["name", "status"]))
self.assertGreater(len(data), 0)
self.assertEqual(sorted(data[0].keys()), sorted([u"name", u"status"]))
##########################################################################
def test_guest_005_list_limit(self):
data = self.db_guest.all("org", limit=10)
self.assertEqual(len(data), 10)
self.assert_handleref_integrity(data[0])
self.assert_data_integrity(data[0], "org")
##########################################################################
def test_guest_005_list_pagination(self):
n = 1
for i in range(0, 10):
data = self.db_guest.all("org", skip=i * 10, limit=10)
for row in data:
self.assertEqual(row.get("id"), n)
n += 1
##########################################################################
def test_guest_005_list_since(self):
data = self.db_guest.all("net", since=int(START_TIMESTAMP) - 10,
status="deleted")
self.assertEqual(len(data), 2)
self.assert_handleref_integrity(data[0])
self.assert_data_integrity(data[0], "net")
##########################################################################
def test_guest_005_get_depth_all(self):
"""
Test all end points single object GET with all valid depths
This also asserts data structure integrity for objects expanded
by the depth parameter
"""
for depth in [0, 1, 2, 3, 4]:
for tag, slz in REFTAG_MAP_SLZ.items():
note_tag = "(%s %s)" % (tag, depth)
if tag == "poc":
o = SHARED["%s_r_ok_public" % tag]
else:
o = SHARED["%s_r_ok" % tag]
data = self.db_guest.get(tag, o.id, depth=depth)
self.assertEqual(len(data), 1, msg="Data length %s" % note_tag)
pk_flds, n_flds = self.serializer_related_fields(slz)
obj = data[0]
self.assert_related_depth(obj, slz, depth, depth, note_tag,
typ="single")
##########################################################################
def test_guest_005_list_depth_all(self):
"""
Tests all end points multiple object GET with all valid depths
This also asserts data structure integrity for objects expanded
by the depth parameter
"""
for depth in [0, 1, 2, 3]:
for tag, slz in REFTAG_MAP_SLZ.items():
note_tag = "(%s %s)" % (tag, depth)
if tag == "poc":
o = SHARED["%s_r_ok_public" % tag]
else:
o = SHARED["%s_r_ok" % tag]
data = self.db_guest.all(tag, id=o.id, depth=depth)
self.assertEqual(len(data), 1, msg="Data length %s" % note_tag)
pk_flds, n_flds = self.serializer_related_fields(slz)
obj = data[0]
self.assert_related_depth(obj, slz, depth, depth, note_tag,
typ="listing")
##########################################################################
def test_guest_005_list_depth_not_set(self):
data = self.db_guest.all("org", id=SHARED["org_r_ok"].id)
self.assertEqual(data[0].get("net_set"), None)
##########################################################################
def test_guest_005_list_depth_0(self):
data = self.db_guest.all("org", id=SHARED["org_r_ok"].id, depth=0)
self.assertEqual(data[0].get("net_set"), None)
##########################################################################
def test_guest_005_list_depth_1(self):
data = self.db_guest.all("org", id=SHARED["org_r_ok"].id, depth=1)
self.assertEqual(len(data[0].get("net_set")), 3)
self.assertEqual(data[0].get("net_set")[0], SHARED["net_r_ok"].id)
self.assertEqual(data[0].get("net_set")[1], SHARED["net_r2_ok"].id)
self.assertEqual(data[0].get("net_set")[2], SHARED["net_r3_ok"].id)
#############################################################################
def test_guest_005_list_depth_2(self):
data = self.db_guest.all("org", id=SHARED["org_r_ok"].id, depth=2)
self.assertEqual(len(data[0].get("net_set")), 3)
obj = data[0].get("net_set")[0]
self.assertEqual(obj.get("id"), SHARED["net_r_ok"].id)
self.assert_data_integrity(obj, "net", ignore=["org_id"])
#############################################################################
def test_guest_005_list_depth_3(self):
data = self.db_guest.all("org", id=SHARED["org_r_ok"].id, depth=3)
self.assertEqual(len(data[0].get("net_set")), 3)
obj = data[0].get("net_set")[0]
self.assertEqual(obj.get("id"), SHARED["net_r_ok"].id)
self.assert_data_integrity(obj, "net", ignore=["org_id"])
obj = obj.get("netfac_set")
self.assertEqual(len(obj), 1)
self.assertEqual(obj[0], SHARED["netfac_r_ok"].id)
##########################################################################
def test_guest_005_list_filter_dates_numeric(self):
for flt, ass in NUMERIC_TESTS.items():
for fld in ["created", "updated"]:
if flt in ["gt", "gte"]:
DATE = DATES["yesterday"]
elif flt in ["lt"]:
DATE = DATES["tomorrow"]
else:
DATE = DATES["today"]
if flt:
kwargs = {"%s__%s" % (fld, flt): DATE[1]}
else:
kwargs = {fld: DATE[1]}
data = self.db_guest.all("fac", limit=10, **kwargs)
self.assertGreater(
len(data), 0, msg="%s_%s - data length assertion" % (fld,
flt))
for row in data:
self.assert_data_integrity(row, "fac")
try:
dt = datetime.datetime.strptime(
row[fld], "%Y-%m-%dT%H:%M:%SZ").date()
except ValueError:
dt = datetime.datetime.strptime(
row[fld], "%Y-%m-%dT%H:%M:%S.%fZ").date()
fnc = getattr(self, "assert%s" % ass)
fnc(dt, DATE[0],
msg="%s__%s: %s, %s" % (fld, flt, row[fld], DATE[1]))
##########################################################################
def test_guest_005_list_filter_numeric(self):
data = self.db_guest.all("net", asn=SHARED["net_r_ok"].asn)
self.assertEqual(len(data), 1)
self.assert_data_integrity(data[0], "net")
self.assertEqual(data[0]["asn"], SHARED["net_r_ok"].asn)
##########################################################################
def test_guest_005_list_filter_numeric_lte(self):
data = self.db_guest.all("fac", id__lte=SHARED["fac_rw_ok"].id)
self.assertGreater(len(data), 0)
self.assert_data_integrity(data[0], "fac")
for fac in data:
self.assertLessEqual(long(fac["id"]), SHARED["fac_rw_ok"].id)
##########################################################################
def test_guest_005_list_filter_numeric_lt(self):
data = self.db_guest.all("fac", id__lt=SHARED["fac_rw_ok"].id)
self.assertGreater(len(data), 0)
self.assert_data_integrity(data[0], "fac")
for fac in data:
self.assertLess(long(fac["id"]), SHARED["fac_rw_ok"].id)
##########################################################################
def test_guest_005_list_filter_numeric_gte(self):
data = self.db_guest.all("fac", id__gte=SHARED["fac_r_ok"].id)
self.assertGreater(len(data), 0)
self.assert_data_integrity(data[0], "fac")
for fac in data:
self.assertGreaterEqual(long(fac["id"]), SHARED["fac_r_ok"].id)
##########################################################################
def test_guest_005_list_filter_numeric_gt(self):
data = self.db_guest.all("fac", id__gt=SHARED["fac_r_ok"].id)
self.assertGreater(len(data), 0)
self.assert_data_integrity(data[0], "fac")
for fac in data:
self.assertGreater(long(fac["id"]), SHARED["fac_r_ok"].id)
##########################################################################
def test_guest_005_list_filter_numeric_in(self):
ids = [SHARED["fac_r_ok"].id, SHARED["fac_rw_ok"].id]
data = self.db_guest.all("fac", id__in="%s,%s" % tuple(ids))
self.assertEqual(len(data), len(ids))
self.assert_data_integrity(data[0], "fac")
for fac in data:
self.assertIn(long(fac["id"]), ids)
##########################################################################
def test_guest_005_list_filter_string(self):
data = self.db_guest.all("ix", name=SHARED["ix_r_ok"].name)
self.assertEqual(len(data), 1)
self.assert_data_integrity(data[0], "ix")
self.assertEqual(data[0]["name"], SHARED["ix_r_ok"].name)
##########################################################################
def test_guest_005_list_filter_string_contains(self):
token = SHARED["ix_r_ok"].name[3:5]
data = self.db_guest.all("ix", name__contains=token.lower())
self.assertGreater(len(data), 0)
self.assert_data_integrity(data[0], "ix")
for ix in data:
self.assertIn(token, ix["name"])
##########################################################################
def test_guest_005_list_filter_string_startswith(self):
token = SHARED["ix_r_ok"].name[0:5]
data = self.db_guest.all("ix", name__startswith=token.lower())
self.assertGreater(len(data), 0)
self.assert_data_integrity(data[0], "ix")
for ix in data:
self.assertEqual(ix["name"][:5], token)
##########################################################################
def test_guest_005_list_filter_string_in(self):
cities = ["API Test:IX:RW:ok", "API Test:IX:R:ok"]
data = self.db_guest.all("ix", name__in="%s,%s" % tuple(cities))
self.assertGreater(len(data), 0)
self.assert_data_integrity(data[0], "ix")
for ix in data:
self.assertIn(ix["name"], cities)
##########################################################################
# performance/driver/classes/observer/marathonevents.py
import json
import requests
import threading
import time
from .utils import CurlSSE, CurlSSEDisconnectedError
from .utils import RawSSE, RawSSEDisconnectedError
from datetime import datetime
from performance.driver.core.classes import Observer
from performance.driver.core.template import TemplateString, TemplateDict
from performance.driver.core.events import LogLineEvent, TeardownEvent, StartEvent
from performance.driver.core.utils.http import is_accessible
from performance.driver.core.reflection import subscribesToHint, publishesHint
from performance.driver.classes.channel.marathon import MarathonDeploymentRequestedEvent
from performance.driver.classes.observer.events.marathon import *
from queue import Queue
################################################################################
class MarathonEventsObserver(Observer):
"""
  The *Marathon Events Observer* extracts high-level events by subscribing
  to the Server-Sent Events (SSE) endpoint on marathon.
::
observers:
- class: observer.MarathonEventsObserver
# The URL to the marathon SSE endpoint
url: "{{marathon_url}}/v2/events"
# [Optional] Use an external curl process for receiving the events
# instead of the built-in raw SSE client
curl: no
# [Optional] Use the timestamp from the event. If set to no, the time
      # the event arrived at the perf-driver is used
useEventTimestamp: no
# [Optional] Additional headers to send
headers:
        Accept: text/plain
Since this observer requires an active HTTP session to marathon, it also
publishes the ``MarathonStartedEvent`` when an HTTP connection was
successfully established.
  The following events are published on the event bus:
* ``MarathonDeploymentStepSuccessEvent``
* ``MarathonDeploymentStepFailureEvent``
* ``MarathonDeploymentInfoEvent``
* ``MarathonDeploymentSuccessEvent``
* ``MarathonDeploymentFailedEvent``
.. note::
    In order to properly populate the event's trace ID, this observer is also
listening for `http` channel requests in order to extract the affected
application name(s).
.. note::
This observer will automatically inject an ``Authorization`` header if
a ``dcos_auth_token`` definition exists, so you don't have to specify
it through the ``headers`` configuration.
Note that a ``dcos_auth_token`` can be dynamically injected via an
authentication task.
"""
@subscribesToHint(MarathonDeploymentRequestedEvent, TeardownEvent,
StartEvent)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
config = self.getRenderedConfig()
self.useCurl = config.get('curl', False)
self.eventReceiverThread = None
self.eventEmitterThread = None
self.eventQueue = Queue()
self.instanceTraceIDs = {}
self.instanceTraceIDsLock = threading.Lock()
self.running = True
self.activeSse = None
# # Subscribe into receiving LogLine events, and place us above the
# # average priority in order to provide translated, high-level events
# # to the rest of the components that reside on order=5 (default)
# self.eventbus.subscribe(self.handleLogLineEvent, events=(LogLineEvent,), order=2)
# When an HTTP request is initiated, get the application name and use this
# as the means of linking the traceids to the source
self.eventbus.subscribe(
self.handleDeploymentRequest,
events=(MarathonDeploymentRequestedEvent, ),
order=2)
# Also subscribe to the teardown event in order to cleanly stop the event
# handling thread. The order=2 here is ensuring that the `running` flag is
# set to `False` before marathon thread is killed.
self.eventbus.subscribe(
self.handleTeardownEvent, events=(TeardownEvent, ), order=2)
# Also subscribe to the setup event in order to start the polling loop.
self.eventbus.subscribe(self.handleStartEvent, events=(StartEvent, ))
def handleDeploymentRequest(self, event):
"""
Look for an HTTP request that could trigger a deployment, and get the ID
in order to resolve it to a deployment at a later time
"""
with self.instanceTraceIDsLock:
self.instanceTraceIDs[event.instance] = event.traceids
# @publishesHint(MarathonStartedEvent)
# def handleLogLineEvent(self, event):
# """
# Provide translations for some well-known marathon log lines
# """
# if 'All services up and running.' in event.line:
# self.logger.info('Marathon web server started')
# self.eventbus.publish(MarathonStartedEvent())
def handleTeardownEvent(self, event):
"""
    The teardown event stops the event handling threads
"""
self.logger.debug('Tearing down marathon event monitor')
self.running = False
# Interrupt any active request
if self.activeSse:
self.activeSse.close()
# Join queue
self.eventQueue.put((None, None))
self.eventQueue.join()
# Join threads
if self.eventReceiverThread:
self.eventReceiverThread.join()
self.eventReceiverThread = None
if self.eventEmitterThread:
self.eventEmitterThread.join()
self.eventEmitterThread = None
def handleStartEvent(self, event):
"""
    The start event starts the event receiver and emitter threads
"""
# Start the event reader thread
self.eventReceiverThread = threading.Thread(
target=self.eventReceiverHandler,
name="marathonevents-drain")
self.eventReceiverThread.start()
    # Start the event emitter thread
self.eventEmitterThread = threading.Thread(
target=self.eventEmitterThreadHandler,
name="marathonevents-emitter")
self.eventEmitterThread.start()
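  # Design note (derived from the two handlers above): the receiver thread only
  # drains the SSE stream into eventQueue as quickly as possible, while the
  # emitter thread translates the queued payloads into bus events, so a slow
  # consumer never back-pressures marathon itself.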
def allTraceIDs(self):
"""
Return the trace IDs of all affected instances
"""
traceids = set()
with self.instanceTraceIDsLock:
for key, ids in self.instanceTraceIDs.items():
traceids.update(ids)
return traceids
def getTraceIDs(self, ids):
"""
Collect the unique trace IDs for the given app ids
"""
traceids = set()
with self.instanceTraceIDsLock:
for id in ids:
if id in self.instanceTraceIDs:
traceids.update(self.instanceTraceIDs[id])
return traceids
def getStepsAffectedIDs(self, steps):
"""
Collect the IDs affected from this deployment
"""
ids = set()
# Collect the apps from the deployment steps
for step in steps:
for action in step['actions']:
if 'app' in action:
ids.update([action['app']])
elif 'pod' in action:
ids.update([action['pod']])
return list(ids)
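  # Illustrative sketch of the structure this method expects (key names are
  # inferred from the lookups above; the real Marathon payload may carry more):
  #   steps = [{"actions": [{"app": "/my-app"}]},
  #            {"actions": [{"pod": "/my-pod"}]}]
  #   getStepsAffectedIDs(steps)  ->  ["/my-app", "/my-pod"]  (order not guaranteed)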
def removeIDs(self, ids):
"""
Remove IDs from the list
"""
with self.instanceTraceIDsLock:
for id in ids:
if id in self.instanceTraceIDs:
del self.instanceTraceIDs[id]
@publishesHint(MarathonStartedEvent, MarathonGroupChangeSuccessEvent,
MarathonGroupChangeFailedEvent,
MarathonDeploymentSuccessEvent, MarathonDeploymentFailedEvent,
MarathonDeploymentStatusEvent,
MarathonDeploymentStepSuccessEvent,
MarathonDeploymentStepFailureEvent, MarathonSSEEvent)
def eventEmitterThreadHandler(self):
"""
    This thread drains the receiver queue and forwards the events
to the internal event bus
"""
    config = self.getRenderedConfig()
useEventTimestamp = config.get("useEventTimestamp", True)
while self.running:
(eventName, eventData) = self.eventQueue.get()
# If we have drained the queue and we are instructed to quit, exit now
if eventName is None:
self.logger.debug('Received interrupt event')
self.eventQueue.task_done()
break
# If we were interrupted, drain queue
if not self.running:
self.logger.debug('Ignoring event because we are shutting down')
self.eventQueue.task_done()
continue
# Dispatch raw event
self.logger.debug('Received event {}: {}'.format(eventName, eventData))
eventInst = MarathonSSEEvent(eventName, eventData)
# If we should use the timestamp from the event, replace ts
if useEventTimestamp and 'timestamp' in eventData:
utc_time = datetime.strptime(eventData["timestamp"], "%Y-%m-%dT%H:%M:%S.%fZ")
eventTs = (utc_time - datetime(1970, 1, 1)).total_seconds()
self.logger.debug('Using event ts={}, instead of ts={}'.format(eventTs, eventInst.ts))
eventInst.ts = eventTs
# Publish event & Release pointer
self.eventbus.publish(eventInst)
eventInst = None
#
# group_change_success
#
if eventName == 'group_change_success':
deploymentId = None
affectedIds = [eventData['groupId']]
self.eventbus.publish(
MarathonGroupChangeSuccessEvent(
deploymentId,
affectedIds,
traceid=self.getTraceIDs(affectedIds)))
#
# group_change_failed
#
elif eventName == 'group_change_failed':
deploymentId = None
affectedIds = [eventData['groupId']]
self.eventbus.publish(
MarathonGroupChangeFailedEvent(
deploymentId,
affectedIds,
eventData['reason'],
traceid=self.getTraceIDs(affectedIds)))
#
# deployment_success
#
elif eventName == 'deployment_success':
plan = eventData.get('plan', {})
deploymentId = plan.get('id', None)
affectedIds = self.getStepsAffectedIDs(plan.get('steps', []))
self.eventbus.publish(
MarathonDeploymentSuccessEvent(
deploymentId, eventData, traceid=self.getTraceIDs(
affectedIds)))
self.removeIDs(affectedIds)
#
# deployment_failed
#
elif eventName == 'deployment_failed':
plan = eventData.get('plan', {})
deploymentId = plan.get('id', None)
affectedIds = self.getStepsAffectedIDs(plan.get('steps', []))
self.eventbus.publish(
MarathonDeploymentFailedEvent(
deploymentId,
affectedIds,
traceid=self.getTraceIDs(affectedIds)))
self.removeIDs(affectedIds)
#
# deployment_info
#
elif eventName == 'deployment_info':
plan = eventData.get('plan', {})
deploymentId = plan.get('id', None)
affectedIds = self.getStepsAffectedIDs([eventData.get('currentStep')])
self.eventbus.publish(
MarathonDeploymentStatusEvent(
deploymentId,
affectedIds,
traceid=self.getTraceIDs(affectedIds)))
#
# deployment_step_success
#
elif eventName == 'deployment_step_success':
plan = eventData.get('plan', {})
deploymentId = plan.get('id', None)
affectedIds = self.getStepsAffectedIDs([eventData.get('currentStep')])
self.eventbus.publish(
MarathonDeploymentStepSuccessEvent(
deploymentId,
affectedIds,
traceid=self.getTraceIDs(affectedIds)))
#
# deployment_step_failure
#
elif eventName == 'deployment_step_failure':
plan = eventData.get('plan', {})
deploymentId = plan.get('id', None)
affectedIds = self.getStepsAffectedIDs([eventData.get('currentStep')])
self.eventbus.publish(
MarathonDeploymentStepFailureEvent(
deploymentId,
affectedIds,
traceid=self.getTraceIDs(affectedIds)))
      # Log unknown events at debug level
else:
self.logger.debug(
'Unhandled marathon event \'{}\' received'.format(eventName))
# Inform queue that the task is done
self.eventQueue.task_done()
    self.logger.debug('Terminated event emitter thread')
def eventReceiverHandler(self):
"""
This thread is responsible for receiving events from the SSE bus as
quickly as possible, in order to avoid slowing down marathon.
"""
# Render URL
definitions = self.getDefinitions()
    config = self.getRenderedConfig()
url = config.get('url')
headers = config.get('headers', {})
# Wait til endpoint responds
while self.running:
# If we are missing an `Authorization` header but we have a
# `dcos_auth_token` definition, allocate an `Authorization` header now
#
# Note: We are putting this within the loop because the `dcos_auth_token`
# might appear at a later time if an authentication task is already
# in progress.
#
if not 'Authorization' in headers \
and 'dcos_auth_token' in definitions:
headers['Authorization'] = 'token={}'.format(
definitions['dcos_auth_token'])
#
# Poll the endpoint until it responds
#
self.logger.info('Checking if {} is alive'.format(url))
if is_accessible(url, headers=headers, status_code=[200, 405, 400]):
break
# Wait for 5 seconds
counter = 5
while counter > 0:
time.sleep(0.1)
counter -= 0.1
# Make this loop breakable
if not self.running:
return
# We are ready
self.logger.info('Marathon web server is responding')
self.eventbus.publish(MarathonStartedEvent())
# Append our required headers
headers['Accept'] = 'text/event-stream'
# Bind on event stream
is_connected = False
while self.running:
#
      # Process server-sent events on a per-line basis. The SSE protocol has the
# following response syntax:
#
# event: event-name
# data: {event json payload}
# <empty line>
# ...
#
try:
# If we were instructed to use the external CURL create a CurlSSE
# instance, otherwise use the default RawSSE
if self.useCurl:
self.activeSse = CurlSSE(url, headers=headers)
else:
self.activeSse = RawSSE(url, headers=headers)
        # Handle events from the
:param password:
:param port_override:
:param port:
"""
__slots__ = [
'id',
'name',
'healthy',
'tags',
'secret_store_id',
'egress_filter',
'hostname',
'username',
'password',
'port_override',
'port',
]
def __init__(
self,
id=None,
name=None,
healthy=None,
tags=None,
secret_store_id=None,
egress_filter=None,
hostname=None,
username=None,
password=None,
port_override=None,
port=None,
):
self.id = id
self.name = name
self.healthy = healthy
self.tags = tags
self.secret_store_id = secret_store_id
self.egress_filter = egress_filter
self.hostname = hostname
self.username = username
self.password = password
self.port_override = port_override
self.port = port
def __repr__(self):
return '<sdm.Teradata ' + \
'id: ' + repr(self.id) + ' ' +\
'name: ' + repr(self.name) + ' ' +\
'healthy: ' + repr(self.healthy) + ' ' +\
'tags: ' + repr(self.tags) + ' ' +\
'secret_store_id: ' + repr(self.secret_store_id) + ' ' +\
'egress_filter: ' + repr(self.egress_filter) + ' ' +\
'hostname: ' + repr(self.hostname) + ' ' +\
'username: ' + repr(self.username) + ' ' +\
'password: ' + repr(self.password) + ' ' +\
'port_override: ' + repr(self.port_override) + ' ' +\
'port: ' + repr(self.port) + ' ' +\
'>'
def to_dict(self):
return {
'id': self.id,
'name': self.name,
'healthy': self.healthy,
'tags': self.tags,
'secret_store_id': self.secret_store_id,
'egress_filter': self.egress_filter,
'hostname': self.hostname,
'username': self.username,
            'password': self.password,
'port_override': self.port_override,
'port': self.port,
}
@classmethod
def from_dict(cls, d):
return cls(
id=d.get('id'),
name=d.get('name'),
healthy=d.get('healthy'),
tags=d.get('tags'),
secret_store_id=d.get('secret_store_id'),
egress_filter=d.get('egress_filter'),
hostname=d.get('hostname'),
username=d.get('username'),
password=d.get('password'),
port_override=d.get('port_override'),
port=d.get('port'),
)
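# Minimal usage sketch (not part of the generated SDK; the helper below is
# illustrative only, uses hypothetical values and is never executed on import).
# It shows how the plain-dict converters defined above are typically combined.
def _example_teradata_roundtrip():
    resource = Teradata(
        name="example-teradata",
        hostname="teradata.internal.example.com",  # hypothetical hostname
        username="svc_example",                    # hypothetical credentials
        password="not-a-real-password",
        port=1025,
        port_override=13306,
        tags={"env": "dev"},
    )
    as_dict = resource.to_dict()
    restored = Teradata.from_dict(as_dict)
    # Round-tripping through to_dict()/from_dict() preserves every slot.
    assert restored.to_dict() == as_dict
    return restored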
class NodeCreateResponse:
"""NodeCreateResponse reports how the Nodes were created in the system.
:param meta: Reserved for future use.
:param node: The created Node.
:param token: The auth token generated for the Node. The Node will use this token to
authenticate with the strongDM API.
:param rate_limit: Rate limit information.
"""
__slots__ = [
'meta',
'node',
'token',
'rate_limit',
]
def __init__(
self,
meta=None,
node=None,
token=None,
rate_limit=None,
):
self.meta = meta
self.node = node
self.token = token
self.rate_limit = rate_limit
def __repr__(self):
return '<sdm.NodeCreateResponse ' + \
'meta: ' + repr(self.meta) + ' ' +\
'node: ' + repr(self.node) + ' ' +\
'token: ' + repr(self.token) + ' ' +\
'rate_limit: ' + repr(self.rate_limit) + ' ' +\
'>'
def to_dict(self):
return {
'meta': self.meta,
'node': self.node,
'token': self.token,
'rate_limit': self.rate_limit,
}
@classmethod
def from_dict(cls, d):
return cls(
meta=d.get('meta'),
node=d.get('node'),
token=d.get('token'),
rate_limit=d.get('rate_limit'),
)
class NodeGetResponse:
"""NodeGetResponse returns a requested Node.
:param meta: Reserved for future use.
:param node: The requested Node.
:param rate_limit: Rate limit information.
"""
__slots__ = [
'meta',
'node',
'rate_limit',
]
def __init__(
self,
meta=None,
node=None,
rate_limit=None,
):
self.meta = meta
self.node = node
self.rate_limit = rate_limit
def __repr__(self):
return '<sdm.NodeGetResponse ' + \
'meta: ' + repr(self.meta) + ' ' +\
'node: ' + repr(self.node) + ' ' +\
'rate_limit: ' + repr(self.rate_limit) + ' ' +\
'>'
def to_dict(self):
return {
'meta': self.meta,
'node': self.node,
'rate_limit': self.rate_limit,
}
@classmethod
def from_dict(cls, d):
return cls(
meta=d.get('meta'),
node=d.get('node'),
rate_limit=d.get('rate_limit'),
)
class NodeUpdateResponse:
"""NodeUpdateResponse returns the fields of a Node after it has been updated by
a NodeUpdateRequest.
:param meta: Reserved for future use.
:param node: The updated Node.
:param rate_limit: Rate limit information.
"""
__slots__ = [
'meta',
'node',
'rate_limit',
]
def __init__(
self,
meta=None,
node=None,
rate_limit=None,
):
self.meta = meta
self.node = node
self.rate_limit = rate_limit
def __repr__(self):
return '<sdm.NodeUpdateResponse ' + \
'meta: ' + repr(self.meta) + ' ' +\
'node: ' + repr(self.node) + ' ' +\
'rate_limit: ' + repr(self.rate_limit) + ' ' +\
'>'
def to_dict(self):
return {
'meta': self.meta,
'node': self.node,
'rate_limit': self.rate_limit,
}
@classmethod
def from_dict(cls, d):
return cls(
meta=d.get('meta'),
node=d.get('node'),
rate_limit=d.get('rate_limit'),
)
class NodeDeleteResponse:
"""NodeDeleteResponse returns information about a Node that was deleted.
:param meta: Reserved for future use.
:param rate_limit: Rate limit information.
"""
__slots__ = [
'meta',
'rate_limit',
]
def __init__(
self,
meta=None,
rate_limit=None,
):
self.meta = meta
self.rate_limit = rate_limit
def __repr__(self):
return '<sdm.NodeDeleteResponse ' + \
'meta: ' + repr(self.meta) + ' ' +\
'rate_limit: ' + repr(self.rate_limit) + ' ' +\
'>'
def to_dict(self):
return {
'meta': self.meta,
'rate_limit': self.rate_limit,
}
@classmethod
def from_dict(cls, d):
return cls(
meta=d.get('meta'),
rate_limit=d.get('rate_limit'),
)
class Relay:
"""Relay represents a StrongDM CLI installation running in relay mode.
:param id: Unique identifier of the Relay.
:param name: Unique human-readable name of the Relay. Node names must include only letters, numbers, and hyphens (no spaces, underscores, or other special characters). Generated if not provided on create.
:param state: The current state of the relay. One of: "new", "verifying_restart",
"awaiting_restart", "restarting", "started", "stopped", "dead",
"unknown".
:param tags: Tags is a map of key, value pairs.
:param gateway_filter: GatewayFilter can be used to restrict the peering between relays and
gateways.
"""
__slots__ = [
'id',
'name',
'state',
'tags',
'gateway_filter',
]
def __init__(
self,
id=None,
name=None,
state=None,
tags=None,
gateway_filter=None,
):
self.id = id
self.name = name
self.state = state
self.tags = tags
self.gateway_filter = gateway_filter
def __repr__(self):
return '<sdm.Relay ' + \
'id: ' + repr(self.id) + ' ' +\
'name: ' + repr(self.name) + ' ' +\
'state: ' + repr(self.state) + ' ' +\
'tags: ' + repr(self.tags) + ' ' +\
'gateway_filter: ' + repr(self.gateway_filter) + ' ' +\
'>'
def to_dict(self):
return {
'id': self.id,
'name': self.name,
'state': self.state,
'tags': self.tags,
'gateway_filter': self.gateway_filter,
}
@classmethod
def from_dict(cls, d):
return cls(
id=d.get('id'),
name=d.get('name'),
state=d.get('state'),
tags=d.get('tags'),
gateway_filter=d.get('gateway_filter'),
)
class Gateway:
"""Gateway represents a StrongDM CLI installation running in gateway mode.
:param id: Unique identifier of the Gateway.
:param name: Unique human-readable name of the Gateway. Node names must include only letters, numbers, and hyphens (no spaces, underscores, or other special characters). Generated if not provided on create.
:param state: The current state of the gateway. One of: "new", "verifying_restart",
"restarting", "started", "stopped", "dead", "unknown"
:param listen_address: The public hostname/port tuple at which the gateway will be accessible to clients.
:param bind_address: The hostname/port tuple which the gateway daemon will bind to.
If not provided on create, set to "0.0.0.0:<listen_address_port>".
:param tags: Tags is a map of key, value pairs.
:param gateway_filter: GatewayFilter can be used to restrict the peering between relays and
gateways.
"""
__slots__ = [
'id',
'name',
'state',
'listen_address',
'bind_address',
'tags',
'gateway_filter',
]
def __init__(
self,
id=None,
name=None,
state=None,
listen_address=None,
bind_address=None,
tags=None,
gateway_filter=None,
):
self.id = id
self.name = name
self.state = state
self.listen_address = listen_address
self.bind_address = bind_address
self.tags = tags
self.gateway_filter = gateway_filter
def __repr__(self):
return '<sdm.Gateway ' + \
'id: ' + repr(self.id) + ' ' +\
'name: ' + repr(self.name) + ' ' +\
'state: ' + repr(self.state) + ' ' +\
'listen_address: ' + repr(self.listen_address) + ' ' +\
'bind_address: ' + repr(self.bind_address) + ' ' +\
'tags: ' + repr(self.tags) + ' ' +\
'gateway_filter: ' + repr(self.gateway_filter) + ' ' +\
'>'
def to_dict(self):
return {
'id': self.id,
'name': self.name,
'state': self.state,
'listen_address': self.listen_address,
'bind_address': self.bind_address,
'tags': self.tags,
'gateway_filter': self.gateway_filter,
}
@classmethod
def from_dict(cls, d):
return cls(
id=d.get('id'),
name=d.get('name'),
state=d.get('state'),
listen_address=d.get('listen_address'),
bind_address=d.get('bind_address'),
tags=d.get('tags'),
gateway_filter=d.get('gateway_filter'),
)
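# Minimal usage sketch (illustrative only; the field values are hypothetical and
# the helper is never executed on import). It shows how an API payload can be
# turned into the model defined above and back into a plain dict.
def _example_gateway_from_dict():
    payload = {
        "id": "g-123",                                # hypothetical identifier
        "name": "edge-gateway-1",
        "state": "started",
        "listen_address": "gateway.example.com:5555",
        "bind_address": "0.0.0.0:5555",
        "tags": {"region": "eu-west-1"},
        "gateway_filter": "",
    }
    gateway = Gateway.from_dict(payload)
    return gateway.to_dict()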
class ResourceCreateResponse:
"""ResourceCreateResponse reports how the Resources were created in the system.
:param meta: Reserved for future use.
:param resource: The created Resource.
:param rate_limit: Rate limit information.
"""
__slots__ = [
'meta',
'resource',
'rate_limit',
]
def __init__(
self,
meta=None,
resource=None,
rate_limit=None,
):
self.meta = meta
self.resource = resource
self.rate_limit = rate_limit
def __repr__(self):
return '<sdm.ResourceCreateResponse ' + \
'meta: ' + repr(self.meta) + ' ' +\
'resource: ' + repr(self.resource) + ' ' +\
'rate_limit: ' + repr(self.rate_limit) + ' ' +\
'>'
def to_dict(self):
return {
'meta': self.meta,
'resource': self.resource,
'rate_limit': self.rate_limit,
}
@classmethod
def from_dict(cls, d):
return cls(
meta=d.get('meta'),
resource=d.get('resource'),
rate_limit=d.get('rate_limit'),
)
class ResourceGetResponse:
"""ResourceGetResponse returns a requested Resource.
:param meta: Reserved for future use.
:param resource: The requested Resource.
:param rate_limit: Rate limit information.
"""
__slots__ = [
'meta',
'resource',
'rate_limit',
]
def __init__(
self,
meta=None,
resource=None,
rate_limit=None,
):
self.meta = meta
self.resource = resource
self.rate_limit = rate_limit
def __repr__(self):
return '<sdm.ResourceGetResponse ' + \
'meta: ' + repr(self.meta) + ' ' +\
'resource: ' + repr(self.resource) + | |
"""finds bad channels."""
import mne
import numpy as np
from mne.channels.interpolation import _make_interpolation_matrix
from psutil import virtual_memory
from scipy import signal
from scipy.stats import iqr
from statsmodels import robust
from pyprep.removeTrend import removeTrend
from pyprep.utilities import filter_design
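# Minimal usage sketch for the class defined below (illustrative only and never
# executed on import; `raw` is assumed to be an already loaded mne.io.Raw object).
def _example_find_bads(raw):
    nd = NoisyChannels(raw)
    nd.find_all_bads(ransac=False)  # skip RANSAC for a quick first pass
    return nd.get_bads(verbose=True)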
class NoisyChannels:
"""Implements the functionality of the `findNoisyChannels` function.
    It is part of the PREP (preprocessing pipeline) for EEG data recorded using the 10-20 montage style, as described in [1].
References
----------
[1] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>.
(2015). The PREP pipeline: standardized preprocessing for large-scale
EEG analysis. Frontiers in Neuroinformatics, 9, 16.
"""
def __init__(self, raw):
"""Initialize the class."""
# Make sure that we got an MNE object
assert isinstance(raw, mne.io.BaseRaw)
self.raw_mne = raw.copy()
self.sample_rate = raw.info["sfreq"]
self.EEGData = self.raw_mne.get_data(picks="eeg")
self.EEGData = removeTrend(self.EEGData, sample_rate=self.sample_rate)
self.EEGData_beforeFilt = self.EEGData
self.ch_names_original = np.asarray(raw.info["ch_names"])
self.n_chans_original = len(self.ch_names_original)
self.n_chans_new = self.n_chans_original
self.signal_len = len(self.raw_mne.times)
self.original_dimensions = np.shape(self.EEGData)
self.new_dimensions = self.original_dimensions
self.original_channels = np.arange(self.original_dimensions[0])
self.new_channels = self.original_channels
self.ch_names_new = self.ch_names_original
self.channels_interpolate = self.original_channels
# The identified bad channels
self.bad_by_nan = []
self.bad_by_flat = []
self.bad_by_deviation = []
self.bad_by_hf_noise = []
self.bad_by_correlation = []
self.bad_by_SNR = []
self.bad_by_dropout = []
self.bad_by_ransac = []
def get_bads(self, verbose=False):
"""Get a list of all bad channels.
This function makes a list of all the bad channels and prints them if verbose is True.
Parameters
----------
verbose : boolean
If verbose, print a summary of bad channels.
"""
bads = (
self.bad_by_nan
+ self.bad_by_flat
+ self.bad_by_deviation
+ self.bad_by_hf_noise
+ self.bad_by_SNR
+ self.bad_by_correlation
+ self.bad_by_dropout
+ self.bad_by_ransac
)
bads = list(set(bads))
if verbose:
print("Found {} uniquely bad channels.".format(len(bads)))
print("\n{} by n/a: {}".format(len(self.bad_by_nan), self.bad_by_nan))
print("\n{} by flat: {}".format(len(self.bad_by_flat), self.bad_by_flat))
print(
"\n{} by deviation: {}".format(
len(self.bad_by_deviation), self.bad_by_deviation
)
)
print(
"\n{} by hf noise: {}".format(
len(self.bad_by_hf_noise), self.bad_by_hf_noise
)
)
print(
"\n{} by correl: {}".format(
len(self.bad_by_correlation), self.bad_by_correlation
)
)
print("\n{} by SNR {}".format(len(self.bad_by_SNR), self.bad_by_SNR))
print(
"\n{} by dropout: {}".format(
len(self.bad_by_dropout), self.bad_by_dropout
)
)
print(
"\n{} by ransac: {}".format(len(self.bad_by_ransac), self.bad_by_ransac)
)
return bads
def find_all_bads(self, ransac=True):
"""Call all the functions to detect bad channels.
This function calls all the bad-channel detecting functions.
Parameters
----------
ransac: boolean
To detect channels by ransac or not.
"""
self.find_bad_by_nan_flat()
self.find_bad_by_deviation()
self.find_bad_by_SNR()
if ransac:
self.find_bad_by_ransac()
return None
def find_bad_by_nan_flat(self):
"""Detect channels that have zero or NaN values."""
nan_channel_mask = [False] * self.original_dimensions[0]
no_signal_channel_mask = [False] * self.original_dimensions[0]
for i in range(0, self.original_dimensions[0]):
nan_channel_mask[i] = np.sum(np.isnan(self.EEGData[i, :])) > 0
for i in range(0, self.original_dimensions[0]):
no_signal_channel_mask[i] = robust.mad(self.EEGData[i, :], c=1) < 10 ** (
-10
) or np.std(self.EEGData[i, :]) < 10 ** (-10)
nan_channels = self.channels_interpolate[nan_channel_mask]
flat_channels = self.channels_interpolate[no_signal_channel_mask]
nans_no_data_channels = np.union1d(nan_channels, flat_channels)
self.channels_interpolate = np.setdiff1d(
self.channels_interpolate, nans_no_data_channels
)
for i in range(0, len(nan_channels)):
self.bad_by_nan.append(self.ch_names_original[nan_channels[i]])
for i in range(0, len(flat_channels)):
self.bad_by_flat.append(self.ch_names_original[flat_channels[i]])
self.raw_mne.drop_channels(list(set(self.bad_by_nan + self.bad_by_flat)))
self.EEGData = self.raw_mne.get_data(picks="eeg")
self.ch_names_new = np.asarray(self.raw_mne.info["ch_names"])
self.n_chans_new = len(self.ch_names_new)
self.new_dimensions = np.shape(self.EEGData)
return None
def find_bad_by_deviation(self, deviation_threshold=5.0):
"""Robust z-score of the robust standard deviation for each channel is calculated.
Channels having a z-score greater than 5 are detected as bad.
Parameters
----------
deviation_threshold: float
z-score threshold above which channels will be labelled bad.
"""
deviation_channel_mask = [False] * (self.new_dimensions[0])
channel_deviation = np.zeros(self.new_dimensions[0])
for i in range(0, self.new_dimensions[0]):
channel_deviation[i] = 0.7413 * iqr(self.EEGData[i, :])
channel_deviationSD = 0.7413 * iqr(channel_deviation)
channel_deviationMedian = np.nanmedian(channel_deviation)
robust_channel_deviation = np.divide(
np.subtract(channel_deviation, channel_deviationMedian), channel_deviationSD
)
for i in range(0, self.new_dimensions[0]):
deviation_channel_mask[i] = abs(
robust_channel_deviation[i]
) > deviation_threshold or np.isnan(robust_channel_deviation[i])
deviation_channels = self.channels_interpolate[deviation_channel_mask]
for i in range(0, len(deviation_channels)):
self.bad_by_deviation.append(self.ch_names_original[deviation_channels[i]])
return None
def find_bad_by_hfnoise(self, HF_zscore_threshold=5.0):
"""Noisiness of the channel is determined by finding the ratio of the median absolute deviation of high frequency to low frequency components.
Low pass 50 Hz filter is used to separate the frequency components. A robust z-score is then calculated relative to all the channels.
Parameters
----------
HF_zscore_threshold: float
z-score threshold above which channels would be labelled as bad.
"""
data_tmp = np.transpose(self.EEGData)
dimension = np.shape(data_tmp)
if self.sample_rate > 100:
EEG_filt = np.zeros((dimension[0], dimension[1]))
bandpass_filter = filter_design(
N_order=100,
amp=np.array([1, 1, 0, 0]),
freq=np.array([0, 90 / self.sample_rate, 100 / self.sample_rate, 1]),
)
for i in range(0, dimension[1]):
EEG_filt[:, i] = signal.filtfilt(bandpass_filter, 1, data_tmp[:, i])
noisiness = np.divide(
robust.mad(np.subtract(data_tmp, EEG_filt), c=1),
robust.mad(EEG_filt, c=1),
)
noisiness_median = np.nanmedian(noisiness)
noiseSD = (
np.median(np.absolute(np.subtract(noisiness, np.median(noisiness))))
* 1.4826
)
zscore_HFNoise = np.divide(
np.subtract(noisiness, noisiness_median), noiseSD
)
HFnoise_channel_mask = [False] * self.new_dimensions[0]
for i in range(0, self.new_dimensions[0]):
HFnoise_channel_mask[i] = zscore_HFNoise[
i
] > HF_zscore_threshold or np.isnan(zscore_HFNoise[i])
HFNoise_channels = self.channels_interpolate[HFnoise_channel_mask]
else:
EEG_filt = data_tmp
noisiness_median = 0
noisinessSD = 1
zscore_HFNoise = np.zeros((self.new_dimensions[0], 1))
HFNoise_channels = []
self.EEGData_beforeFilt = data_tmp
self.EEGData = np.transpose(EEG_filt)
for i in range(0, len(HFNoise_channels)):
self.bad_by_hf_noise.append(self.ch_names_original[HFNoise_channels[i]])
return None
def find_bad_by_correlation(
self, correlation_secs=1.0, correlation_threshold=0.4, frac_bad=0.1
):
"""Find correlation between the low frequency components of the EEG below 50 Hz.
Correlation is done using a sliding non-overlapping time window. The maximum absolute correlation is
as the 98th percentile of the absolute values of the correlations with the other channels
If the maximum correlation is less than 0.4 then the channel is designated as bad by corre-
lation.
Parameters
----------
correlation_secs: float
length of the correlation time window (default: 1 secs).
correlation_threshold: float
correlation threshold below which channel is marked bad.
frac_bad: float
percentage of data windows in which the correlation threshold was not surpassed and
if a channel gets a value of greater than 1%, it is designated bad.
"""
self.find_bad_by_hfnoise() # since filtering is performed there
correlation_frames = correlation_secs * self.sample_rate
correlation_window = np.arange(correlation_frames)
correlation_offsets = np.arange(
1, (self.new_dimensions[1] - correlation_frames), correlation_frames
)
w_correlation = len(correlation_offsets)
maximum_correlations = np.ones((self.original_dimensions[0], w_correlation))
drop_out = np.zeros((self.new_dimensions[0], w_correlation))
channel_correlation = np.ones((w_correlation, self.new_dimensions[0]))
noiselevels = np.zeros((w_correlation, self.new_dimensions[0]))
channel_deviations = np.zeros((w_correlation, self.new_dimensions[0]))
drop = np.zeros((w_correlation, self.new_dimensions[0]))
len_correlation_window = len(correlation_window)
EEGData = np.transpose(self.EEGData)
EEG_new_win = np.reshape(
np.transpose(EEGData[0 : len_correlation_window * w_correlation, :]),
(self.new_dimensions[0], len_correlation_window, w_correlation),
order="F",
)
data_win = np.reshape(
np.transpose(
self.EEGData_beforeFilt[0 : len_correlation_window * w_correlation, :]
),
(self.new_dimensions[0], len_correlation_window, w_correlation),
order="F",
)
for k in range(0, w_correlation):
eeg_portion = np.transpose(np.squeeze(EEG_new_win[:, :, k]))
data_portion = np.transpose(np.squeeze(data_win[:, :, k]))
window_correlation = np.corrcoef(np.transpose(eeg_portion))
abs_corr = np.abs(
np.subtract(window_correlation, np.diag(np.diag(window_correlation)))
)
channel_correlation[k, :] = np.quantile(abs_corr, 0.98, axis=0)
noiselevels[k, :] = np.divide(
robust.mad(np.subtract(data_portion, eeg_portion), c=1),
robust.mad(eeg_portion, c=1),
)
channel_deviations[k, :] = 0.7413 * iqr(data_portion, axis=0)
for i in range(0, w_correlation):
for j in range(0, self.new_dimensions[0]):
drop[i, j] = np.int(
np.isnan(channel_correlation[i, j]) or np.isnan(noiselevels[i, j])
)
if drop[i, j] == 1:
channel_deviations[i, j] = 0
noiselevels[i, j] = 0
maximum_correlations[self.channels_interpolate, :] = np.transpose(
channel_correlation
)
drop_out[:] = np.transpose(drop)
thresholded_correlations = maximum_correlations < correlation_threshold
thresholded_correlations = thresholded_correlations.astype(int)
fraction_BadCorrelationWindows = np.mean(thresholded_correlations, axis=1)
fraction_BadDropOutWindows = np.mean(drop_out, axis=1)
bad_correlation_channels_idx = np.argwhere(
fraction_BadCorrelationWindows > frac_bad
)
bad_correlation_channels_name = self.ch_names_original[
bad_correlation_channels_idx.astype(int)
]
self.bad_by_correlation = [i[0] for i in bad_correlation_channels_name]
dropout_channels_idx = np.argwhere(fraction_BadDropOutWindows > frac_bad)
dropout_channels_name = self.ch_names_original[dropout_channels_idx.astype(int)]
self.bad_by_dropout = [i[0] for i in dropout_channels_name]
return None
def find_bad_by_SNR(self):
"""Determine the channels that fail both by correlation and HF noise."""
self.find_bad_by_correlation()
set_hf = set(self.bad_by_hf_noise)
set_correlation = set(self.bad_by_correlation)
not_hf = set_correlation - set_hf
self.bad_by_SNR = self.bad_by_hf_noise + list(not_hf)
return None
def find_bad_by_ransac(
self,
n_samples=50,
fraction_good=0.25,
corr_thresh=0.75,
fraction_bad=0.4,
corr_window_secs=5.0,
):
"""Detect channels that are not predicted well by other channels.
Here, a ransac approach (see [1], and a short discussion in [2]) is
adopted to predict a "clean EEG" dataset. After identifying clean EEG
channels through the other methods, the clean EEG dataset is
constructed by repeatedly sampling a small subset of clean EEG channels
and interpolation the complete data. The median of all those
repetitions forms the clean EEG dataset. In a second step, the original
and the ransac predicted data are correlated and channels, which do not
correlate well with themselves across the two datasets are considered
`bad_by_ransac`.
Parameters
----------
n_samples : int
Number of samples used for computation of ransac.
fraction_good : float
Fraction of channels used for robust reconstruction of the signal.
This needs to be in the range [0, 1], where obviously neither 0
nor 1 would make sense.
corr_thresh : float
The minimum correlation threshold that should be attained within a
data window.
fraction_bad : float
            If this
+= A[k, d] * np.conj(B[c - k, d])
return suma
def helm_coefficients_josep(Ybus, Yseries, V0, S0, Ysh0, pq, pv, sl, pqpv, tolerance=1e-6, max_coeff=30, verbose=False):
"""
Holomorphic Embedding LoadFlow Method as formulated by <NAME> in 2020
    This function just returns the coefficients for further use in other routines
    :param Ybus: Complete admittance matrix
:param Yseries: Admittance matrix of the series elements
:param V0: vector of specified voltages
:param S0: vector of specified power
:param Ysh0: vector of shunt admittances (including the shunts of the branches)
:param pq: list of pq nodes
:param pv: list of pv nodes
:param sl: list of slack nodes
:param pqpv: sorted list of pq and pv nodes
:param tolerance: target error (or tolerance)
:param max_coeff: maximum number of coefficients
:param verbose: print intermediate information
    :return: U, W, Q, iterations, error norm
"""
npqpv = len(pqpv)
npv = len(pv)
nsl = len(sl)
n = Yseries.shape[0]
# --------------------------- PREPARING IMPLEMENTATION -------------------------------------------------------------
U = np.zeros((max_coeff, npqpv), dtype=complex) # voltages
W = np.zeros((max_coeff, npqpv), dtype=complex) # compute X=1/conj(U)
Q = np.zeros((max_coeff, npqpv), dtype=complex) # unknown reactive powers
Vm0 = np.abs(V0)
Vm2 = Vm0 * Vm0
if n < 2:
return U, W, Q, 0
if verbose:
print('Yseries')
print(Yseries.toarray())
df = pd.DataFrame(data=np.c_[Ysh0.imag, S0.real, S0.imag, Vm0],
columns=['Ysh', 'P0', 'Q0', 'V0'])
print(df)
Yred = Yseries[np.ix_(pqpv, pqpv)] # admittance matrix without slack buses
Yslack = -Yseries[np.ix_(pqpv, sl)] # yes, it is the negative of this
Yslack_vec = Yslack.sum(axis=1).A1
G = np.real(Yred) # real parts of Yij
B = np.imag(Yred) # imaginary parts of Yij
P_red = S0.real[pqpv]
Q_red = S0.imag[pqpv]
Vslack = V0[sl]
Ysh_red = Ysh0[pqpv]
# indices 0 based in the internal scheme
nsl_counted = np.zeros(n, dtype=int)
compt = 0
for i in range(n):
if i in sl:
compt += 1
nsl_counted[i] = compt
pq_ = pq - nsl_counted[pq]
pv_ = pv - nsl_counted[pv]
# .......................CALCULATION OF TERMS [0] ------------------------------------------------------------------
U[0, :] = spsolve(Yred, Yslack_vec)
W[0, :] = 1 / np.conj(U[0, :])
# .......................CALCULATION OF TERMS [1] ------------------------------------------------------------------
valor = np.zeros(npqpv, dtype=complex)
# get the current injections that appear due to the slack buses reduction
I_inj_slack = Yslack * Vslack
valor[pq_] = I_inj_slack[pq_] - Yslack_vec[pq_] + (P_red[pq_] - Q_red[pq_] * 1j) * W[0, pq_] - U[0, pq_] * Ysh_red[pq_]
valor[pv_] = I_inj_slack[pv_] - Yslack_vec[pv_] + P_red[pv_] * W[0, pv_] - U[0, pv_] * Ysh_red[pv_]
# compose the right-hand side vector
RHS = np.r_[valor.real,
valor.imag,
Vm2[pv] - (U[0, pv_] * U[0, pv_]).real]
# Form the system matrix (MAT)
Upv = U[0, pv_]
Xpv = W[0, pv_]
VRE = coo_matrix((2 * Upv.real, (np.arange(npv), pv_)), shape=(npv, npqpv)).tocsc()
VIM = coo_matrix((2 * Upv.imag, (np.arange(npv), pv_)), shape=(npv, npqpv)).tocsc()
XIM = coo_matrix((-Xpv.imag, (pv_, np.arange(npv))), shape=(npqpv, npv)).tocsc()
XRE = coo_matrix((Xpv.real, (pv_, np.arange(npv))), shape=(npqpv, npv)).tocsc()
EMPTY = csc_matrix((npv, npv))
MAT = vs((hs((G, -B, XIM)),
hs((B, G, XRE)),
hs((VRE, VIM, EMPTY))), format='csc')
if verbose:
print('MAT')
print(MAT.toarray())
# factorize (only once)
MAT_LU = factorized(MAT.tocsc())
# solve
LHS = MAT_LU(RHS)
# update coefficients
U[1, :] = LHS[:npqpv] + 1j * LHS[npqpv:2 * npqpv]
Q[0, pv_] = LHS[2 * npqpv:]
W[1, :] = -W[0, :] * np.conj(U[1, :]) / np.conj(U[0, :])
# .......................CALCULATION OF TERMS [>=2] ----------------------------------------------------------------
iter_ = 1
range_pqpv = np.arange(npqpv, dtype=np.int64)
V = V0.copy()
c = 2
converged = False
norm_f = tolerance + 1.0 # any number that violates the convergence
while c < max_coeff and not converged: # c defines the current depth
valor[pq_] = (P_red[pq_] - Q_red[pq_] * 1j) * W[c - 1, pq_] - U[c - 1, pq_] * Ysh_red[pq_]
valor[pv_] = -1j * conv2(W, Q, c, pv_) - U[c - 1, pv_] * Ysh_red[pv_] + W[c - 1, pv_] * P_red[pv_]
RHS = np.r_[valor.real,
valor.imag,
-conv3(U, U, c, pv_).real]
LHS = MAT_LU(RHS)
# update voltage coefficients
U[c, :] = LHS[:npqpv] + 1j * LHS[npqpv:2 * npqpv]
# update reactive power
Q[c - 1, pv_] = LHS[2 * npqpv:]
# update voltage inverse coefficients
W[c, range_pqpv] = -conv1(U, W, c, range_pqpv) / np.conj(U[0, range_pqpv])
# compute power mismatch
        if not np.mod(c, 2): # check the mismatch every 2 coefficients
V[pqpv] = U.sum(axis=0)
Scalc = V * np.conj(Ybus * V)
dP = np.abs(S0[pqpv].real - Scalc[pqpv].real)
dQ = np.abs(S0[pq].imag - Scalc[pq].imag)
norm_f = np.linalg.norm(np.r_[dP, dQ], np.inf) # same as max(abs())
# check convergence
converged = norm_f < tolerance
print('mismatch check at c=', c)
c += 1
iter_ += 1
return U, W, Q, iter_, norm_f
def helm_josep(Ybus, Yseries, V0, S0, Ysh0, pq, pv, sl, pqpv, tolerance=1e-6, max_coeff=30, use_pade=True,
verbose=False):
"""
Holomorphic Embedding LoadFlow Method as formulated by <NAME> in 2020
:param Ybus: Complete admittance matrix
:param Yseries: Admittance matrix of the series elements
:param V0: vector of specified voltages
:param S0: vector of specified power
:param Ysh0: vector of shunt admittances (including the shunts of the branches)
:param pq: list of pq nodes
:param pv: list of pv nodes
:param sl: list of slack nodes
:param pqpv: sorted list of pq and pv nodes
:param tolerance: target error (or tolerance)
:param max_coeff: maximum number of coefficients
:param use_pade: Use the Padè approximation? otherwise a simple summation is done
:param verbose: print intermediate information
:return: V, converged, norm_f, Scalc, iter_, elapsed
"""
start_time = time.time()
# compute the series of coefficients
U, X, Q, iter_, norm_f = helm_coefficients_josep(Ybus, Yseries, V0, S0, Ysh0, pq, pv, sl, pqpv,
tolerance=tolerance, max_coeff=max_coeff, verbose=verbose)
# --------------------------- RESULTS COMPOSITION ------------------------------------------------------------------
if verbose:
print('V coefficients')
print(U)
# compute the final voltage vector
V = V0.copy()
if use_pade:
try:
V[pqpv] = pade4all(iter_, U, 1.0)
except:
warn('Padè failed :(, using coefficients summation')
V[pqpv] = U.sum(axis=0)
else:
V[pqpv] = U.sum(axis=0)
# compute power mismatch
Scalc = V * np.conj(Ybus * V)
dP = np.abs(S0[pqpv].real - Scalc[pqpv].real)
dQ = np.abs(S0[pq].imag - Scalc[pq].imag)
norm_f = np.linalg.norm(np.r_[dP, dQ], np.inf) # same as max(abs())
# check convergence
converged = norm_f < tolerance
elapsed = time.time() - start_time
return V, converged, norm_f, Scalc, iter_, elapsed
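# Minimal two-bus usage sketch (an illustration only, not part of the original
# module and never executed on import): a slack bus feeding a single PQ load
# through one series branch; all values are assumed per-unit figures.
def _example_two_bus_helm():
    from scipy.sparse import csc_matrix
    y_line = 1.0 / (0.01 + 0.05j)              # series admittance of the branch
    Ybus = csc_matrix(np.array([[y_line, -y_line],
                                [-y_line, y_line]]))
    Yseries = Ybus.copy()                      # no shunt elements in this sketch
    Ysh0 = np.zeros(2, dtype=complex)
    V0 = np.ones(2, dtype=complex)             # flat start, slack held at 1.0 p.u.
    S0 = np.array([0.0 + 0.0j, -0.3 - 0.1j])   # 0.3 + j0.1 p.u. load at bus 1
    sl = np.array([0])                         # slack bus indices
    pv = np.array([], dtype=int)               # no PV buses in this sketch
    pq = np.array([1])                         # PQ bus indices
    pqpv = np.array([1])                       # sorted non-slack buses
    return helm_josep(Ybus, Yseries, V0, S0, Ysh0, pq, pv, sl, pqpv,
                      tolerance=1e-9, max_coeff=30, use_pade=True)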
def test_voltage(grid):
"""
Grid solution test
:param grid: MultiCircuit instance
:return: True/False
"""
nc = grid.compile_snapshot()
inputs = nc.compute()[0] # pick the first island
tolerance = 1e-6
V, converged_, error, Scalc_, iter_, elapsed_ = helm_josep(Ybus=inputs.Ybus,
Yseries=inputs.Yseries,
V0=inputs.Vbus,
S0=inputs.Sbus,
Ysh0=inputs.Ysh_helm,
pq=inputs.pq,
pv=inputs.pv,
sl=inputs.ref,
pqpv=inputs.pqpv,
tolerance=tolerance,
max_coeff=50,
use_pade=True,
verbose=True)
Vm = np.abs(V)
Va = np.angle(V)
dP = np.abs(inputs.Sbus.real - Scalc_.real)
dP[inputs.ref] = 0
dQ = np.abs(inputs.Sbus.imag - Scalc_.imag)
dQ[inputs.pv] = np.nan
dQ[inputs.ref] = np.nan
df = pd.DataFrame(data=np.c_[inputs.types, Vm, Va, np.abs(inputs.Vbus), dP, dQ],
columns=['Types', 'Vm', 'Va', 'Vset', 'P mismatch', 'Q mismatch'])
print(df)
print('Error', error)
print('P error', np.max(np.abs(dP)))
print('Elapsed', elapsed_)
print('Iterations', iter_)
return error < tolerance
def test_sigma(grid):
"""
Sigma-distances test
:param grid:
:return:
"""
nc = grid.compile_snapshot()
inputs = nc.compute()[0] # pick the first island
U_, X_, Q_, iter_, normF = helm_coefficients_josep(Ybus=inputs.Ybus,
Yseries=inputs.Yseries,
V0=inputs.Vbus,
S0=inputs.Sbus,
Ysh0=-inputs.Ysh_helm,
pq=inputs.pq,
pv=inputs.pv,
sl=inputs.ref,
pqpv=inputs.pqpv,
tolerance=1e-6,
max_coeff=50,
verbose=False)
n = inputs.nbus
Sig_re = np.zeros(n, dtype=float)
Sig_im = np.zeros(n, dtype=float)
Sigma = sigma_function(U_, X_, iter_ - 1, inputs.Vbus[inputs.ref])
Sig_re[inputs.pqpv] = np.real(Sigma)
Sig_im[inputs.pqpv] = np.imag(Sigma)
sigma_distances = sigma_distance(Sig_re, Sig_im)
# sigma plot
sx = np.linspace(-0.25, np.max(Sig_re)+0.1, 100)
sy1 = np.sqrt(0.25 + sx)
sy2 = -np.sqrt(0.25 + sx)
fig = plt.figure(figsize=(8, 7))
ax = fig.add_subplot(111)
ax.plot(Sig_re, Sig_im, 'o')
ax.plot(sx, sy1, 'b')
ax.plot(sx, sy2, 'r')
ax.set_title('Sigma plot')
    ax.set_xlabel(r'$\sigma_{re}$')
    ax.set_ylabel(r'$\sigma_{im}$')
plt.show()
return sigma_distances
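# Note (illustrative interpretation): the two branches plotted in test_sigma are
# the boundary sigma_im**2 = 0.25 + sigma_re; buses whose (sigma_re, sigma_im)
# points approach that parabola are the ones closest to infeasibility, which is
# what sigma_distance() quantifies above.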
def test_pade(grid):
"""
Sigma-distances test
:param grid:
:return:
"""
nc = grid.compile_snapshot()
inputs = nc.compute()[0] # pick the first island
U_, X_, Q_, iter_, normF = helm_coefficients_josep(Ybus=inputs.Ybus,
Yseries=inputs.Yseries,
V0=inputs.Vbus,
S0=inputs.Sbus,
Ysh0=-inputs.Ysh_helm,
pq=inputs.pq,
pv=inputs.pv,
sl=inputs.ref,
pqpv=inputs.pqpv,
tolerance=1e-6,
max_coeff=50,
verbose=False)
alphas = np.arange(0, 1.1, 0.01)
n = inputs.nbus
na = len(alphas)
V = np.zeros((na, n))
V[:, inputs.ref] = np.abs(inputs.Vbus[inputs.ref])
for i, alpha in enumerate(alphas):
V[i, inputs.pqpv] = np.abs(pade4all(order=iter_, coeff_mat=U_, s=alpha))
plt.axvline(0, c='k')
plt.axvline(1, c='k')
plt.plot(alphas, V)
plt.ylabel('Voltage (p.u.)')
    plt.xlabel(r'$\lambda$')
plt.show()
if __name__ == '__main__':
from GridCal.Engine import FileOpen
import pandas as pd
np.set_printoptions(linewidth=2000, suppress=True)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE39_1W.gridcal'
fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE 14.xlsx'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/lynn5buspv.xlsx'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE 118.xlsx'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/1354 Pegase.xlsx'
# fname = 'helm_data1.gridcal'
            data_perms = copy.copy(orig)
for k, v in test_failures["perms"].items():
data_perms[k] = v
with self.assertRaises(PermissionDeniedException) as inst:
db.update(typ, **data_perms)
# we test failure to update readonly fields
if "readonly" in test_failures:
data_ro = copy.copy(orig)
b_data = self.assert_get_handleref(db, typ, id)
data_ro.update(**test_failures["readonly"])
db.update(typ, **data_ro)
u_data = self.assert_get_handleref(db, typ, id)
for k, v in test_failures["readonly"].items():
self.assertEqual(u_data.get(k), b_data.get(k))
##########################################################################
def assert_list_filter_related(self, target, rel, fld="id", valid=None,
valid_m=None):
#if not valid:
# valid = [o.id for k, o in SHARED.items() if type(
# o) != int and k.find("%s_" % target) == 0]
if fld != "id":
qfld = "_%s" % fld
else:
qfld = fld
ids = [
getattr(SHARED["%s_r_ok" % rel], fld),
getattr(SHARED["%s_rw_ok" % rel], fld)
]
kwargs_s = {
"%s_%s" % (rel, qfld): getattr(SHARED["%s_r_ok" % rel], fld)
}
kwargs_m = {
"%s_%s__in" % (rel, qfld): ",".join([str(id) for id in ids])
}
if hasattr(REFTAG_MAP[target], "%s" % rel):
valid_s = [
r.id
for r in REFTAG_MAP[target].objects.filter(**kwargs_s)
.filter(status="ok")
]
valid_m = [
r.id
for r in REFTAG_MAP[target]
.objects.filter(**{
"%s_%s__in" % (rel, qfld): ids
}).filter(status="ok")
]
elif target == "poc":
valid_s = [SHARED["%s_r_ok_public" % target].id]
valid_m = [
SHARED["%s_r_ok_public" % target].id,
SHARED["%s_rw_ok_public" % target].id
]
else:
valid_s = [SHARED["%s_r_ok" % target].id]
valid_m = [
SHARED["%s_r_ok" % target].id, SHARED["%s_rw_ok" % target].id
]
# exact
data = self.db_guest.all(target, **kwargs_s)
self.assertGreater(len(data), 0)
for row in data:
self.assert_data_integrity(row, target)
self.assertIn(row["id"], valid_s)
# in
data = self.db_guest.all(target, **kwargs_m)
self.assertGreater(len(data), 0)
for row in data:
self.assert_data_integrity(row, target)
self.assertIn(row["id"], valid_m)
##########################################################################
def assert_related_depth(self, obj, serializer_class, r_depth, t_depth,
note_tag, typ="listing", list_exclude=[]):
"""
        Assert the data integrity of structures within a result that have
been expanded via the depth parameter
"""
        # get all the relationship properties declared in the serializer
pk_flds, n_flds = self.serializer_related_fields(serializer_class)
# some tag so we can track where the assertions fail since this will
# be doing nested checks
note_tag = "%s(%d/%d)" % (note_tag, r_depth, t_depth)
# first check that the provided object is not None, as this should
# never be the case
self.assertNotEqual(type(obj), NoneType, msg=note_tag)
# single primary key relation fields
for pk_fld in pk_flds:
            # the serializer has marked this field to be excluded from serialized data,
            # so don't check for it
if pk_fld in list_exclude:
continue
if typ == "listing":
# in listing mode, depth should never expand pk relations
self.assertEqual(
obj.get(pk_fld), None, msg="PK Relation %s %s" % (note_tag,
pk_fld))
else:
# in single get mode, expand everything as long as we are at
# a relative depth greater than 1
if r_depth >= 1:
self.assert_related_depth(
obj.get(pk_fld), REFTAG_MAP_SLZ.get(pk_fld),
r_depth - 1, t_depth, "%s.%s" % (note_tag,
pk_fld), typ=typ)
else:
self.assertIn(
type(obj.get(pk_fld)),
[int, long, NoneType],
msg="PK Relation %s %s" % (note_tag, pk_fld))
# nested set relations
for n_fld, n_fld_cls in n_flds:
if r_depth > 1:
# sets should be expanded to objects
self.assertIn(n_fld, obj,
msg="Nested set existing (dN) %s %s" % (note_tag,
n_fld))
# make sure set exists and is of the correct type
self.assertEqual(
type(obj[n_fld]), list,
msg="Nested set list type (dN) %s %s" % (note_tag, n_fld))
# assert further depth expansions on all expanded objects in
# the set
for row in obj[n_fld]:
self.assert_related_depth(
row, n_fld_cls, r_depth - 2, t_depth, "%s.%s" %
(note_tag, n_fld), typ=typ, list_exclude=getattr(
n_fld_cls.Meta, "list_exclude", []))
elif r_depth == 1:
# sets should be expanded to ids
self.assertIn(n_fld, obj,
msg="Nested set existing (d1) %s %s" % (note_tag,
n_fld))
# make sure set exists and is of the correct type
self.assertEqual(
type(obj[n_fld]), list,
msg="Nested set list type (d1) %s %s" % (note_tag, n_fld))
                # make sure all values in the set are of type int or long
for row in obj[n_fld]:
self.assertIn(
type(row),
[long, int],
msg="Nested set containing ids (d1) %s %s" % (note_tag,
n_fld))
else:
# sets should not exist
self.assertNotIn(n_fld, obj,
msg="Netsted set not existing (d0) %s %s" %
(note_tag, n_fld))
##########################################################################
# TESTS WITH USER THAT IS NOT A MEMBER OF AN ORGANIZATION
##########################################################################
def test_user_001_GET_org(self):
self.assert_get_handleref(self.db_user, "org", SHARED["org_r_ok"].id)
##########################################################################
def test_user_001_GET_net(self):
data = self.assert_get_handleref(self.db_user, "net",
SHARED["net_r_ok"].id)
self.assertNotEqual(len(data.get("poc_set")), 0)
##########################################################################
def test_user_001_GET_ix(self):
self.assert_get_handleref(self.db_user, "ix", SHARED["ix_r_ok"].id)
##########################################################################
def test_user_001_GET_fac(self):
self.assert_get_handleref(self.db_user, "fac", SHARED["fac_r_ok"].id)
##########################################################################
def test_user_001_GET_fac_netcount(self):
data = self.assert_get_handleref(self.db_user, "fac",
SHARED["fac_r_ok"].id)
self.assertEqual(data.get("net_count"), 1)
##########################################################################
def test_user_001_GET_poc_public(self):
self.assert_get_handleref(self.db_user, "poc",
SHARED["poc_r_ok_public"].id)
##########################################################################
def test_user_001_GET_poc_users(self):
self.assert_get_handleref(self.db_user, "poc",
SHARED["poc_r_ok_users"].id)
##########################################################################
def test_user_001_GET_poc_private(self):
self.assert_get_forbidden(self.db_user, "poc",
SHARED["poc_r_ok_private"].id)
##########################################################################
def test_user_001_GET_nefac(self):
self.assert_get_handleref(self.db_user, "netfac",
SHARED["netfac_r_ok"].id)
##########################################################################
def test_user_001_GET_netixlan(self):
self.assert_get_handleref(self.db_user, "netixlan",
SHARED["netixlan_r_ok"].id)
##########################################################################
def test_user_001_GET_ixfac(self):
self.assert_get_handleref(self.db_user, "ixfac",
SHARED["ixfac_r_ok"].id)
##########################################################################
def test_user_001_GET_ixlan(self):
self.assert_get_handleref(self.db_user, "ixlan",
SHARED["ixlan_r_ok"].id)
##########################################################################
def test_user_001_GET_ixpfx(self):
self.assert_get_handleref(self.db_user, "ixpfx",
SHARED["ixpfx_r_ok"].id)
##########################################################################
def test_user_005_list_poc(self):
data = self.db_guest.all("poc", limit=1000)
for row in data:
self.assertIn(row.get("visible"), ["Users", "Public"])
data = self.db_guest.all("poc", visible="Private", limit=100)
self.assertEqual(0, len(data))
##########################################################################
# TESTS WITH USER THAT IS ORGANIZATION MEMBER
##########################################################################
def test_org_member_001_GET_poc_public(self):
self.assert_get_handleref(self.db_org_member, "poc",
SHARED["poc_r_ok_public"].id)
##########################################################################
def test_org_member_001_GET_poc_users(self):
self.assert_get_handleref(self.db_org_member, "poc",
SHARED["poc_r_ok_users"].id)
##########################################################################
def test_org_member_001_GET_poc_private(self):
self.assert_get_handleref(self.db_org_member, "poc",
SHARED["poc_r_ok_private"].id)
##########################################################################
# TESTS WITH USER THAT IS ORGANIZATION ADMINISTRATOR
##########################################################################
##########################################################################
def test_org_admin_001_GET_poc_public(self):
self.assert_get_handleref(self.db_org_admin, "poc",
SHARED["poc_r_ok_public"].id)
##########################################################################
def test_org_admin_001_GET_poc_users(self):
self.assert_get_handleref(self.db_org_admin, "poc",
SHARED["poc_r_ok_users"].id)
##########################################################################
def test_org_admin_001_GET_poc_private(self):
# org admin is admin of rw org, so trying to access the private poc of the r org
# should still be forbidden
self.assert_get_forbidden(self.db_org_admin, "poc",
SHARED["poc_r_ok_private"].id)
##########################################################################
def test_org_admin_002_POST_PUT_DELETE_ix(self):
data = self.make_data_ix(prefix=self.get_prefix4())
r_data = self.assert_create(
self.db_org_admin,
"ix",
data,
ignore=["prefix"],
test_failures={
"invalid": {
"prefix": self.get_prefix4(),
"name": ""
},
"perms": {
"prefix": self.get_prefix4(),
# need to set name again so it doesnt fail unique validation
"name": self.make_name("Test"),
# set org to an organization the user doesnt have perms to
"org_id": SHARED["org_r_ok"].id
},
"status": {
# need to set name again so it doesnt fail unique validation
"prefix": self.get_prefix4(),
"name": self.make_name("Test"),
"org_id": SHARED["org_rwp"].id
}
})
SHARED["ix_id"] = r_data.get("id")
self.assert_update(self.db_org_admin, "ix", SHARED["ix_id"],
{"name": self.make_name("Test")}, test_failures={
"invalid": {
"name": ""
},
"perms": {
"id": SHARED["ix_r_ok"].id
}
})
self.assert_delete(self.db_org_admin, "ix",
test_success=SHARED["ix_id"],
test_failure=SHARED["ix_r_ok"].id)
self.assert_create(
self.db_org_admin, "ix", data, test_success=False, test_failures={
"invalid": {
"prefix": self.get_prefix4(),
"policy_email": "",
"tech_email": ""
},
})
self.assert_create(self.db_org_admin, "ix", data, test_success=False,
test_failures={
"invalid": {
"prefix": ""
},
})
##########################################################################
def test_org_admin_002_POST_PUT_DELETE_fac(self):
data = self.make_data_fac()
r_data = self.assert_create(
self.db_org_admin,
"fac",
data,
test_failures={
"invalid": {
"name": ""
},
"perms": {
# need to set name again so it doesnt fail unique validation
"name": self.make_name("Test"),
# set org to an organization the user doesnt have perms to
"org_id": SHARED["org_r_ok"].id
},
"status": {
"name": self.make_name("Test"),
"org_id": SHARED["org_rwp"].id
}
})
SHARED["fac_id"] = r_data.get("id")
self.assert_update(
self.db_org_admin,
"fac",
SHARED["fac_id"],
{"name": self.make_name("Test")},
test_failures={
"invalid": {
"name": ""
},
"perms": {
"id": SHARED["fac_r_ok"].id
},
"readonly": {
"latitude": 1, #this should not take as it is read only
"longitude": 1 #this should not take as it is read only
}
},
)
self.assert_delete(self.db_org_admin, "fac",
test_success=SHARED["fac_id"],
test_failure=SHARED["fac_r_ok"].id)
##########################################################################
def test_org_admin_002_POST_PUT_DELETE_net(self):
data = self.make_data_net(asn=9000900)
r_data = self.assert_create(
self.db_org_admin,
"net",
data,
test_failures={
"invalid": {
"name": ""
},
"perms": {
# need to set name again so it doesnt fail unique validation
"name": self.make_name("Test"),
"asn": data["asn"] + 1,
# set org to an organization the user doesnt have perms to
"org_id": SHARED["org_r_ok"].id
},
"status": {
"org_id": SHARED["org_rwp"].id,
"asn": data["asn"] + 1,
"name": self.make_name("Test")
}
})
SHARED["net_id"] = r_data.get("id")
self.assert_update(self.db_org_admin, "net", SHARED["net_id"],
{"name": self.make_name("Test")}, test_failures={
"invalid": {
"name": ""
},
"perms": {
"id": SHARED["net_r_ok"].id
}
})
self.assert_delete(self.db_org_admin, "net",
test_success=SHARED["net_id"],
test_failure=SHARED["net_r_ok"].id)
# Test RiR not found failure
r_data = self.assert_create(
self.db_org_admin, "net", data,
test_failures={"invalid": {
"asn": 9999999
}}, test_success=False)
##########################################################################
def test_org_admin_002_PUT_net_write_only_fields(self):
"""
        With this we check that certain fields that are allowed to be
        set via the api, but are not supposed to be rendered in the
        api data, work correctly
"""
def test_write_only_fields_missing(orig, updated):
assert (updated.has_key("allow_ixp_update") == False)
net = SHARED["net_rw_ok"]
self.assertEqual(net.allow_ixp_update, False)
self.assert_update(self.db_org_admin, "net", net.id,
{"allow_ixp_update": True},
test_success=[test_write_only_fields_missing])
net.refresh_from_db()
self.assertEqual(net.allow_ixp_update, True)
##########################################################################
def test_org_admin_002_POST_PUT_DELETE_netfac(self):
data = {
"net_id": SHARED["net_rw_ok"].id,
"fac_id": SHARED["fac_rw_ok"].id,
"local_asn": 12345
}
r_data = self.assert_create(
self.db_org_admin,
"netfac",
data,
test_failures={
"invalid": {
"net_id": ""
},
"perms": {
# set network to one the user doesnt have perms to
"net_id": SHARED["net_r_ok"].id
},
"status": {
"net_id": SHARED["net_rw_pending"].id,
"fac_id": SHARED["fac_rw_pending"].id,
}
})
SHARED["netfac_id"] = r_data.get("id")
self.assert_update(self.db_org_admin, "netfac", SHARED["netfac_id"],
{"local_asn": random.randint(999, 9999)},
test_failures={
"invalid": {
"fac_id": ""
},
"perms": {
"net_id": SHARED["net_r_ok"].id
}
})
self.assert_delete(self.db_org_admin, "netfac",
test_success=SHARED["netfac_id"],
                           test_failure=SHARED["netfac_r_ok"].id)
import io
import tempfile
import unittest
import numpy
from gensound.sound import *
from gensound.sound import _repeat_array
class SoundUtilsTest(unittest.TestCase):
def test_repeat_array_single_channel(self):
array = numpy.array([[1, 2, 3]]).T
repeated = [1, 2, 3] * 3
self.assertEqual(tuple(_repeat_array(array, 6).flatten()),
tuple(repeated[:6]))
self.assertEqual(tuple(_repeat_array(array, 8).flatten()),
tuple(repeated[:8]))
self.assertEqual(tuple(_repeat_array(array, 2).flatten()),
tuple(repeated[:2]))
def test_repeat_array_two_channel(self):
array = numpy.array([[1, 2, 3], [4, 5, 6]]).T
repeated = numpy.array([[1, 2, 3] * 3, [4, 5, 6] * 3]).T
six = _repeat_array(array, 6)
self.assertEqual(six.shape, (6, 2))
self.assertEqual(six.tolist(), repeated[:6].tolist())
eight = _repeat_array(array, 8)
self.assertEqual(eight.shape, (8, 2))
self.assertEqual(eight.tolist(), repeated[:8].tolist())
two = _repeat_array(array, 2)
self.assertEqual(two.shape, (2, 2))
self.assertEqual(two.tolist(), repeated[:2].tolist())
def test_repeat_array_invalid_input(self):
array = numpy.array([[1, 2, 3]])
null = numpy.array([[]])
lessdimen = numpy.array([1, 2, 3])
overdimen = numpy.array([[[1, 2]], [[3, 4]]])
with self.assertRaises(ValueError) as cm:
_repeat_array(array, 0)
self.assertEqual(str(cm.exception),
'want_length must be greater than 0 but got 0')
with self.assertRaises(ValueError) as cm:
_repeat_array(array, -1)
self.assertEqual(str(cm.exception),
'want_length must be greater than 0 but got -1')
with self.assertRaises(ValueError) as cm:
_repeat_array(null, 1)
self.assertEqual(str(cm.exception),
'sound should have least one element')
with self.assertRaises(ValueError) as cm:
_repeat_array(lessdimen, 1)
self.assertEqual(str(cm.exception), 'sound should two dimensions')
with self.assertRaises(ValueError) as cm:
_repeat_array(overdimen, 1)
self.assertEqual(str(cm.exception), 'sound should two dimensions')
def test_overlay(self):
a = Sound.from_array([0.0, 0.1], 2)
b = Sound.from_array([0.2, 0.3], 2)
c = Sound.from_array([0.4, 0.5], 2)
self.assertEqual(overlay(a, b, c).samplerate, 2)
self.assertEqual(overlay(a, b, c).duration, 1)
self.assertEqual(overlay(a, b, c),
Sound.from_array([0.6, 0.9], 2))
self.assertEqual(overlay(overlay(a, b), c),
Sound.from_array([0.6, 0.9], 2))
self.assertEqual(overlay(a, overlay(b, c)),
Sound.from_array([0.6, 0.9], 2))
self.assertEqual(overlay(overlay(a, c), b),
Sound.from_array([0.6, 0.9], 2))
def test_overlay_different_duration(self):
a = Sound.from_array([0.0, 0.1], 2)
b = Sound.from_array([0.2, 0.3], 2)
c = Sound.from_array([0.4, 0.5, 0.6, 0.7], 2)
self.assertEqual(overlay(a, b, c).samplerate, 2)
self.assertEqual(overlay(a, b, c).duration, 2)
self.assertEqual(overlay(a, b, c),
Sound.from_array([0.6, 0.9, 0.6, 0.7], 2))
def test_overlay_different_samplerate(self):
a = Sound.from_array([0.0, 0.1], 2)
b = Sound.from_array([0.2, 0.3], 2)
c = Sound.from_array([0.4, 0.5], 3)
with self.assertRaises(DifferentSamplerateError) as cm:
overlay(a, b, c)
self.assertEqual(cm.exception.frequency, (2, 2, 3))
def test_overlay_different_channels(self):
a = Sound.from_sinwave(220)
b = Sound.from_sinwave(440)
c = Sound.from_sinwave(880).as_stereo()
with self.assertRaises(DifferentChannelsError) as cm:
overlay(a, b, c)
self.assertEqual(cm.exception.channels, (1, 1, 2))
def test_concat(self):
a = Sound.from_array([0.0, 0.1], 2)
b = Sound.from_array([0.2, 0.3], 2)
c = Sound.from_array([0.4, 0.5], 2)
self.assertEqual(concat(a, b, c).samplerate, 2)
self.assertEqual(concat(a, b, c).duration, 3)
self.assertEqual(concat(a, b, c),
Sound.from_array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5], 2))
self.assertEqual(concat(concat(a, b), c),
Sound.from_array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5], 2))
self.assertEqual(concat(a, concat(b, c)),
Sound.from_array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5], 2))
self.assertEqual(concat(c, b, a),
Sound.from_array([0.4, 0.5, 0.2, 0.3, 0.0, 0.1], 2))
def test_concat_different_duration(self):
a = Sound.from_array([0.0, 0.1], 2)
b = Sound.from_array([0.2, 0.3], 2)
c = Sound.from_array([0.4, 0.5, 0.6, 0.7], 2)
self.assertEqual(concat(a, b, c).samplerate, 2)
self.assertEqual(concat(a, b, c).duration, 4)
self.assertEqual(concat(a, b, c), Sound.from_array([
0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7
], 2))
def test_concat_different_samplerate(self):
a = Sound.from_array([0.0, 0.1], 2)
b = Sound.from_array([0.2, 0.3], 2)
c = Sound.from_array([0.4, 0.5], 3)
with self.assertRaises(DifferentSamplerateError) as cm:
concat(a, b, c)
self.assertEqual(cm.exception.frequency, (2, 2, 3))
def test_concat_different_channels(self):
a = Sound.from_sinwave(220)
b = Sound.from_sinwave(440)
c = Sound.from_sinwave(880).as_stereo()
with self.assertRaises(DifferentChannelsError) as cm:
concat(a, b, c)
self.assertEqual(cm.exception.channels, (1, 1, 2))
def test_merge_channels(self):
a = Sound.from_array([0.0, 0.1], 2)
b = Sound.from_array([0.2, 0.3], 2)
c = Sound.from_array([0.4, 0.5], 2)
abc = Sound.from_array([[0.0, 0.2, 0.4], [0.1, 0.3, 0.5]], 2)
self.assertEqual(merge_channels(a, b, c), abc)
c_stereo = merge_channels(Sound.from_array([0.3, 0.6], 2),
Sound.from_array([0.5, 0.4], 2))
self.assertEqual(merge_channels(a, b, c_stereo), abc)
class SoundTest(unittest.TestCase):
def test_constructor(self):
sound = Sound(numpy.array([-1.0, 0.5, 1.0]), 1)
self.assertEqual(sound.samplerate, 1)
self.assertEqual(sound.duration, 3)
self.assertEqual(tuple(sound.data), (-1.0, 0.5, 1.0))
def test_constructor_clip(self):
sound = Sound(numpy.array([-1.1, -1.0, 1.0, 1.1]), 2)
self.assertEqual(sound.samplerate, 2)
self.assertEqual(sound.duration, 2)
self.assertEqual(tuple(sound.data), (-1.0, -1.0, 1.0, 1.0))
def test_constructor_invalid(self):
with self.assertRaises(InvalidSamplerateError) as cm:
Sound(numpy.array([0]), 0)
self.assertEqual(cm.exception.frequency, 0)
with self.assertRaises(InvalidSamplerateError) as cm:
Sound(numpy.array([0]), -1)
self.assertEqual(cm.exception.frequency, -1)
with self.assertRaises(InvalidDurationError) as cm:
Sound(numpy.array([]), 1)
self.assertEqual(cm.exception.duration, 0)
with self.assertRaises(ValueError) as cm:
Sound(numpy.array([[[1], [2]], [[3], [4]]]), 1)
self.assertEqual(str(cm.exception),
'data dimensions must be 1 or 2 but got 3')
def test_equals(self):
a = Sound.from_array([0.1, 0.2, 0.3], 1)
b = Sound.from_array([0.4, 0.5, 0.6], 1)
self.assertEqual(a, a)
self.assertEqual(b, b)
self.assertNotEqual(a, b)
self.assertNotEqual(a, None)
self.assertNotEqual(a, 1)
self.assertNotEqual(a, 'a')
def test_from_array(self):
self.assertEqual(Sound(numpy.array([-0.5, 0.5]), 44100),
Sound.from_array([-0.5, 0.5], 44100))
self.assertEqual(Sound(numpy.array([0.1, -0.1]), 100),
Sound.from_array([0.1, -0.1], 100))
def test_from_array_invalid(self):
with self.assertRaises(InvalidDurationError) as cm:
Sound.from_array([], 44100)
self.assertEqual(cm.exception.duration, 0)
with self.assertRaises(InvalidSamplerateError) as cm:
Sound.from_array([0], 0)
self.assertEqual(cm.exception.frequency, 0)
def test_from_sinwave_with_smooth_end(self):
sound = Sound.from_sinwave(440,
duration=1,
volume=0.5,
samplerate=44100,
smooth_end=True)
self.assertEqual(sound.samplerate, 44100)
self.assertAlmostEqual(sound.duration, 1, places=1)
self.assertAlmostEqual(sound.volume, 0.5, places=4)
sound = Sound.from_sinwave(880,
duration=2,
volume=0.8,
samplerate=88200,
smooth_end=True)
self.assertEqual(sound.samplerate, 88200)
self.assertAlmostEqual(sound.duration, 2, places=1)
self.assertAlmostEqual(sound.volume, 0.8, places=4)
def test_from_sinwave_without_smooth_end(self):
sound = Sound.from_sinwave(440,
duration=1,
volume=0.5,
samplerate=44100,
smooth_end=False)
self.assertEqual(sound.samplerate, 44100)
self.assertEqual(sound.duration, 1.0)
self.assertAlmostEqual(sound.volume, 0.5, places=4)
sound = Sound.from_sinwave(880,
duration=2,
volume=0.8,
samplerate=88200,
smooth_end=False)
self.assertEqual(sound.samplerate, 88200)
self.assertEqual(sound.duration, 2.0)
self.assertAlmostEqual(sound.volume, 0.8, places=4)
def test_from_sinwave_invalid(self):
with self.assertRaises(InvalidFrequencyError) as cm:
Sound.from_sinwave(0)
self.assertEqual(cm.exception.frequency, 0)
with self.assertRaises(InvalidDurationError) as cm:
Sound.from_sinwave(440, duration=0)
self.assertEqual(cm.exception.duration, 0)
with self.assertRaises(InvalidVolumeError) as cm:
Sound.from_sinwave(440, volume=-0.1)
self.assertEqual(cm.exception.volume, -0.1)
with self.assertRaises(InvalidVolumeError) as cm:
Sound.from_sinwave(440, volume=1.1)
self.assertEqual(cm.exception.volume, 1.1)
with self.assertRaises(InvalidSamplerateError) as cm:
Sound.from_sinwave(440, samplerate=0)
self.assertEqual(cm.exception.frequency, 0)
def test_from_sawtoothwave(self):
sound = Sound.from_sawtoothwave(440,
duration=1,
volume=0.5,
samplerate=44100)
self.assertEqual(sound.samplerate, 44100)
self.assertEqual(sound.duration, 1.0)
self.assertTrue((-0.5 <= sound.data).all())
self.assertTrue((sound.data <= 0.5).all())
self.assertEqual(sound.volume, 0.5)
sound = Sound.from_sawtoothwave(880,
duration=2,
volume=0.8,
samplerate=88200)
self.assertEqual(sound.samplerate, 88200)
        self.assertEqual(sound.duration, 2.0)
        self.assertTrue((-0.8 <= sound.data).all())
        self.assertTrue((sound.data <= 0.8).all())
        self.assertEqual(sound.volume, 0.8)
sound = Sound.from_sawtoothwave(1, duration=2, samplerate=3)
self.assertTrue(numpy.allclose(sound.data[:, 0],
(-1.0, 0.0, 1.0, -1.0, 0.0, 1.0)))
def test_from_sawtoothwave_invalid(self):
with self.assertRaises(InvalidFrequencyError) as cm:
Sound.from_sawtoothwave(0)
self.assertEqual(cm.exception.frequency, 0)
with self.assertRaises(InvalidDurationError) as cm:
Sound.from_sawtoothwave(440, duration=0)
self.assertEqual(cm.exception.duration, 0)
with self.assertRaises(InvalidVolumeError) as cm:
Sound.from_sawtoothwave(440, volume=-0.1)
self.assertEqual(cm.exception.volume, -0.1)
with self.assertRaises(InvalidVolumeError) as cm:
Sound.from_sawtoothwave(440, volume=1.1)
self.assertEqual(cm.exception.volume, 1.1)
with self.assertRaises(InvalidSamplerateError) as cm:
Sound.from_sawtoothwave(440, samplerate=0)
self.assertEqual(cm.exception.frequency, 0)
def test_from_squarewave(self):
sound = Sound.from_squarewave(440,
duration=1,
volume=0.5,
samplerate=44100)
self.assertEqual(sound.samplerate, 44100)
self.assertEqual(sound.duration, 1.0)
self.assertTrue((-0.5 <= sound.data).all())
self.assertTrue((sound.data <= 0.5).all())
self.assertEqual(sound.volume, 0.5)
sound = Sound.from_squarewave(880,
duration=2,
volume=0.8,
samplerate=88200)
self.assertEqual(sound.samplerate, 88200)
        self.assertEqual(sound.duration, 2.0)
        self.assertTrue((-0.8 <= sound.data).all())
        self.assertTrue((sound.data <= 0.8).all())
        self.assertEqual(sound.volume, 0.8)
sound = Sound.from_squarewave(1, duration=2, samplerate=4)
self.assertTrue(numpy.allclose(sound.data[:, 0],
(1, 1, -1, -1, 1, 1, -1, -1)))
def test_from_squarewave_invalid(self):
with self.assertRaises(InvalidFrequencyError) as cm:
Sound.from_squarewave(0)
self.assertEqual(cm.exception.frequency, 0)
with self.assertRaises(InvalidDurationError) as cm:
Sound.from_squarewave(440, duration=0)
self.assertEqual(cm.exception.duration, 0)
with self.assertRaises(InvalidVolumeError) as cm:
Sound.from_squarewave(440, volume=-0.1)
self.assertEqual(cm.exception.volume, -0.1)
with self.assertRaises(InvalidVolumeError) as cm:
Sound.from_squarewave(440, volume=1.1)
self.assertEqual(cm.exception.volume, 1.1)
with self.assertRaises(InvalidSamplerateError) as cm:
Sound.from_squarewave(440, samplerate=0)
self.assertEqual(cm.exception.frequency, 0)
def test_silence(self):
sound = Sound.silence(duration=1.0, samplerate=100)
self.assertEqual(sound.samplerate, 100)
self.assertEqual(sound.duration, 1.0)
self.assertTrue((sound.data == 0).all())
sound = Sound.silence(duration=2.0, samplerate=20)
self.assertEqual(sound.samplerate, 20)
self.assertEqual(sound.duration, 2.0)
self.assertTrue((sound.data == 0).all())
    def test_silence_invalid(self):
with self.assertRaises(InvalidDurationError) as cm:
Sound.silence(duration=0)
self.assertEqual(cm.exception.duration, 0)
with self.assertRaises(InvalidSamplerateError) as cm:
Sound.silence(samplerate=0)
self.assertEqual(cm.exception.frequency, 0)
def test_whitenoise(self):
sound = Sound.from_whitenoise(duration=2, volume=0.1, samplerate=100)
self.assertEqual(sound.samplerate, 100)
self.assertEqual(sound.duration, 2)
self.assertTrue((-0.1 <= sound.data).all())
self.assertTrue((sound.data <= 0.1).all())
def test_whitenoise_invalid(self):
with self.assertRaises(InvalidDurationError) as cm:
Sound.from_whitenoise(duration=0)
self.assertEqual(cm.exception.duration, 0)
with self.assertRaises(InvalidVolumeError) as cm:
Sound.from_whitenoise(volume=-0.1)
self.assertEqual(cm.exception.volume, -0.1)
with self.assertRaises(InvalidVolumeError) as cm:
Sound.from_whitenoise(volume=1.1)
self.assertEqual(cm.exception.volume, 1.1)
with self.assertRaises(InvalidSamplerateError) as cm:
Sound.from_whitenoise(samplerate=0)
self.assertEqual(cm.exception.frequency, 0)
def test_from_fft(self):
        f = numpy.zeros([2, 1024 // 2 + 1, 2], complex)
        f[:, -1, 0] = 1024 // 2
        f[0, 128, 1] = complex(0, -numpy.pi)
        f[1, 256, 1] = complex(0, -numpy.pi)
s = Sound.from_fft(f)
self.assertEqual(s.samplerate, 1024)
self.assertEqual(s.n_channels, 2)
self.assertEqual(s.duration, 1.0)
from_sin = merge_channels(Sound.from_sinwave(128, samplerate=1024),
Sound.from_sinwave(256, samplerate=1024))
self.assertTrue(numpy.allclose(s.data / s.volume,
from_sin.data / from_sin.volume))
def test_from_fft_invalid(self):
        f = numpy.zeros([1024 // 2 + 1, 2], complex)
with self.assertRaises(InvalidSamplerateError) as cm:
Sound.from_fft(f, samplerate=0)
self.assertEqual(cm.exception.frequency, 0)
def test_fft_single_channel(self):
sound = Sound.from_sinwave(440, duration=0.1)
f = sound.fft()
self.assertTrue((0 <= f[:, :, 0]).all())
self.assertEqual(f.shape[0], 1)
self.assertEqual(f.shape[2], 2)
self.assertGreaterEqual(f[:, :, 0].min(), 0)
self.assertLessEqual(f[:, :, 0].max(), sound.samplerate)
self.assertEqual(f[0, :, 1].argmax(), abs(f[0, :, 0] - 440).argmin())
def test_fft_two_channel(self):
sound = merge_channels(Sound.from_sinwave(440, duration=0.1),
Sound.from_sinwave(220, duration=0.1))
f = sound.fft()
self.assertTrue((0 <= f[:, :, 0]).all())
self.assertEqual(f.shape[0], 2)
self.assertEqual(f.shape[2], 2)
self.assertGreaterEqual(f[:, :, 0].min(), 0)
self.assertLessEqual(f[:, :, 0].max(), sound.samplerate)
self.assertEqual(f[0, :, 1].argmax(), abs(f[0, :, 0] - 440).argmin())
self.assertEqual(f[1, :, 1].argmax(), abs(f[1, :, 0] - 220).argmin())
def test_repeat(self):
sound = Sound.from_array([0.0, 0.1, 0.2], 3)
self.assertEqual(sound.duration, 1)
self.assertEqual(tuple(sound.data), (0.0, 0.1, 0.2))
sound = sound.repeat(2)
self.assertEqual(sound.duration, 2)
self.assertEqual(tuple(sound.data), (0.0, 0.1, 0.2, 0.0, 0.1, 0.2))
sound = sound.repeat(2 / 3)
self.assertEqual(sound.duration, 2 / 3)
self.assertEqual(tuple(sound.data), (0.0, 0.1))
def test_repeat_invalid(self):
sound = Sound.from_sinwave(440)
with self.assertRaises(InvalidDurationError) as cm:
sound.repeat(0)
self.assertEqual(cm.exception.duration, 0)
def test_trim_just(self):
sound = Sound.from_array([[0.0, 0.3], [0.1, 0.4], [0.2, 0.5]], 1)
self.assertEqual(sound[0.0].n_channels, 2)
self.assertEqual(tuple(sound[0.0].data[0, :]), (0.0, 0.3))
self.assertEqual(sound[0.4999999999].n_channels, 2)
self.assertEqual(tuple(sound[0.4999999999].data[0, :]), (0.0, 0.3))
self.assertEqual(sound[0.5000000001].n_channels, 2)
self.assertEqual(tuple(sound[0.5000000001].data[0, :]), (0.1, 0.4))
self.assertEqual(sound[1.0].n_channels, 2)
self.assertEqual(tuple(sound[1.0].data[0, :]), (0.1, 0.4))
self.assertEqual(sound[2.0].n_channels, 2)
self.assertEqual(tuple(sound[2.0].data[0, :]), (0.2, 0.5))
self.assertEqual(sound[3.0].n_channels, 2)
self.assertEqual(tuple(sound[3.0].data[0, :]), (0.2, 0.5))
def test_trim_just_invalid(self):
sound = Sound.from_array([0.0, 0.1, 0.2], 3)
with self.assertRaises(OutOfDurationError) as cm:
sound[-0.001]
self.assertEqual(cm.exception.duration, -0.001)
"""
Execution of data pipelines.
Uses forking (multiprocessing processes) for parallelism and message queues for inter-process communication.
"""
import datetime
from datetime import timezone as tz
import functools
import multiprocessing
import os
import sys
import signal
import atexit
import time
import traceback
from multiprocessing import queues
from . import pipelines, config
from .logging import logger, pipeline_events, system_statistics, run_log, node_cost
from . import events
def run_pipeline(pipeline: pipelines.Pipeline, nodes: {pipelines.Node} = None,
with_upstreams: bool = False,
interactively_started: bool = False
) -> [events.Event]:
"""
Runs a pipeline in a forked sub process. Acts as a generator that yields events from the sub process.
Using forking has two advantages:
1. The pipeline is also forked and thus can be modified without affecting the original pipeline.
2. It's possible to hand over control to the parent process while the pipeline is running, for example
for sending output to a browser.
Args:
pipeline: The pipeline to run
nodes: A list of pipeline children that should run
with_upstreams: When true and `nodes` are provided, then all upstreams of `nodes` in `pipeline` are also run
Yields:
Events emitted during pipeline execution
"""
# use forking for starting child processes to avoid cleanup functions and leakage and pickle problems
#
# On newer macs you need to set
# OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES
# env variable *before* starting python/flask otherwise you will get core dumps when any forked process calls
# into certain native code (e.g. requests)! Note that this is done automatically if you create your virtual env
# via the scripts from mara-app >= 2.1.1
#
multiprocessing_context = multiprocessing.get_context('fork')
# A queue for receiving events from forked sub processes
event_queue = multiprocessing_context.Queue()
# The function that is run in a sub process
def run():
try:
# capture output of print statements and other unplanned output
logger.redirect_output(event_queue, pipeline.path())
# all nodes that have not run yet, ordered by priority
node_queue: [pipelines.Node] = []
# data needed for computing cost
node_durations_and_run_times = node_cost.node_durations_and_run_times(pipeline)
# Putting nodes into the node queue
def queue(nodes: [pipelines.Node]):
for node in nodes:
node_cost.compute_cost(node, node_durations_and_run_times)
node_queue.append(node)
node_queue.sort(key=lambda node: node.cost, reverse=True)
if nodes: # only run a set of child nodes
def with_all_upstreams(nodes: {pipelines.Node}):
"""recursively find all upstreams of a list of nodes"""
return functools.reduce(set.union, [with_all_upstreams(node.upstreams) for node in nodes], nodes)
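            # e.g. (illustrative): with_all_upstreams({C}) for a dependency chain
            # A -> B -> C returns {A, B, C}, since upstreams are collected recursively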
# when requested, include all upstreams of nodes, otherwise just use provided nodes
nodes_to_run = with_all_upstreams(set(nodes)) if with_upstreams else set(nodes)
# remove everything from pipeline that should not be run
            # (that makes updating dependencies between nodes easier)
for node in set(pipeline.nodes.values()) - nodes_to_run:
pipeline.remove(node)
# queue remaining nodes
            queue(list(pipeline.nodes.values()))
else:
# remove dependencies to siblings
pipeline.upstreams = set()
pipeline.downstreams = set()
# queue whole pipeline
queue([pipeline])
# book keeping
run_start_time = datetime.datetime.now(tz.utc)
# all nodes that already ran or that won't be run anymore
processed_nodes: {pipelines.Node} = set()
# running pipelines with start times and number of running children
running_pipelines: {pipelines.Pipeline: [datetime.datetime, int]} = {}
failed_pipelines: {pipelines.Pipeline} = set() # pipelines with failed tasks
running_task_processes: {pipelines.Task: TaskProcess} = {}
# make sure any running tasks are killed when this executor process is shutdown
executor_pid = os.getpid()
def ensure_task_processes_killed():
# as we fork, the TaskProcess also runs this function -> ignore it there
if os.getpid() != executor_pid: return
try:
for tp in list(running_task_processes.values()): # type: TaskProcess
if tp.is_alive():
# give it a chance to gracefully shutdown
tp.terminate()
statistics_process.kill()
except BaseException as e:
print(f"Exception during TaskProcess cleanup: {repr(e)}", file=sys.stderr, flush=True)
return
atexit.register(ensure_task_processes_killed)
def dequeue() -> pipelines.Node:
"""
Finds the next task in the queue
- without upstreams or where all upstreams have been run already
- where the pipeline specific maximum number of parallel tasks per pipeline is not reached
"""
for node in node_queue: # type: pipelines.Node
if ((not node.upstreams or len(node.upstreams & processed_nodes) == len(node.upstreams))
and (not isinstance(node.parent, pipelines.Pipeline)
or (not node.parent.max_number_of_parallel_tasks)
or (not node.parent in running_pipelines)
or (running_pipelines[node.parent][1] < node.parent.max_number_of_parallel_tasks))):
node_queue.remove(node)
processed_as_parent_failed = False
parent = node.parent
while parent:
# if the parent pipeline failed (and no overwrite), don't launch new nodes
# this needs to go down to the ultimate parent as we can have cases where we already
# queued a subpipeline and now the parent pipeline failed but the tasks parent pipeline
# (the sub pipeline) is not failed.
# If a task from a parent pipeline fails, even with force_run_all_children on the
# sub pipeline, the sub pipeline would stop. Only if the failed parent pipeline also has
# force_run_all_children, the task would get scheduled
if parent in failed_pipelines and not parent.force_run_all_children:
processed_nodes.add(node)
processed_as_parent_failed = True
break
else: parent = parent.parent
if not processed_as_parent_failed:
return node
def track_finished_pipelines():
"""when all nodes of a pipeline have been processed, then emit events"""
for running_pipeline, (start_time, running_children) \
in dict(running_pipelines).items(): # type: pipelines.Pipeline
if len(set(running_pipeline.nodes.values()) & processed_nodes) == len(running_pipeline.nodes):
succeeded = running_pipeline not in failed_pipelines
event_queue.put(pipeline_events.Output(
node_path=running_pipeline.path(), format=logger.Format.ITALICS, is_error=not succeeded,
message=f'{"succeeded" if succeeded else "failed"}, {logger.format_time_difference(run_start_time, datetime.datetime.now(tz.utc))}'))
event_queue.put(pipeline_events.NodeFinished(
node_path=running_pipeline.path(), start_time=start_time,
end_time=datetime.datetime.now(tz.utc), is_pipeline=True, succeeded=succeeded))
del running_pipelines[running_pipeline]
processed_nodes.add(running_pipeline)
# announce run start
event_queue.put(pipeline_events.RunStarted(node_path=pipeline.path(),
start_time=run_start_time,
pid=os.getpid(),
interactively_started=interactively_started,
node_ids=[node.id for node in (nodes or [])],
is_root_pipeline=(pipeline.parent is None))
)
# collect system stats in a separate Process
statistics_process = multiprocessing.Process(
target=lambda: system_statistics.generate_system_statistics(event_queue), name='system_statistics')
statistics_process.start()
# run as long
# - as task processes are still running
# - as there is still stuff in the node queue
while running_task_processes or node_queue:
# don't do anything if the maximum number of parallel tasks is currently running
if len(running_task_processes) < config.max_number_of_parallel_tasks():
next_node = dequeue() # get the next runnable node from the queue
if next_node:
if isinstance(next_node, pipelines.Pipeline):
# connect pipeline nodes without upstreams to upstreams of pipeline
for upstream in next_node.upstreams:
for pipeline_node in next_node.nodes.values():
if not pipeline_node.upstreams:
next_node.add_dependency(upstream, pipeline_node)
# connect pipeline nodes without downstreams to downstream of pipeline
for downstream in next_node.downstreams:
for pipeline_node in next_node.nodes.values():
if not pipeline_node.downstreams:
next_node.add_dependency(pipeline_node, downstream)
# get cost information for children
node_durations_and_run_times.update(node_cost.node_durations_and_run_times(next_node))
# queue all child nodes
queue(list(next_node.nodes.values()))
# book keeping and event emission
pipeline_start_time = datetime.datetime.now(tz.utc)
running_pipelines[next_node] = [pipeline_start_time, 0]
event_queue.put(pipeline_events.NodeStarted(next_node.path(), pipeline_start_time, True))
event_queue.put(pipeline_events.Output(
node_path=next_node.path(), format=logger.Format.ITALICS,
message='★ ' + node_cost.format_duration(
node_durations_and_run_times.get(tuple(next_node.path()), [0, 0])[0])))
elif isinstance(next_node, pipelines.ParallelTask):
# create sub tasks and queue them
task_start_time = datetime.datetime.now(tz.utc)
try:
logger.redirect_output(event_queue, next_node.path())
logger.log('☆ Launching tasks', format=logger.Format.ITALICS)
sub_pipeline = next_node.launch()
next_node.parent.replace(next_node, sub_pipeline)
queue([sub_pipeline])
except Exception as e:
event_queue.put(pipeline_events.NodeStarted(
node_path=next_node.path(), start_time=task_start_time, is_pipeline=True))
logger.log(message=f'Could not launch parallel tasks', format=logger.Format.ITALICS,
is_error=True)
logger.log(message=traceback.format_exc(),
format=pipeline_events.Output.Format.VERBATIM, is_error=True)
event_queue.put(pipeline_events.NodeFinished(
node_path=next_node.path(), start_time=task_start_time,
end_time=datetime.datetime.now(tz.utc), is_pipeline=True, succeeded=False))
failed_pipelines.add(next_node.parent)
processed_nodes.add(next_node)
finally:
logger.redirect_output(event_queue, pipeline.path())
else:
# run a task in a subprocess
if next_node.parent in running_pipelines:
running_pipelines[next_node.parent][1] += 1
event_queue.put(
pipeline_events.NodeStarted(next_node.path(), datetime.datetime.now(tz.utc), False))
event_queue.put(pipeline_events.Output(
node_path=next_node.path(), format=logger.Format.ITALICS,
message='★ ' + node_cost.format_duration(
node_durations_and_run_times.get(tuple(next_node.path()), [0, 0])[0])))
status_queue = multiprocessing_context.Queue()
process = TaskProcess(next_node, event_queue, status_queue)
process.start()
running_task_processes[next_node] = process
# check whether some of the running processes finished
for task_process in list(running_task_processes.values()): # type: TaskProcess
if task_process.is_alive():
pass
else:
del running_task_processes[task_process.task]
if task_process.task.parent in running_pipelines:
running_pipelines[task_process.task.parent][1] -= 1
processed_nodes.add(task_process.task)
succeeded = not (task_process.status_queue.get() == False or task_process.exitcode != 0)
if not succeeded and not task_process.task.parent.ignore_errors:
for parent in task_process.task.parents()[:-1]:
failed_pipelines.add(parent)
end_time = datetime.datetime.now(tz.utc)
event_queue.put(
pipeline_events.Output(task_process.task.path(),
('succeeded' if succeeded else 'failed') + ', '
+ logger.format_time_difference(task_process.start_time, end_time),
format=logger.Format.ITALICS, is_error=not succeeded))
event_queue.put(pipeline_events.NodeFinished(task_process.task.path(), task_process.start_time,
end_time, False, succeeded))
# check if some pipelines finished
track_finished_pipelines()
# don't busy-wait
time.sleep(0.001)
except:
event_queue.put(pipeline_events.Output(node_path=pipeline.path(), message=traceback.format_exc(),
format=logger.Format.ITALICS, is_error=True))
# run again because `dequeue` might have moved more nodes to `finished_nodes`
track_finished_pipelines()
# kill the stats process (joining or terminating does not work in gunicorn)
os.kill(statistics_process.pid, signal.SIGKILL)
statistics_process.join()
# run finished
event_queue.put(pipeline_events.RunFinished(node_path=pipeline.path(), end_time=datetime.datetime.now(tz.utc),
succeeded=not failed_pipelines,
interactively_started=interactively_started))
# fork the process and run `run`
run_process = multiprocessing_context.Process(target=run, name='pipeline-' + '-'.join(pipeline.path()))
run_process.start()
runlogger = run_log.RunLogger()
# make sure that we close this run (if still open) as failed when we close this python process
# On SIGKILL we will still leave behind open runs...
# this needs to run after we forked off the run_process as that one should not inherit the atexit function
def ensure_closed_run_on_abort():
try:
run_log.close_open_run_after_error(runlogger.run_id)
except BaseException as e:
print(f"Exception during 'close_open_run_after_error()': {repr(e)}", file=sys.stderr, flush=True)
return
f.SetGeometry(ogr.CreateGeometryFromWkt('POLYGON ((0 0,0 1,1 1,1 0,0 0))'))
lyr.CreateFeature(f)
f = None
ds = None
# Split features
layer = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + "test", 'test', u'ogr')
self.assertTrue(layer.isValid())
self.assertTrue(layer.isSpatial())
self.assertEqual([f for f in layer.getFeatures()][0].geometry().asWkt(), 'Polygon ((0 0, 0 1, 1 1, 1 0, 0 0))')
layer.startEditing()
self.assertEqual(layer.splitFeatures([QgsPointXY(0.5, 0), QgsPointXY(0.5, 1)], 0), 0)
self.assertTrue(layer.commitChanges())
self.assertEqual(layer.featureCount(), 2)
layer = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + "test", 'test', u'ogr')
self.assertEqual(layer.featureCount(), 2)
self.assertEqual([f for f in layer.getFeatures()][0].geometry().asWkt(), 'Polygon ((0.5 0, 0.5 1, 1 1, 1 0, 0.5 0))')
self.assertEqual([f for f in layer.getFeatures()][1].geometry().asWkt(), 'Polygon ((0.5 1, 0.5 0, 0 0, 0 1, 0.5 1))')
def testCreateAttributeIndex(self):
tmpfile = os.path.join(self.basetestpath, 'testGeopackageAttributeIndex.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbPolygon)
lyr.CreateField(ogr.FieldDefn('str_field', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('str_field2', ogr.OFTString))
f = None
ds = None
vl = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + "test", 'test', u'ogr')
self.assertTrue(vl.isValid())
self.assertTrue(vl.dataProvider().capabilities() & QgsVectorDataProvider.CreateAttributeIndex)
self.assertFalse(vl.dataProvider().createAttributeIndex(-1))
self.assertFalse(vl.dataProvider().createAttributeIndex(100))
# should not be allowed - there's already a index on the primary key
self.assertFalse(vl.dataProvider().createAttributeIndex(0))
self.assertTrue(vl.dataProvider().createAttributeIndex(1))
con = spatialite_connect(tmpfile, isolation_level=None)
cur = con.cursor()
rs = cur.execute("SELECT * FROM sqlite_master WHERE type='index' AND tbl_name='test'")
res = [row for row in rs]
self.assertEqual(len(res), 1)
index_name = res[0][1]
rs = cur.execute("PRAGMA index_info({})".format(index_name))
res = [row for row in rs]
self.assertEqual(len(res), 1)
self.assertEqual(res[0][2], 'str_field')
# second index
self.assertTrue(vl.dataProvider().createAttributeIndex(2))
rs = cur.execute("SELECT * FROM sqlite_master WHERE type='index' AND tbl_name='test'")
res = [row for row in rs]
self.assertEqual(len(res), 2)
indexed_columns = []
for row in res:
index_name = row[1]
rs = cur.execute("PRAGMA index_info({})".format(index_name))
res = [row for row in rs]
self.assertEqual(len(res), 1)
indexed_columns.append(res[0][2])
self.assertCountEqual(indexed_columns, ['str_field', 'str_field2'])
con.close()
def testCreateSpatialIndex(self):
tmpfile = os.path.join(self.basetestpath, 'testGeopackageSpatialIndex.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbPolygon, options=['SPATIAL_INDEX=NO'])
lyr.CreateField(ogr.FieldDefn('str_field', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('str_field2', ogr.OFTString))
f = None
ds = None
vl = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + "test", 'test', u'ogr')
self.assertTrue(vl.isValid())
self.assertTrue(vl.dataProvider().capabilities() & QgsVectorDataProvider.CreateSpatialIndex)
self.assertTrue(vl.dataProvider().createSpatialIndex())
def testSubSetStringEditable_bug17795_but_with_modified_behavior(self):
"""Test that a layer is editable after setting a subset"""
tmpfile = os.path.join(self.basetestpath, 'testSubSetStringEditable_bug17795.gpkg')
shutil.copy(TEST_DATA_DIR + '/' + 'provider/bug_17795.gpkg', tmpfile)
isEditable = QgsVectorDataProvider.ChangeAttributeValues
testPath = tmpfile + '|layername=bug_17795'
vl = QgsVectorLayer(testPath, 'subset_test', 'ogr')
self.assertTrue(vl.isValid())
self.assertTrue(vl.dataProvider().capabilities() & isEditable)
vl = QgsVectorLayer(testPath, 'subset_test', 'ogr')
vl.setSubsetString('')
self.assertTrue(vl.isValid())
self.assertTrue(vl.dataProvider().capabilities() & isEditable)
vl = QgsVectorLayer(testPath, 'subset_test', 'ogr')
vl.setSubsetString('"category" = \'one\'')
self.assertTrue(vl.isValid())
self.assertTrue(vl.dataProvider().capabilities() & isEditable)
vl.setSubsetString('')
self.assertTrue(vl.dataProvider().capabilities() & isEditable)
def testSubsetStringExtent_bug17863(self):
"""Check that the extent is correct when applied in the ctor and when
modified after a subset string is set """
def _lessdigits(s):
return re.sub(r'(\d+\.\d{3})\d+', r'\1', s)
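        # e.g. (illustrative): _lessdigits('1.2345678 : 9.8765432') -> '1.234 : 9.876',
        # so extents can be compared while ignoring insignificant decimal places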
tmpfile = os.path.join(self.basetestpath, 'testSubsetStringExtent_bug17863.gpkg')
shutil.copy(TEST_DATA_DIR + '/' + 'provider/bug_17795.gpkg', tmpfile)
testPath = tmpfile + '|layername=bug_17795'
subSetString = '"name" = \'int\''
subSet = '|layername=bug_17795|subset=%s' % subSetString
# unfiltered
vl = QgsVectorLayer(testPath, 'test', 'ogr')
self.assertTrue(vl.isValid())
unfiltered_extent = _lessdigits(vl.extent().toString())
del(vl)
# filter after construction ...
subSet_vl2 = QgsVectorLayer(testPath, 'test', 'ogr')
self.assertEqual(_lessdigits(subSet_vl2.extent().toString()), unfiltered_extent)
# ... apply filter now!
subSet_vl2.setSubsetString(subSetString)
self.assertEqual(subSet_vl2.subsetString(), subSetString)
self.assertNotEqual(_lessdigits(subSet_vl2.extent().toString()), unfiltered_extent)
filtered_extent = _lessdigits(subSet_vl2.extent().toString())
del(subSet_vl2)
# filtered in constructor
subSet_vl = QgsVectorLayer(testPath + subSet, 'subset_test', 'ogr')
self.assertEqual(subSet_vl.subsetString(), subSetString)
self.assertTrue(subSet_vl.isValid())
# This was failing in bug 17863
self.assertEqual(_lessdigits(subSet_vl.extent().toString()), filtered_extent)
self.assertNotEqual(_lessdigits(subSet_vl.extent().toString()), unfiltered_extent)
def testRequestWithoutGeometryOnLayerMixedGeometry(self):
""" Test bugfix for https://issues.qgis.org/issues/19077 """
# Issue is more a generic one of the OGR provider, but easy to trigger with GPKG
tmpfile = os.path.join(self.basetestpath, 'testRequestWithoutGeometryOnLayerMixedGeometry.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbUnknown, options=['SPATIAL_INDEX=NO'])
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(0 1)'))
lyr.CreateFeature(f)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING(0 0,1 0)'))
lyr.CreateFeature(f)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING(0 0,1 0)'))
lyr.CreateFeature(f)
f = None
ds = None
vl = QgsVectorLayer(u'{}'.format(tmpfile) + "|geometrytype=Point|layername=" + "test", 'test', u'ogr')
self.assertTrue(vl.isValid())
request = QgsFeatureRequest().setFlags(QgsFeatureRequest.NoGeometry)
features = [f for f in vl.getFeatures(request)]
self.assertEqual(len(features), 1)
def testAddingTwoIntFieldsWithWidth(self):
""" Test buggfix for https://issues.qgis.org/issues/19009 """
tmpfile = os.path.join(self.basetestpath, 'testRequestWithoutGeometryOnLayerMixedGeometry.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbPoint, options=['SPATIAL_INDEX=NO'])
lyr.CreateField(ogr.FieldDefn('a', ogr.OFTInteger))
ds = None
vl = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + "test", 'test', u'ogr')
self.assertTrue(vl.isValid())
vl.startEditing()
self.assertTrue(vl.addAttribute(QgsField("b", QVariant.Int, "integer", 10)))
self.assertTrue(vl.commitChanges())
vl.startEditing()
self.assertTrue(vl.addAttribute(QgsField("c", QVariant.Int, "integer", 10)))
self.assertTrue(vl.commitChanges())
def testApproxFeatureCountAndExtent(self):
""" Test perf improvement for for https://issues.qgis.org/issues/18402 """
tmpfile = os.path.join(self.basetestpath, 'testApproxFeatureCountAndExtent.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbPoint)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(0 1)'))
lyr.CreateFeature(f)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(2 3)'))
lyr.CreateFeature(f)
fid = f.GetFID()
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(4 5)'))
lyr.CreateFeature(f)
lyr.DeleteFeature(fid)
ds = None
ds = ogr.Open(tmpfile, update=1)
ds.ExecuteSQL('DROP TABLE gpkg_ogr_contents')
ds = None
os.environ['QGIS_GPKG_FC_THRESHOLD'] = '1'
vl = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + "test", 'test', u'ogr')
self.assertTrue(vl.isValid())
fc = vl.featureCount()
del os.environ['QGIS_GPKG_FC_THRESHOLD']
self.assertEqual(fc, 3) # didn't notice the hole
reference = QgsGeometry.fromRect(QgsRectangle(0, 1, 4, 5))
provider_extent = QgsGeometry.fromRect(vl.extent())
self.assertTrue(QgsGeometry.compare(provider_extent.asPolygon()[0], reference.asPolygon()[0], 0.00001),
provider_extent.asPolygon()[0])
def testRegenerateFid(self):
""" Test regenerating feature ids """
fields = QgsFields()
fields.append(QgsField('fid', QVariant.Int))
fields.append(QgsField('f1', QVariant.Int))
tmpfile = os.path.join(self.basetestpath, 'testRegenerateFid.gpkg')
options = {}
options['update'] = True
options['driverName'] = 'GPKG'
options['layerName'] = 'table1'
exporter = QgsVectorLayerExporter(tmpfile, "ogr", fields, QgsWkbTypes.Polygon, QgsCoordinateReferenceSystem(3111), False, options, QgsFeatureSink.RegeneratePrimaryKey)
self.assertFalse(exporter.errorCode(),
'unexpected export error {}: {}'.format(exporter.errorCode(), exporter.errorMessage()))
feat = QgsFeature(fields)
feat['fid'] = 0
feat['f1'] = 10
exporter.addFeature(feat)
feat['fid'] = 0
feat['f1'] = 20
exporter.addFeature(feat)
feat['fid'] = 1
feat['f1'] = 30
exporter.addFeature(feat)
feat['fid'] = 1
feat['f1'] = 40
exporter.addFeature(feat)
del exporter
# make sure layers exist
lyr = QgsVectorLayer('{}|layername=table1'.format(tmpfile), "lyr1", "ogr")
self.assertTrue(lyr.isValid())
self.assertEqual(lyr.crs().authid(), 'EPSG:3111')
self.assertEqual(lyr.wkbType(), QgsWkbTypes.Polygon)
values = set([f['f1'] for f in lyr.getFeatures()])
self.assertEqual(values, set([10, 20, 30, 40]))
fids = set([f['fid'] for f in lyr.getFeatures()])
self.assertEqual(len(fids), 4)
def testTransaction(self):
tmpfile = os.path.join(self.basetestpath, 'testTransaction.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('lyr1', geom_type=ogr.wkbPoint)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(0 1)'))
lyr.CreateFeature(f)
lyr = ds.CreateLayer('lyr2', geom_type=ogr.wkbPoint)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(2 3)'))
lyr.CreateFeature(f)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(4 5)'))
lyr.CreateFeature(f)
ds = None
vl1 = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + "lyr1", 'test', u'ogr')
self.assertTrue(vl1.isValid())
vl2 = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + "lyr2", 'test', u'ogr')
self.assertTrue(vl2.isValid())
# prepare a project with transactions enabled
p = QgsProject()
p.setAutoTransaction(True)
p.addMapLayers([vl1, vl2])
self.assertTrue(vl1.startEditing())
self.assertIsNotNone(vl1.dataProvider().transaction())
self.assertTrue(vl1.deleteFeature(1))
# An iterator opened on the layer should see the feature deleted
self.assertEqual(len([f for f in vl1.getFeatures(QgsFeatureRequest())]), 0)
# But not if opened from another connection
vl1_external = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + "lyr1", 'test', u'ogr')
self.assertTrue(vl1_external.isValid())
self.assertEqual(len([f for f in vl1_external.getFeatures(QgsFeatureRequest())]), 1)
del vl1_external
self.assertTrue(vl1.commitChanges())
# Should still get zero features on vl1
self.assertEqual(len([f for f in vl1.getFeatures(QgsFeatureRequest())]), 0)
self.assertEqual(len([f for f in vl2.getFeatures(QgsFeatureRequest())]), 2)
# Test undo/redo
self.assertTrue(vl2.startEditing())
self.assertIsNotNone(vl2.dataProvider().transaction())
self.assertTrue(vl2.editBuffer().deleteFeature(1))
self.assertEqual(len([f for f in vl2.getFeatures(QgsFeatureRequest())]), 1)
self.assertTrue(vl2.editBuffer().deleteFeature(2))
self.assertEqual(len([f for f in vl2.getFeatures(QgsFeatureRequest())]), 0)
vl2.undoStack().undo()
self.assertEqual(len([f for f in vl2.getFeatures(QgsFeatureRequest())]), 1)
vl2.undoStack().undo()
self.assertEqual(len([f for f in vl2.getFeatures(QgsFeatureRequest())]), 2)
vl2.undoStack().redo()
self.assertEqual(len([f for f in vl2.getFeatures(QgsFeatureRequest())]), 1)
self.assertTrue(vl2.commitChanges())
self.assertEqual(len([f for f in vl2.getFeatures(QgsFeatureRequest())]), 1)
del vl1
del vl2
vl2_external = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + "lyr2", 'test', u'ogr')
self.assertTrue(vl2_external.isValid())
self.assertEqual(len([f for f in vl2_external.getFeatures(QgsFeatureRequest())]), 1)
del vl2_external
def testJson(self):
if int(gdal.VersionInfo('VERSION_NUM')) < GDAL_COMPUTE_VERSION(2, 4, 0):
return
tmpfile = os.path.join(self.basetestpath, 'test_json.gpkg')
testdata_path = unitTestDataPath('provider')
shutil.copy(os.path.join(unitTestDataPath('provider'), 'test_json.gpkg'), tmpfile)
        vl = QgsVectorLayer('{}|layerid=0'.format(tmpfile), 'foo', 'ogr')
self.assertTrue(vl.isValid())
fields = vl.dataProvider().fields()
self.assertEqual(fields.at(fields.indexFromName('json_content')).type(), QVariant.Map)
fi = vl.getFeatures(QgsFeatureRequest())
f = QgsFeature()
#test reading dict value from attribute
while fi.nextFeature(f):
if f['fid'] == 1:
self.assertIsInstance(f['json_content'], dict)
self.assertEqual(f['json_content'], {'foo': 'bar'})
#test changing dict value in attribute
f['json_content'] = {'foo': 'baz'}
self.assertEqual(f['json_content'], {'foo': 'baz'})
                #test changing dict to list
f['json_content'] = ['eins', 'zwei', 'drei']
self.assertEqual(f['json_content'], ['eins', 'zwei', 'drei'])
#test changing list value in attribute
f['json_content'] = ['eins', 'zwei', 'drei', 4]
self.assertEqual(f['json_content'], ['eins', 'zwei', 'drei', 4])
#test changing to complex json structure
f['json_content'] = {'name': 'Lily', 'age': '0', 'cars': {'car1': ['fiat tipo', 'fiat punto', 'davoser schlitten'], 'car2': 'bobbycar', 'car3': 'tesla'}}
self.assertEqual(f['json_content'], {'name': 'Lily', 'age': '0', 'cars': {'car1': ['fiat tipo', 'fiat punto', 'davoser schlitten'], 'car2': 'bobbycar', 'car3': 'tesla'}})
#test adding attribute
vl.startEditing()
self.assertTrue(vl.addAttribute(QgsField('json_content2', QVariant.Map, "JSON", 60, 0, 'no comment', QVariant.String)))
self.assertTrue(vl.commitChanges())
vl.startEditing()
self.assertTrue(vl.addAttribute(QgsField('json_content3', QVariant.Map, "JSON", 60, 0, 'no comment', QVariant.String)))
self.assertTrue(vl.commitChanges())
#test setting values to new attributes
while fi.nextFeature(f):
if f['fid'] == 2:
f['json_content'] = {'uno': 'foo'}
f['json_content2'] = ['uno', 'due', 'tre']
f['json_content3'] = {'uno': ['uno', 'due', 'tre']}
self.assertEqual(f['json_content'], {'foo': 'baz'})
self.assertEqual(f['json_content2'], ['uno', 'due', 'tre'])
self.assertEqual(f['json_content3'], {'uno': ['uno', 'due', 'tre']})
#test deleting attribute
vl.startEditing()
self.assertTrue(vl.deleteAttribute(vl.fields().indexFromName('json_content3')))
self.assertTrue(vl.commitChanges())
#test if index of existent field is not -1 and the one of the deleted is -1
self.assertNotEqual(vl.fields().indexFromName('json_content2'), -1)
self.assertEqual(vl.fields().indexFromName('json_content3'), -1)
def test_quote_identifier(self):
"""Regression #21100"""
tmpfile = os.path.join(self.basetestpath, 'bug_21100-wierd_field_names.gpkg') # spellok
shutil.copy(os.path.join(unitTestDataPath(''), 'bug_21100-wierd_field_names.gpkg'), tmpfile) # spellok
vl = QgsVectorLayer('{}|layerid=0'.format(tmpfile), 'foo', 'ogr')
self.assertTrue(vl.isValid())
for i in range(1, len(vl.fields())):
self.assertEqual(vl.uniqueValues(i), {'a', 'b', 'c'})
def testGeopackageLayerMetadata(self):
"""
Geopackage layer description
import math
import random
import geomstats.backend as gs
from geomstats.geometry.spd_matrices import SPDMatrices
from tests.data_generation import _OpenSetTestData, _RiemannianMetricTestData
SQRT_2 = math.sqrt(2.0)
LN_2 = math.log(2.0)
EXP_1 = math.exp(1.0)
EXP_2 = math.exp(2.0)
SINH_1 = math.sinh(1.0)
class SPDMatricesTestData(_OpenSetTestData):
smoke_space_args_list = [(2,), (3,), (4,), (5,)]
smoke_n_points_list = [1, 2, 1, 2]
n_list = random.sample(range(2, 5), 2)
space_args_list = [(n,) for n in n_list]
n_points_list = random.sample(range(1, 5), 2)
shape_list = [(n, n) for n in n_list]
n_vecs_list = random.sample(range(1, 10), 2)
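# Note on the pattern used below: the smoke_* lists parametrize the fixed,
# hand-checked cases passed to generate_tests(), while n_list /
# space_args_list / n_points_list / n_vecs_list are randomly sampled and feed
# the generic _OpenSetTestData helpers further down (e.g.
# random_point_belongs_test_data).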
def belongs_test_data(self):
smoke_data = [
dict(n=2, mat=[[3.0, -1.0], [-1.0, 3.0]], expected=True),
dict(n=2, mat=[[1.0, 1.0], [2.0, 1.0]], expected=False),
dict(
n=3,
mat=[[1.0, 2.0, 3.0], [2.0, 4.0, 5.0], [3.0, 5.0, 6.0]],
expected=False,
),
dict(
n=2,
mat=[[[1.0, 0.0], [0.0, 1.0]], [[1.0, -1.0], [0.0, 1.0]]],
expected=[True, False],
),
]
return self.generate_tests(smoke_data)
def projection_test_data(self):
smoke_data = [
dict(n=2, mat=[[1.0, 0.0], [0.0, 1.0]], expected=[[1.0, 0.0], [0.0, 1.0]]),
dict(
n=2,
mat=[[-1.0, 0.0], [0.0, -2.0]],
expected=[[gs.atol, 0.0], [0.0, gs.atol]],
),
]
return self.generate_tests(smoke_data)
def logm_test_data(self):
smoke_data = [
dict(spd_mat=[[1.0, 0.0], [0.0, 1.0]], expected=[[0.0, 0.0], [0.0, 0.0]])
]
return self.generate_tests(smoke_data)
def cholesky_factor_test_data(self):
smoke_data = [
dict(
n=2,
spd_mat=[[[1.0, 2.0], [2.0, 5.0]], [[1.0, 0.0], [0.0, 1.0]]],
expected=[[[1.0, 0.0], [2.0, 1.0]], [[1.0, 0.0], [0.0, 1.0]]],
),
dict(
n=3,
spd_mat=[[2.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 2.0]],
expected=[
[SQRT_2, 0.0, 0.0],
[0.0, SQRT_2, 0.0],
[0.0, 0.0, SQRT_2],
],
),
]
return self.generate_tests(smoke_data)
def cholesky_factor_belongs_test_data(self):
list_n = random.sample(range(1, 100), 10)
n_samples = 10
random_data = [
dict(n=n, mat=SPDMatrices(n).random_point(n_samples)) for n in list_n
]
return self.generate_tests([], random_data)
def differential_cholesky_factor_test_data(self):
smoke_data = [
dict(
n=2,
tangent_vec=[[1.0, 1.0], [1.0, 1.0]],
base_point=[[4.0, 2.0], [2.0, 5.0]],
expected=[[1 / 4, 0.0], [3 / 8, 1 / 16]],
)
]
return self.generate_tests(smoke_data)
def differential_power_test_data(self):
smoke_data = [
dict(
power=0.5,
tangent_vec=[[2.0, 1.0, 1.0], [1.0, 0.5, 0.5], [1.0, 0.5, 0.5]],
base_point=[[1.0, 0.0, 0.0], [0.0, 2.5, 1.5], [0.0, 1.5, 2.5]],
expected=[
[1.0, 1 / 3, 1 / 3],
[1 / 3, 0.125, 0.125],
[1 / 3, 0.125, 0.125],
],
)
]
return self.generate_tests(smoke_data)
def inverse_differential_power_test_data(self):
smoke_data = [
dict(
power=0.5,
tangent_vec=[
[1.0, 1 / 3, 1 / 3],
[1 / 3, 0.125, 0.125],
[1 / 3, 0.125, 0.125],
],
base_point=[[1.0, 0.0, 0.0], [0.0, 2.5, 1.5], [0.0, 1.5, 2.5]],
expected=[[2.0, 1.0, 1.0], [1.0, 0.5, 0.5], [1.0, 0.5, 0.5]],
)
]
return self.generate_tests(smoke_data)
def differential_log_test_data(self):
smoke_data = [
dict(
tangent_vec=[[1.0, 1.0, 3.0], [1.0, 1.0, 3.0], [3.0, 3.0, 4.0]],
base_point=[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 4.0]],
expected=[
[1.0, 1.0, 2 * LN_2],
[1.0, 1.0, 2 * LN_2],
[2 * LN_2, 2 * LN_2, 1],
],
)
]
return self.generate_tests(smoke_data)
def inverse_differential_log_test_data(self):
smoke_data = [
dict(
tangent_vec=[
[1.0, 1.0, 2 * LN_2],
[1.0, 1.0, 2 * LN_2],
[2 * LN_2, 2 * LN_2, 1],
],
base_point=[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 4.0]],
expected=[[1.0, 1.0, 3.0], [1.0, 1.0, 3.0], [3.0, 3.0, 4.0]],
)
]
return self.generate_tests(smoke_data)
def differential_exp_test_data(self):
smoke_data = [
dict(
tangent_vec=[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
base_point=[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]],
expected=[
[EXP_1, EXP_1, SINH_1],
[EXP_1, EXP_1, SINH_1],
[SINH_1, SINH_1, 1 / EXP_1],
],
)
]
return self.generate_tests(smoke_data)
def inverse_differential_exp_test_data(self):
smoke_data = [
dict(
tangent_vec=[
[EXP_1, EXP_1, SINH_1],
[EXP_1, EXP_1, SINH_1],
[SINH_1, SINH_1, 1 / EXP_1],
],
base_point=[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]],
expected=[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
)
]
return self.generate_tests(smoke_data)
def random_point_belongs_test_data(self):
belongs_atol = gs.atol * 100000
return self._random_point_belongs_test_data(
self.smoke_space_args_list,
self.smoke_n_points_list,
self.space_args_list,
self.n_points_list,
belongs_atol,
)
def to_tangent_is_tangent_test_data(self):
is_tangent_atol = gs.atol * 1000
return self._to_tangent_is_tangent_test_data(
SPDMatrices,
self.space_args_list,
self.shape_list,
self.n_vecs_list,
is_tangent_atol,
)
def random_tangent_vec_is_tangent_test_data(self):
return self._random_tangent_vec_is_tangent_test_data(
SPDMatrices, self.space_args_list, self.n_vecs_list
)
def projection_belongs_test_data(self):
return self._projection_belongs_test_data(
self.space_args_list, self.shape_list, self.n_points_list
)
def to_tangent_is_tangent_in_ambient_space_test_data(self):
return self._to_tangent_is_tangent_in_ambient_space_test_data(
SPDMatrices, self.space_args_list, self.shape_list
)
class SPDMetricAffineTestData(_RiemannianMetricTestData):
n_list = random.sample(range(2, 5), 2)
power_affine_list = [1.0, -0.5]
metric_args_list = list(zip(n_list, power_affine_list))
shape_list = [(n, n) for n in n_list]
space_list = [SPDMatrices(n) for n in n_list]
n_points_list = random.sample(range(1, 5), 2)
n_points_a_list = random.sample(range(1, 5), 2)
n_tangent_vecs_list = random.sample(range(1, 5), 2)
n_points_b_list = [1]
alpha_list = [1] * 2
n_rungs_list = [1] * 2
scheme_list = ["pole"] * 2
def inner_product_test_data(self):
smoke_data = [
dict(
n=3,
power_affine=0.5,
tangent_vec_a=[[2.0, 1.0, 1.0], [1.0, 0.5, 0.5], [1.0, 0.5, 0.5]],
tangent_vec_b=[[2.0, 1.0, 1.0], [1.0, 0.5, 0.5], [1.0, 0.5, 0.5]],
base_point=[[1.0, 0.0, 0.0], [0.0, 2.5, 1.5], [0.0, 1.5, 2.5]],
expected=713 / 144,
)
]
return self.generate_tests(smoke_data)
def exp_test_data(self):
smoke_data = [
dict(
n=2,
power_affine=1.0,
tangent_vec=[[2.0, 0.0], [0.0, 2.0]],
base_point=[[1.0, 0.0], [0.0, 1.0]],
expected=[[EXP_2, 0.0], [0.0, EXP_2]],
)
]
return self.generate_tests(smoke_data)
def log_test_data(self):
smoke_data = [
dict(
n=2,
power_affine=1.0,
point=[[1.0, 0.0], [0.0, 1.0]],
base_point=[[2.0, 0.0], [0.0, 2.0]],
expected=[[-2 * LN_2, 0.0], [0.0, -2 * LN_2]],
)
]
return self.generate_tests(smoke_data)
def exp_shape_test_data(self):
return self._exp_shape_test_data(
self.metric_args_list, self.space_list, self.shape_list
)
def log_shape_test_data(self):
return self._log_shape_test_data(self.metric_args_list, self.space_list)
def squared_dist_is_symmetric_test_data(self):
return self._squared_dist_is_symmetric_test_data(
self.metric_args_list,
self.space_list,
self.n_points_a_list,
self.n_points_b_list,
atol=gs.atol * 1000,
)
def exp_belongs_test_data(self):
return self._exp_belongs_test_data(
self.metric_args_list,
self.space_list,
self.shape_list,
self.n_tangent_vecs_list,
belongs_atol=gs.atol * 1000,
)
def log_is_tangent_test_data(self):
return self._log_is_tangent_test_data(
self.metric_args_list,
self.space_list,
self.n_points_list,
is_tangent_atol=gs.atol * 1000,
)
def geodesic_ivp_belongs_test_data(self):
return self._geodesic_ivp_belongs_test_data(
self.metric_args_list,
self.space_list,
self.shape_list,
self.n_points_list,
belongs_atol=gs.atol * 1000,
)
def geodesic_bvp_belongs_test_data(self):
return self._geodesic_bvp_belongs_test_data(
self.metric_args_list,
self.space_list,
self.n_points_list,
belongs_atol=gs.atol * 1000,
)
def exp_after_log_test_data(self):
return self._exp_after_log_test_data(
self.metric_args_list,
self.space_list,
self.n_points_list,
rtol=gs.rtol * 100,
atol=gs.atol * 10000,
)
def log_after_exp_test_data(self):
return self._log_after_exp_test_data(
self.metric_args_list,
self.space_list,
self.shape_list,
self.n_tangent_vecs_list,
amplitude=10,
rtol=gs.rtol * 100,
atol=gs.atol * 10000,
)
def exp_ladder_parallel_transport_test_data(self):
return self._exp_ladder_parallel_transport_test_data(
self.metric_args_list,
self.space_list,
self.shape_list,
self.n_tangent_vecs_list,
self.n_rungs_list,
self.alpha_list,
self.scheme_list,
)
def exp_geodesic_ivp_test_data(self):
return self._exp_geodesic_ivp_test_data(
self.metric_args_list,
self.space_list,
self.shape_list,
self.n_tangent_vecs_list,
self.n_points_list,
rtol=gs.rtol * 1000,
atol=gs.atol * 1000,
)
def parallel_transport_ivp_is_isometry_test_data(self):
return self._parallel_transport_ivp_is_isometry_test_data(
self.metric_args_list,
self.space_list,
self.shape_list,
self.n_tangent_vecs_list,
is_tangent_atol=gs.atol * 1000,
atol=gs.atol * 1000,
)
def parallel_transport_bvp_is_isometry_test_data(self):
return self._parallel_transport_bvp_is_isometry_test_data(
self.metric_args_list,
self.space_list,
self.shape_list,
self.n_tangent_vecs_list,
is_tangent_atol=gs.atol * 1000,
atol=gs.atol * 1000,
)
def dist_is_symmetric_test_data(self):
return self._dist_is_symmetric_test_data(
self.metric_args_list,
self.space_list,
self.n_points_a_list,
self.n_points_b_list,
)
def dist_is_positive_test_data(self):
return self._dist_is_positive_test_data(
self.metric_args_list,
self.space_list,
self.n_points_a_list,
self.n_points_b_list,
)
def squared_dist_is_positive_test_data(self):
return self._squared_dist_is_positive_test_data(
self.metric_args_list,
self.space_list,
self.n_points_a_list,
self.n_points_b_list,
)
def dist_is_norm_of_log_test_data(self):
return self._dist_is_norm_of_log_test_data(
self.metric_args_list,
self.space_list,
self.n_points_a_list,
self.n_points_b_list,
)
def dist_point_to_itself_is_zero_test_data(self):
return self._dist_point_to_itself_is_zero_test_data(
self.metric_args_list,
self.space_list,
self.n_points_list,
atol=gs.atol * 1000,
)
def inner_product_is_symmetric_test_data(self):
return self._inner_product_is_symmetric_test_data(
self.metric_args_list,
self.space_list,
self.shape_list,
self.n_tangent_vecs_list,
)
def triangle_inequality_of_dist_test_data(self):
return self._triangle_inequality_of_dist_test_data(
self.metric_args_list, self.space_list, self.n_points_list
)
class SPDMetricBuresWassersteinTestData(_RiemannianMetricTestData):
n_list = random.sample(range(2, 5), 2)
metric_args_list = [(n,) for n in n_list]
shape_list = [(n, n) for n in n_list]
space_list = [SPDMatrices(n) for n in n_list]
n_points_list = random.sample(range(1, 5), 2)
n_tangent_vecs_list = random.sample(range(1, 5), 2)
n_points_a_list = random.sample(range(1, 5), 2)
n_points_b_list = [1]
alpha_list = [1] * 2
n_rungs_list = [1] * 2
scheme_list = ["pole"] * 2
def inner_product_test_data(self):
smoke_data = [
dict(
n=3,
tangent_vec_a=[[2.0, 1.0, 1.0], [1.0, 0.5, 0.5], [1.0, 0.5, 0.5]],
tangent_vec_b=[[1.0, 2.0, 4.0], [2.0, 3.0, 8.0], [4.0, 8.0, 5.0]],
base_point=[[1.0, 0.0, 0.0], [0.0, 1.5, 0.5], [0.0, 0.5, 1.5]],
expected=4.0,
)
]
return self.generate_tests(smoke_data)
def exp_test_data(self):
smoke_data = [
dict(
n=2,
tangent_vec=[[2.0, 0.0], [0.0, 2.0]],
base_point=[[1.0, 0.0], [0.0, 1.0]],
expected=[[4.0, 0.0], [0.0, 4.0]],
)
]
return self.generate_tests(smoke_data)
def log_test_data(self):
smoke_data = [
dict(
n=2,
point=[[4.0, 0.0], [0.0, 4.0]],
base_point=[[1.0, 0.0], [0.0, 1.0]],
expected=[[2.0, 0.0], [0.0, 2.0]],
)
]
return self.generate_tests(smoke_data)
def squared_dist_test_data(self):
smoke_data = [
dict(
n=2,
point_a=[[1.0, 0.0], [0.0, 1.0]],
point_b=[[2.0, 0.0], [0.0, 2.0]],
expected=2 + 4 - (2 * 2 * SQRT_2),
)
]
return self.generate_tests(smoke_data)
def exp_shape_test_data(self):
return self._exp_shape_test_data(
self.metric_args_list,
self.space_list,
self.shape_list,
)
def log_shape_test_data(self):
return self._log_shape_test_data(
self.metric_args_list,
self.space_list,
)
def squared_dist_is_symmetric_test_data(self):
return self._squared_dist_is_symmetric_test_data(
self.metric_args_list,
self.space_list,
self.n_points_a_list,
self.n_points_b_list,
atol=gs.atol * 1000,
)
def exp_belongs_test_data(self):
return self._exp_belongs_test_data(
self.metric_args_list,
self.space_list,
self.shape_list,
self.n_tangent_vecs_list,
belongs_atol=gs.atol * 1000,
)
def log_is_tangent_test_data(self):
return self._log_is_tangent_test_data(
self.metric_args_list,
self.space_list,
self.n_points_list,
is_tangent_atol=gs.atol * 1000,
)
def geodesic_ivp_belongs_test_data(self):
return self._geodesic_ivp_belongs_test_data(
self.metric_args_list,
self.space_list,
self.shape_list,
self.n_points_list,
belongs_atol=gs.atol * 1000,
)
def geodesic_bvp_belongs_test_data(self):
return self._geodesic_bvp_belongs_test_data(
self.metric_args_list,
self.space_list,
self.n_points_list,
belongs_atol=gs.atol * 1000,
)
def exp_after_log_test_data(self):
return self._exp_after_log_test_data(
self.metric_args_list,
self.space_list,
self.n_points_list,
rtol=gs.rtol * 10,
atol=gs.atol * 10,
)
def log_after_exp_test_data(self):
return self._log_after_exp_test_data(
self.metric_args_list,
self.space_list,
self.shape_list,
self.n_tangent_vecs_list,
amplitude=7.0,
rtol=gs.rtol * 10,
atol=gs.atol * 10,
)
def exp_ladder_parallel_transport_test_data(self):
return self._exp_ladder_parallel_transport_test_data(
self.metric_args_list,
self.space_list,
self.shape_list,
self.n_tangent_vecs_list,
self.n_rungs_list,
self.alpha_list,
self.scheme_list,
)
def exp_geodesic_ivp_test_data(self):
return self._exp_geodesic_ivp_test_data(
self.metric_args_list,
self.space_list,
self.shape_list,
self.n_tangent_vecs_list,
self.n_points_list,
rtol=gs.rtol * 100000,
atol=gs.atol * 100000,
)
def parallel_transport_ivp_is_isometry_test_data(self):
return self._parallel_transport_ivp_is_isometry_test_data(
self.metric_args_list,
self.space_list,
self.shape_list,
self.n_tangent_vecs_list,
is_tangent_atol=gs.atol * 1000,
atol=gs.atol * 1000,
)
""" Persistent cache storage for web ressources, with different cache eviction strategies, and optional compression. """
__version__ = "1.1.0"
__author__ = "desbma"
__license__ = "LGPLv2"
import bz2
import collections
import enum
import functools
import inspect
import lzma
import os
import pickle
import queue
import sqlite3
import threading
import zlib
DB_FORMAT_VERSION = 2 # incremented at each incompatible database/pickle format change
PICKLE_PROTOCOL_VERSION = 4
DISABLE_PERSISTENT_CACHING = False # useful for tests
class Compression(enum.IntEnum):
NONE = 0
DEFLATE = 1
BZIP2 = 2
LZMA = 3
CachingStrategy = enum.Enum("CachingStrategy", ("FIFO", "LRU"))
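# Compression members map to the standard-library codecs used in __setitem__ /
# __getitem__ (DEFLATE -> zlib, BZIP2 -> bz2, LZMA -> lzma), while the
# CachingStrategy chosen at construction decides which timestamp purge()
# compares against (FIFO: added_timestamp, LRU: last_accessed_timestamp).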
class WebCache:
def __init__(self, db_filepath, table_name, *, caching_strategy, expiration=None, compression=Compression.NONE,
compression_level=9, auto_compression_threshold=1, safe_mode=False):
"""
Args:
db_filepath: Database filepath
table_name: Database table name used for the cache
caching_strategy: CachingStrategy enum defining how cache entries are removed
expiration: Cache item lifetime in seconds, used to clean items with the FIFO and LRU strategies, or None if items
never expire
compression: Algorithm used to compress cache items
compression_level: Compression level (0-9)
auto_compression_threshold: Don't compress if compression ratio is above this value
safe_mode: If False, will enable some optimizations that increase cache write speed, but may compromise cache
integrity in case of Python crash or power loss
"""
# attribs
self.__table_name = table_name
assert(caching_strategy in CachingStrategy)
self.__caching_strategy = caching_strategy
self.__expiration = expiration
assert(compression in Compression)
self.__compression = compression
self.__compression_level = compression_level
assert(0 < auto_compression_threshold <= 1)
self.__auto_compression_threshold = auto_compression_threshold
# connection
if DISABLE_PERSISTENT_CACHING:
self.__connection = sqlite3.connect(":memory:")
else:
self.__db_filepath = db_filepath
self.__connection = sqlite3.connect(self.__db_filepath)
# create tables if necessary
with self.__connection:
if not safe_mode:
# enable some optimizations that can cause data corruption in case of power loss or python crash
self.__connection.executescript("""PRAGMA journal_mode = MEMORY;
PRAGMA synchronous = OFF;""")
self.__connection.execute("""CREATE TABLE IF NOT EXISTS %s
(
url TEXT PRIMARY KEY,
added_timestamp INTEGER NOT NULL,
last_accessed_timestamp INTEGER NOT NULL,
compression INTEGER NOT NULL,
data BLOB NOT NULL
);""" % (self.getDbTableName()))
self.__connection.execute("""CREATE TABLE IF NOT EXISTS %s
(
url TEXT NOT NULL,
post_data BLOB NOT NULL,
added_timestamp INTEGER NOT NULL,
last_accessed_timestamp INTEGER NOT NULL,
compression INTEGER NOT NULL,
data BLOB NOT NULL
);""" % (self.getDbTableName(post=True)))
self.__connection.execute("CREATE INDEX IF NOT EXISTS idx ON %s(url, post_data);" % (self.getDbTableName(post=True)))
# stats
self.__hit_count = 0
self.__miss_count = 0
def getDbTableName(self, *, post=False):
""" Get sqlite table name. """
return "%s%s_f%u" % (self.__table_name, "_post" if post else "", DB_FORMAT_VERSION)
def getDatabaseFileSize(self):
""" Return the file size of the database as a pretty string. """
if DISABLE_PERSISTENT_CACHING:
return "?"
size = os.path.getsize(self.__db_filepath)
if size > 1000000000:
size = "%0.3fGB" % (size / 1000000000)
elif size > 1000000:
size = "%0.2fMB" % (size / 1000000)
elif size > 1000:
size = "%uKB" % (size // 1000)
else:
size = "%uB" % (size)
return size
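# The thresholds above are decimal (1000-based): a 2,500,000-byte database is
# reported as "2.50MB" and a 2,500-byte one as "2KB".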
def getCacheHitStats(self):
return self.__hit_count, self.__miss_count
def __len__(self):
""" Return the number of items in the cache. """
row_count = 0
with self.__connection:
for post in (False, True):
row_count += self.__connection.execute("SELECT COUNT(*) FROM %s;" % (self.getDbTableName(post=post))).fetchone()[0]
return row_count
def __del__(self):
try:
self.__connection.close()
except AttributeError:
pass
def __getitem__(self, url_data):
""" Get an item from cache. """
if isinstance(url_data, tuple):
url, post_data = url_data
else:
url = url_data
post_data = None
with self.__connection:
if post_data is not None:
post_bin_data = sqlite3.Binary(pickle.dumps(post_data, protocol=PICKLE_PROTOCOL_VERSION))
r = self.__connection.execute("""SELECT data, compression
FROM %s
WHERE url = ? AND
post_data = ?;""" % (self.getDbTableName(post=True)),
(url, post_bin_data)).fetchone()
else:
r = self.__connection.execute("""SELECT data, compression
FROM %s
WHERE url = ?;""" % (self.getDbTableName()),
(url,)).fetchone()
if not r:
raise KeyError(url_data)
data, compression = r
if compression == Compression.DEFLATE:
buffer = memoryview(data)
data = zlib.decompress(buffer)
elif compression == Compression.BZIP2:
buffer = memoryview(data)
data = bz2.decompress(buffer)
elif compression == Compression.LZMA:
buffer = memoryview(data)
data = lzma.decompress(buffer)
if self.__caching_strategy is CachingStrategy.LRU:
# update last access time
with self.__connection:
if post_data is not None:
self.__connection.execute("UPDATE " +
self.getDbTableName(post=True) + " " +
"SET last_accessed_timestamp = strftime('%s', 'now') WHERE url = ? AND post_data = ?;",
(url, post_bin_data))
else:
self.__connection.execute("UPDATE " +
self.getDbTableName() + " " +
"SET last_accessed_timestamp = strftime('%s', 'now') WHERE url = ?;",
(url,))
return data
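# Lookups take either a bare URL or a (url, post_data) tuple as the key,
# e.g. (illustrative keys only):
#   cache["http://example.com/api"]                # entry stored without POST data
#   cache[("http://example.com/api", {"q": "x"})]  # entry keyed on URL + POST data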
def __setitem__(self, url_data, data):
""" Store an item in cache. """
if isinstance(url_data, tuple):
url, post_data = url_data
else:
url = url_data
post_data = None
if self.__compression is Compression.DEFLATE:
buffer = memoryview(data)
compressed_data = zlib.compress(buffer, self.__compression_level)
elif self.__compression is Compression.BZIP2:
buffer = memoryview(data)
compressed_data = bz2.compress(buffer, compresslevel=self.__compression_level)
elif self.__compression is Compression.LZMA:
buffer = memoryview(data)
compressed_data = lzma.compress(buffer, format=lzma.FORMAT_ALONE, preset=self.__compression_level)
if (self.__compression is Compression.NONE) or (len(compressed_data) > len(data) * self.__auto_compression_threshold):
data_to_store = data
compression = Compression.NONE
else:
data_to_store = compressed_data
compression = self.__compression
# if self.__compression is not Compression.NONE:
# print("%s compression: "
# "original size = %u b, "
# "compressed size = %u b, "
# "compression threshold (%.1f%%) = %u b" % ("Disabling" if (compression is Compression.NONE) else "Enabling",
# len(data),
# len(compressed_data),
# self.__auto_compression_threshold * 100,
# self.__auto_compression_threshold * len(data)))
with self.__connection:
if post_data is not None:
post_bin_data = sqlite3.Binary(pickle.dumps(post_data, protocol=PICKLE_PROTOCOL_VERSION))
self.__connection.execute("INSERT OR REPLACE INTO " +
self.getDbTableName(post=True) +
" (url, post_data, added_timestamp, last_accessed_timestamp, compression, data) VALUES (?, ?, strftime('%s','now'), strftime('%s','now'), ?, ?);",
(url, post_bin_data, compression, sqlite3.Binary(data_to_store)))
else:
self.__connection.execute("INSERT OR REPLACE INTO " +
self.getDbTableName() +
" (url, added_timestamp, last_accessed_timestamp, compression, data) VALUES (?, strftime('%s','now'), strftime('%s','now'), ?, ?);",
(url, compression, sqlite3.Binary(data_to_store)))
def __delitem__(self, url_data):
""" Remove an item from cache. """
if isinstance(url_data, tuple):
url, post_data = url_data
else:
url = url_data
post_data = None
with self.__connection:
if post_data is not None:
post_bin_data = sqlite3.Binary(pickle.dumps(post_data, protocol=PICKLE_PROTOCOL_VERSION))
deleted_count = self.__connection.execute("DELETE FROM " + self.getDbTableName(post=True) + " " +
"WHERE url = ? AND post_data = ?;",
(url, post_bin_data)).rowcount
else:
deleted_count = self.__connection.execute("DELETE FROM " + self.getDbTableName() + " WHERE url = ?;",
(url,)).rowcount
if deleted_count == 0:
raise KeyError(url_data)
def purge(self):
""" Purge cache by removing obsolete items. """
purged_count = 0
if self.__expiration is not None:
with self.__connection:
if self.__caching_strategy is CachingStrategy.FIFO:
# dump least recently added rows
for post in (False, True):
purged_count += self.__connection.execute("DELETE FROM " +
self.getDbTableName(post=post) + " "
"WHERE (strftime('%s', 'now') - added_timestamp) > ?;",
(self.__expiration,)).rowcount
elif self.__caching_strategy is CachingStrategy.LRU:
# dump least recently accessed rows
for post in (False, True):
purged_count += self.__connection.execute("DELETE FROM " +
self.getDbTableName(post=post) + " "
"WHERE (strftime('%s', 'now') - last_accessed_timestamp) > ?;",
(self.__expiration,)).rowcount
return purged_count
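# purge() is a no-op (returns 0) when expiration is None; otherwise it returns
# the number of expired rows removed, e.g. removed = cache.purge().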
def __contains__(self, url_data):
""" Return true if an item is present in cache for that url, False instead. """
if isinstance(url_data, tuple):
url, post_data = url_data
else:
url = url_data
post_data = None
with self.__connection:
if post_data is not None:
post_bin_data = sqlite3.Binary(pickle.dumps(post_data, protocol=PICKLE_PROTOCOL_VERSION))
hit = (self.__connection.execute("""SELECT COUNT(*)
FROM %s
WHERE url = ? AND
post_data = ?;""" % (self.getDbTableName(post=True)),
(url, post_bin_data)).fetchone()[0] > 0)
else:
hit = (self.__connection.execute("""SELECT COUNT(*)
FROM %s
WHERE url = ?;""" % (self.getDbTableName()),
(url,)).fetchone()[0] > 0)
if hit:
self.__hit_count += 1
else:
self.__miss_count += 1
return hit
class ThreadedWebCache:
"""
Similar to WebCache, but delegate all sqlite3 calls to a dedicated thread.
This allows getting rid of the 'same thread' sqlite3 module limitation.
Caller thread send calls in the execute queue and get the results in the result queue.
All calls are blocking and synchronous.
"""
def __init__(self, *args, **kwargs):
# this is the tricky part:
# attach methods from WebCache, decorated by callToThread, to this object's class
methods = inspect.getmembers(WebCache, inspect.isfunction)
for method_name, method in methods:
if (method_name in ("__init__", "__del__")) or (method_name not in WebCache.__dict__):
continue
new_method = __class__.callToThread(method)
setattr(self.__class__, method_name, new_method)
# start thread
self.thread = WebCacheThread()
self.thread.execute_queue.put_nowait((threading.get_ident(), args, kwargs))
self.thread.start()
self.thread.execute_queue.join()
# check WebCache object construction went ok
try:
e = self.thread.exception_queue[threading.get_ident()].get_nowait()
except queue.Empty:
pass
else:
raise e
def __del__(self):
self.thread.stop()
def waitResult(self):
""" Wait for the execution of the last enqueued job to be done, and return the result or raise an exception. """
self.thread.execute_queue.join()
try:
e = self.thread.exception_queue[threading.get_ident()].get_nowait()
except queue.Empty:
return self.thread.result_queue[threading.get_ident()].get_nowait()
else:
raise e
@staticmethod
def callToThread(method):
""" Wrap call to method to send it to WebCacheThread. """
def func_wrapped(self, *args, **kwargs):
self.thread.execute_queue.put_nowait((threading.get_ident(), method, args, kwargs))
return self.waitResult()
return func_wrapped
class WebCacheThread(threading.Thread):
""" Thread executing all sqlite3 calls for the ThreadedWebCache class. | |
self.stormIsInPrint('List[0]=1, List[-1]=4', msgs)
self.stormIsInPrint('List size is now 4', msgs)
self.stormIsInPrint('Sum is now 10', msgs)
self.stormIsInPrint('elst size is 0', msgs)
# Convert primitive python objects to List objects
q = '$v=(foo,bar,baz) [ test:str=$v.index(1) test:int=$v.length() ]'
nodes = await core.nodes(q)
self.eq(nodes[0].ndef, ('test:str', 'bar'))
self.eq(nodes[1].ndef, ('test:int', 3))
# Python tuples can be treated like a List object when accessing the data inside of them.
q = '[ test:comp=(10,lol) ] $x=$node.ndef().index(1).index(1) [ test:str=$x ]'
nodes = await core.nodes(q)
self.eq(nodes[0].ndef, ('test:str', 'lol'))
# sad case - index out of bounds.
q = 'test:comp=(10,lol) $x=$node.ndef().index(2)'
mesgs = await core.stormlist(q)
errs = [m[1] for m in mesgs if m[0] == 'err']
self.len(1, errs)
self.eq(errs[0][0], 'StormRuntimeError')
self.eq('bar', await core.callStorm('$foo = (foo, bar) return($foo.1)'))
self.eq('foo', await core.callStorm('$foo = (foo, bar) return($foo."-2")'))
self.eq('bar', await core.callStorm('$foo = (foo, bar) return($foo.pop())'))
with self.raises(s_exc.StormRuntimeError):
await core.callStorm('$lib.list().pop()')
async def test_storm_lib_fire(self):
async with self.getTestCore() as core:
text = '$lib.fire(foo:bar, baz=faz)'
gotn = [mesg for mesg in await core.stormlist(text) if mesg[0] == 'storm:fire']
self.len(1, gotn)
self.eq(gotn[0][1]['type'], 'foo:bar')
self.eq(gotn[0][1]['data']['baz'], 'faz')
await core.addTagProp('score', ('int', {}), {})
await core.callStorm('[inet:ipv4=1.2.3.4 +#foo=2021 +#foo:score=9001]')
q = 'inet:ipv4 $lib.fire(msg:pack, sode=$node.getStorNodes())'
gotn = [mesg async for mesg in core.storm(q) if mesg[0] == 'storm:fire']
self.len(1, gotn)
self.eq(gotn[0][1]['data']['sode'][0]['tagprops'], {'foo': {'score': (9001, 9)}})
self.eq(gotn[0][1]['type'], 'msg:pack')
async def test_storm_node_repr(self):
text = '''
[ inet:ipv4=1.2.3.4 :loc=us]
$ipv4 = $node.repr()
$loc = $node.repr(loc)
$latlong = $node.repr(latlong, defv="??")
$valu = $lib.str.format("{ipv4} in {loc} at {latlong}", ipv4=$ipv4, loc=$loc, latlong=$latlong)
[ test:str=$valu ]
+test:str
'''
async with self.getTestCore() as core:
nodes = await core.nodes(text)
self.len(1, nodes)
self.eq(nodes[0].ndef[1], '1.2.3.4 in us at ??')
mesgs = await core.stormlist('inet:ipv4 $repr=$node.repr(newp)')
err = mesgs[-2][1]
self.eq(err[0], 'NoSuchProp')
self.eq(err[1].get('prop'), 'newp')
self.eq(err[1].get('form'), 'inet:ipv4')
async def test_storm_csv(self):
async with self.getTestCore() as core:
await core.nodes('[test:str=1234 :tick=2001]')
await core.nodes('[test:str=9876 :tick=3001]')
q = "test:str " \
"$tick=$node.repr(tick) " \
"$lib.csv.emit($node.form(), $node.value(), $tick, table=mytable)"
mesgs = await core.stormlist(q, {'show': ('err', 'csv:row')})
csv_rows = [m for m in mesgs if m[0] == 'csv:row']
self.len(2, csv_rows)
csv_rows.sort(key=lambda x: x[1].get('row')[1])
self.eq(csv_rows[0],
('csv:row', {'row': ['test:str', '1234', '2001/01/01 00:00:00.000'],
'table': 'mytable'}))
self.eq(csv_rows[1],
('csv:row', {'row': ['test:str', '9876', '3001/01/01 00:00:00.000'],
'table': 'mytable'}))
q = 'test:str $hehe=$node.props.hehe $lib.csv.emit(:tick, $hehe)'
mesgs = await core.stormlist(q, {'show': ('err', 'csv:row')})
csv_rows = [m for m in mesgs if m[0] == 'csv:row']
self.len(2, csv_rows)
self.eq(csv_rows[0], ('csv:row', {'row': [978307200000, None], 'table': None}))
self.eq(csv_rows[1], ('csv:row', {'row': [32535216000000, None], 'table': None}))
# Sad path case...
q = '''
[ test:str=woot ]
$lib.csv.emit($path)
'''
mesgs = await core.stormlist(q, {'show': ('err', 'csv:row')})
err = mesgs[-2]
self.eq(err[1][0], 'NoSuchType')
async def test_storm_text(self):
async with self.getTestCore() as core:
nodes = await core.nodes('''
[ test:int=10 ] $text=$lib.text(hehe) { +test:int>=10 $text.add(haha) }
[ test:str=$text.str() ] +test:str''')
self.len(1, nodes)
self.eq(nodes[0].ndef, ('test:str', 'hehehaha'))
q = '''$t=$lib.text(beepboop) $lib.print($lib.len($t))
$t.add("more!") $lib.print($lib.len($t))
'''
msgs = await core.stormlist(q)
self.stormIsInPrint('8', msgs)
self.stormIsInPrint('13', msgs)
async def test_storm_set(self):
async with self.getTestCore() as core:
await core.nodes('[inet:ipv4=1.2.3.4 :asn=20]')
await core.nodes('[inet:ipv4=5.6.7.8 :asn=30]')
q = '''
$set = $lib.set()
inet:ipv4 $set.add(:asn)
[ graph:node="*" ] +graph:node [ :data=$set.list() ]
'''
nodes = await core.nodes(q)
self.len(1, nodes)
self.eq(tuple(sorted(nodes[0].get('data'))), (20, 30))
q = '''
$set = $lib.set()
inet:ipv4 $set.adds((:asn,:asn))
[ graph:node="*" ] +graph:node [ :data=$set.list() ]
'''
nodes = await core.nodes(q)
self.len(1, nodes)
self.eq(tuple(sorted(nodes[0].get('data'))), (20, 30))
q = '''
$set = $lib.set()
inet:ipv4 $set.adds((:asn,:asn))
{ +:asn=20 $set.rem(:asn) }
[ graph:node="*" ] +graph:node [ :data=$set.list() ]
'''
nodes = await core.nodes(q)
self.len(1, nodes)
self.eq(tuple(sorted(nodes[0].get('data'))), (30,))
q = '''
$set = $lib.set()
inet:ipv4 $set.add(:asn)
$set.rems((:asn,:asn))
[ graph:node="*" ] +graph:node [ :data=$set.list() ]
'''
nodes = await core.nodes(q)
self.len(1, nodes)
self.eq(tuple(sorted(nodes[0].get('data'))), ())
q = '$set = $lib.set(a, b, c, b, a) [test:int=$set.size()]'
nodes = await core.nodes(q)
self.len(1, nodes)
self.eq(nodes[0].ndef, ('test:int', 3))
q = '''$set = $lib.set(a, b, c)
for $v in $set {
$lib.print('set valu: {v}', v=$v)
}
'''
mesgs = await core.stormlist(q)
self.stormIsInPrint('set valu: a', mesgs)
self.stormIsInPrint('set valu: b', mesgs)
self.stormIsInPrint('set valu: c', mesgs)
q = '''
$set = $lib.set()
$set.add(foo)
if $set.has(foo) { [ test:str=asdf ] }
'''
nodes = await core.nodes(q)
self.len(1, nodes)
self.eq(nodes[0].ndef, ('test:str', 'asdf'))
async def test_storm_path(self):
async with self.getTestCore() as core:
await core.nodes('[ inet:dns:a=(vertex.link, 192.168.3.11) ]')
q = '''
inet:fqdn=vertex.link -> inet:dns:a -> inet:ipv4
$idens = $path.idens()
[ graph:node="*" ] +graph:node [ :data=$idens ]
'''
idens = (
'02488bc284ffd0f60f474d5af66a8c0cf89789f766b51fde1d3da9b227005f47',
'20153b758f9d5eaaa38e4f4a65c36da797c3e59e549620fa7c4895e1a920991f',
'3ecd51e142a5acfcde42c02ff5c68378bfaf1eaf49fe9721550b6e7d6013b699',
)
nodes = await core.nodes(q)
self.len(1, nodes)
self.eq(tuple(sorted(nodes[0].get('data'))), idens)
opts = {'vars': {'testvar': 'test'}}
text = "[ test:str='123' ] $testkey=testvar [ test:str=$path.vars.$testkey ]"
nodes = await core.nodes(text, opts=opts)
self.len(2, nodes)
self.eq(nodes[0].ndef, ('test:str', 'test'))
text = "[ test:str='123' ] [ test:str=$path.vars.testkey ]"
mesgs = await core.stormlist(text)
errs = [m[1] for m in mesgs if m[0] == 'err']
self.len(1, errs)
err = errs[0]
self.eq(err[0], 'StormRuntimeError')
self.isin('No var with name: testkey', err[1].get('mesg'))
opts = {'vars': {'testkey': 'testvar'}}
text = "[ test:str='123' ] $path.vars.$testkey = test [ test:str=$path.vars.testvar ]"
nodes = await core.nodes(text, opts=opts)
self.len(2, nodes)
self.eq(nodes[0].ndef, ('test:str', 'test'))
self.eq(nodes[1].ndef, ('test:str', '123'))
opts = {'vars': {'testvar': 'test', 'testkey': 'testvar'}}
text = '''
[ test:str='123' ]
for ($name, $valu) in $path.vars {
$lib.print('{name}={valu}', name=$name, valu=$valu)
}
'''
msgs = await core.stormlist(text, opts=opts)
self.stormIsInPrint('testvar=test', msgs)
self.stormIsInPrint('testkey=testvar', msgs)
async with core.getLocalProxy() as proxy:
msgs = await proxy.storm('''
[ ps:contact=* ]
$path.meta.foo = bar
$path.meta.baz = faz
$path.meta.baz = $lib.undef
{
for ($name, $valu) in $path.meta {
$lib.print('meta: {name}={valu}', name=$name, valu=$valu)
}
}
if $path.meta.foo { $lib.print(foofoofoo) }
''').list()
self.stormIsInPrint('foofoofoo', msgs)
self.stormIsInPrint('meta: foo=bar', msgs)
pode = [m[1] for m in msgs if m[0] == 'node'][0]
self.len(1, pode[1]['path'])
self.eq('bar', pode[1]['path']['foo'])
async def test_storm_trace(self):
async with self.getTestCore() as core:
await core.nodes('[ inet:dns:a=(vertex.link, 192.168.3.11) ]')
q = '''
inet:fqdn=vertex.link
$trace=$path.trace()
-> inet:dns:a -> inet:ipv4
/* Make a trace object from a path which already has nodes */
$trace2=$path.trace()
[ graph:node="*" ] +graph:node [ :data=$trace.idens() ]
/* Print the contents of the second trace */
$lib.print($trace2.idens())
'''
mesgs = await core.stormlist(q)
podes = [m[1] for m in mesgs if m[0] == 'node']
self.len(1, podes)
pode = podes[0]
idens = (
'02488bc284ffd0f60f474d5af66a8c0cf89789f766b51fde1d3da9b227005f47',
'20153b758f9d5eaaa38e4f4a65c36da797c3e59e549620fa7c4895e1a920991f',
'3ecd51e142a5acfcde42c02ff5c68378bfaf1eaf49fe9721550b6e7d6013b699',
)
self.eq(tuple(sorted(pode[1]['props'].get('data'))), idens)
for iden in idens:
self.stormIsInPrint(iden, mesgs)
async def test_stormuser(self):
# Do not include persistent vars support in this test; see
# test_persistent_vars for that behavior.
async with self.getTestCore() as core:
q = '$lib.print($lib.user.name())'
mesgs = await core.stormlist(q)
self.stormIsInPrint('root', mesgs)
async def test_persistent_vars(self):
with self.getTestDir() as dirn:
async with self.getTestCore(dirn=dirn) as core:
async with core.getLocalProxy() as prox:
# User setup for $lib.user.vars() tests
ret1 = await prox.addUser('user1', passwd='<PASSWORD>')
iden1 = ret1.get('iden')
await prox.addUserRule(iden1, (True, ('node', 'add')))
await prox.addUserRule(iden1, (True, ('node', 'prop', 'set')))
await prox.addUserRule(iden1, (True, ('globals', 'get', 'userkey',)))
# Basic tests as root for $lib.globals
q = '''$lib.globals.set(adminkey, sekrit)
$lib.globals.set(userkey, lessThanSekrit)
$lib.globals.set(throwaway, beep)
$valu=$lib.globals.get(adminkey)
$lib.print($valu)
'''
mesgs = await s_test.alist(prox.storm(q))
self.stormIsInPrint('sekrit', mesgs)
popq = '''$valu = $lib.globals.pop(throwaway)
$lib.print("pop valu is {valu}", valu=$valu)
'''
mesgs = await s_test.alist(prox.storm(popq))
self.stormIsInPrint('pop valu is beep', mesgs)
q = '''$x=$lib.dict(foo=1)
$lib.globals.set(bar, $x)
$y=$lib.globals.get(bar)
$lib.print("valu={v}", v=$y.foo)
'''
mesgs = await s_test.alist(prox.storm(q))
self.stormIsInPrint('valu=1', mesgs)
# get and pop take a secondary default value which may be returned
q = '''$valu = $lib.globals.get(throwaway, $(0))
$lib.print("get valu is {valu}", valu=$valu)
'''
mesgs = await s_test.alist(prox.storm(q))
self.stormIsInPrint('get valu is 0', mesgs)
q = '''$valu = $lib.globals.pop(throwaway, $(0))
$lib.print("pop valu is {valu}", valu=$valu)
'''
mesgs = await s_test.alist(prox.storm(q))
self.stormIsInPrint('pop valu is 0', mesgs)
listq = '''for ($key, $valu) in $lib.globals.list() {
$string = $lib.str.format("{key} is {valu}", key=$key, valu=$valu)
$lib.print($string)
}
'''
mesgs = await s_test.alist(prox.storm(listq))
self.len(3, [m for m in mesgs if m[0] == 'print'])
self.stormIsInPrint('adminkey is sekrit', mesgs)
self.stormIsInPrint('userkey is lessThanSekrit', mesgs)
# Storing a valu into the hive gets toprim()'d
q = '[test:str=test] $lib.user.vars.set(mynode, $node) return($lib.user.vars.get(mynode))'
data = await prox.callStorm(q)
self.eq(data, 'test')
# Sad path - names must be strings.
q = '$lib.globals.set((my, nested, valu), haha)'
mesgs = await prox.storm(q).list()
err = 'The name of a persistent variable must be a string.'
self.stormIsInErr(err, mesgs)
async with core.getLocalProxy() as uprox:
self.true(await uprox.setCellUser(iden1))
q = '''$lib.user.vars.set(somekey, hehe)
| |
'1626443':{'en': 'El Monte, CA'},
'1626444':{'en': 'El Monte, CA'},
'1626445':{'en': 'Arcadia, CA'},
'1626446':{'en': 'Arcadia, CA'},
'1626447':{'en': 'Arcadia, CA'},
'1626448':{'en': 'El Monte, CA'},
'1626449':{'en': 'Pasadena, CA'},
'1626452':{'en': 'El Monte, CA'},
'1626453':{'en': 'El Monte, CA'},
'1626454':{'en': 'El Monte, CA'},
'1626457':{'en': 'Alhambra, CA'},
'1626458':{'en': 'Alhambra, CA'},
'1626462':{'en': 'Arcadia, CA'},
'1626564':{'en': 'Pasadena, CA'},
'1626568':{'en': 'Pasadena, CA'},
'1626570':{'en': 'Alhambra, CA'},
'1626574':{'en': 'Arcadia, CA'},
'1626576':{'en': 'Alhambra, CA'},
'1626577':{'en': 'Pasadena, CA'},
'1626578':{'en': 'Pasadena, CA'},
'1626579':{'en': 'El Monte, CA'},
'1626583':{'en': 'Pasadena, CA'},
'1626584':{'en': 'Pasadena, CA'},
'1626585':{'en': 'Pasadena, CA'},
'1626599':{'en': 'Monrovia, CA'},
'1626683':{'en': 'Pasadena, CA'},
'1626732':{'en': 'Covina, CA'},
'1626744':{'en': 'Pasadena, CA'},
'1626765':{'en': 'Pasadena, CA'},
'162679':{'en': 'Pasadena, CA'},
'1626799':{'en': 'South Pasadena, CA'},
'1626812':{'en': 'Azusa, CA'},
'1626815':{'en': 'Azusa, CA'},
'1626821':{'en': 'Arcadia, CA'},
'1626844':{'en': 'Pasadena, CA'},
'1626852':{'en': 'Glendora, CA'},
'1626857':{'en': 'Glendora, CA'},
'1626858':{'en': 'Covina, CA'},
'1626859':{'en': 'Covina, CA'},
'1626914':{'en': 'Glendora, CA'},
'1626915':{'en': 'Covina, CA'},
'1626917':{'en': 'La Puente, CA'},
'1626919':{'en': 'West Covina, CA'},
'1626943':{'en': 'Alhambra, CA'},
'1626963':{'en': 'Glendora, CA'},
'1626966':{'en': 'Covina, CA'},
'1626967':{'en': 'Covina, CA'},
'1626969':{'en': 'Azusa, CA'},
'1626974':{'en': 'Covina, CA'},
'1628':{'en': 'California'},
'1629':{'en': 'Tennessee'},
'1630':{'en': 'Illinois'},
'1630208':{'en': 'Geneva, IL'},
'1630226':{'en': 'Bolingbrook, IL'},
'1630229':{'en': 'Aurora, IL'},
'1630231':{'en': 'West Chicago, IL'},
'1630232':{'en': 'Geneva, IL'},
'1630236':{'en': 'Aurora, IL'},
'1630238':{'en': 'Bensenville, IL'},
'1630243':{'en': 'Lemont, IL'},
'1630250':{'en': 'Itasca, IL'},
'1630257':{'en': 'Lemont, IL'},
'1630260':{'en': 'Wheaton, IL'},
'1630262':{'en': 'Geneva, IL'},
'1630264':{'en': 'Aurora, IL'},
'1630275':{'en': 'Downers Grove, IL'},
'1630279':{'en': 'Elmhurst, IL'},
'1630285':{'en': 'Itasca, IL'},
'1630293':{'en': 'West Chicago, IL'},
'1630305':{'en': 'Naperville, IL'},
'1630340':{'en': 'Aurora, IL'},
'1630350':{'en': 'Bensenville, IL'},
'1630355':{'en': 'Naperville, IL'},
'1630357':{'en': 'Naperville, IL'},
'1630365':{'en': 'Elburn, IL'},
'1630368':{'en': 'Oak Brook, IL'},
'1630369':{'en': 'Naperville, IL'},
'1630375':{'en': 'Aurora, IL'},
'1630377':{'en': 'St. Charles, IL'},
'1630378':{'en': 'Bolingbrook, IL'},
'1630393':{'en': 'Warrenville, IL'},
'1630406':{'en': 'Batavia, IL'},
'1630416':{'en': 'Naperville, IL'},
'1630420':{'en': 'Naperville, IL'},
'1630422':{'en': 'Bensenville, IL'},
'1630428':{'en': 'Naperville, IL'},
'1630443':{'en': 'St. Charles, IL'},
'1630444':{'en': 'St. Charles, IL'},
'1630458':{'en': 'Addison, IL'},
'1630462':{'en': 'Wheaton, IL'},
'1630466':{'en': 'Sugar Grove, IL'},
'1630469':{'en': 'Glen Ellyn, IL'},
'1630482':{'en': 'Batavia, IL'},
'1630495':{'en': 'Lombard, IL'},
'1630499':{'en': 'Aurora, IL'},
'1630505':{'en': 'Naperville, IL'},
'1630513':{'en': 'St. Charles, IL'},
'1630521':{'en': 'Bensenville, IL'},
'1630527':{'en': 'Naperville, IL'},
'1630530':{'en': 'Elmhurst, IL'},
'1630543':{'en': 'Addison, IL'},
'1630545':{'en': 'G<NAME>, IL'},
'1630548':{'en': 'Naperville, IL'},
'1630551':{'en': 'Oswego, IL'},
'1630552':{'en': 'Plano, IL'},
'1630553':{'en': 'Yorkville, IL'},
'1630554':{'en': 'Oswego, IL'},
'1630556':{'en': 'Big Rock, IL'},
'1630562':{'en': 'West Chicago, IL'},
'1630571':{'en': 'Oak Brook, IL'},
'1630572':{'en': 'Oak Brook, IL'},
'1630573':{'en': 'Oak Brook, IL'},
'1630574':{'en': 'Oak Brook, IL'},
'1630575':{'en': 'Oak Brook, IL'},
'1630579':{'en': 'Naperville, IL'},
'1630584':{'en': 'St. Charles, IL'},
'1630585':{'en': 'Aurora, IL'},
'1630587':{'en': 'St. Charles, IL'},
'1630595':{'en': 'Bensenville, IL'},
'1630616':{'en': 'Bensenville, IL'},
'1630617':{'en': 'Elmhurst, IL'},
'1630620':{'en': 'Lombard, IL'},
'1630627':{'en': 'Lombard, IL'},
'1630628':{'en': 'Addison, IL'},
'1630629':{'en': 'Lombard, IL'},
'1630637':{'en': 'Naperville, IL'},
'1630653':{'en': 'Wheaton, IL'},
'1630665':{'en': 'Wheaton, IL'},
'1630668':{'en': 'Wheaton, IL'},
'1630679':{'en': 'Bolingbrook, IL'},
'1630681':{'en': 'Wheaton, IL'},
'1630682':{'en': 'Wheaton, IL'},
'1630692':{'en': 'Aurora, IL'},
'1630717':{'en': 'Naperville, IL'},
'1630718':{'en': 'Naperville, IL'},
'1630719':{'en': 'Downers Grove, IL'},
'1630739':{'en': 'Bolingbrook, IL'},
'1630752':{'en': 'Wheaton, IL'},
'1630758':{'en': 'Elmhurst, IL'},
'1630759':{'en': 'Bolingbrook, IL'},
'1630761':{'en': 'Batavia, IL'},
'1630762':{'en': 'St. Charles, IL'},
'1630766':{'en': 'Bensenville, IL'},
'1630769':{'en': 'Downers Grove, IL'},
'1630771':{'en': 'Bolingbrook, IL'},
'1630773':{'en': 'Itasca, IL'},
'1630778':{'en': 'Naperville, IL'},
'1630782':{'en': 'Elmhurst, IL'},
'1630783':{'en': 'Bolingbrook, IL'},
'1630787':{'en': 'Bensenville, IL'},
'1630790':{'en': '<NAME>, IL'},
'1630801':{'en': 'Aurora, IL'},
'1630820':{'en': 'Aurora, IL'},
'1630832':{'en': 'Elmhurst, IL'},
'1630833':{'en': 'Elmhurst, IL'},
'1630834':{'en': 'Elmhurst, IL'},
'1630836':{'en': 'Warrenville, IL'},
'1630844':{'en': 'Aurora, IL'},
'1630845':{'en': 'Geneva, IL'},
'1630848':{'en': 'Naperville, IL'},
'1630851':{'en': 'Aurora, IL'},
'1630856':{'en': 'Hinsdale, IL'},
'1630858':{'en': '<NAME>, IL'},
'1630859':{'en': 'Aurora, IL'},
'1630860':{'en': 'Bensenville, IL'},
'1630876':{'en': 'West Chicago, IL'},
'1630879':{'en': 'Batavia, IL'},
'1630882':{'en': 'Yorkville, IL'},
'1630892':{'en': 'Aurora, IL'},
'1630896':{'en': 'Aurora, IL'},
'1630897':{'en': 'Aurora, IL'},
'1630898':{'en': 'Aurora, IL'},
'1630904':{'en': 'Naperville, IL'},
'1630906':{'en': 'Aurora, IL'},
'1630907':{'en': 'Aurora, IL'},
'1630916':{'en': 'Lombard, IL'},
'1630922':{'en': 'Naperville, IL'},
'1630928':{'en': 'Oak Brook, IL'},
'1630932':{'en': 'Lombard, IL'},
'1630933':{'en': 'Winfield, IL'},
'1630941':{'en': 'Elmhurst, IL'},
'1630942':{'en': '<NAME>, IL'},
'1630954':{'en': 'Oak Brook, IL'},
'1630960':{'en': 'Downers Grove, IL'},
'1630961':{'en': 'Naperville, IL'},
'1630966':{'en': 'Aurora, IL'},
'1630968':{'en': 'Downers Grove, IL'},
'1630969':{'en': 'Downers Grove, IL'},
'1630972':{'en': 'Bolingbrook, IL'},
'1630978':{'en': 'Aurora, IL'},
'1630983':{'en': 'Naperville, IL'},
'1630990':{'en': 'Oak Brook, IL'},
'1630993':{'en': 'Elmhurst, IL'},
'1631':{'en': 'New York'},
'1631204':{'en': 'Southampton, NY'},
'1631206':{'en': 'Bay Shore, NY'},
'1631208':{'en': 'Riverhead, NY'},
'1631225':{'en': 'Lindenhurst, NY'},
'1631226':{'en': 'Lindenhurst, NY'},
'1631242':{'en': 'Deer Park, NY'},
'1631243':{'en': 'Deer Park, NY'},
'1631249':{'en': 'Farmingdale, NY'},
'1631254':{'en': 'Deer Park, NY'},
'1631259':{'en': 'Southampton, NY'},
'1631261':{'en': 'Northport, NY'},
'1631264':{'en': 'Amityville, NY'},
'1631265':{'en': 'Smithtown, NY'},
'1631266':{'en': 'East Northport, NY'},
'1631267':{'en': 'Amagansett, NY'},
'1631269':{'en': 'Kings Park, NY'},
'1631273':{'en': 'Brentwood, NY'},
'1631274':{'en': 'Deer Park, NY'},
'1631283':{'en': 'Southampton, NY'},
'1631287':{'en': 'Southampton, NY'},
'1631293':{'en': 'Farmingdale, NY'},
'1631298':{'en': 'Mattituck, NY'},
'1631324':{'en': 'East Hampton, NY'},
'1631328':{'en': 'Bay Shore, NY'},
'1631329':{'en': 'East Hampton, NY'},
'1631351':{'en': 'Huntington, NY'},
'1631360':{'en': 'Smithtown, NY'},
'1631363':{'en': 'Blue Point, NY'},
'1631368':{'en': 'East Northport, NY'},
'1631369':{'en': 'Riverhead, NY'},
'1631376':{'en': 'West Islip, NY'},
'1631392':{'en': 'Deer Park, NY'},
'1631420':{'en': 'Farmingdale, NY'},
'1631427':{'en': 'Huntington, NY'},
'1631462':{'en': 'Commack, NY'},
'1631465':{'en': 'Melville, NY'},
'1631472':{'en': 'Bayport, NY'},
'1631475':{'en': 'Patchogue, NY'},
'1631477':{'en': 'Greenport, NY'},
'1631499':{'en': 'Commack, NY'},
'1631537':{'en': 'Bridgehampton, NY'},
'1631543':{'en': 'Commack, NY'},
'1631544':{'en': 'Kings Park, NY'},
'1631569':{'en': 'Patchogue, NY'},
'1631583':{'en': 'Ocean Beach, NY'},
'1631584':{'en': 'St. James, NY'},
'1631586':{'en': 'Deer Park, NY'},
'1631591':{'en': 'Riverhead, NY'},
'1631592':{'en': 'Lindenhurst, NY'},
'1631595':{'en': 'Deer Park, NY'},
'1631598':{'en': 'Amityville, NY'},
'1631604':{'en': 'East Hampton, NY'},
'1631608':{'en': 'Amityville, NY'},
'1631632':{'en': 'Stony Brook, NY'},
'1631647':{'en': 'Bay Shore, NY'},
'1631653':{'en': 'Quogue, NY'},
'1631665':{'en': 'Bay Shore, NY'},
'1631666':{'en': 'Bay Shore, NY'},
'1631667':{'en': 'Deer Park, NY'},
'1631668':{'en': 'Montauk, NY'},
'1631687':{'en': 'Patchogue, NY'},
'1631691':{'en': 'Amityville, NY'},
'1631694':{'en': 'Farmingdale, NY'},
'1631723':{'en': 'Hampton Bays, NY'},
'1631725':{'en': 'Sag Harbor, NY'},
'1631726':{'en': 'Water Mill, NY'},
'1631727':{'en': 'Riverhead, NY'},
'1631728':{'en': 'Hampton Bays, NY'},
'1631734':{'en': 'Cutchogue, NY'},
'1631738':{'en': 'Ronkonkoma, NY'},
'1631749':{'en': 'Shelter Island, NY'},
'1631752':{'en': 'Farmingdale, NY'},
'1631753':{'en': 'Farmingdale, NY'},
'1631765':{'en': 'Southold, NY'},
'1631789':{'en': 'Amityville, NY'},
'1631841':{'en': 'Amityville, NY'},
'1631842':{'en': 'Copiague, NY'},
'1631858':{'en': 'Commack, NY'},
'1631863':{'en': 'Smithtown, NY'},
'1631864':{'en': 'Commack, NY'},
'1631884':{'en': 'Lindenhurst, NY'},
'1631907':{'en': 'East Hampton, NY'},
'1631929':{'en': 'Wading River, NY'},
'1631940':{'en': 'Deer Park, NY'},
'1631941':{'en': 'Setauket- East Setauket, NY'},
'1631956':{'en': 'Lindenhurst, NY'},
'1631957':{'en': 'Lindenhurst, NY'},
'1631968':{'en': 'Bay Shore, NY'},
'1631969':{'en': 'Bay Shore, NY'},
'1631991':{'en': 'Lindenhurst, NY'},
'1636':{'en': 'Missouri'},
'1636230':{'en': 'Ballwin, MO'},
'1636239':{'en': 'Washington, MO'},
'1636240':{'en': 'O\'Fallon, MO'},
'1636256':{'en': 'Ballwin, MO'},
'1636257':{'en': 'Pacific, MO'},
'1636271':{'en': 'Pacific, MO'},
'1636272':{'en': 'O\'Fallon, MO'},
'1636274':{'en': 'Cedar Hill, MO'},
'1636278':{'en': 'St. Peters, MO'},
'1636279':{'en': 'St. Peters, MO'},
'1636281':{'en': 'O\'Fallon, MO'},
'1636282':{'en': 'Arnold, MO'},
'1636285':{'en': '<NAME>, MO'},
'1636287':{'en': 'Arnold, MO'},
'1636294':{'en': 'O\'Fallon, MO'},
'1636296':{'en': 'Arnold, MO'},
'1636305':{'en': 'Fenton, MO'},
'1636326':{'en': 'Fenton, MO'},
'1636327':{'en': 'Wentzville, MO'},
'1636332':{'en': 'Wentzville, MO'},
'1636337':{'en': '<NAME>, MO'},
'1636343':{'en': 'Fenton, MO'},
'1636349':{'en': 'Fenton, MO'},
'1636376':{'en': 'High Ridge, MO'},
'1636379':{'en': 'O\'Fallon, MO'},
'1636390':{'en': 'Washington, MO'},
'1636397':{'en': 'St. Peters, MO'},
'1636433':{'en': 'Marthasville, MO'},
'1636456':{'en': 'Warrenton, MO'},
'1636462':{'en': 'Troy, MO'},
'1636475':{'en': 'Pevely, MO'},
'1636479':{'en': 'Pevely, MO'},
'1636493':{'en': 'St. Charles, MO'},
'1636496':{'en': 'Fenton, MO'},
'1636519':{'en': 'Chesterfield, MO'},
'1636527':{'en': 'Ballwin, MO'},
'1636528':{'en': 'Troy, MO'},
'1636530':{'en': 'Chesterfield, MO'},
'1636532':{'en': 'Chesterfield, MO'},
'1636536':{'en': 'Chesterfield, MO'},
'1636537':{'en': 'Chesterfield, MO'},
'1636583':{'en': 'Union, MO'},
'1636584':{'en': 'Union, MO'},
'1636586':{'en': 'De Soto, MO'},
'1636587':{'en': 'Eureka, MO'},
'1636625':{'en': 'Lake Saint Louis, MO'},
'1636629':{'en': 'Saint Clair, MO'},
'1636639':{'en': 'Wentzville, MO'},
'1636671':{'en': 'House Springs, MO'},
'1636677':{'en': 'High Ridge, MO'},
'1636717':{'en': 'Fenton, MO'},
'1636723':{'en': 'St. Charles, MO'},
'1636724':{'en': 'St. Charles, MO'},
'1636728':{'en': 'Chesterfield, MO'},
'1636745':{'en': 'Wright City, MO'},
'1636789':{'en': 'Hillsboro, MO'},
'1636797':{'en': 'Hillsboro, MO'},
'1636887':{'en': 'Wentzville, MO'},
'1636916':{'en': 'St. Charles, MO'},
'1636922':{'en': 'St. Peters, MO'},
'1636925':{'en': 'St. Charles, MO'},
'1636931':{'en': 'Festus, MO'},
'1636933':{'en': 'Festus, MO'},
'1636937':{'en': 'Festus, MO'},
'1636938':{'en': 'Eureka, MO'},
'1636940':{'en': 'St. Charles, MO'},
'1636946':{'en': 'St. Charles, MO'},
'1636947':{'en': 'St. Charles, MO'},
'1636949':{'en': 'St. Charles, MO'},
'1636970':{'en': 'St. Peters, MO'},
'1636978':{'en': 'O\'Fallon, MO'},
'1636980':{'en': 'O\'Fallon, MO'},
'1639':{'en': 'Saskatchewan'},
'1640':{'en': 'New Jersey'},
'1641':{'en': 'Iowa'},
'1641209':{'en': 'Fairfield, IA'},
'1641228':{'en': 'Charles City, IA'},
'1641236':{'en': 'Grinnell, IA'},
'1641259':{'en': 'Monroe, IA'},
'1641322':{'en': 'Corning, IA'},
'1641324':{'en': 'Northwood, IA'},
'1641333':{'en': 'Lenox, IA'},
'1641342':{'en': 'Osceola, IA'},
'1641357':{'en': 'Clear Lake, IA'},
'1641366':{'en': 'Conrad, IA'},
'1641394':{'en': 'New Hampton, IA'},
'1641421':{'en': 'Mason City, IA'},
'1641422':{'en': 'Mason City, IA'},
'1641423':{'en': 'Mason City, IA'},
'1641424':{'en': 'Mason City, IA'},
'1641435':{'en': 'Nashua, IA'},
'1641437':{'en': 'Centerville, IA'},
'1641444':{'en': 'Belmond, IA'},
'1641446':{'en': 'Leon, IA'},
'1641456':{'en': 'Hampton, IA'},
'1641464':{'en': 'Mount Ayr, IA'},
'1641469':{'en': 'Fairfield, IA'},
'1641472':{'en': 'Fairfield, IA'},
'1641473':{'en': 'Gladbrook, IA'},
'1641484':{'en': 'Toledo, IA'},
'1641522':{'en': 'Brooklyn, IA'},
'1641585':{'en': 'Forest City, IA'},
'1641592':{'en': 'Lake Mills, IA'},
'1641594':{'en': 'Sully, IA'},
'1641622':{'en': 'Sigourney, IA'},
'1641623':{'en': 'Montezuma, IA'},
'1641628':{'en': 'Pella, IA'},
'1641637':{'en': | |
+ 130653.05862079248 * self.t)
X1 += 0.00000000006 * math.cos(6.20953170755 + 110.45013870291 * self.t)
X1 += 0.00000000007 * math.cos(1.43696950392 + 77631.16950289288 * self.t)
X1 += 0.00000000006 * math.cos(1.84528711156 + 53131.16220727349 * self.t)
X1 += 0.00000000006 * math.cos(4.02329719726 + 26013.3653604904 * self.t)
X1 += 0.00000000006 * math.cos(0.25065144064 + 25508.4593720589 * self.t)
X1 += 0.00000000006 * math.cos(5.41297359057 + 26667.8345460565 * self.t)
X1 += 0.00000000006 * math.cos(1.27873892607 + 70268.93716521488 * self.t)
X1 += 0.00000000006 * math.cos(3.75153393501 + 103918.14464590348 * self.t)
X1 += 0.00000000008 * math.cos(0.93245740025 + 157587.04459711789 * self.t)
X1 += 0.00000000006 * math.cos(1.85606762274 + 6885.39370741431 * self.t)
X1 += 0.00000000007 * math.cos(4.77971152102 + 214364.80099922928 * self.t)
X1 += 0.00000000007 * math.cos(4.36588080270 + 25754.2910182883 * self.t)
X1 += 0.00000000007 * math.cos(1.29774422851 + 26422.0028998271 * self.t)
X1 += 0.00000000006 * math.cos(2.52395990409 + 60055.65161900389 * self.t)
X1 += 0.00000000006 * math.cos(6.00893693971 + 78149.51395352086 * self.t)
X1 += 0.00000000005 * math.cos(0.89617169225 + 50064.3997872543 * self.t)
X1 += 0.00000000006 * math.cos(2.63212801251 + 128221.00242116769 * self.t)
X1 += 0.00000000006 * math.cos(4.95615912967 + 75615.49841673308 * self.t)
X1 += 0.00000000005 * math.cos(1.27204675441 + 137678.43511695448 * self.t)
X1 += 0.00000000007 * math.cos(5.96534970508 + 56727.51596272369 * self.t)
X1 += 0.00000000007 * math.cos(5.86123151663 + 8194.0315157251 * self.t)
X1 += 0.00000000005 * math.cos(6.05965648339 + 130226.46042991648 * self.t)
X1 += 0.00000000006 * math.cos(5.74195245077 + 80382.71710258449 * self.t)
X1 += 0.00000000006 * math.cos(2.26934702805 + 51653.47268253809 * self.t)
X1 += 0.00000000006 * math.cos(0.06833462464 + 123669.04892410889 * self.t)
X1 += 0.00000000006 * math.cos(3.85232597863 + 72602.13355808688 * self.t)
X1 += 0.00000000006 * math.cos(4.43482215622 + 50056.79860528649 * self.t)
X1 += 0.00000000005 * math.cos(1.17789065193 + 102769.89703549728 * self.t)
X1 += 0.00000000006 * math.cos(3.77265893918 + 74820.89066227368 * self.t)
X1 += 0.00000000006 * math.cos(1.81837030655 + 161079.61616398748 * self.t)
X1 += 0.00000000006 * math.cos(3.17899498144 + 103932.37173990508 * self.t)
X1 += 0.00000000006 * math.cos(2.61088164463 + 25234.9505773057 * self.t)
X1 += 0.00000000007 * math.cos(1.11412129297 + 154938.58977164488 * self.t)
X1 += 0.00000000005 * math.cos(3.94989870298 + 419.2408263917 * self.t)
X1 += 0.00000000005 * math.cos(3.88779112513 + 19406.9221056581 * self.t)
X1 += 0.00000000006 * math.cos(3.53467301913 + 1265.81129610991 * self.t)
X1 += 0.00000000007 * math.cos(0.99187081299 + 64901.5035354069 * self.t)
X1 += 0.00000000005 * math.cos(2.45437001625 + 73711.51211018028 * self.t)
X1 += 0.00000000005 * math.cos(0.40751082829 + 78213.95662030189 * self.t)
X1 += 0.00000000006 * math.cos(0.47609620784 + 182085.87484340828 * self.t)
X1 += 0.00000000005 * math.cos(0.72963478402 + 102755.66994149568 * self.t)
X1 += 0.00000000005 * math.cos(3.23310371062 + 40565.01050729069 * self.t)
X1 += 0.00000000005 * math.cos(4.56090628241 + 102.84895673509 * self.t)
X1 += 0.00000000005 * math.cos(2.73032624940 + 51748.96427478889 * self.t)
X1 += 0.00000000005 * math.cos(2.42117857215 + 53399.86794141051 * self.t)
X1 += 0.00000000005 * math.cos(3.99699244254 + 27780.31262856009 * self.t)
X1 += 0.00000000005 * math.cos(6.27879128389 + 80482.22271142589 * self.t)
X1 += 0.00000000005 * math.cos(4.04842386586 + 25938.5837619231 * self.t)
X1 += 0.00000000005 * math.cos(0.51237606895 + 26402.33313892731 * self.t)
X1 += 0.00000000005 * math.cos(5.15124896226 + 25773.96077918809 * self.t)
X1 += 0.00000000005 * math.cos(1.58549672991 + 77616.94240889128 * self.t)
X1 += 0.00000000005 * math.cos(2.07804016284 + 34282.4222922663 * self.t)
X1 += 0.00000000006 * math.cos(0.18266035721 + 50050.17269325269 * self.t)
X1 += 0.00000000005 * math.cos(3.56668263605 + 17893.8716258491 * self.t)
X1 += 0.00000000004 * math.cos(2.54682197432 + 130432.64597835368 * self.t)
X1 += 0.00000000004 * math.cos(1.97821363637 + 51852.54468397449 * self.t)
X1 += 0.00000000004 * math.cos(0.66387666740 + 32370.73517408209 * self.t)
X1 += 0.00000000005 * math.cos(2.92531330514 + 204151.51545301828 * self.t)
X1 += 0.00000000006 * math.cos(3.31682565623 + 75930.75684933408 * self.t)
X1 += 0.00000000004 * math.cos(3.69733037417 + 25668.17468021549 * self.t)
X1 += 0.00000000005 * math.cos(6.08000873981 + 6043.9847638919 * self.t)
X1 += 0.00000000005 * math.cos(4.31437334805 + 39450.5966658569 * self.t)
X1 += 0.00000000005 * math.cos(3.85449175904 + 24491.96051677309 * self.t)
X1 += 0.00000000004 * math.cos(2.80245159203 + 51955.63745819309 * self.t)
X1 += 0.00000000004 * math.cos(5.66172545911 + 103498.66000202828 * self.t)
X1 += 0.00000000005 * math.cos(1.63974900109 + 52396.4627430707 * self.t)
X1 += 0.00000000004 * math.cos(4.13434256469 + 104505.63519426509 * self.t)
X1 += 0.00000000005 * math.cos(4.01690246624 + 23754.46293121869 * self.t)
X1 += 0.00000000005 * math.cos(0.99791835393 + 123201.08393375449 * self.t)
X1 += 0.00000000004 * math.cos(1.49755025165 + 54087.2495838491 * self.t)
X1 += 0.00000000004 * math.cos(5.20879448063 + 637.24008950771 * self.t)
X1 += 0.00000000004 * math.cos(2.16539647390 + 28791.7631137333 * self.t)
X1 += 0.00000000004 * math.cos(3.49822855731 + 23384.5308043821 * self.t)
X1 += 0.00000000005 * math.cos(6.09748705519 + 53029.2464823839 * self.t)
X1 += 0.00000000005 * math.cos(4.96740570225 + 18848.98373249069 * self.t)
X1 += 0.00000000004 * math.cos(3.38348270644 + 26824.02347258949 * self.t)
X1 += 0.00000000004 * math.cos(2.28014232477 + 25352.2704455259 * self.t)
X1 += 0.00000000004 * math.cos(5.11054976948 + 133882.3344703199 * self.t)
X1 += 0.00000000004 * math.cos(2.44432694792 + 132658.51662954128 * self.t)
X1 += 0.00000000004 * math.cos(4.79464066723 + 53772.23654291649 * self.t)
X1 += 0.00000000004 * math.cos(3.58245667544 + 183571.16555011149 * self.t)
X1 += 0.00000000004 * math.cos(1.18282836563 + 167850.32676523308 * self.t)
X1 += 0.00000000005 * math.cos(0.57469466163 + 145204.99856862429 * self.t)
X1 += 0.00000000004 * math.cos(2.91942328210 + 27171.2271913513 * self.t)
X1 += 0.00000000004 * math.cos(2.74420174911 + 25005.0667267641 * self.t)
X1 += 0.00000000004 * math.cos(4.57300561972 + 50483.8844311295 * self.t)
X1 += 0.00000000004 * math.cos(1.71297602638 + 78786.53066029989 * self.t)
X1 += 0.00000000004 * math.cos(4.98597716653 + 949.4194264533 * self.t)
X1 += 0.00000000005 * math.cos(5.48294820542 + 45892.48661567349 * self.t)
X1 += 0.00000000004 * math.cos(5.09042463265 + 1795.5022612045 * self.t)
X1 += 0.00000000005 * math.cos(3.41506026848 + 26709.8907598969 * self.t)
X1 += 0.00000000005 * math.cos(2.24856476273 + 25466.4031582185 * self.t)
X1 += 0.00000000004 * math.cos(5.84172747790 + 52065.84377941249 * self.t)
X1 += 0.00000000004 * math.cos(1.67956658507 + 2222.1004520805 * self.t)
X1 += 0.00000000004 * math.cos(2.68517686010 + 78270.58180110609 * self.t)
X1 += 0.00000000004 * math.cos(4.37036821346 + 25653.94758621389 * self.t)
X1 += 0.00000000004 * math.cos(3.79249696812 + 49842.36607279289 * self.t)
X1 += 0.00000000004 * math.cos(0.19068653380 + 143005.91122533729 * self.t)
X1 += 0.00000000004 * math.cos(3.27752582001 + 78800.75775430149 * self.t)
X1 += 0.00000000004 * math.cos(4.85511136724 + 65697.31390725628 * self.t)
X1 += 0.00000000003 * math.cos(0.19189003895 + 52195.23222656469 * self.t)
X1 += 0.00000000004 * math.cos(5.02476065705 + 130459.42928625426 * self.t)
X1 += 0.00000000004 * math.cos(3.97588615914 + 24491.47288180609 * self.t)
X1 += 0.00000000004 * math.cos(4.14710532879 + 3178.38960805111 * self.t)
X1 += 0.00000000004 * math.cos(2.12747516147 + 220.16882495529 * self.t)
X1 += 0.00000000004 * math.cos(4.96500381777 + 52250.8316991992 * self.t)
X1 += 0.00000000003 * math.cos(4.30211830006 + 78160.86046798748 * self.t)
X1 += 0.00000000004 * math.cos(4.46270461199 + 87367.86023632369 * self.t)
X1 += 0.00000000004 * math.cos(1.72859821695 + 130435.87818999648 * self.t)
X1 += 0.00000000004 * math.cos(4.85546199905 + 53234.94439585409 * self.t)
X1 += 0.00000000004 * math.cos(1.63790933088 + 26575.7817103119 * self.t)
X1 += 0.00000000004 * math.cos(4.02571570033 + 25600.5122078035 * self.t)
X1 += 0.00000000003 * math.cos(0.66645810174 + 1486.2239385487 * self.t)
X1 += 0.00000000005 * math.cos(4.92448477691 + 86144.04239554508 * self.t)
X1 += 0.00000000004 * math.cos(2.91181818574 + 32132.3755404331 * self.t)
X1 += 0.00000000004 * math.cos(2.61773549272 + 66653.40128383189 * self.t)
X1 += 0.00000000003 * math.cos(1.93014248251 + 52310.1591502169 * self.t)
X1 += 0.00000000003 * math.cos(3.81387230068 + 45290.90021070109 * self.t)
X1 += 0.00000000003 * math.cos(0.34402480925 + 70383.86408886709 * self.t)
X1 += 0.00000000003 * math.cos(4.41765442359 + 52252.31617190749 * self.t)
X1 += 0.00000000005 * math.cos(1.98227688514 + 52808.83383994509 * self.t)
X1 += 0.00000000003 * math.cos(5.60337796614 + 1588.82907780029 * self.t)
X1 += 0.00000000004 * math.cos(5.75007070234 + 58857.2749540315 * self.t)
X1 += 0.00000000004 * math.cos(4.77171683373 + 51951.70530492999 * self.t)
X1 += 0.00000000003 * math.cos(3.97223691848 + 50264.8506174147 * self.t)
X1 += 0.00000000004 * math.cos(2.74364830901 + 20043.9183776823 * self.t)
X1 += 0.00000000004 * math.cos(5.35376721002 + 128320.99566497609 * self.t)
X1 += 0.00000000003 * math.cos(5.53627007155 + 25986.18444079209 * self.t)
X1 += 0.00000000003 * math.cos(0.12735495966 + 26190.1094773233 * self.t)
X1 += 0.00000000004 * math.cos(3.03031240281 + 136722.83537534589 * self.t)
X1 += 0.00000000003 * math.cos(4.41334655990 + 65717.47130312308 * self.t)
X1 += 0.00000000003 * math.cos(4.19581525920 + 181026.49291321907 * self.t)
X1 += 0.00000000003 * math.cos(2.72755670550 + 51535.66517935089 * self.t)
X1 += 0.00000000004 * math.cos(1.89805960768 + 129799.86223904048 * self.t)
X1 += 0.00000000003 * math.cos(0.20996976206 + 26073.91986505609 * self.t)
X1 += 0.00000000003 * math.cos(5.45365526915 + 26102.3740530593 * self.t)
X1 += 0.00000000003 * math.cos(3.40818167058 + 52168.44891866409 * self.t)
X1 += 0.00000000004 * math.cos(0.98011046066 + 52155.8927047651 * self.t)
X1 += 0.00000000004 * math.cos(2.86756899369 + 37698.6989174319 * self.t)
X1 += 0.00000000003 * math.cos(5.28107423901 + 51109.06698847489 * self.t)
X1 += 0.00000000003 * math.cos(3.44421079458 + 26247.4486938499 * self.t)
        X1 += 0.00000000003 * math.cos(2.21941423663 + 25928.8452242655 * self.t)
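# Illustrative sketch only (standalone, separate from the series evaluation above, whose
# class context lies outside this excerpt): every term has the form A * cos(phase + freq * t),
# so the same evaluation can be expressed as a sum over (A, phase, freq) triples.
import math


def eval_cosine_series(terms, t):
    """Return sum(A * cos(phase + freq * t)) over (A, phase, freq) triples."""
    return sum(a * math.cos(phase + freq * t) for a, phase, freq in terms)


# Example with the first two coefficients listed above; evaluating
# eval_cosine_series(example_terms, t) reproduces those two contributions to X1.
example_terms = [
    (0.00000000006, 1.81837030655, 161079.61616398748),
    (0.00000000006, 3.17899498144, 103932.37173990508),
]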
from app.utils import splitDict
from app.service.charts import Charts
class groupsCharts(Charts):
def __init__(self, bddCon):
super().__init__(bddCon)
def charts(self, form):
charts = list()
recall = dict()
error = False
# Variables input
date = self.pickDate(form, recall)[0]
groupName = form.groups.data
recall["Groupe"] = groupName
        # If errors are caught, stop here
sql = """
SELECT COUNT(job_.id_job_) AS nb_job
FROM job_, groupes
WHERE job_.id_groupe = groupes.id_groupe
AND groupes.group_name = '{groupName}'
{date}
GROUP BY groupes.id_groupe;
"""
jobsTotal = self.e.fetch(command=sql.format(date=date, groupName=groupName))
if(super().detectError(jobsTotal)):
return charts, recall, True
# Group, conso
sql = """
SELECT COUNT(job_.id_job_) as nb_job
FROM job_, groupes
WHERE job_.id_groupe = groupes.id_groupe
AND groupes.group_name = '{groupName}'
{test}
{date}
GROUP BY groupes.id_groupe;
"""
jobsSuccess = self.e.fetch(command=sql.format( date=date,
test = 'AND (job_.failed = 0 OR job_.exit_status = 0)',
groupName = groupName))
jobsFailed = self.e.fetch(command=sql.format( date=date,
test = 'AND job_.failed != 0 AND job_.exit_status != 0',
groupName = groupName))
jobsSuccess = super().nameDict("Jobs réussi", super().isNullDict("nb_job", jobsSuccess))
jobsFailed = super().nameDict("Jobs mauvais", super().isNullDict("nb_job", jobsFailed))
jobsSuccessFailed = (jobsSuccess, jobsFailed)
# Group, Exec time
sql = """
SELECT min(job_.ru_wallclock) as min, avg(job_.ru_wallclock) as avg, max(job_.ru_wallclock) as max
FROM job_, groupes
WHERE job_.id_groupe = groupes.id_groupe
AND groupes.group_name = '{groupName}'
{date}
GROUP BY groupes.group_name ;
"""
execTimeMAM = self.e.fetch(command=sql.format( date=date,
groupName = groupName))
execTimeMAM = splitDict(execTimeMAM)
sql = """
SELECT COUNT(job_.id_job_) as {select}
FROM job_, groupes
WHERE job_.id_groupe = groupes.id_groupe
AND groupes.group_name = '{groupName}'
AND (job_.failed = 0 OR job_.exit_status = 0)
{date}
            -- average computed by the nested query
AND job_.ru_wallclock {test} (
SELECT AVG(job_.ru_wallclock)
FROM job_, groupes
WHERE job_.id_groupe = groupes.id_groupe
AND groupes.group_name = '{groupName}'
AND (job_.failed = 0 OR job_.exit_status = 0)
{date}
GROUP BY groupes.group_name)
GROUP BY groupes.group_name ;
"""
execTimeSupAvg = self.e.fetch(command=sql.format( select='sup_avg',
date=date,
test = ">",
groupName = groupName))
execTimeInfAvg = self.e.fetch(command=sql.format( select='inf_avg',
date=date,
test = "<",
groupName = groupName))
        execTimeSupAvg = super().nameDict("Temps d'exécution moyen supérieur", super().isNullDict("sup_avg", execTimeSupAvg))
        execTimeInfAvg = super().nameDict("Temps d'exécution moyen inférieur", super().isNullDict("inf_avg", execTimeInfAvg))
execTimeComparaison = (execTimeSupAvg, execTimeInfAvg)
sql = """
SELECT COUNT(job_.id_job_) as {select}
FROM job_, groupes
WHERE job_.id_groupe = groupes.id_groupe
AND groupes.group_name = '{groupName}'
AND (job_.failed = 0 OR job_.exit_status = 0)
{date}
AND job_.ru_wallclock {test}
GROUP BY groupes.group_name ;
"""
execTime1 = self.e.fetch(command=sql.format( select='exectime',
date=date,
test = " < 86400 ",
groupName=groupName))
execTime2 = self.e.fetch(command=sql.format( select='exectime',
date=date,
test = " > 86400 AND job_.ru_wallclock < 604800 ",
groupName=groupName))
execTime3 = self.e.fetch(command=sql.format( select='exectime',
date=date,
test = " > 604800 AND job_.ru_wallclock < 18144000 ",
groupName=groupName))
execTime4 = self.e.fetch(command=sql.format( select='exectime',
date=date,
test = " > 18144000 ",
groupName=groupName))
execTime1 = super().nameDict("< 24", super().isNullDict("exectime", execTime1))
execTime2 = super().nameDict("[24; 168]", super().isNullDict("exectime", execTime2))
execTime3 = super().nameDict("[168; 5 040]", super().isNullDict("exectime", execTime3))
execTime4 = super().nameDict("> 5 040", super().isNullDict("exectime", execTime4))
        execTime = (execTime1, execTime2, execTime3, execTime4)  # Some values may disappear from the chart because their value is 0.
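        # Bucket bounds above are in seconds: 86400 s = 24 h, 604800 s = 168 h (one week),
        # 18144000 s = 5040 h (~7 months); the bucket labels are expressed in hours.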
# Mem Usage
sql = """
SELECT MAX(job_.maxvmem) AS max, AVG(job_.maxvmem) AS avg, MIN(job_.maxvmem) AS min
FROM job_, groupes
WHERE job_.id_groupe = groupes.id_groupe
AND groupes.group_name = '{groupName}'
AND (job_.failed = 0 OR job_.exit_status = 0)
{date}
GROUP BY groupes.group_name ;
"""
memUseMAM = self.e.fetch(command=sql.format( date=date,
groupName=groupName))
memUseMAM = splitDict(memUseMAM)
sql = """
SELECT COUNT(job_.id_job_) as {select}
FROM job_, groupes
WHERE job_.id_groupe = groupes.id_groupe
AND groupes.group_name = '{groupName}'
AND (job_.failed = 0 OR job_.exit_status = 0)
{date}
AND job_.maxvmem {test} (
SELECT AVG(job_.maxvmem)
FROM job_, groupes
WHERE job_.id_groupe = groupes.id_groupe
AND groupes.group_name = '{groupName}'
AND (job_.failed = 0 OR job_.exit_status = 0)
{date}
GROUP BY groupes.group_name)
GROUP BY groupes.group_name ;
"""
memUseSupAvg = self.e.fetch(command=sql.format( select='jobs_sup_avg',
date=date,
test = ">",
groupName=groupName))
memUseInfAvg = self.e.fetch(command=sql.format( select='jobs_inf_avg',
date=date,
test = "<",
groupName=groupName))
memUseSupAvg = super().nameDict("Utilisation de la mémoire moyenne supérieur", super().isNullDict("jobs_sup_avg", memUseSupAvg))
memUseInfAvg = super().nameDict("Utilisation de la mémoire moyenne inférieur", super().isNullDict("jobs_inf_avg", memUseInfAvg))
memUseComparaison = (memUseSupAvg, memUseInfAvg)
sql = """
SELECT COUNT(job_.id_job_) as {select}
FROM job_, groupes
WHERE job_.id_groupe = groupes.id_groupe
AND groupes.group_name = '{groupName}'
AND (job_.failed = 0 OR job_.exit_status = 0)
{date}
            AND job_.maxvmem {test}
GROUP BY groupes.group_name ;
"""
mUsage1 = self.e.fetch(command=sql.format( select='musage',
date=date,
test = " < 1073741824 ",
groupName=groupName))
mUsage2 = self.e.fetch(command=sql.format( select='musage',
date=date,
test = " > 1073741824 AND job_.maxvmem < 4294967296 ",
groupName=groupName))
mUsage3 = self.e.fetch(command=sql.format( select='musage',
date=date,
test = " > 4294967296 AND job_.maxvmem < 858993459 ",
groupName=groupName))
mUsage4 = self.e.fetch(command=sql.format( select='musage',
date=date,
test = " > 8589934592 AND job_.maxvmem < 17179869184 ",
groupName=groupName))
mUsage5 = self.e.fetch(command=sql.format( select='musage',
date=date,
test = " > 17179869184 AND job_.maxvmem < 34359738368 ",
groupName=groupName))
mUsage6 = self.e.fetch(command=sql.format( select='musage',
date=date,
test = " > 34359738368 AND job_.maxvmem < 68719476736 ",
groupName=groupName))
mUsage7 = self.e.fetch(command=sql.format( select='musage',
date=date,
test = " > 68719476736 AND job_.maxvmem < 137438953472 ",
groupName=groupName))
mUsage8 = self.e.fetch(command=sql.format( select='musage',
date=date,
test = " > 137438953472 ",
groupName=groupName))
mUsage1 = super().nameDict("< 1", super().isNullDict("musage", mUsage1))
mUsage2 = super().nameDict("[1; 4]", super().isNullDict("musage", mUsage2))
mUsage3 = super().nameDict("[4; 8]", super().isNullDict("musage", mUsage3))
mUsage4 = super().nameDict("[8; 16]", super().isNullDict("musage", mUsage4))
mUsage5 = super().nameDict("[16; 32]", super().isNullDict("musage", mUsage5))
mUsage6 = super().nameDict("[32; 64]", super().isNullDict("musage", mUsage6))
mUsage7 = super().nameDict("[64; 128]", super().isNullDict("musage", mUsage7))
mUsage8 = super().nameDict("> 128", super().isNullDict("musage", mUsage8))
        memUsage = (mUsage1, mUsage2, mUsage3, mUsage4, mUsage5, mUsage6, mUsage7, mUsage8)  # Some values may disappear from the chart because their value is 0.
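        # Bucket bounds above are in bytes: 1073741824 = 1 GiB, 4294967296 = 4 GiB,
        # 8589934592 = 8 GiB, 17179869184 = 16 GiB, 34359738368 = 32 GiB,
        # 68719476736 = 64 GiB, 137438953472 = 128 GiB; the bucket labels are in GiB.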
# Slots usage
sql = """
SELECT min(job_.slots) as min, avg(job_.slots) as avg, max(job_.slots) as max
FROM job_, groupes
WHERE job_.id_groupe = groupes.id_groupe
AND groupes.group_name = '{groupName}'
AND (job_.failed = 0 OR job_.exit_status = 0)
{date}
GROUP BY groupes.group_name ;
"""
slotsPerJobsMAM = self.e.fetch(command=sql.format( date=date,
groupName=groupName))
slotsPerJobsMAM = splitDict(slotsPerJobsMAM)
sql = """
SELECT COUNT(job_.id_job_) as {select}
FROM job_, groupes
WHERE job_.id_groupe = groupes.id_groupe
AND groupes.group_name = '{groupName}'
AND (job_.failed = 0 OR job_.exit_status = 0)
{date}
AND job_.slots {test} (
SELECT AVG(job_.slots)
FROM job_, groupes
WHERE job_.id_groupe = groupes.id_groupe
AND groupes.group_name = '{groupName}'
AND (job_.failed = 0 OR job_.exit_status = 0)
{date}
GROUP BY groupes.group_name)
GROUP BY groupes.group_name ;
"""
slotsPerJobsSupAvg = self.e.fetch(command=sql.format( select='jobs_sup_avg',
date=date,
test = ">",
groupName=groupName))
slotsPerJobsInfAvg = self.e.fetch(command=sql.format( select='jobs_inf_avg',
date=date,
test = "<",
groupName=groupName))
slotsPerJobsSupAvg = super().nameDict("Slots par job moyen supérieur", super().isNullDict("jobs_sup_avg", slotsPerJobsSupAvg))
slotsPerJobsInfAvg = super().nameDict("Slots par job moyen inférieur", super().isNullDict("jobs_inf_avg", slotsPerJobsInfAvg))
slotsPerJobsComparaison = (slotsPerJobsSupAvg, slotsPerJobsInfAvg)
sql = """
SELECT COUNT(job_.id_job_) as {select}
FROM job_, groupes
WHERE job_.id_groupe = groupes.id_groupe
AND groupes.group_name = '{groupName}'
AND (job_.failed = 0 OR job_.exit_status = 0)
{date}
AND job_.slots {test}
GROUP BY groupes.group_name;
"""
slots1 = self.e.fetch(command=sql.format( select='slots',
date=date,
test = " = 1 ",
groupName=groupName))
slots2 = self.e.fetch(command=sql.format( select='slots',
date=date,
test = " > 1 AND job_.slots <= 4 ",
groupName=groupName))
slots3 = self.e.fetch(command=sql.format( select='slots',
date=date,
test = " > 5 AND job_.slots <= 8 ",
groupName=groupName))
slots4 = self.e.fetch(command=sql.format( select='slots',
date=date,
test = " > 9 AND job_.slots <= 16 ",
groupName=groupName))
slots5 = self.e.fetch(command=sql.format( select='slots',
date=date,
test = " > 17 AND job_.slots <= 32 ",
groupName=groupName))
slots6 = self.e.fetch(command=sql.format( select='slots',
date=date,
test = " > 33 AND job_.slots <= 64 ",
groupName=groupName))
slots7 = self.e.fetch(command=sql.format( select='slots',
date=date,
test = " > 65 AND job_.slots <= 128 ",
groupName=groupName))
slots8 = self.e.fetch(command=sql.format( select='slots',
date=date,
test = " > 128 ",
groupName=groupName))
slots1 = super().nameDict("= 1", super().isNullDict("slots", slots1))
slots2 = super().nameDict("[1; 4]", super().isNullDict("slots", slots2))
slots3 = super().nameDict("[5; 8]", super().isNullDict("slots", slots3))
slots4 = super().nameDict("[9; 16]", super().isNullDict("slots", slots4))
slots5 = super().nameDict("[17; 32]", super().isNullDict("slots", slots5))
slots6 = super().nameDict("[33; 64]", super().isNullDict("slots", slots6))
slots7 = super().nameDict("[65; 128]", super().isNullDict("slots", slots7))
slots8 = super().nameDict("> 128", super().isNullDict("slots", slots8))
slotsPerJob = (slots1, slots2, slots3, slots4, slots5, slots6, slots7, slots8)
charts.append( {"id": "chart1", "name" : "Information utilisateur/groupe", "charts" : (
{"id":"jobsSuccessFailed", "type": "PieChart", "values" : jobsSuccessFailed, "title" : "Taux réussite"},
)})
charts.append( {"id": "chart2", "name" : "Temps d'éxecution", "charts": (
{"id":"execTimeMAM", "type": "BarChart", "values" : execTimeMAM, "title" : "Temps d'exécution (heures)"},
{"id":"execTimeComparaison", "type": "PieChart", "values" : execTimeComparaison, "title" : "Temps d'exécution moyen (heures)"},
{"id":"execTime", "type": "BarChart", "values" : execTime, "title" : "Temps d'exécution (heures)"}
)})
charts.append( {"id": | |
# Akitrix.py
import json
import os
import threading
import time
import urllib.error
import urllib.request
import discord
import pymysql
from PIL import Image, ImageEnhance
from discord.ext import commands
import AkitrixDB
import Image_Edit
'''Unique token for bot to connect with Discord API'''
TOKEN = 'YOUR API KEY'
def get_prefix(bot, message):
with open('prefix.json', 'r') as f:
guild_dict = json.load(f)
if str(message.guild.id) in guild_dict.keys():
return guild_dict[str(message.guild.id)]
else:
return ';'
intents = discord.Intents(messages=True, guilds=True, members=True, presences=True)
bot = commands.Bot(command_prefix=get_prefix, intents=intents)
'''Writes current time to a text file every 10 minutes to track last running timestamp of bot'''
def update_time():
threading.Timer(600.0, update_time).start()
with open('BotRuntime.txt', 'w') as f:
f.write(str(time.asctime()) + '\n')
update_time()
permissionlist = ['administrator', 'ban_members', 'kick_members',
'manage_channels', 'manage_guild', 'manage_messages',
'add_reactions', 'read_messages',
'send_messages', 'send_tts_messages',
'embed_links', 'attach_files', 'read_message_history', 'mention_everyone', 'external_emojis',
'connect', 'speak', 'mute_members', 'deafen_members', 'move_members',
'change_nickname', 'manage_nicknames', 'manage_roles', 'manage_webhooks', 'manage_emojis',
'bot_owner']
'''Throughout the project, 'ctx' is a discord.Context object which contains details about the guild and channel where a
command was invoked, as well as the user that invoked it.'''
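# Illustrative sketch (not one of the bot's original commands): a minimal command that
# reads a few of the Context attributes described above. The command name "ctxinfo" is
# an assumption made for this example.
@bot.command(name="ctxinfo")
async def ctxinfo(ctx):
    await ctx.send("{0.author.name} invoked this in #{0.channel.name} on {0.guild.name}".format(ctx))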
@bot.command(name="akiset")
async def set_prefix(ctx, prefix):
with open('prefix.json', 'r') as json_file:
guild_dict = json.load(json_file)
guild_dict[str(ctx.guild.id)] = prefix
with open('prefix.json', 'w') as json_file:
json.dump(guild_dict, json_file, indent=4)
await ctx.send("The prefix is now set to ``{0}``".format(prefix))
'''Lists permissions of specified user in chat'''
@bot.command(name="perms")
async def list_perms(ctx, *member):
if len(member) == 1:
member = ctx.guild.get_member(int(''.join(d for d in member[0] if d.isdigit())))
else:
member = ctx.message.author
memberperms = []
if member.guild_permissions.administrator:
await ctx.send('This user is an administrator!')
else:
for x in permissionlist:
if x == "bot_owner" and member.id in [432126465254096896]:
memberperms.append(x)
elif x == "bot_owner":
pass
else:
check = getattr(member.guild_permissions, x)
if check:
memberperms.append(x)
memberperms_str = '\n'.join(memberperms)
embed = discord.Embed(color=discord.Colour.gold(), title="{0}'s roles".format(member.name),
description=memberperms_str)
embed.set_thumbnail(url=member.avatar_url)
await ctx.send(embed=embed)
@bot.command(name="perm_check")
async def perm_check(ctx, member, required_perms):
missing_perms = []
for i in required_perms:
if i == "bot_owner" and (ctx.author.id not in [<PASSWORD>]):
missing_perms.append(i)
elif i == "bot_owner":
pass
else:
check = getattr(member.guild_permissions, i)
if not check:
missing_perms.append(i)
if len(missing_perms) != len(required_perms):
return True
else:
await ctx.send(
f"You are missing one or more of the following permissions: `{','.join(i for i in missing_perms)}`")
return False
@bot.command(name="yeet")
async def yeet(ctx, *, message):
message = message.lower()
message2 = list(message)
for i in range(1, len(message2), 2):
message2[i] = message2[i].upper()
await ctx.send(''.join(message2))
@bot.listen()
async def on_member_join(member):
channel = member.guild.system_channel
filename = str(member.id)
filepath = 'assets/Avatars/' + filename
if "gif" in str(member.avatar_url):
filepath += ".gif"
else:
filepath += "-process.jpg"
with open(os.path.join(filepath), "wb+") as f:
await member.avatar_url.save(filepath)
if filepath[-3:] == "gif":
new_filepath = filepath[:-3] + "-process.jpg"
Image.open(filepath).convert('RGB').save(new_filepath)
filepath = new_filepath
final_image = Image_Edit.welcome_image(filepath, member.id)
await channel.send(content="Hello there, {0.mention}".format(member), file=discord.File(final_image))
os.remove(final_image)
datamod = AkitrixDB.Database()
datamod.initialize(member.guild.id, member.guild.name)
datamod.add_member(member.id, member.name, str(member.avatar_url), 0, 10000, 1, 100)
timer = bot.get_command("reset-timer")
await timer.__call__(channel, member.id)
datamod.terminate()
@bot.listen()
async def on_member_remove(member):
channel = member.guild.system_channel
await channel.send("Goodbye {0}...we'll miss you!".format(member.name))
datamod = AkitrixDB.Database()
datamod.initialize(member.guild.id, member.guild.name)
datamod.remove_member(member.id)
datamod.terminate()
@bot.command(name="cr")
async def newroleyay(ctx, *, newrole):
check = await perm_check(ctx, ctx.author, ["manage_roles"])
if check:
guild = ctx.guild
await guild.create_role(name=newrole)
await ctx.send("New Role Created: ```{0}```".format(newrole))
@bot.command(name="ar")
async def add_role(ctx, member, *role):
check = await perm_check(ctx, ctx.author, ["manage_roles"])
if check:
role = discord.utils.get(ctx.guild.roles, name=' '.join(role))
        if role is None:
            await ctx.send("This role is not available")
            return
member = ctx.guild.get_member(int(''.join(d for d in member if d.isdigit())))
await member.add_roles(role)
await ctx.send("`{0}` added to {1.mention}".format(role.name, member))
@bot.command(name="rr")
async def remove_role(ctx, member, *role):
check = await perm_check(ctx, ctx.author, ["manage_roles"])
if check:
role = discord.utils.get(ctx.guild.roles, name=' '.join(role))
        if role is None:
            await ctx.send("This role is not available")
            return
member = ctx.guild.get_member(int(''.join(d for d in member if d.isdigit())))
await member.remove_roles(role)
await ctx.send("`{0}` removed from {1.mention}".format(role.name, member))
@bot.command(name="dr")
async def byerole(ctx, *, oldrole):
check = await perm_check(ctx, ctx.author, ["manage_roles"])
if check:
guild = ctx.guild
role = discord.utils.get(guild.roles, name=oldrole)
if role is None:
role = guild.get_role(int(oldrole))
await role.delete()
await ctx.send("R.I.P This role got deleted: ```{0}```".format(oldrole))
@bot.command(name="mention")
async def mentiontime(ctx, *, person):
guild = ctx.guild
member = discord.utils.get(guild.members, name=person)
if member is None:
member = guild.get_role(int(person))
await ctx.send("{0} wants you now, {1.mention}".format(ctx.message.author.mention, member))
@bot.command(name="enlarge", aliases=['emojibig', 'emoteyeet'])
async def emojibig(ctx, *, emote):
emoji_converter = commands.PartialEmojiConverter()
emoji = await emoji_converter.convert(ctx, emote)
emotestring = str(emote)
colon1 = emotestring.find(':')
colon2 = emotestring.rfind(':')
filename = emotestring[colon1 + 1:colon2]
filepath = 'assets/Emotes/' + filename
if emotestring[1] == "a":
filepath += ".gif"
else:
filepath += ".jpg"
with open(os.path.join(filepath), "wb+") as f:
await emoji.url.save(filepath)
await ctx.send(file=discord.File(filepath))
os.remove(filepath)
@bot.command(name="avatar", aliases=["av"])
async def display_avatar(ctx, *member):
if len(member) == 1:
member = ctx.guild.get_member(int(''.join(d for d in member[0] if d.isdigit())))
else:
member = ctx.message.author
filename = ''.join(i for i in member.name if i.isalnum())
filepath = 'assets/Avatars/' + filename
if "gif" in str(member.avatar_url):
filepath += ".gif"
else:
filepath += ".jpg"
with open(os.path.join(filepath), "wb+") as f:
await member.avatar_url.save(filepath)
await ctx.send(file=discord.File(filepath))
os.remove(filepath)
@bot.command(name="deepfry")
async def deepfry(ctx, *avatar):
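    """Deep-fry an image by boosting its colour saturation.

    Called with no argument (or only a small number), the most recent image posted in
    the last 20 messages is used, and the number, if given, is the enhancement factor
    (default 10). Called with a member mention or ID, that member's avatar is used,
    optionally followed by a trailing enhancement factor.
    """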
factor = 10
if len(avatar) == 0 or (avatar[0].isnumeric() and len(avatar[0]) <= 4):
messages = await ctx.history(limit=20).flatten()
message_index = -1
is_embed = False
for message in range(0, len(messages)):
if (len(messages[message].embeds) >= 1 and not isinstance(messages[message].embeds[0].image,
type(discord.Embed.Empty))):
is_embed = True
message_index = message
break
elif len(messages[message].attachments) >= 1:
message_index = message
break
if is_embed:
image_url = messages[message_index].embeds[0].image.url
else:
image_url = messages[message_index].attachments[0].url
dot = image_url.rfind('.')
image_url2 = 'assets/DeepFry/' + str(ctx.author.id)
if image_url[dot + 1:] == "gif":
ext = ".gif"
elif image_url[dot + 1:] == "png":
ext = "-original.png"
else:
ext = "-original.jpg"
image_url2 += ext
if not is_embed:
await messages[message_index].attachments[0].save(image_url2)
else:
urllib.request.urlretrieve(image_url, image_url2)
if len(avatar) != 0 and avatar[0].isnumeric():
factor = int(avatar[0])
else:
if avatar[0].isnumeric() and len(avatar[0]) > 4:
            member = ctx.guild.get_member(int(avatar[0]))
else:
member = ctx.guild.get_member(int(''.join(d for d in avatar[0] if d.isdigit())))
if avatar[-1].isnumeric() and len(avatar[-1]) <= 4:
factor = int(avatar[-1])
image_url = member.avatar_url
image_url2 = 'assets/DeepFry/' + str(ctx.author.id)
if "gif" in str(image_url):
ext = ".gif"
else:
ext = "-original.jpg"
image_url2 += ext
with open(image_url2, "wb+") as f:
await member.avatar_url.save(image_url2)
if image_url2[-3:] == "gif":
temp_image = image_url2[:-3] + "-original.jpg"
Image.open(image_url2).convert('RGB').save(temp_image)
image_url2 = temp_image
img = Image.open(image_url2)
converter = ImageEnhance.Color(img)
img2 = converter.enhance(factor)
img2.convert("RGB").save(image_url2)
await ctx.send(file=discord.File(image_url2))
os.remove(image_url2)
@bot.command(name="roles")
async def listroles(ctx):
guild = ctx.guild
list_roles = []
for role in guild.roles:
if len(role.members) == 0 or not role.members[0].bot:
list_roles.append(str(role))
list_roles_str = ', '.join(list_roles)
embed = discord.Embed(color=discord.Colour.gold(), title="Roles in: `{0}`".format(guild.name),
description=f"```{list_roles_str}```")
embed.set_thumbnail(url=guild.icon_url)
await ctx.send(embed=embed)
@bot.command(name="report")
async def report(ctx, *report):
i = await bot.application_info()
owner = i.owner
await owner.send(
"**Server:** {0}\n**User:** {1}\n**Report:** {2}".format(ctx.guild.name, ctx.author.name, ' '.join(report)))
@bot.command(name="fulldb")
async def add_db(ctx):
check = await perm_check(ctx, ctx.author, ["bot_owner"])
if check:
try:
members = ctx.guild.members
datamod = AkitrixDB.Database()
datamod.initialize(ctx.guild.id, ctx.guild.name)
inmembers = []
for i in range(len(members)):
member = members[i]
added = datamod.add_member(member.id, member.name, str(member.avatar_url), 0, 10000, 1, 100)
if added:
inmembers.append(member)
datamod.terminate()
await ctx.send("The members are added to the database!")
if len(inmembers) != 0:
await ctx.send("Some new members have been added! Would you like to view them...?")
def check(message):
return message.author == ctx.author
response = await bot.wait_for('message', check=check)
if response.content == "y":
embed = discord.Embed(color=discord.Colour.dark_blue(), title="Newly added members:")
embed.set_thumbnail(url=ctx.guild.icon_url)
embed.add_field(name="Members", value=str([i.mention for i in inmembers]), inline=False)
await ctx.send(embed=embed)
except pymysql.err.OperationalError:
await ctx.send("The server is having issues...")
@bot.command(name="setcred")
async def set_credits(ctx, *people):
check = await perm_check(ctx, ctx.author, ["administrator", "bot_owner"])
if check:
if people[0] == "all":
members = [i for i in ctx.guild.members]
elif people[0] == "allindb":
members = "all"
else:
members = [ctx.guild.get_member(int(''.join(d for d in people[0] if d.isdigit())))]
amount = 1000
if people[-1].isdigit():
amount = int(people[-1])
datamod = AkitrixDB.Database()
datamod.initialize(0, 'Main')
if members != "all":
for member in members:
datamod.reset_credits(member.id, amount)
else:
datamod.reset_all_credits(amount)
datamod.terminate()
if len(members) > 1:
await ctx.send("Everyone's credits has been set to {0}".format(amount))
else:
await ctx.send("{0}'s credits has been set to {1}".format(members[0].name, amount))
@bot.command(name="resetxp")
async def resetxp(ctx, *people):
check = await perm_check(ctx, ctx.author, ["administrator"])
if check:
if people[0] == "all":
members = [i for i in ctx.guild.members]
else:
members = [ctx.guild.get_member(int(''.join(d for d in people[0] if d.isdigit())))]
datamod = AkitrixDB.Database()
datamod.initialize(0, 'Main')
for member in members:
datamod.reset_xp(member.id)
datamod.terminate()
if len(members) > 1:
await ctx.send("Everyone's XP has been reset")
else:
await ctx.send("{0}'s XP has been reset".format(members[0].name))
# @bot.event
# async def on_message(message):
# await bot.process_commands(message)
# # xp_gain = len(message.content) // (random.randint(3, 7))
# if len(message.embeds)!=0:
# for embed in message.embeds:
# print(embed.to_dict())
# datamod = AkitrixDB.Database()
# datamod.initialize('Main')
# datamod.update_xp(message.author.id, xp_gain)
@bot.event
async def on_user_update(before, after):
datamod = AkitrixDB.Database()
datamod.initialize(0, 'Main')
if before.avatar != after.avatar:
datamod.update_pfp(after.id, after.avatar_url)
if before.name != after.name:
datamod.update_name(after.id, after.name)
datamod.terminate()
@bot.event
async def on_guild_update(before, after):
if before.name != after.name:
datamod = AkitrixDB.Database()
datamod.initialize(before.id, after.name)
datamod.terminate()
@bot.command(name="profile")
async def profile(ctx, *member):
try:
if len(member) == 0:
member_id = ctx.message.author.id
elif isinstance(member[0], int):
member_id = member[0]
elif not member[0].isdigit():
            member_id = int(''.join(d for d in member[0] if d.isdigit()))
    def get_x_axis(self) -> tuple:
        """
        .. note::
            :class: toggle
            CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
                | o Sub GetXAxis(CATSafeArrayVariant oXAxis)
                |
                | Returns the coordinates X,Y,Z of the X axis of the axis
                | system.
                |
                | Parameters:
                |
                | oXAxis
                | A Safe Array made up of 3 doubles: X, Y, Z, representing the
| coordinates in model space of the X axis of the axis system.
|
|
| Example:
| The following example retrieves in XAxisCoord the coordinates of the X
| axis of the axisSystem axis system:
|
| Dim XAxisCoord(2)
| axisSystem.GetXAxis XAxisCoord
:param tuple o_x_axis:
        :return: tuple
        :rtype: tuple
"""
vba_function_name = 'get_x_axis'
vba_code = """
Public Function get_x_axis(axis_system)
Dim oXAxis (2)
axis_system.GetXAxis oXAxis
get_x_axis = oXAxis
End Function
"""
system_service = self.application.system_service
return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def get_y_axis(self) -> tuple:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetYAxis(CATSafeArrayVariant oYAxis)
|
| Returns the coordinates X,Y,Z of the Y axis of the axis
| system.
|
| Parameters:
|
| oYAxis
| A Safe Array made up of 3 doubles: X, Y, Z, representing the
| coordinates in model space of the Y axis of the axis system.
|
|
| Example:
| The following example retrieves in YAxisCoord the coordinates of the Y
| axis of the axisSystem axis system:
|
| Dim YAxisCoord(2)
| axisSystem.GetYAxis XAxisCoord
:param tuple o_y_axis:
        :return: tuple
        :rtype: tuple
"""
vba_function_name = 'get_y_axis'
vba_code = """
Public Function get_y_axis(axis_system)
Dim oYAxis (2)
axis_system.GetYAxis oYAxis
get_y_axis = oYAxis
End Function
"""
system_service = self.application.system_service
return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def get_z_axis(self) -> tuple:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetZAxis(CATSafeArrayVariant oZAxis)
|
| Returns the coordinates X,Y,Z of the Z axis of the axis
| system.
|
| Parameters:
|
| oZAxis
| A Safe Array made up of 3 doubles: X, Y, Z, representing the
| coordinates in model space of the Z axis of the axis system.
|
|
| Example:
| The following example retrieves in ZAxisCoord the coordinates of the Z
| axis of the axisSystem axis system:
|
| Dim ZAxisCoord(2)
| axisSystem.GetZAxis ZAxisCoord
:param tuple o_z_axis:
        :return: tuple
        :rtype: tuple
"""
vba_function_name = 'get_z_axis'
vba_code = """
Public Function get_z_axis(axis_system)
Dim oZAxis (2)
axis_system.GetZAxis oZAxis
get_z_axis = oZAxis
End Function
"""
system_service = self.application.system_service
return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def put_origin(self, i_origin: tuple) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub PutOrigin(CATSafeArrayVariant iOrigin)
|
| Defines the coordinates X,Y,Z of the origin point of the axis
| system.
|
| Parameters:
|
| iOrigin
| A Safe Array made up of 3 doubles: X, Y, Z, representing the
| coordinates in model space of the origin point of the axis system.
|
|
| Example:
| The following example puts in originCoord the new coordinates of the
| origin point of the axisSystem axis system:
|
| Dim originCoord(2)
| originCoord ( 0 ) = 100.000000
| originCoord ( 1 ) = 200.000000
| originCoord ( 2 ) = 10.000000
| axisSystem.PutOrigin originCoord
:param tuple i_origin:
:return: None
:rtype: None
"""
return self.axis_system.PutOrigin(i_origin)
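        # Illustrative usage (hedged sketch, not from the original source): with an
        # AxisSystem wrapper instance named axis_system, the origin from the docstring
        # example could be set with
        #     axis_system.put_origin((100.0, 200.0, 10.0))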
# # # # Autogenerated comment:
        # # some methods require a system service call because the method expects a VB array object
        # # to be passed to it, and there is no way to do this directly from Python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'put_origin'
# # vba_code = """
# # Public Function put_origin(axis_system)
# # Dim iOrigin (2)
# # axis_system.PutOrigin iOrigin
# # put_origin = iOrigin
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def put_vectors(self, i_vector_x: tuple, i_vector_y: tuple) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub PutVectors(CATSafeArrayVariant iVectorX,
| CATSafeArrayVariant iVectorY)
|
| Defines the coordinates X,Y,Z of the axes X and Y of the axis
| system.
|
| Parameters:
|
| iVectorX
| A Safe Array made up of 3 doubles: X, Y, Z, representing the
| coordinates in model space of the X axis vector of the axis system.
|
| iVectorY
| A Safe Array made up of 3 doubles: X, Y, Z, representing the
| coordinates in model space of the Y axis vector of the axis system.
|
|
| Example:
| The following example modifies in vectorXCoord and vectorYCoord the
| coordinates of the vectors of the axisSystem axis
| system:
|
| Dim vectorXCoord(2)
            | vectorXCoord ( 0 ) = 1.000000
            | vectorXCoord ( 1 ) = -1.000000
            | vectorXCoord ( 2 ) = 0.000000
| Dim vectorYCoord(2)
| vectorYCoord ( 0 ) = 0.000000
| vectorYCoord ( 1 ) = 0.000000
| vectorYCoord ( 2 ) = 1.000000
| axisSystem.PutVectors vectorXCoord, vectorYCoord
:param tuple i_vector_x:
:param tuple i_vector_y:
:return: None
:rtype: None
"""
return self.axis_system.PutVectors(i_vector_x, i_vector_y)
# # # # Autogenerated comment:
        # # some methods require a system service call because the method expects a VB array object
        # # to be passed to it, and there is no way to do this directly from Python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'put_vectors'
# # vba_code = """
# # Public Function put_vectors(axis_system)
# # Dim iVectorX (2)
# # axis_system.PutVectors iVectorX
# # put_vectors = iVectorX
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def put_x_axis(self, i_x_axis: tuple) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub PutXAxis(CATSafeArrayVariant iXAxis)
|
| Defines the coordinates X,Y,Z of the X axis of the axis
| system.
|
| Parameters:
|
| iXAxis
| A Safe Array made up of 3 doubles: X, Y, Z, representing the
| coordinates in model space of the X axis of the axis system.
|
|
| Example:
| The following example puts in XAxisCoord the new coordinates of the X
| axis of the axisSystem axis system:
|
| Dim XAxis(2)
| XAxis ( 0 ) = 100.000000
| XAxis ( 1 ) = 200.000000
| XAxis ( 2 ) = 10.000000
| axisSystem.PutXAxis XAxis
:param tuple i_x_axis:
:return: None
:rtype: None
"""
return self.axis_system.PutXAxis(i_x_axis)
# # # # Autogenerated comment:
        # # some methods require a system service call because the method expects a VB array object
        # # to be passed to it, and there is no way to do this directly from Python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'put_x_axis'
# # vba_code = """
# # Public Function put_x_axis(axis_system)
# # Dim iXAxis (2)
# # axis_system.PutXAxis iXAxis
# # put_x_axis = iXAxis
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def put_y_axis(self, i_y_axis: tuple) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub PutYAxis(CATSafeArrayVariant iYAxis)
|
| Defines the coordinates X,Y,Z of the Y axis of the axis
| system.
|
| Parameters:
|
| iYAxis
| A Safe Array made up of 3 doubles: X, Y, Z, representing the
| coordinates in model space of the Y axis of the axis system.
|
|
| Example:
            | The following example puts in YAxisCoord the new coordinates of the Y
            | axis of the axisSystem axis system:
            |
            | Dim YAxisCoord(2)
            | YAxisCoord ( 0 ) = 100.000000
            | YAxisCoord ( 1 ) = 200.000000
            | YAxisCoord ( 2 ) = 10.000000
            | axisSystem.PutYAxis YAxisCoord

        :param tuple i_y_axis:
        :return: None
        :rtype: None
        """
        return self.axis_system.PutYAxis(i_y_axis)
    def testOneQueryMappedWithScoreTagInt(self):
        """
        If one query is mapped to one reference, the scores matrix must have
        the correct score if a score tag is passed and the score is of type
        int (the AS:i:77 in the SAM record).
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:77',
]).replace(' ', '\t')
dm = DistanceMatrix()
with dataFile(data) as filename:
dm.addFile(filename, scoreTag='AS')
self.assertEqual(
{
'ref1': {
'query1': 77.0,
},
},
dm.scores)
self.assertEqual(77.0, dm.score('ref1', 'query1'))
def testOneQueryMappedWithScoreTagFloat(self):
"""
If one query is mapped to one reference, the scores matrix must have
the correct score if a score tag is passed and the score is of type
float (the AS:f:77.5 in the SAM record).
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:f:77.5',
]).replace(' ', '\t')
dm = DistanceMatrix()
with dataFile(data) as filename:
dm.addFile(filename, scoreTag='AS')
self.assertEqual(
{
'ref1': {
'query1': 77.5,
},
},
dm.scores)
self.assertEqual(77.5, dm.score('ref1', 'query1'))
def testNonExistentQueryNotMapped(self):
"""
If a query (not even existing in this case) is not mapped to the
reference, the score between the two must be zero.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
]).replace(' ', '\t')
dm = DistanceMatrix()
with dataFile(data) as filename:
dm.addFile(filename, scoreTag='AS')
self.assertEqual(0.0, dm.score('ref1', 'query1'))
def testNonExistentQuery(self):
"""
The score for a non-existent query must be zero.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
]).replace(' ', '\t')
dm = DistanceMatrix()
with dataFile(data) as filename:
dm.addFile(filename, scoreTag='AS')
self.assertEqual(0.0, dm.score('ref1', 'query1'))
def testQueryNotMapped(self):
"""
If a query did not map to a reference, the score between the two must
be zero.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'@SQ SN:ref2 LN:10',
'query1 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:77',
]).replace(' ', '\t')
dm = DistanceMatrix()
with dataFile(data) as filename:
dm.addFile(filename, scoreTag='AS')
self.assertEqual(
{
'ref2': {
'query1': 77,
},
},
dm.scores)
self.assertEqual(0.0, dm.score('ref1', 'query1'))
def testJaccardDistanceToSelf(self):
"""
The Jaccard distance between a reference and itself must be zero.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456',
]).replace(' ', '\t')
dm = DistanceMatrix()
with dataFile(data) as filename:
dm.addFile(filename)
self.assertEqual(0.0, dm.jaccardDistance('ref1', 'ref1'))
def testJaccardDistanceToIdentical(self):
"""
The Jaccard distance between a reference and another with the same set
of matching queries must be zero.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'@SQ SN:ref2 LN:10',
'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456',
'query1 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
'query2 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456',
'query2 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
]).replace(' ', '\t')
dm = DistanceMatrix()
with dataFile(data) as filename:
dm.addFile(filename)
        self.assertEqual(0.0, dm.jaccardDistance('ref1', 'ref2'))
def testJaccardDistanceWithNoQueriesInCommon(self):
"""
        The Jaccard distance between two references that have no matching
queries in common must be 1.0.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'@SQ SN:ref2 LN:10',
'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456',
'query2 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
]).replace(' ', '\t')
dm = DistanceMatrix()
with dataFile(data) as filename:
dm.addFile(filename)
self.assertEqual(1.0, dm.jaccardDistance('ref1', 'ref2'))
def testJaccardDistanceWithOneQueryInCommon(self):
"""
The Jaccard similarity between two references with one query in common
is one over the number of queries that match them in total (four),
i.e., 1/4 and the Jaccard distance is 1.0 minus this, or 3/4.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'@SQ SN:ref2 LN:10',
'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456',
'query1 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
'query2 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
'query3 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
'query4 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
]).replace(' ', '\t')
dm = DistanceMatrix()
with dataFile(data) as filename:
dm.addFile(filename)
self.assertEqual(0.75, dm.jaccardDistance('ref1', 'ref2'))
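    # Worked check of the docstring arithmetic above (illustration only):
    # queries(ref1) = {query1}; queries(ref2) = {query1, query2, query3, query4}
    # Jaccard distance = 1 - |intersection| / |union| = 1 - 1/4 = 0.75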
def testJaccardDistanceWithTwoQueriesInCommon(self):
"""
The Jaccard similarity between two references with two queries in
common is two over the number of queries that match them in total
(five), i.e., 2/5 and the Jaccard distance is 1.0 minus this, or 3/5.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'@SQ SN:ref2 LN:10',
'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456',
'query1 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
'query2 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
'query3 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
'query4 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
'query5 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456',
'query5 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
]).replace(' ', '\t')
dm = DistanceMatrix()
with dataFile(data) as filename:
dm.addFile(filename)
self.assertEqual(0.6, dm.jaccardDistance('ref1', 'ref2'))
def testSoergelDistanceWithNegativeScore(self):
"""
Soergel distance cannot be computed if a negative score is present.
A ValueError must be raised in such cases.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:-50',
]).replace(' ', '\t')
dm = DistanceMatrix()
with dataFile(data) as filename:
error = (fr"^Alignment 1 in {filename!r} has tag 'AS' with "
fr"negative value \(-50\)\.$")
self.assertRaisesRegex(ValueError, error, dm.addFile, filename,
scoreTag='AS')
def testSoergelDistanceWithOneQueryInCommonNoScoreTag(self):
"""
The Soergel similarity between two references with one query in common
if no score tag was given is one over the number of queries that match
them in total (four), i.e., 1/4 and the distance is 1.0 minus this, or
3/4.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'@SQ SN:ref2 LN:10',
'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456',
'query1 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
'query2 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
'query3 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
'query4 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
]).replace(' ', '\t')
dm = DistanceMatrix()
with dataFile(data) as filename:
dm.addFile(filename)
self.assertEqual(0.75, dm.soergelDistance('ref1', 'ref2'))
def testSoergelDistanceWithNoQueryInCommon(self):
"""
The Soergel similarity between two references with no queries in common
        when a score tag is given is the sum of the minimum scores (all are
zero) over the sum of the maximum scores (50 + 10 + 60 + 30 = 150),
i.e., zero, and the distance is 1.0 minus this, or 1.0.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'@SQ SN:ref2 LN:10',
'query1 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:50',
'query2 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:10',
'query3 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:60',
'query4 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:30',
]).replace(' ', '\t')
dm = DistanceMatrix()
with dataFile(data) as filename:
dm.addFile(filename, scoreTag='AS')
self.assertEqual(1.0, dm.soergelDistance('ref1', 'ref2'))
def testSoergelDistanceToIdentical(self):
"""
        The Soergel distance between two references with two queries in
        common and with the same scores must be zero.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'@SQ SN:ref2 LN:10',
'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:10',
'query1 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:10',
'query2 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:20',
'query2 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:20',
]).replace(' ', '\t')
dm = DistanceMatrix()
with dataFile(data) as filename:
dm.addFile(filename, scoreTag='AS')
self.assertEqual(0.0, dm.soergelDistance('ref1', 'ref2'))
def testSoergelDistanceSameQueriesDifferentScores(self):
"""
The Soergel similarity between two references with two queries in
common but with different scores is the sum of the minimum scores
(10 + 15 = 25) over the sum of the maximum scores (30 + 70 = 100),
or 1/4, and the distance is 1.0 minus this, or 3/4. The unrelated
query3 and ref3 are ignored.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'@SQ SN:ref2 LN:10',
'@SQ SN:ref3 LN:10',
'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:10',
'query1 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:30',
'query2 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:15',
'query2 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:70',
'query3 0 ref3 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:70',
]).replace(' ', '\t')
dm = DistanceMatrix()
with dataFile(data) as filename:
dm.addFile(filename, scoreTag='AS')
self.assertEqual(0.75, dm.soergelDistance('ref1', 'ref2'))
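    # Worked check of the docstring arithmetic above (illustration only):
    # sum of per-query minimum scores: min(10, 30) + min(15, 70) = 25
    # sum of per-query maximum scores: max(10, 30) + max(15, 70) = 100
    # Soergel distance = 1 - 25/100 = 0.75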
def testSoergelDistanceWithOneQueryInCommon(self):
| |
    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /bncFilteredAlarm/alarm/attributes/state (string)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """state must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)""",
})
self.__state = t
if hasattr(self, '_set'):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)
def _get_resourceId(self):
"""
Getter method for resourceId, mapped from YANG variable /bncFilteredAlarm/alarm/attributes/resourceId (string)
"""
return self.__resourceId
def _set_resourceId(self, v, load=False):
"""
Setter method for resourceId, mapped from YANG variable /bncFilteredAlarm/alarm/attributes/resourceId (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_resourceId is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_resourceId() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="resourceId", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """resourceId must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="resourceId", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)""",
})
self.__resourceId = t
if hasattr(self, '_set'):
self._set()
def _unset_resourceId(self):
self.__resourceId = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="resourceId", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)
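    # Hedged usage sketch (not part of the generated bindings): as the docstrings above
    # note, a backend that needs to populate these leaves does so by calling the private
    # setters directly, e.g. obj._set_resourceId("example-resource-id")  # value is hypothetical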
def _get_nativeConditionType(self):
"""
Getter method for nativeConditionType, mapped from YANG variable /bncFilteredAlarm/alarm/attributes/nativeConditionType (string)
"""
return self.__nativeConditionType
def _set_nativeConditionType(self, v, load=False):
"""
Setter method for nativeConditionType, mapped from YANG variable /bncFilteredAlarm/alarm/attributes/nativeConditionType (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_nativeConditionType is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_nativeConditionType() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="nativeConditionType", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """nativeConditionType must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="nativeConditionType", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)""",
})
self.__nativeConditionType = t
if hasattr(self, '_set'):
self._set()
def _unset_nativeConditionType(self):
self.__nativeConditionType = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="nativeConditionType", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)
def _get_conditionSeverity(self):
"""
Getter method for conditionSeverity, mapped from YANG variable /bncFilteredAlarm/alarm/attributes/conditionSeverity (string)
"""
return self.__conditionSeverity
def _set_conditionSeverity(self, v, load=False):
"""
Setter method for conditionSeverity, mapped from YANG variable /bncFilteredAlarm/alarm/attributes/conditionSeverity (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_conditionSeverity is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_conditionSeverity() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="conditionSeverity", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """conditionSeverity must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="conditionSeverity", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)""",
})
self.__conditionSeverity = t
if hasattr(self, '_set'):
self._set()
def _unset_conditionSeverity(self):
self.__conditionSeverity = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="conditionSeverity", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)
def _get_serviceAffecting(self):
"""
Getter method for serviceAffecting, mapped from YANG variable /bncFilteredAlarm/alarm/attributes/serviceAffecting (string)
"""
return self.__serviceAffecting
def _set_serviceAffecting(self, v, load=False):
"""
Setter method for serviceAffecting, mapped from YANG variable /bncFilteredAlarm/alarm/attributes/serviceAffecting (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_serviceAffecting is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_serviceAffecting() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="serviceAffecting", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """serviceAffecting must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="serviceAffecting", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)""",
})
self.__serviceAffecting = t
if hasattr(self, '_set'):
self._set()
def _unset_serviceAffecting(self):
self.__serviceAffecting = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="serviceAffecting", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)
def _get_manualClearable(self):
"""
Getter method for manualClearable, mapped from YANG variable /bncFilteredAlarm/alarm/attributes/manualClearable (string)
"""
return self.__manualClearable
def _set_manualClearable(self, v, load=False):
"""
Setter method for manualClearable, mapped from YANG variable /bncFilteredAlarm/alarm/attributes/manualClearable (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_manualClearable is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_manualClearable() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="manualClearable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """manualClearable must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="manualClearable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)""",
})
self.__manualClearable = t
if hasattr(self, '_set'):
self._set()
def _unset_manualClearable(self):
self.__manualClearable = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="manualClearable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)
def _get_additionalText(self):
"""
Getter method for additionalText, mapped from YANG variable /bncFilteredAlarm/alarm/attributes/additionalText (string)
"""
return self.__additionalText
def _set_additionalText(self, v, load=False):
"""
Setter method for additionalText, mapped from YANG variable /bncFilteredAlarm/alarm/attributes/additionalText (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_additionalText is considered a private
method. Backends looking to populate this variable should
do so by calling thisObj._set_additionalText() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="additionalText", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """additionalText must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="additionalText", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)""",
})
self.__additionalText = t
if hasattr(self, '_set'):
self._set()
def _unset_additionalText(self):
self.__additionalText = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="additionalText", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)
def _get_firstRaiseTime(self):
"""
Getter method for firstRaiseTime, mapped from YANG variable /bncFilteredAlarm/alarm/attributes/firstRaiseTime (string)
"""
return self.__firstRaiseTime
def _set_firstRaiseTime(self, v, load=False):
"""
Setter method for firstRaiseTime, mapped from YANG variable /bncFilteredAlarm/alarm/attributes/firstRaiseTime (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_firstRaiseTime is considered a private
method. Backends looking to populate this variable should
do so by calling thisObj._set_firstRaiseTime() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="firstRaiseTime", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """firstRaiseTime must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="firstRaiseTime", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)""",
})
self.__firstRaiseTime = t
if hasattr(self, '_set'):
self._set()
def _unset_firstRaiseTime(self):
self.__firstRaiseTime = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="firstRaiseTime", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)
def _get_lastRaiseTime(self):
"""
Getter method for lastRaiseTime, mapped from YANG variable /bncFilteredAlarm/alarm/attributes/lastRaiseTime (string)
"""
return self.__lastRaiseTime
def _set_lastRaiseTime(self, v, load=False):
"""
Setter method for lastRaiseTime, mapped from YANG variable /bncFilteredAlarm/alarm/attributes/lastRaiseTime (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_lastRaiseTime is considered a private
method. Backends looking to populate this variable should
do so by calling thisObj._set_lastRaiseTime() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="lastRaiseTime", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lastRaiseTime must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="lastRaiseTime", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)""",
})
self.__lastRaiseTime = t
if hasattr(self, '_set'):
self._set()
def _unset_lastRaiseTime(self):
self.__lastRaiseTime = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="lastRaiseTime", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)
def _get_numberOfOccurrences(self):
"""
Getter method for numberOfOccurrences, mapped from YANG variable /bncFilteredAlarm/alarm/attributes/numberOfOccurrences (uint32)
"""
return self.__numberOfOccurrences
def _set_numberOfOccurrences(self, v, load=False):
"""
Setter method for numberOfOccurrences, mapped from YANG variable /bncFilteredAlarm/alarm/attributes/numberOfOccurrences (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_numberOfOccurrences is considered a private
method. Backends looking to populate this variable should
do so by calling thisObj._set_numberOfOccurrences() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="numberOfOccurrences", | |
of the time series (useful when multiple time series are run simultaneously),
and the predictive model used.
"""
assert (
model in self.available_models
), f"Requested model {model} not currently supported. Please choose one from: {self.available_models}"
if model_parameters is None:
model_parameters = self.parameter_type[model]()
assert isinstance(
model_parameters, self.parameter_type[model]
), f"Expected parameter type {self.parameter_type[model]}, but got {model_parameters}"
if choose_priors:
changepoint_prior, model_parameters = self._choose_priors(model, model_parameters)
if getattr(model_parameters, "data", 0) is None:
model_parameters.data = self.data
logging.debug(f"Newest model parameters: {model_parameters}")
if not self.data.is_univariate() and not self.models[model].is_multivariate():
msg = "Model {model.name} support univariate time series, but get {type}.".format(
model=model,
type=type(self.data.value)
)
logging.error(msg)
raise ValueError(msg)
# parameters_dict = dataclasses.asdict(model_parameters)
# pyre-fixme[45]: Cannot instantiate abstract class `_PredictiveModel` with `__init__`, `is_multivariate`, `pred_mean` and 4 additional abstract methods.Pyre
underlying_model = self.models[model](data=self.data, parameters=model_parameters)
underlying_model.setup()
logging.debug(f"Creating detector with lag {lag} and debug option {debug}.")
bocpd = _BayesOnlineChangePoint(data=self.data, lag=lag, debug=debug, agg_cp=agg_cp)
logging.debug(
f"Running .detector() with model {underlying_model}, threshold {threshold}, changepoint prior {changepoint_prior}."
)
detector_results_all = bocpd.detector(
model=underlying_model,
threshold=threshold,
changepoint_prior=changepoint_prior,
)
self.detected_flag = True
change_points = []
for ts_name, detector_results in detector_results_all.items():
change_indices = detector_results["change_points"]
change_probs = detector_results["change_prob"]
self.change_prob[ts_name] = change_probs
self._run_length_prob[ts_name] = detector_results["run_length_prob"]
logging.debug(
f"Obtained {len(change_indices)} change points from underlying model in ts={ts_name}."
)
for cp_index in change_indices:
cp_time = self.data.time.values[cp_index]
cp = TimeSeriesChangePoint(
start_time=cp_time,
end_time=cp_time,
confidence=change_probs[cp_index],
)
bocpd_metadata = BOCPDMetadata(model=model, ts_name=ts_name)
change_points.append((cp, bocpd_metadata))
logging.debug(f"Returning {len(change_points)} change points to client in ts={ts_name}.")
return change_points
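# Illustrative usage sketch (not part of this class): the CSV path, the column
# layout, and the names `BOCPDetector` / `BOCPDModelType.NORMAL_KNOWN_MODEL`
# used for the enclosing detector class and model enum are assumptions for the
# example; only the detector() and plot() methods are taken from this module.
#
#   import pandas as pd
#   from kats.consts import TimeSeriesData
#
#   tsd = TimeSeriesData(pd.read_csv("my_series.csv"))   # columns: time, value
#   detector = BOCPDetector(tsd)
#   changepoints = detector.detector(model=BOCPDModelType.NORMAL_KNOWN_MODEL)
#   detector.plot(changepoints)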
def plot(
self,
change_points: List[Tuple[TimeSeriesChangePoint, BOCPDMetadata]],
ts_names: Optional[List[str]] = None
) -> None:
"""Plots the change points, along with the time series.
Use this function to visualize the results of the changepoint detection.
Args:
change_points: List of changepoints, which are the return value of the detector() function.
ts_names: List of names of the time series, useful in case multiple time series are used.
Returns:
None.
"""
# TODO note: Once D23226664 lands, replace this with self.data.time_col_name
time_col_name = 'time'
# Group changepoints together
change_points_per_ts = self.group_changepoints_by_timeseries(change_points)
ts_names = ts_names or list(change_points_per_ts.keys())
data_df = self.data.to_dataframe()
for ts_name in ts_names:
ts_changepoints = change_points_per_ts[ts_name]
plt.plot(data_df[time_col_name].values, data_df[ts_name].values)
logging.info(f"Plotting {len(ts_changepoints)} change points for {ts_name}.")
if len(ts_changepoints) == 0:
logging.warning("No change points detected!")
for change in ts_changepoints:
plt.axvline(x=change[0].start_time, color="red")
plt.show()
def _choose_priors(self, model: BOCPDModelType,
params: BOCPDModelParameters) -> Tuple[Any, BOCPDModelParameters]:
"""Chooses priors which are defined by the model parameters.
Chooses priors which are defined by the model parameters.
All BOCPDModelParameters classes have a changepoint prior to iterate on.
Other parameters can be added to specific models.
This function runs a parameter search using the hyperparameter tuning library
to get the best hyperparameters.
Args:
model: Type of predictive model.
params: Parameters class, containing list of values of the parameters
on which to run hyperparameter tuning.
Returns:
best_cp_prior: best value of the prior on the changepoint probabilities.
params: parameter dictionary, where the selected values are set.
"""
# test these changepoint_priors
param_dict = params.prior_choice
# which parameter searching method are we using
search_method = params.search_method
# pick search iterations and method based on definition
if search_method == 'random':
search_N, SearchMethod = 3, SearchMethodEnum.RANDOM_SEARCH_UNIFORM
elif search_method == 'gridsearch':
search_N, SearchMethod = 1, SearchMethodEnum.GRID_SEARCH
else:
raise Exception(f"Search method has to be 'random' or 'gridsearch' but it is {search_method}!")
# construct the custom parameters for the HPT library
custom_parameters = [
{"name": k,
"type": "choice",
"values": v,
"value_type": "float",
"is_ordered": False
} for k, v in param_dict.items()
]
eval_fn = self._get_eval_function(model, params)
# Use the HPT library
seed_value = 100
ts_tuner = tpt.SearchMethodFactory.create_search_method(
parameters=custom_parameters,
selected_search_method=SearchMethod,
seed=seed_value
)
for _ in range(search_N):
ts_tuner.generate_evaluate_new_parameter_values(
evaluation_function=eval_fn, arm_count=4
)
scores_df = (
ts_tuner.list_parameter_value_scores()
)
scores_df = scores_df.sort_values(by='mean', ascending=False)
best_params = scores_df.parameters.values[0]
params.set_prior(best_params)
best_cp_prior = best_params['cp_prior']
return best_cp_prior, params
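# Shape of the structures built in _choose_priors (values illustrative only):
#   param_dict        -> {'cp_prior': [0.001, 0.01, 0.1]}
#   custom_parameters -> [{'name': 'cp_prior', 'type': 'choice',
#                          'values': [0.001, 0.01, 0.1],
#                          'value_type': 'float', 'is_ordered': False}]
# With search_method == 'random' the tuner evaluates 3 rounds of 4 arms each;
# with 'gridsearch' it makes a single exhaustive pass over the grid.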
def _get_eval_function(self, model: BOCPDModelType,
model_parameters: BOCPDModelParameters):
"""
generates the objective function evaluated by hyperparameter
tuning library for choosing the priors
"""
def eval_fn(params_to_eval: Dict[str, float]) -> float:
changepoint_prior = params_to_eval['cp_prior']
model_parameters.set_prior(params_to_eval)
logging.debug(model_parameters)
logging.debug(params_to_eval)
# pyre-fixme[45]: Cannot instantiate abstract class `_PredictiveModel` with `__init__`, `is_multivariate`, `pred_mean` and 4 additional abstract methods.Pyre
underlying_model = self.models[model](data=self.data, parameters=model_parameters)
change_point = _BayesOnlineChangePoint(data=self.data, lag=3, debug=False)
change_point.detector(model=underlying_model,
changepoint_prior=changepoint_prior,
threshold=0.4)
post_pred = np.mean(change_point.get_posterior_predictive())
return post_pred
return eval_fn
def group_changepoints_by_timeseries(
self,
change_points: List[Tuple[TimeSeriesChangePoint, BOCPDMetadata]]
) -> Dict[str, List[Tuple[TimeSeriesChangePoint, BOCPDMetadata]]]:
"""Helper function to group changepoints by time series.
For multivariate inputs, all changepoints are output in
a list and the time series they correspond to is referenced
in the metadata. This function is a helper function to
group these changepoints by time series.
Args:
change_points: List of changepoints, with metadata containing the time
series names. This is the return value of the detector() method.
Returns:
Dictionary, with time series names, and their corresponding changepoints.
"""
if self.data.is_univariate():
data_df = self.data.to_dataframe()
ts_names = [x for x in data_df.columns if x != 'time']
else:
# Multivariate
ts_names = self.data.value.columns
change_points_per_ts = {}
for ts_name in ts_names:
change_points_per_ts[ts_name] = []
for cp in change_points:
change_points_per_ts[cp[1].ts_name].append(cp)
return dict(change_points_per_ts)
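# Example of the returned mapping for a bivariate input with columns
# 'value1' and 'value2' (column names illustrative):
#   {'value1': [(TimeSeriesChangePoint(...), BOCPDMetadata(model=..., ts_name='value1')), ...],
#    'value2': [(TimeSeriesChangePoint(...), BOCPDMetadata(model=..., ts_name='value2')), ...]}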
def get_change_prob(self) -> Dict[str, np.ndarray]:
"""Returns the probability of being a changepoint.
Args:
None.
Returns:
The changepoint probability for every point in the time series.
The return type is a dict, with the name of the timeseries
as the key, and the value is an array of probabilities
of the same length as the timeseries data.
"""
if not self.detected_flag:
raise ValueError('detector needs to be run before getting prob')
return self.change_prob
def get_run_length_matrix(self) -> Dict[str, np.ndarray]:
"""Returns the entire run-time posterior.
Args:
None.
Returns:
The return type is a dict, with the name of the timeseries
as the key, and the value is an array of probabilities
of the same length as the timeseries data.
"""
if not self.detected_flag:
raise ValueError('detector needs to be run before getting the run-length matrix')
return self._run_length_prob
class _BayesOnlineChangePoint(Detector):
"""The underlying implementation of the BOCPD algorithm.
This is called by the class BayesianOnlineChangepoint. The user should
call the top level class, and not this one.
Given a univariate time series, this class
performs changepoint detection, i.e. it tells
us when the time series shows a change. This is online,
which means it gives the best estimate based on a
lookahead number of time steps (which is the lag).
This faithfully implements the algorithm in
Adams & McKay, 2007. "Bayesian Online Changepoint Detection"
https://arxiv.org/abs/0710.3742
The basic idea is to see whether the new values are
improbable when compared to a Bayesian predictive model
built from the previous observations.
Attributes:
data: This is univariate time series data. We require more
than 10 points, otherwise it is not very meaningful to define
changepoints.
T: number of values in the time series data.
lag: This specifies how many time steps we will look ahead to
determine the change. There is a tradeoff in setting this parameter.
A small lag means we can detect a change quickly, which is important
in many applications. However, this also means we will make more
mistakes/have lower confidence, since we might mistake a spike for a change.
threshold: Threshold between 0 and 1. Probability values above this threshold
will be denoted as changepoint.
debug: This is a boolean. If set to true, this shows additional plots.
Currently, it shows a plot of the predicted mean and variance, after
lag steps, and the predictive probability of the next point. If the
results are unusual, the user should set it to true in order to
debug.
agg_cp: Empirically, aggregating the run-length posterior can give a
stronger signal for changepoint detection. When this parameter is
set to True, the posterior is computed by aggregating the run-length
posterior, taking the maximum values along the diagonals.
"""
rt_posterior: Optional[np.ndarray] = None
pred_mean_arr: Optional[np.ndarray] = None
pred_std_arr: Optional[np.ndarray] = None
next_pred_prob: Optional[np.ndarray] = None
def __init__(self, data: TimeSeriesData, lag: int = 10, debug: bool = False, agg_cp: bool = False):
self.data = data
self.T = data.value.shape[0]
self.lag = lag
self.threshold = None
self.debug = debug
self.agg_cp = agg_cp
# We use tensors for all data throughout; if the data is univariate
# then the last dimension is trivial. | |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""JAX implementation of CLRS baseline models."""
import functools
import os
import pickle
from typing import Dict, List, Optional, Tuple, Union
import chex
from clrs._src import decoders
from clrs._src import losses
from clrs._src import model
from clrs._src import nets
from clrs._src import probing
from clrs._src import samplers
from clrs._src import specs
import haiku as hk
import jax
import jax.numpy as jnp
import optax
_Array = chex.Array
_DataPoint = probing.DataPoint
_Features = samplers.Features
_FeaturesChunked = samplers.FeaturesChunked
_Feedback = samplers.Feedback
_Location = specs.Location
_Seed = jnp.ndarray
_Spec = specs.Spec
_Stage = specs.Stage
_Trajectory = samplers.Trajectory
_Type = specs.Type
_OutputClass = specs.OutputClass
class BaselineModel(model.Model):
"""Model implementation with selectable message passing algorithm."""
def __init__(
self,
spec: Union[_Spec, List[_Spec]],
dummy_trajectory: Union[List[_Feedback], _Feedback],
nb_heads: int = 1,
hidden_dim: int = 32,
kind: str = 'mpnn',
encode_hints: bool = False,
decode_hints: bool = True,
decode_diffs: bool = False,
use_lstm: bool = False,
learning_rate: float = 0.005,
checkpoint_path: str = '/tmp/clrs3',
freeze_processor: bool = False,
dropout_prob: float = 0.0,
name: str = 'base_model',
):
"""Constructor for BaselineModel.
The model consists of encoders, processor and decoders. It can train
and evaluate either a single algorithm or a set of algorithms; in the
latter case, a single processor is shared among all the algorithms, while
the encoders and decoders are separate for each algorithm.
Args:
spec: Either a single spec for one algorithm, or a list of specs for
multiple algorithms to be trained and evaluated.
dummy_trajectory: Either a single feedback batch, in the single-algorithm
case, or a list of feedback batches, in the multi-algorithm case, that
comply with the `spec` (or list of specs), to initialize network size.
nb_heads: Number of heads for GAT processors.
hidden_dim: Size of the hidden state of the model, i.e., size of the
message-passing vectors.
kind: Type of processor (see `processors.py`).
encode_hints: Whether to provide hints as model inputs.
decode_hints: Whether to provide hints as model outputs.
decode_diffs: Whether to predict masks within the model.
use_lstm: Whether to insert an LSTM after message passing.
learning_rate: Learning rate for training.
checkpoint_path: Path for loading/saving checkpoints.
freeze_processor: If True, the processor weights will be frozen and
only encoders and decoders (and, if used, the lstm) will be trained.
dropout_prob: Dropout rate in the message-passing stage.
name: Model name.
Raises:
ValueError: if `encode_hints=True` and `decode_hints=False`.
"""
super(BaselineModel, self).__init__(spec=spec)
if encode_hints and not decode_hints:
raise ValueError('`encode_hints=True`, `decode_hints=False` is invalid.')
self.decode_hints = decode_hints
self.decode_diffs = decode_diffs
self.checkpoint_path = checkpoint_path
self.name = name
self._freeze_processor = freeze_processor
self.opt = optax.adam(learning_rate)
self.nb_dims = []
if isinstance(dummy_trajectory, _Feedback):
assert len(self._spec) == 1
dummy_trajectory = [dummy_trajectory]
for traj in dummy_trajectory:
nb_dims = {}
for inp in traj.features.inputs:
nb_dims[inp.name] = inp.data.shape[-1]
for hint in traj.features.hints:
nb_dims[hint.name] = hint.data.shape[-1]
for outp in traj.outputs:
nb_dims[outp.name] = outp.data.shape[-1]
self.nb_dims.append(nb_dims)
self._create_net_fns(hidden_dim, encode_hints, kind,
use_lstm, dropout_prob, nb_heads)
self.params = None
self.opt_state = None
self.opt_state_skeleton = None
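# Minimal training-loop sketch (illustrative only): `spec`, `dummy_traj` and
# the `sampler` yielding Feedback batches are assumed to come from the clrs
# specs/samplers modules and are not defined in this file; only the
# init/feedback/predict methods of this class are real.
#
#   model = BaselineModel(spec, dummy_trajectory=dummy_traj, hidden_dim=128)
#   model.init(dummy_traj.features, seed=42)
#   rng = hk.PRNGSequence(42)
#   for _ in range(1000):
#       feedback = next(sampler)
#       loss = model.feedback(next(rng), feedback)
#   preds, aux = model.predict(next(rng), feedback.features)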
def _create_net_fns(self, hidden_dim, encode_hints, kind,
use_lstm, dropout_prob, nb_heads):
def _use_net(*args, **kwargs):
return nets.Net(self._spec, hidden_dim, encode_hints,
self.decode_hints, self.decode_diffs,
kind, use_lstm, dropout_prob,
nb_heads, self.nb_dims)(*args, **kwargs)
self.net_fn = hk.transform(_use_net)
self.net_fn_apply = jax.jit(self.net_fn.apply,
static_argnames=['repred', 'algorithm_index'])
def init(self, features: Union[_Features, List[_Features]], seed: _Seed):
if not isinstance(features, list):
assert len(self._spec) == 1
features = [features]
self.params = self.net_fn.init(jax.random.PRNGKey(seed), features, True, -1)
self.opt_state = self.opt.init(self.params)
# We will use the optimizer state skeleton for traversal when we
# want to avoid updating the state of params of untrained algorithms.
self.opt_state_skeleton = self.opt.init(jnp.zeros(1))
def feedback(self, rng_key: hk.PRNGSequence, feedback: _Feedback,
algorithm_index: Optional[int] = None) -> float:
"""Advance to the next task, incorporating any available feedback."""
if algorithm_index is None:
assert len(self._spec) == 1
algorithm_index = 0
self.params, self.opt_state, cur_loss = self.update(
rng_key, self.params, self.opt_state, feedback, algorithm_index)
return cur_loss
def predict(self, rng_key: hk.PRNGSequence, features: _Features,
algorithm_index: Optional[int] = None):
"""Model inference step."""
if algorithm_index is None:
assert len(self._spec) == 1
algorithm_index = 0
outs, hint_preds, diff_logits, gt_diff = self.net_fn_apply(
self.params, rng_key, [features],
repred=True, algorithm_index=algorithm_index)
return decoders.postprocess(self._spec[algorithm_index],
outs), (hint_preds, diff_logits, gt_diff)
def update(
self,
rng_key: hk.PRNGSequence,
params: hk.Params,
opt_state: optax.OptState,
feedback: _Feedback,
algorithm_index: Optional[int] = None,
) -> Tuple[hk.Params, optax.OptState, _Array]:
"""Model update step."""
if algorithm_index is None:
assert len(self._spec) == 1
algorithm_index = 0
def loss(params, rng_key, feedback):
"""Calculates model loss f(feedback; params)."""
(output_preds, hint_preds, diff_logits,
gt_diffs) = self.net_fn_apply(params, rng_key, [feedback.features],
repred=False,
algorithm_index=algorithm_index)
nb_nodes = _nb_nodes(feedback, is_chunked=False)
lengths = feedback.features.lengths
total_loss = 0.0
# Calculate output loss.
for truth in feedback.outputs:
total_loss += losses.output_loss(
truth=truth,
pred=output_preds[truth.name],
nb_nodes=nb_nodes,
)
# Optionally accumulate diff losses.
if self.decode_diffs:
total_loss += losses.diff_loss(
diff_logits=diff_logits,
gt_diffs=gt_diffs,
lengths=lengths,
)
# Optionally accumulate hint losses.
if self.decode_hints:
for truth in feedback.features.hints:
total_loss += losses.hint_loss(
truth=truth,
preds=[x[truth.name] for x in hint_preds],
gt_diffs=gt_diffs,
lengths=lengths,
nb_nodes=nb_nodes,
decode_diffs=self.decode_diffs,
)
return total_loss
# Calculate and apply gradients.
assert algorithm_index >= 0
lss, grads = jax.value_and_grad(loss)(params, rng_key, feedback)
new_params, opt_state = self._update_params(params, grads, opt_state)
return new_params, opt_state, lss
def _update_params(self, params, grads, opt_state):
updates, opt_state = filter_null_grads(
grads, self.opt, opt_state, self.opt_state_skeleton)
if self._freeze_processor:
params_subset = _filter_processor(params)
updates_subset = _filter_processor(updates)
new_params = optax.apply_updates(params_subset, updates_subset)
new_params = hk.data_structures.merge(params, new_params)
else:
new_params = optax.apply_updates(params, updates)
return new_params, opt_state
def verbose_loss(self, feedback: _Feedback, extra_info) -> Dict[str, _Array]:
"""Gets verbose loss information."""
hint_preds, diff_logits, gt_diffs = extra_info
nb_nodes = _nb_nodes(feedback, is_chunked=False)
lengths = feedback.features.lengths
losses_ = {}
# Optionally accumulate diff losses.
if self.decode_diffs:
losses_.update(
losses.diff_loss(
diff_logits=diff_logits,
gt_diffs=gt_diffs,
lengths=lengths,
verbose=True,
))
# Optionally accumulate hint losses.
if self.decode_hints:
for truth in feedback.features.hints:
losses_.update(
losses.hint_loss(
truth=truth,
preds=hint_preds,
gt_diffs=gt_diffs,
lengths=lengths,
nb_nodes=nb_nodes,
decode_diffs=self.decode_diffs,
verbose=True,
))
return losses_
def restore_model(self, file_name: str, only_load_processor: bool = False):
"""Restore model from `file_name`."""
path = os.path.join(self.checkpoint_path, file_name)
with open(path, 'rb') as f:
restored_state = pickle.load(f)
if only_load_processor:
restored_params = _filter_processor(restored_state['params'])
else:
restored_params = restored_state['params']
self.params = hk.data_structures.merge(self.params, restored_params)
self.opt_state = restored_state['opt_state']
def save_model(self, file_name: str):
"""Save model (processor weights only) to `file_name`."""
os.makedirs(self.checkpoint_path, exist_ok=True)
to_save = {'params': self.params, 'opt_state': self.opt_state}
path = os.path.join(self.checkpoint_path, file_name)
with open(path, 'wb') as f:
pickle.dump(to_save, f)
class BaselineModelChunked(BaselineModel):
"""Model that processes time-chunked data.
Unlike `BaselineModel`, which processes full samples, `BaselineModelChunked`
processes fixed-timelength chunks of data. Each tensor of inputs and hints
has dimensions chunk_length x batch_size x ... The beginning of a new
sample within the chunk is signalled by a tensor called `is_first` of
dimensions chunk_length x batch_size.
The chunked model is intended for training. For validation and test, use
`BaselineModel`.
"""
def _create_net_fns(self, hidden_dim, encode_hints, kind,
use_lstm, dropout_prob, nb_heads):
def _use_net(*args, **kwargs):
return nets.NetChunked(
self._spec, hidden_dim, encode_hints,
self.decode_hints, self.decode_diffs,
kind, use_lstm, dropout_prob,
nb_heads, self.nb_dims)(*args, **kwargs)
self.net_fn = hk.transform(_use_net)
self.net_fn_apply = jax.jit(
functools.partial(self.net_fn.apply, init_mp_state=False),
static_argnames=['repred', 'algorithm_index'])
def _init_mp_state(self, features_list: List[_FeaturesChunked],
rng_key: _Array):
def _empty_mp_state():
return nets.MessagePassingStateChunked(
inputs=None, hints=None, is_first=None,
hint_preds=None, hiddens=None, lstm_state=None)
empty_mp_states = [_empty_mp_state() for _ in range(len(features_list))]
dummy_params = self.net_fn.init(
rng_key, features_list, empty_mp_states, False,
init_mp_state=True, algorithm_index=-1)
_, mp_states = self.net_fn.apply(
dummy_params, rng_key, features_list, empty_mp_states, False,
init_mp_state=True, algorithm_index=-1)
return mp_states
def init(self, features: Union[_FeaturesChunked, List[_FeaturesChunked]],
seed: _Seed):
if not isinstance(features, list):
assert len(self._spec) == 1
features = [features]
self.mp_states = self._init_mp_state(features,
jax.random.PRNGKey(seed))
self.params = self.net_fn.init(
jax.random.PRNGKey(seed), features, self.mp_states,
True, init_mp_state=False, algorithm_index=-1)
self.opt_state = self.opt.init(self.params)
# We will use the optimizer state skeleton for traversal when we
# want to avoid updating the state of params of untrained algorithms.
self.opt_state_skeleton = self.opt.init(jnp.zeros(1))
def predict(self, rng_key: hk.PRNGSequence, features: _FeaturesChunked,
algorithm_index: Optional[int] = None):
"""Inference not implemented. Chunked model intended for training only."""
raise NotImplementedError
def update(
self,
rng_key: hk.PRNGSequence,
params: hk.Params,
opt_state: optax.OptState,
feedback: _Feedback,
algorithm_index: Optional[int] = None,
) -> Tuple[hk.Params, optax.OptState, _Array]:
"""Model update step."""
if algorithm_index is None:
assert len(self._spec) == 1
algorithm_index = 0
def loss(params, rng_key, feedback):
((output_preds, hint_preds, diff_logits, gt_diffs),
mp_state) = self.net_fn_apply(params, | |
to the workflow\'s output folder)')
parser_update_stage.set_defaults(func=workflow_cli.update_stage)
register_parser(parser_update_stage, subparsers_action=subparsers_update, categories='workflow')
parser_update_member = subparsers_update.add_parser("member", help="Update the membership of a user in an org", description="Update the membership of a user in an org", prog="dx update member", parents=[stdout_args, env_args])
parser_update_member.add_argument("org_id", help="ID of the org")
parser_update_member.add_argument("username_or_user_id", help="Username or ID of user")
parser_update_member.add_argument("--level", choices=["ADMIN", "MEMBER"], help="The new org membership level of the specified user")
parser_update_member.add_argument("--allow-billable-activities", choices=["true", "false"], help='The new "allowBillableActivities" membership permission of the specified user in the org; default false if demoting the specified user from ADMIN to MEMBER')
parser_update_member.add_argument("--app-access", choices=["true", "false"], help='The new "appAccess" membership permission of the specified user in the org; default true if demoting the specified user from ADMIN to MEMBER')
parser_update_member.add_argument("--project-access", choices=["ADMINISTER", "CONTRIBUTE", "UPLOAD", "VIEW", "NONE"], help='The new default implicit maximum permission the specified user will receive to projects explicitly shared with the org; default CONTRIBUTE if demoting the specified user from ADMIN to MEMBER')
parser_update_member.set_defaults(func=update_membership)
register_parser(parser_update_member, subparsers_action=subparsers_update, categories="org")
parser_update_project = subparsers_update.add_parser("project",
help="Updates a specified project with the specified options",
description="", prog="dx update project",
parents=[stdout_args, env_args])
parser_update_project.add_argument('project_id', help="Project ID or project name")
parser_update_project.add_argument('--name', help="New project name")
parser_update_project.add_argument('--summary', help="Project summary")
parser_update_project.add_argument('--description', help="Project description")
parser_update_project.add_argument('--protected', choices=["true", "false"],
help="Whether the project should be PROTECTED")
parser_update_project.add_argument('--restricted', choices=["true", "false"],
help="Whether the project should be RESTRICTED")
parser_update_project.add_argument('--download-restricted', choices=["true", "false"],
help="Whether the project should be DOWNLOAD RESTRICTED")
parser_update_project.add_argument('--containsPHI', choices=["true"],
help="Flag to tell if project contains PHI")
parser_update_project.add_argument('--bill-to', help="Update the user or org ID of the billing account", type=str)
parser_update_project.set_defaults(func=update_project)
register_parser(parser_update_project, subparsers_action=subparsers_update, categories="metadata")
#####################################
# install
#####################################
parser_install = subparsers.add_parser('install', help='Install an app',
description='Install an app by name. To see a list of apps you can install, hit <TAB> twice after "dx install" or run "' + BOLD('dx find apps') + '" to see a list of available apps.', prog='dx install',
parents=[env_args])
install_app_action = parser_install.add_argument('app', help='ID or name of app to install')
install_app_action.completer = DXAppCompleter(installed=False)
parser_install.set_defaults(func=install)
register_parser(parser_install, categories='exec')
#####################################
# uninstall
#####################################
parser_uninstall = subparsers.add_parser('uninstall', help='Uninstall an app',
description='Uninstall an app by name.', prog='dx uninstall',
parents=[env_args])
uninstall_app_action = parser_uninstall.add_argument('app', help='ID or name of app to uninstall')
uninstall_app_action.completer = DXAppCompleter(installed=True)
parser_uninstall.set_defaults(func=uninstall)
register_parser(parser_uninstall, categories='exec')
#####################################
# run
#####################################
parser_run = subparsers.add_parser('run', help='Run an applet, app, or workflow', add_help=False,
description=(fill('Run an applet, app, or workflow. To see a list of executables you can run, hit <TAB> twice after "dx run" or run "' + BOLD('dx find apps') + '" or "' + BOLD('dx find globalworkflows') + '" to see a list of available apps and global workflows.') + '\n\n' + fill('If any inputs are required but not specified, an interactive mode for selecting inputs will be launched. Inputs can be set in multiple ways. Run "' + BOLD('dx run --input-help') + '" for more details.') + '\n\n' + fill('Run "' + BOLD('dx run --instance-type-help') + '" to see a list of specifications for computers available to run executables.')),
prog='dx run',
formatter_class=argparse.RawTextHelpFormatter,
parents=[exec_input_args, stdout_args, env_args, extra_args,
instance_type_arg, property_args, tag_args])
run_executable_action = parser_run.add_argument('executable',
help=fill('Name or ID of an applet, app, or workflow to run; must be provided if --clone is not set', width_adjustment=-24),
nargs="?", default="")
run_executable_action.completer = MultiCompleter([DXAppCompleter(),
DXPathCompleter(classes=['applet', 'workflow'], visibility="visible")])
parser_run.add_argument('-d', '--depends-on',
help=fill('ID of job, analysis, or data object that must be in the "done" or ' +
'"closed" state, as appropriate, before this executable can be run; ' +
'repeat as necessary (e.g. "--depends-on id1 ... --depends-on idN"). ' +
'Cannot be supplied when running workflows',
width_adjustment=-24),
action='append', type=str)
parser_run.add_argument('-h', '--help', help='show this help message and exit', nargs=0, action=runHelp)
parser_run.add_argument('--clone', help=fill('Job or analysis ID or name from which to use as default options (will use the exact same executable ID, destination project and folder, job input, instance type requests, and a similar name unless explicitly overridden by command-line arguments)', width_adjustment=-24))
parser_run.add_argument('--alias', '--version', dest='alias',
help=fill('Alias (tag) or version of the app to run (default: "default" if an app)', width_adjustment=-24))
parser_run.add_argument('--destination', '--folder', metavar='PATH', dest='folder', help=fill('The full project:folder path in which to output the results. By default, the current working directory will be used.', width_adjustment=-24))
parser_run.add_argument('--batch-folders', dest='batch_folders',
help=fill('Output results to separate folders, one per batch, using batch ID as the name of the output folder. The batch output folder location will be relative to the path set in --destination', width_adjustment=-24),
action='store_true')
parser_run.add_argument('--project', metavar='PROJECT',
help=fill('Project name or ID in which to run the executable. This can also ' +
'be specified together with the output folder in --destination.',
width_adjustment=-24))
parser_run.add_argument('--stage-output-folder', metavar=('STAGE_ID', 'FOLDER'),
help=fill('A stage identifier (ID, name, or index), and a folder path to ' +
'use as its output folder',
width_adjustment=-24),
nargs=2,
action='append',
default=[])
parser_run.add_argument('--stage-relative-output-folder', metavar=('STAGE_ID', 'FOLDER'),
help=fill('A stage identifier (ID, name, or index), and a relative folder ' +
'path to the workflow output folder to use as the output folder',
width_adjustment=-24),
nargs=2,
action='append',
default=[])
parser_run.add_argument('--rerun-stage', metavar='STAGE_ID', dest='rerun_stages',
help=fill('A stage (using its ID, name, or index) to rerun, or "*" to ' +
'indicate all stages should be rerun; repeat as necessary',
width_adjustment=-24),
action='append')
parser_run.add_argument('--name', help=fill('Name for the job (default is the app or applet name)', width_adjustment=-24))
parser_run.add_argument('--delay-workspace-destruction',
help=fill('Whether to keep the job\'s temporary workspace around for debugging purposes for 3 days after it succeeds or fails', width_adjustment=-24),
action='store_true')
parser_run.add_argument('--priority',
choices=['normal', 'high'],
help='Request a scheduling priority for all resulting jobs')
parser_run.add_argument('-y', '--yes', dest='confirm', help='Do not ask for confirmation', action='store_false')
parser_run.add_argument('--wait', help='Wait until the job is done before returning', action='store_true')
parser_run.add_argument('--watch', help="Watch the job after launching it; sets --priority high", action='store_true')
parser_run.add_argument('--allow-ssh', action='append', nargs='?', metavar='ADDRESS',
help=fill('Configure the job to allow SSH access; sets --priority high. If an argument is ' +
'supplied, it is interpreted as an IP or hostname mask to allow connections from, ' +
'e.g. "--allow-ssh 172.16.17.32 --allow-ssh berkeley.edu"',
width_adjustment=-24))
parser_run.add_argument('--ssh',
help=fill("Configure the job to allow SSH access and connect to it after launching; " +
"sets --priority high",
width_adjustment=-24),
action='store_true')
parser_run.add_argument('--ssh-proxy', metavar=('<address>:<port>'),
help=fill('SSH connect via proxy, argument supplied is used as the proxy address and port',
width_adjustment=-24))
parser_run.add_argument('--debug-on', action='append', choices=['AppError', 'AppInternalError', 'ExecutionError', 'All'],
help=fill("Configure the job to hold for debugging when any of the listed errors occur",
width_adjustment=-24))
parser_run.add_argument('--ignore-reuse',
help=fill("Disable job reuse for execution",
width_adjustment=-24),
action='store_true')
parser_run.add_argument('--batch-tsv', dest='batch_tsv', metavar="FILE",
help=fill('A file in tab separated value (tsv) format, with a subset ' +
'of the executable input arguments. A job will be launched ' +
'for each table row.',
width_adjustment=-24))
parser_run.add_argument('--input-help',
help=fill('Print help and examples for how to specify inputs',
width_adjustment=-24),
action=runInputHelp, nargs=0)
parser_run.set_defaults(func=run, verbose=False, help=False, details=None,
stage_instance_types=None, stage_folders=None)
register_parser(parser_run, categories='exec')
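    # Illustrative invocations handled by the parser above (the app/applet
    # names, project, and input field names are placeholders; the -i/--input
    # flag comes from the shared exec_input_args parent parser defined
    # elsewhere):
    #
    #   dx run app-my_app -i reads=file-xxxx --destination MyProject:/results --watch
    #   dx run my-applet --clone job-xxxx --priority high -y
    #   dx run my-workflow --batch-tsv batch.tsv --batch-folders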
#####################################
# watch
#####################################
parser_watch = subparsers.add_parser('watch', help='Watch logs of a job and its subjobs', prog='dx watch',
description='Monitors logging output from a running job',
parents=[env_args, no_color_arg])
parser_watch.add_argument('jobid', help='ID of the job to watch')
# .completer = TODO
parser_watch.add_argument('-n', '--num-recent-messages', help='Number of recent messages to get',
type=int, default=1024*256)
parser_watch.add_argument('--tree', help='Include the entire job tree', action='store_true')
parser_watch.add_argument('-l', '--levels', action='append', choices=["EMERG", "ALERT", "CRITICAL", "ERROR", "WARNING",
"NOTICE", "INFO", "DEBUG", "STDERR", "STDOUT"])
parser_watch.add_argument('--get-stdout', help='Extract stdout only from this job', action='store_true')
parser_watch.add_argument('--get-stderr', help='Extract stderr only from this job', action='store_true')
parser_watch.add_argument('--get-streams', help='Extract only stdout and stderr from this job', action='store_true')
parser_watch.add_argument('--no-timestamps', help='Omit timestamps from messages', action='store_false',
dest='timestamps')
parser_watch.add_argument('--job-ids', help='Print job ID in each message', action='store_true')
parser_watch.add_argument('--no-job-info', help='Omit job info and status updates', action='store_false',
dest='job_info')
parser_watch.add_argument('-q', '--quiet', help='Do not print extra info messages', action='store_true')
parser_watch.add_argument('-f', '--format', help='Message format. Available fields: job, level, msg, date')
parser_watch.add_argument('--no-wait', '--no-follow', action='store_false', dest='tail',
help='Exit after the first new message is received, instead of waiting for all logs')
parser_watch.set_defaults(func=watch)
register_parser(parser_watch, categories='exec')
#####################################
# ssh_config
#####################################
parser_ssh_config = subparsers.add_parser('ssh_config', help='Configure SSH keys for your DNAnexus account',
description='Configure SSH access credentials for your DNAnexus account',
prog='dx ssh_config',
parents=[env_args])
parser_ssh_config.add_argument('ssh_keygen_args', help='Command-line arguments to pass to ssh-keygen',
nargs=argparse.REMAINDER)
parser_ssh_config.add_argument('--revoke', help='Revoke SSH public key associated with your DNAnexus account; you will no longer be able to SSH into any jobs.', action='store_true')
parser_ssh_config.set_defaults(func=ssh_config)
register_parser(parser_ssh_config, categories='exec')
#####################################
# ssh
#####################################
parser_ssh = subparsers.add_parser('ssh', help='Connect to a running job via SSH',
description='Use an SSH client to connect to a job being executed on the DNAnexus ' +
'platform. The job must be launched using "dx run --allow-ssh" or ' +
'equivalent API options. Use "dx ssh_config" or the Profile page on ' +
'the DNAnexus website to configure SSH for your DNAnexus account.',
prog='dx ssh',
parents=[env_args])
parser_ssh.add_argument('job_id', help='Name of job to connect to')
parser_ssh.add_argument('ssh_args', help='Command-line arguments to pass to the SSH client', nargs=argparse.REMAINDER)
parser_ssh.add_argument('--ssh-proxy', metavar=('<address>:<port>'),
help='SSH connect via proxy, argument supplied is used as the proxy address and port')
# If ssh is run with the suppress-running-check flag, then dx won't prompt
# the user whether they would like to terminate the currently running job
# after they exit ssh. Among other things, this will allow users to set up
# ssh | |
# -*- coding: utf-8 -*-
"""
Network Control Theory Tutorial
@author: Johannes.Wiesner
"""
import json
import numpy as np
import pandas as pd
import networkx as nx
import dash
import dash_cytoscape as cyto
import dash_html_components as html
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_table
import itertools
import operator
import plotly.express as px
from network_control.utils import matrix_normalization
from network_control.energies import minimum_input,optimal_input
from nct_utils import state_trajectory
###############################################################################
## Set Default Data ###########################################################
###############################################################################
# set seed
np.random.seed(28)
# create a default adjacency matrix
A = np.array([[0, 1, 2, 1, 0, 0, 0, 0, 0],
[1, 0, 0, 3, 0, 0, 0, 0, 0],
[2, 0, 0, 4, 0, 0, 0, 0, 0],
[1, 3, 4, 0, 5, 0, 0, 0, 0],
[0, 0, 0, 5, 0, 6, 0, 0, 0],
[0, 0, 0, 0, 6, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 1, 1],
[0, 0, 0, 0, 0, 1, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 0]])
# create default (random) x0 and xf (between 0 and 1)
states_df = pd.DataFrame({'x0':np.round(np.random.rand(len(A)),2),'xf':np.round(np.random.rand(len(A)),2)})
states_df.reset_index(inplace=True)
###############################################################################
## Dash App ###################################################################
###############################################################################
## Topology-Modification ######################################################
# FIXME: transform networkx coordinates into dash/plotly space
# - Positions could however also be irrelevant here, because layout component
# from dash can also decide automatically over node positions
def from_A_to_elements(A):
'''Create a list of elements from a numpy adjacency matrix that can be
interpreted by dash_cytoscape.Cytoscape. The following steps are implemented from
https://community.plotly.com/t/converting-networkx-graph-object-into-cytoscape-format/23224/2
'''
# create graph object
G = nx.Graph(A)
# get node positions
pos = nx.spring_layout(G)
# convert networkx to cytoscape layout
cy = nx.readwrite.json_graph.cytoscape_data(G)
# Add the dictionary key 'label' to the node dict (this is a required attribute for dash)
# Delete the key 'value' from node dict (not needed)
# Delete the key 'name' from node dict (not needed)
# Add the dictionary key 'controller' to the node dict and set to True
for node_dict in cy['elements']['nodes']:
for _,d in node_dict.items():
d['label'] = d.pop('value')
del d['name']
d['controller'] = True
d['constrain'] = True
# NOTE: in cytoscape, all ids of the nodes must be strings, that's why
# we convert the edge ids also to strings (but check if this is really
# necessary)
for edge_dict in cy['elements']['edges']:
for _,d in edge_dict.items():
d['source'] = str(d['source'])
d['target'] = str(d['target'])
# Add the positions you got from as a value for data in the nodes portion of cy
# NOTE: This might be not necessary, as positions can be automatically
# determined in the layout attribute from cyto.Cytoscape (see FIXME above)
for n,p in zip(cy['elements']['nodes'],pos.values()):
n['pos'] = {'x':p[0],'y':p[1]}
# Take the results and write them to a list
elements = cy['elements']['nodes'] + cy['elements']['edges']
return elements
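# Shape of the resulting elements (values illustrative): node dicts carry
# 'id', 'label', 'controller', 'constrain' and a 'pos' entry, while edge
# dicts carry 'weight', 'source' and 'target', e.g.:
#   {'data': {'id': '0', 'label': 0, 'controller': True, 'constrain': True},
#    'pos': {'x': 0.12, 'y': -0.34}}
#   {'data': {'weight': 2, 'source': '0', 'target': '2'}}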
# NOTE: What's that utils module? https://dash.plotly.com/cytoscape/reference
def get_edge_dicts(elements):
'''Extract all edge dictionaries from elements. Edge dicts are
identified by their 'weight' key'''
edge_dicts = []
for d in elements:
if 'weight' in d['data']:
edge_dicts.append(d)
return edge_dicts
# NOTE: What's that utils module? https://dash.plotly.com/cytoscape/reference
def get_node_dicts(elements):
'''Extract all node dictionaries from elements. Node dicts are
identified by not having a 'weight' key'''
node_dicts = []
for d in elements:
if not 'weight' in d['data']:
node_dicts.append(d)
return node_dicts
def add_edges(selectedNodeData,edge_weight,elements):
'''For each combination of selected nodes, check if this combination is connected
by an edge. If not, create an edge dict for that combination and modify the elements list'''
edge_dicts = get_edge_dicts(elements)
edge_ids = [(d['data']['source'],d['data']['target']) for d in edge_dicts]
# get a list of ids of all nodes that user has currently selected and that
# should be connected by an edge. Sort the list alphanumerically (that ensures
# that we get only get combinations of source and target ids where source id
# is always the lower integer)
node_ids = [d['id'] for d in selectedNodeData]
node_ids.sort()
# create all pair-wise combinations of the selected nodes
source_and_target_ids = list(itertools.combinations(node_ids,2))
# for each source and target tuple, check if this edge already exists. If not,
# create a new edge dict and add it to elements
for (source,target) in source_and_target_ids:
if not (source,target) in edge_ids:
new_edge = {'data':{'weight':edge_weight,'source':source,'target':target}}
elements.append(new_edge)
return elements
def drop_edges(selectedEdgeData,elements):
'''Drop an input list of selected edges from cytoscape elements'''
# get source and target ids for all currently selected edges
source_and_target_ids = [(d['source'],d['target']) for d in selectedEdgeData]
# iterate over all dictionaries in elements, identify edge dicts by their
# 'weight' key and check if this edge dict belongs to the currently selected
# edges. If yes, add its index to the list of dictionaries to be dropped.
drop_these_dicts = []
for idx,d in enumerate(elements):
if 'weight' in d['data']:
if (d['data']['source'],d['data']['target']) in source_and_target_ids:
drop_these_dicts.append(idx)
# drop selected edge dictionaries from elements
elements = [i for j,i in enumerate(elements) if j not in drop_these_dicts]
return elements
def get_edge_min_max(elements):
'''Get minimum and maximum edge weights'''
# get all edges from elements
edge_dicts = get_edge_dicts(elements)
# find minimum and maximum weights
edge_weights = [d['data']['weight'] for d in edge_dicts]
weights_max = max(edge_weights)
weights_min = min(edge_weights)
return weights_min,weights_max
# FIXME: Delete this function if it's not necessary
def set_edge_width(elements,edge_weight):
'''Return the edge width for a single edge'''
weights_min,weights_max = get_edge_min_max(elements)
min_width = 1 # constant (selected by me)
max_width = 10 # constant (selected by me)
edge_width = min_width + ((max_width - min_width) / (weights_max - weights_min)) * (edge_weight - weights_min)
return edge_width
def set_edge_weights(selectedEdgeData,edge_weight,elements):
'''Modify the weights of the selected edges'''
# get source and target ids for all currently selected edges
source_and_target_ids = [(d['source'],d['target']) for d in selectedEdgeData]
# iterate over all dictionaries in elements, identify edge dicts by their
# 'weight' key and check again if this edge dict belongs to the currently selected
# edges. If yes, add its index to list of to be dropped dictionaires.
modify_these_dicts = []
for idx,d in enumerate(elements):
if 'weight' in d['data']:
if (d['data']['source'],d['data']['target']) in source_and_target_ids:
modify_these_dicts.append(idx)
# set the new weight on the selected edge dictionaries
for i in modify_these_dicts:
elements[i]['data']['weight'] = edge_weight
return elements
## Figure Plotting ###########################################################
def from_elements_to_A(elements):
'''Extract nodes and edges from current elements and convert them to
adjacency matrix
'''
# FIXME: This is inefficient, we iterate over the same list twice (see #8)
edge_dicts = get_edge_dicts(elements)
node_dicts = get_node_dicts(elements)
edges = [(d['data']['source'],d['data']['target'],d['data']['weight']) for d in edge_dicts]
nodes = [d['data']['id'] for d in node_dicts]
n_nodes = len(nodes)
A = np.zeros((n_nodes,n_nodes))
for edge in edges:
i = int(edge[0])
j = int(edge[1])
weight = edge[2]
A[i,j] = weight
A[j,i] = weight
return A
# FIXME: lots of repetitions to from_elements_to_S
def from_elements_to_B(elements):
'''Extract nodes from current elements, check which nodes are selected
as controllers and get a corresponding control matrix B that can be
fed to control_package functions.
'''
# get a list of all nodes from current elements (get their ID and their
# controller attribute)
node_dicts = get_node_dicts(elements)
nodes = [(d['data']['id'],d['data']['controller']) for d in node_dicts]
# sort nodes by their ids and get controller attribute
nodes.sort(key=operator.itemgetter(0))
c_attributes = [n[1] for n in nodes]
# create B matrix
B = np.zeros(shape=(len(nodes),len(nodes)))
for idx,c in enumerate(c_attributes):
if c == True:
B[idx,idx] = 1
return B
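# For example (illustrative): with three nodes where only nodes '0' and '2'
# have 'controller' set to True, the resulting control matrix is
#   B = [[1., 0., 0.],
#        [0., 0., 0.],
#        [0., 0., 1.]]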
# FIXME: lots of repetitions to from_elements_to_B
def from_elements_to_S(elements):
'''Extract nodes from current elements, check which nodes are selected
to be constrained and get a corresponding matrix S that can be
fed to control_package functions.
'''
# get a list of all nodes from current elements (get their ID and their
# controller attribute)
node_dicts = get_node_dicts(elements)
nodes = [(d['data']['id'],d['data']['constrain']) for d in node_dicts]
# sort nodes by their ids and get controller attribute
nodes.sort(key=operator.itemgetter(0))
constrain_attributes = [n[1] for n in nodes]
# create B matrix
S = np.zeros(shape=(len(nodes),len(nodes)))
for idx,constrain in enumerate(constrain_attributes):
if constrain == True:
S[idx,idx] = 1
return S
def get_state_trajectory_fig(A,x0,T,c):
'''Generate a plotly figure that plots a state trajectory using an input | |
# MIT License
#
# Copyright (c) 2020-2021 CNRS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from collections import Counter
from typing import Optional, Text, Tuple, Union
import numpy as np
import torch
from torch_audiomentations.core.transforms_interface import BaseWaveformTransform
from typing_extensions import Literal
from pyannote.audio.core.task import Problem, Resolution, Specifications, Task
from pyannote.audio.tasks.segmentation.mixins import SegmentationTaskMixin
from pyannote.audio.utils.loss import binary_cross_entropy, mse_loss
from pyannote.audio.utils.permutation import permutate
from pyannote.core import SlidingWindow
from pyannote.database import Protocol
class Segmentation(SegmentationTaskMixin, Task):
"""Speaker segmentation
Parameters
----------
protocol : Protocol
pyannote.database protocol
duration : float, optional
Chunks duration. Defaults to 2s.
max_num_speakers : int, optional
Force maximum number of speakers per chunk (must be at least 2).
Defaults to estimating it from the training set.
warm_up : float or (float, float), optional
Use that many seconds on the left- and rightmost parts of each chunk
to warm up the model. While the model does process those left- and right-most
parts, only the remaining central part of each chunk is used for computing the
loss during training, and for aggregating scores during inference.
Defaults to 0. (i.e. no warm-up).
balance: str, optional
When provided, training samples are sampled uniformly with respect to that key.
For instance, setting `balance` to "uri" will make sure that each file will be
equally represented in the training samples.
overlap: dict, optional
Controls how artificial chunks with overlapping speech are generated:
- "probability" key is the probability of artificial overlapping chunks. Setting
"probability" to 0.6 means that, on average, 40% of training chunks are "real"
chunks, while 60% are artificial chunks made out of the (weighted) sum of two
chunks. Defaults to 0.5.
- "snr_min" and "snr_max" keys control the minimum and maximum signal-to-noise
ratio between summed chunks, in dB. Default to 0.0 and 10.
weight: str, optional
        When provided, use this key as frame-wise weight in the loss function.
batch_size : int, optional
Number of training samples per batch. Defaults to 32.
num_workers : int, optional
Number of workers used for generating training samples.
Defaults to multiprocessing.cpu_count() // 2.
pin_memory : bool, optional
If True, data loaders will copy tensors into CUDA pinned
memory before returning them. See pytorch documentation
for more details. Defaults to False.
augmentation : BaseWaveformTransform, optional
torch_audiomentations waveform transform, used by dataloader
during training.
vad_loss : {"bce", "mse"}, optional
Add voice activity detection loss.
Reference
----------
<NAME> and <NAME>
"End-To-End Speaker Segmentation for Overlap-Aware Resegmentation."
Proc. Interspeech 2021
"""
ACRONYM = "seg"
OVERLAP_DEFAULTS = {"probability": 0.5, "snr_min": 0.0, "snr_max": 10.0}
def __init__(
self,
protocol: Protocol,
duration: float = 2.0,
max_num_speakers: int = None,
warm_up: Union[float, Tuple[float, float]] = 0.0,
overlap: dict = OVERLAP_DEFAULTS,
balance: Text = None,
weight: Text = None,
batch_size: int = 32,
num_workers: int = None,
pin_memory: bool = False,
augmentation: BaseWaveformTransform = None,
loss: Literal["bce", "mse"] = "bce",
vad_loss: Literal["bce", "mse"] = None,
):
super().__init__(
protocol,
duration=duration,
warm_up=warm_up,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=pin_memory,
augmentation=augmentation,
)
self.max_num_speakers = max_num_speakers
self.overlap = overlap
self.balance = balance
self.weight = weight
if loss not in ["bce", "mse"]:
raise ValueError("'loss' must be one of {'bce', 'mse'}.")
self.loss = loss
self.vad_loss = vad_loss
def setup(self, stage: Optional[str] = None):
super().setup(stage=stage)
if self.max_num_speakers is None:
# TODO: optimize this
# slide a window (with 1s step) over the whole training set
# and keep track of the number of speakers in each location
num_speakers = []
for file in self._train:
start = file["annotated"][0].start
end = file["annotated"][-1].end
window = SlidingWindow(
start=start, end=end, duration=self.duration, step=1.0,
)
for chunk in window:
num_speakers.append(len(file["annotation"].crop(chunk).labels()))
            # because there might be a few outliers, estimate the upper bound for the
# number of speakers as the 99th percentile
num_speakers, counts = zip(*list(Counter(num_speakers).items()))
num_speakers, counts = np.array(num_speakers), np.array(counts)
sorting_indices = np.argsort(num_speakers)
num_speakers = num_speakers[sorting_indices]
counts = counts[sorting_indices]
self.max_num_speakers = max(
2,
num_speakers[np.where(np.cumsum(counts) / np.sum(counts) > 0.99)[0][0]],
)
# now that we know about the number of speakers upper bound
# we can set task specifications
self.specifications = Specifications(
problem=Problem.MULTI_LABEL_CLASSIFICATION,
resolution=Resolution.FRAME,
duration=self.duration,
warm_up=self.warm_up,
classes=[f"speaker#{i+1}" for i in range(self.max_num_speakers)],
permutation_invariant=True,
)
def prepare_y(self, one_hot_y: np.ndarray):
"""Zero-pad segmentation targets
Parameters
----------
one_hot_y : (num_frames, num_speakers) np.ndarray
One-hot-encoding of current chunk speaker activity:
* one_hot_y[t, k] = 1 if kth speaker is active at tth frame
* one_hot_y[t, k] = 0 otherwise.
Returns
-------
padded_one_hot_y : (num_frames, self.max_num_speakers) np.ndarray
One-hot-encoding of current chunk speaker activity:
* one_hot_y[t, k] = 1 if kth speaker is active at tth frame
* one_hot_y[t, k] = 0 otherwise.
"""
num_frames, num_speakers = one_hot_y.shape
        if num_speakers > self.max_num_speakers:
            raise ValueError(
                f"Expected at most {self.max_num_speakers} speakers, "
                f"got {num_speakers}."
            )
if num_speakers < self.max_num_speakers:
one_hot_y = np.pad(
one_hot_y, ((0, 0), (0, self.max_num_speakers - num_speakers))
)
return one_hot_y
def val__getitem__(self, idx):
f, chunk = self._validation[idx]
sample = self.prepare_chunk(f, chunk, duration=self.duration, stage="val")
y, labels = sample["y"], sample.pop("labels")
# since number of speakers is estimated from the training set,
# we might encounter validation chunks that have more speakers.
# in that case, we arbitrarily remove last speakers
if y.shape[1] > self.max_num_speakers:
y = y[:, : self.max_num_speakers]
labels = labels[: self.max_num_speakers]
sample["y"] = self.prepare_y(y)
return sample
def segmentation_loss(
self,
permutated_prediction: torch.Tensor,
target: torch.Tensor,
weight: torch.Tensor = None,
) -> torch.Tensor:
"""Permutation-invariant segmentation loss
Parameters
----------
permutated_prediction : (batch_size, num_frames, num_classes) torch.Tensor
Permutated speaker activity predictions.
target : (batch_size, num_frames, num_speakers) torch.Tensor
Speaker activity.
weight : (batch_size, num_frames, 1) torch.Tensor, optional
Frames weight.
Returns
-------
seg_loss : torch.Tensor
Permutation-invariant segmentation loss
"""
if self.loss == "bce":
seg_loss = binary_cross_entropy(
permutated_prediction, target.float(), weight=weight
)
elif self.loss == "mse":
seg_loss = mse_loss(permutated_prediction, target.float(), weight=weight)
return seg_loss
def voice_activity_detection_loss(
self,
permutated_prediction: torch.Tensor,
target: torch.Tensor,
weight: torch.Tensor = None,
) -> torch.Tensor:
"""Voice activity detection loss
Parameters
----------
permutated_prediction : (batch_size, num_frames, num_classes) torch.Tensor
Speaker activity predictions.
target : (batch_size, num_frames, num_speakers) torch.Tensor
Speaker activity.
weight : (batch_size, num_frames, 1) torch.Tensor, optional
Frames weight.
Returns
-------
vad_loss : torch.Tensor
Voice activity detection loss.
"""
vad_prediction, _ = torch.max(permutated_prediction, dim=2, keepdim=True)
# (batch_size, num_frames, 1)
vad_target, _ = torch.max(target.float(), dim=2, keepdim=False)
# (batch_size, num_frames)
if self.vad_loss == "bce":
loss = binary_cross_entropy(vad_prediction, vad_target, weight=weight)
elif self.vad_loss == "mse":
loss = mse_loss(vad_prediction, vad_target, weight=weight)
return loss
def training_step(self, batch, batch_idx: int):
"""Compute permutation-invariant binary cross-entropy
Parameters
----------
batch : (usually) dict of torch.Tensor
Current batch.
batch_idx: int
Batch index.
Returns
-------
loss : {str: torch.tensor}
{"loss": loss}
"""
# forward pass
prediction = self.model(batch["X"])
batch_size, num_frames, _ = prediction.shape
# (batch_size, num_frames, num_classes)
# target
target = batch["y"]
permutated_prediction, _ = permutate(target, prediction)
# frames weight
weight_key = getattr(self, "weight", None)
weight = batch.get(
weight_key, torch.ones(batch_size, num_frames, 1, device=self.model.device),
)
# (batch_size, num_frames, 1)
# warm-up
warm_up_left = round(self.warm_up[0] / self.duration * num_frames)
weight[:, :warm_up_left] = 0.0
warm_up_right = round(self.warm_up[1] / self.duration * num_frames)
weight[:, num_frames - warm_up_right :] = 0.0
seg_loss = self.segmentation_loss(permutated_prediction, target, weight=weight)
self.model.log(
f"{self.ACRONYM}@train_seg_loss",
seg_loss,
on_step=False,
on_epoch=True,
prog_bar=False,
logger=True,
)
if self.vad_loss is None:
vad_loss = 0.0
else:
vad_loss = self.voice_activity_detection_loss(
permutated_prediction, target, weight=weight
)
self.model.log(
f"{self.ACRONYM}@train_vad_loss",
vad_loss,
on_step=False,
on_epoch=True,
prog_bar=False,
logger=True,
)
loss = seg_loss + vad_loss
self.model.log(
f"{self.ACRONYM}@train_loss",
loss,
on_step=False,
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.importer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import importer
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def _unknown_shape(op):
return [tensor_shape.unknown_shape() for _ in op.outputs]
# NOTE(cwhipkey): Dummy shape registration for ops used in the tests, since they
# don't have C++ op registrations on which to attach C++ shape fns.
ops.RegisterShape("If")(_unknown_shape)
ops.RegisterShape("Iff")(_unknown_shape)
ops.RegisterShape("Ii")(_unknown_shape)
ops.RegisterShape("Iif")(_unknown_shape)
ops.RegisterShape("Iii")(_unknown_shape)
ops.RegisterShape("In")(_unknown_shape)
ops.RegisterShape("Iri")(_unknown_shape)
ops.RegisterShape("None")(_unknown_shape)
ops.RegisterShape("Of")(_unknown_shape)
ops.RegisterShape("Oi")(_unknown_shape)
ops.RegisterShape("Oif")(_unknown_shape)
ops.RegisterShape("Oii")(_unknown_shape)
ops.RegisterShape("OpWithDefaultAttr")(_unknown_shape)
ops.RegisterShape("OpWithFutureDefaultAttr")(_unknown_shape)
ops.RegisterShape("Or")(_unknown_shape)
ops.RegisterShape("Otl")(_unknown_shape)
ops.RegisterShape("Unary")(_unknown_shape)
_op_list = op_def_pb2.OpList()
text_format.Merge("""
op {
name: 'None'
}
op {
name: 'Oi'
output_arg { name: 'a' type: DT_INT32 }
}
op {
name: 'Or'
output_arg { name: 'a' type: DT_INT32 is_ref: true }
}
op {
name: 'Of'
output_arg { name: 'a' type: DT_FLOAT }
}
op {
name: 'Ii'
input_arg { name: 'a' type: DT_INT32 }
}
op {
name: 'If'
input_arg { name: 'a' type: DT_FLOAT }
}
op {
name: 'Oii'
output_arg { name: 'a' type: DT_INT32 }
output_arg { name: 'b' type: DT_INT32 }
}
op {
name: 'Oif'
output_arg { name: 'a' type: DT_INT32 }
output_arg { name: 'b' type: DT_FLOAT }
}
op {
name: 'Iii'
input_arg { name: 'a' type: DT_INT32 }
input_arg { name: 'b' type: DT_INT32 }
}
op {
name: 'Iff'
input_arg { name: 'a' type: DT_FLOAT }
input_arg { name: 'b' type: DT_FLOAT }
}
op {
name: 'Iif'
input_arg { name: 'a' type: DT_INT32 }
input_arg { name: 'b' type: DT_FLOAT }
}
op {
name: 'Iri'
input_arg { name: 'a' type: DT_INT32 is_ref: true }
input_arg { name: 'b' type: DT_INT32 }
}
op {
name: 'In'
input_arg { name: 'a' number_attr: 'N' type_attr: 'T' }
attr { name: 'N' type: 'int' minimum: 1 }
attr { name: 'T' type: 'type' }
}
op {
name: 'Otl'
output_arg { name: 'a' type_list_attr: 't' }
attr { name: 'T' type: 'list(type)' minimum: 1 }
}
op {
name: 'Unary'
input_arg { name: 'a' type_attr: 'T' }
output_arg { name: 'b' type_attr: 'T' }
attr { name: 'T' type: 'type' }
}
op {
name: 'OpWithDefaultAttr'
output_arg { name: 'a' type: DT_INT32 }
attr { name: 'default_float' type: 'float' default_value { f: 123.0 } }
}
op {
name: 'OpWithFutureDefaultAttr'
}
""", _op_list)
op_def_registry.register_op_list(_op_list)
# NOTE(mrry): Dummy shape registrations for ops used in the tests.
for op_def in _op_list.op:
ops.RegisterShape(op_def.name)(None)
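# Minimal usage sketch (not part of the original test file): build a GraphDef from
# its text representation and import it into a fresh graph. "Oii" and "In" are the
# dummy ops registered above; the expected "import/" prefix matches testBasic below.
def _example_import_graph_def():
  graph_def = graph_pb2.GraphDef()
  text_format.Merge("""
      versions { producer: %d min_consumer: %d }
      node { name: 'A' op: 'Oii' }
      node { name: 'B' op: 'In'
             attr { key: 'N' value { i: 2 } }
             attr { key: 'T' value { type: DT_INT32 } }
             input: 'A:0' input: 'A:1' }
      """ % (versions.GRAPH_DEF_VERSION,
             versions.GRAPH_DEF_VERSION_MIN_CONSUMER), graph_def)
  with ops.Graph().as_default():
    a, b = importer.import_graph_def(
        graph_def, return_elements=["A", "B"], name="import")
    return a.name, b.name  # ("import/A", "import/B")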
class ImportGraphDefTest(test.TestCase):
def _MakeGraphDef(self,
text,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER):
text = "versions: { producer: %d min_consumer: %d };\n%s" % (producer,
min_consumer,
text)
ret = graph_pb2.GraphDef()
text_format.Merge(text, ret)
return ret
def testBasic(self):
with ops.Graph().as_default():
a, b, c, d = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oif' }
node { name: 'B' op: 'Otl'
attr { key: 't'
value { list { type: DT_INT32 type: DT_FLOAT } } } }
node { name: 'C' op: 'In'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'In'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_FLOAT } }
input: 'A:1' input: 'B:1' }
"""),
return_elements=["A", "B", "C", "D"],
name="import")
# Assert that the import process creates distinct tensors.
self.assertNotEqual(a.outputs[0].name, a.outputs[1].name)
self.assertNotEqual(b.outputs[0].name, b.outputs[1].name)
self.assertNotEqual(a.outputs[0].name, b.outputs[0].name)
self.assertNotEqual(a.outputs[0].name, b.outputs[1].name)
self.assertNotEqual(a.outputs[1].name, b.outputs[0].name)
self.assertNotEqual(a.outputs[1].name, b.outputs[1].name)
# Assert that the ops are connected according to the GraphDef topology.
self.assertEqual(c.inputs[0], a.outputs[0])
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[1])
self.assertEqual(d.inputs[1], b.outputs[1])
# Check the types of the returned ops and tensors.
self.assertEqual(a.type, "Oif")
self.assertEqual(b.type, "Otl")
self.assertEqual(c.type, "In")
self.assertEqual(d.type, "In")
self.assertEqual(a.outputs[0].dtype, dtypes.int32)
self.assertEqual(a.outputs[1].dtype, dtypes.float32)
self.assertEqual(b.outputs[0].dtype, dtypes.int32)
self.assertEqual(b.outputs[1].dtype, dtypes.float32)
# Check the names of the returned ops.
self.assertEqual(a.name, "import/A")
self.assertEqual(b.name, "import/B")
self.assertEqual(c.name, "import/C")
self.assertEqual(d.name, "import/D")
# Check that the op_def is still available.
self.assertNotEqual(None, a.op_def)
def testInputMap(self):
with ops.Graph().as_default():
feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
feed_b_1 = constant_op.constant(1, dtype=dtypes.int32)
a, b, c, d = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oii' }
node { name: 'B' op: 'Oii' }
node { name: 'C' op: 'In'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'In'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:1' input: 'B:1' }
"""),
input_map={"A:0": feed_a_0,
"B:1": feed_b_1},
return_elements=["A", "B", "C", "D"])
self.assertEqual(c.inputs[0], feed_a_0)
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[1])
self.assertEqual(d.inputs[1], feed_b_1)
def testInputMapBytes(self):
with ops.Graph().as_default():
feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
feed_b_1 = constant_op.constant(1, dtype=dtypes.int32)
a, b, c, d = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oii' }
node { name: 'B' op: 'Oii' }
node { name: 'C' op: 'In'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'In'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:1' input: 'B:1' }
"""),
input_map={b"A:0": feed_a_0,
b"B:1": feed_b_1},
return_elements=[b"A", b"B", b"C", b"D"])
self.assertEqual(c.inputs[0], feed_a_0)
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[1])
self.assertEqual(d.inputs[1], feed_b_1)
def testInputMapUnicode(self):
with ops.Graph().as_default():
feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
feed_b_1 = constant_op.constant(1, dtype=dtypes.int32)
a, b, c, d = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oii' }
node { name: 'B' op: 'Oii' }
node { name: 'C' op: 'In'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'In'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:1' input: 'B:1' }
"""),
input_map={u"A:0": feed_a_0,
u"B:1": feed_b_1},
return_elements=[u"A", u"B", u"C", u"D"])
self.assertEqual(c.inputs[0], feed_a_0)
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[1])
self.assertEqual(d.inputs[1], feed_b_1)
def testImplicitZerothOutput(self):
with ops.Graph().as_default():
a, b = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oii' }
node { name: 'B' op: 'Ii' input: 'A' }
"""),
return_elements=["A", "B"])
self.assertEqual(b.inputs[0], a.outputs[0])
def testInputMapImplicitZerothOutput(self):
with ops.Graph().as_default():
feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
b, = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oii' }
node { name: 'B' op: 'Ii' input: 'A:0' }
"""),
input_map={"A": feed_a_0},
return_elements=["B"])
self.assertEqual(b.inputs[0], feed_a_0)
def testWithControlDependency(self):
with ops.Graph().as_default():
a, b = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
node { name: 'B' op: 'None' input: '^A' }
"""),
return_elements=["A", "B"])
self.assertEqual(b.control_inputs, [a])
def testWithRefs(self):
with ops.Graph().as_default():
a, b, c, d = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Or' }
node { name: 'B' op: 'Oi' }
node { name: 'C' op: 'Iii' input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'Iri' input: 'A:0' input: 'B:0' }
"""),
return_elements=["A", "B", "C", "D"])
self.assertEqual(c.inputs[0], a.outputs[0])
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[0])
self.assertEqual(d.inputs[1], b.outputs[0])
self.assertEqual(a.outputs[0].dtype, dtypes.int32_ref)
self.assertEqual(c._input_dtypes, [dtypes.int32, dtypes.int32])
self.assertEqual(c.outputs, [])
self.assertEqual(d._input_dtypes, [dtypes.int32_ref, dtypes.int32])
self.assertEqual(d.outputs, [])
def testCyclic(self):
with ops.Graph().as_default():
a, b = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Unary'
attr { key: 'T' value { type: DT_INT32 } } input: 'B:0' }
node { name: 'B' op: 'Unary'
attr { key: 'T' value { type: DT_INT32 } } input: 'A:0' }
"""),
return_elements=["A", "B"])
self.assertEqual(a.inputs[0], b.outputs[0])
self.assertEqual(b.inputs[0], a.outputs[0])
| |
# Repository: viveklam/beep
# Copyright 2019 Toyota Research Institute. All rights reserved.
"""Unit tests related to cycler run data structures"""
import json
import os
import subprocess
import unittest
import boto3
import numpy as np
import pandas as pd
from botocore.exceptions import NoRegionError, NoCredentialsError
from beep import MODULE_DIR
from beep.structure import RawCyclerRun, ProcessedCyclerRun, \
process_file_list_from_json, EISpectrum, get_project_sequence, \
get_protocol_parameters, get_diagnostic_parameters, \
determine_paused
from monty.serialization import loadfn, dumpfn
from monty.tempfile import ScratchDir
from beep.utils import os_format
import matplotlib.pyplot as plt
BIG_FILE_TESTS = os.environ.get("BEEP_BIG_TESTS", False)
SKIP_MSG = "Tests requiring large files with diagnostic cycles are disabled, set BIG_FILE_TESTS to run full tests"
TEST_DIR = os.path.dirname(__file__)
TEST_FILE_DIR = os.path.join(TEST_DIR, "test_files")
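# Condensed usage sketch (not part of the original test suite): the structuring flow
# exercised by the tests below, using the arbin file from setUp as an example input.
def _example_structuring_flow():
    raw = RawCyclerRun.from_file(
        os.path.join(TEST_FILE_DIR, "2017-12-04_4_65C-69per_6C_CH29.csv"))
    # cycle-level summary and voltage-interpolated cycles, as in the tests below
    summary = raw.get_summary(nominal_capacity=4.7, full_fast_charge=0.8)
    interpolated = raw.get_interpolated_cycles()
    # full processed structure that can be serialized with monty's dumpfn
    processed = raw.to_processed_cycler_run()
    return summary, interpolated, processed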
class RawCyclerRunTest(unittest.TestCase):
def setUp(self):
self.arbin_bad = os.path.join(TEST_FILE_DIR, "2017-05-09_test-TC-contact_CH33.csv")
self.arbin_file = os.path.join(TEST_FILE_DIR, "2017-12-04_4_65C-69per_6C_CH29.csv")
self.maccor_file = os.path.join(TEST_FILE_DIR, "xTESLADIAG_000019_CH70.070")
self.maccor_file_w_diagnostics = os.path.join(TEST_FILE_DIR, "xTESLADIAG_000020_CH71.071")
self.maccor_file_w_parameters = os.path.join(TEST_FILE_DIR, "PreDiag_000287_000128.092")
self.maccor_file_timezone = os.path.join(TEST_FILE_DIR, "PredictionDiagnostics_000109_tztest.010")
self.maccor_file_timestamp = os.path.join(TEST_FILE_DIR, "PredictionDiagnostics_000151_test.052")
self.maccor_file_paused = os.path.join(TEST_FILE_DIR, "PredictionDiagnostics_000151_paused.052")
self.indigo_file = os.path.join(TEST_FILE_DIR, "indigo_test_sample.h5")
self.biologic_file = os.path.join(TEST_FILE_DIR, "raw", "biologic_test_file_short.mpt")
def test_serialization(self):
smaller_run = RawCyclerRun.from_file(self.arbin_bad)
with ScratchDir('.'):
dumpfn(smaller_run, "smaller_cycler_run.json")
resurrected = loadfn("smaller_cycler_run.json")
pd.testing.assert_frame_equal(smaller_run.data, resurrected.data, check_dtype=True)
def test_ingestion_maccor(self):
raw_cycler_run = RawCyclerRun.from_maccor_file(self.maccor_file, include_eis=False)
# Simple test of whether or not correct number of columns is parsed for data/metadata
self.assertEqual(set(raw_cycler_run.metadata.keys()),
{"barcode", "_today_datetime", "start_datetime",
"filename", "protocol", "channel_id"})
self.assertEqual(70, raw_cycler_run.metadata['channel_id'])
# self.assertIsNotNone(raw_cycler_run.eis)
# Test filename recognition
raw_cycler_run = RawCyclerRun.from_file(self.maccor_file)
self.assertEqual(set(raw_cycler_run.metadata.keys()),
{"barcode", "_today_datetime", "start_datetime",
"filename", "protocol", "channel_id"})
# Quick test to see whether columns get recasted
self.assertTrue({"data_point", "cycle_index", "step_index", "voltage", "temperature",
"current", "charge_capacity", "discharge_capacity"} < set(raw_cycler_run.data.columns))
def test_timezone_maccor(self):
raw_cycler_run = RawCyclerRun.from_maccor_file(self.maccor_file_timezone, include_eis=False)
# Simple test of whether or not correct number of columns is parsed for data/metadata
self.assertEqual(set(raw_cycler_run.metadata.keys()),
{"barcode", "_today_datetime", "start_datetime",
"filename", "protocol", "channel_id"})
self.assertEqual(10, raw_cycler_run.metadata['channel_id'])
# self.assertIsNotNone(raw_cycler_run.eis)
# Test filename recognition
raw_cycler_run = RawCyclerRun.from_file(self.maccor_file)
self.assertEqual(set(raw_cycler_run.metadata.keys()),
{"barcode", "_today_datetime", "start_datetime",
"filename", "protocol", "channel_id"})
# Quick test to see whether columns get recasted
self.assertTrue({"data_point", "cycle_index", "step_index", "voltage", "temperature",
"current", "charge_capacity", "discharge_capacity"} < set(raw_cycler_run.data.columns))
def test_timestamp_maccor(self):
raw_cycler_run = RawCyclerRun.from_maccor_file(self.maccor_file_timestamp, include_eis=False)
# Simple test of whether or not correct number of columns is parsed for data/metadata
self.assertEqual(set(raw_cycler_run.metadata.keys()),
{"barcode", "_today_datetime", "start_datetime",
"filename", "protocol", "channel_id"})
# self.assertIsNotNone(raw_cycler_run.eis)
# Test filename recognition
raw_cycler_run = RawCyclerRun.from_file(self.maccor_file)
self.assertEqual(set(raw_cycler_run.metadata.keys()),
{"barcode", "_today_datetime", "start_datetime",
"filename", "protocol", "channel_id"})
# Quick test to see whether columns get recasted
self.assertTrue({"data_point", "cycle_index", "step_index", "voltage", "temperature",
"current", "charge_capacity", "discharge_capacity"} < set(raw_cycler_run.data.columns))
def test_quantity_sum_maccor(self):
raw_cycler_run = RawCyclerRun.from_maccor_file(self.maccor_file_w_diagnostics, include_eis=False)
cycle_sign = np.sign(np.diff(raw_cycler_run.data['cycle_index']))
capacity_sign = np.sign(np.diff(raw_cycler_run.data['charge_capacity']))
self.assertTrue(np.all(capacity_sign >= -cycle_sign)) # Capacity increases throughout cycle
capacity_sign = np.sign(np.diff(raw_cycler_run.data['discharge_capacity']))
self.assertTrue(np.all(capacity_sign >= -cycle_sign)) # Capacity increases throughout cycle
# Note that the compression is from 45 M / 6 M as of 02/25/2019
def test_binary_save(self):
cycler_run = RawCyclerRun.from_file(self.arbin_file)
with ScratchDir('.'):
cycler_run.save_numpy_binary("test")
loaded = cycler_run.load_numpy_binary("test")
# Test equivalence of columns
# More strict test
self.assertTrue(np.all(loaded.data[RawCyclerRun.FLOAT_COLUMNS] ==
cycler_run.data[RawCyclerRun.FLOAT_COLUMNS]))
self.assertTrue(np.all(loaded.data[RawCyclerRun.INT_COLUMNS] ==
cycler_run.data[RawCyclerRun.INT_COLUMNS]))
# Looser test (for future size testing)
self.assertTrue(np.allclose(loaded.data[RawCyclerRun.FLOAT_COLUMNS],
cycler_run.data[RawCyclerRun.FLOAT_COLUMNS]))
self.assertTrue(np.all(loaded.data[RawCyclerRun.INT_COLUMNS] ==
cycler_run.data[RawCyclerRun.INT_COLUMNS]))
def test_get_interpolated_discharge_cycles(self):
cycler_run = RawCyclerRun.from_file(self.arbin_file)
all_interpolated = cycler_run.get_interpolated_cycles()
all_interpolated = all_interpolated[(all_interpolated.step_type == 'discharge')]
lengths = [len(df) for index, df in all_interpolated.groupby("cycle_index")]
self.assertTrue(np.all(np.array(lengths) == 1000))
# Found these manually
all_interpolated = all_interpolated.drop(columns=["step_type"])
y_at_point = all_interpolated.iloc[[1500]]
x_at_point = all_interpolated.voltage[1500]
cycle_1 = cycler_run.data[cycler_run.data['cycle_index'] == 1]
# Discharge step is 12
discharge = cycle_1[cycle_1.step_index == 12]
discharge = discharge.sort_values('voltage')
# Get an interval between which one can find the interpolated value
measurement_index = np.max(np.where(discharge.voltage - x_at_point < 0))
interval = discharge.iloc[measurement_index:measurement_index + 2]
interval = interval.drop(columns=["date_time_iso"]) # Drop non-numeric column
# Test interpolation with a by-hand calculation of slope
diff = np.diff(interval, axis=0)
pred = interval.iloc[[0]] + diff * (x_at_point - interval.voltage.iloc[0]) \
/ (interval.voltage.iloc[1] - interval.voltage.iloc[0])
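        # i.e. standard linear interpolation, y = y0 + (y1 - y0) * (x - x0) / (x1 - x0),
        # applied column-wise with voltage as the independent variable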
pred = pred.reset_index()
for col_name in y_at_point.columns:
self.assertAlmostEqual(pred[col_name].iloc[0], y_at_point[col_name].iloc[0], places=5)
def test_get_interpolated_charge_cycles(self):
cycler_run = RawCyclerRun.from_file(self.arbin_file)
all_interpolated = cycler_run.get_interpolated_cycles()
all_interpolated = all_interpolated[(all_interpolated.step_type == 'charge')]
lengths = [len(df) for index, df in all_interpolated.groupby("cycle_index")]
self.assertTrue(np.all(np.array(lengths) == 1000))
self.assertTrue(all_interpolated['current'].mean() > 0)
@unittest.skipUnless(BIG_FILE_TESTS, SKIP_MSG)
def test_get_diagnostic(self):
os.environ['BEEP_ROOT'] = TEST_FILE_DIR
cycler_run = RawCyclerRun.from_file(self.maccor_file_w_parameters)
v_range, resolution, nominal_capacity, full_fast_charge, diagnostic_available = \
cycler_run.determine_structuring_parameters()
self.assertEqual(nominal_capacity, 4.84)
self.assertEqual(v_range, [2.7, 4.2])
self.assertEqual(diagnostic_available['cycle_type'], ['reset', 'hppc', 'rpt_0.2C', 'rpt_1C', 'rpt_2C'])
diag_summary = cycler_run.get_diagnostic_summary(diagnostic_available)
self.assertEqual(diag_summary.cycle_index.tolist(), [1, 2, 3, 4, 5,
36, 37, 38, 39, 40,
141, 142, 143, 144, 145,
246, 247
])
self.assertEqual(diag_summary.cycle_type.tolist(), ['reset', 'hppc', 'rpt_0.2C', 'rpt_1C', 'rpt_2C',
'reset', 'hppc', 'rpt_0.2C', 'rpt_1C', 'rpt_2C',
'reset', 'hppc', 'rpt_0.2C', 'rpt_1C', 'rpt_2C',
'reset', 'hppc'
])
diag_interpolated = cycler_run.get_interpolated_diagnostic_cycles(diagnostic_available, resolution=1000)
diag_cycle = diag_interpolated[(diag_interpolated.cycle_type == 'rpt_0.2C')
& (diag_interpolated.step_type == 1)]
self.assertEqual(diag_cycle.cycle_index.unique().tolist(), [3, 38, 143])
plt.figure()
plt.plot(diag_cycle.discharge_capacity, diag_cycle.voltage)
plt.savefig(os.path.join(TEST_FILE_DIR, "discharge_capacity_interpolation.png"))
plt.figure()
plt.plot(diag_cycle.voltage, diag_cycle.discharge_dQdV)
plt.savefig(os.path.join(TEST_FILE_DIR, "discharge_dQdV_interpolation.png"))
self.assertEqual(len(diag_cycle.index), 3000)
hppcs = diag_interpolated[(diag_interpolated.cycle_type == 'hppc') & pd.isnull(diag_interpolated.current)]
self.assertEqual(len(hppcs), 0)
hppc_dischg1 = diag_interpolated[(diag_interpolated.cycle_index == 37)
& (diag_interpolated.step_type == 2)
& (diag_interpolated.step_index_counter == 3)
& ~pd.isnull(diag_interpolated.current)]
print(hppc_dischg1)
plt.figure()
plt.plot(hppc_dischg1.test_time, hppc_dischg1.voltage)
plt.savefig(os.path.join(TEST_FILE_DIR, "hppc_discharge_pulse_1.png"))
self.assertEqual(len(hppc_dischg1), 176)
processed_cycler_run = cycler_run.to_processed_cycler_run()
self.assertNotIn(diag_summary.index.tolist(), processed_cycler_run.cycles_interpolated.cycle_index.unique())
processed_cycler_run_loc = os.path.join(TEST_FILE_DIR, 'processed_diagnostic.json')
dumpfn(processed_cycler_run, processed_cycler_run_loc)
proc_size = os.path.getsize(processed_cycler_run_loc)
self.assertLess(proc_size, 29000000)
test = loadfn(processed_cycler_run_loc)
self.assertIsInstance(test.diagnostic_summary, pd.DataFrame)
os.remove(processed_cycler_run_loc)
def test_get_interpolated_cycles_maccor(self):
cycler_run = RawCyclerRun.from_file(self.maccor_file)
all_interpolated = cycler_run.get_interpolated_cycles(v_range=[3.0, 4.2], resolution=10000)
interp2 = all_interpolated[(all_interpolated.cycle_index == 2) &
(all_interpolated.step_type == 'discharge')].sort_values('discharge_capacity')
interp3 = all_interpolated[(all_interpolated.cycle_index == 1) &
(all_interpolated.step_type == 'charge')].sort_values('charge_capacity')
self.assertTrue(interp3.current.mean() > 0)
self.assertEqual(len(interp3.voltage), 10000)
self.assertEqual(interp3.voltage.median(), 3.6)
np.testing.assert_almost_equal(interp3[interp3.voltage <= interp3.voltage.median()].current.iloc[0],
2.4227011, decimal=6)
cycle_2 = cycler_run.data[cycler_run.data['cycle_index'] == 2]
discharge = cycle_2[cycle_2.step_index == 12]
discharge = discharge.sort_values('discharge_capacity')
acceptable_error = 0.01
        acceptable_error_offset = 0.001
voltages_to_check = [3.3, 3.2, 3.1]
columns_to_check = ['voltage', 'current', 'discharge_capacity', 'charge_capacity']
for voltage_check in voltages_to_check:
closest_interp2_index = interp2.index[(interp2['voltage'] - voltage_check).abs().min() ==
(interp2['voltage'] - voltage_check).abs()]
closest_interp2_match = interp2.loc[closest_interp2_index]
print(closest_interp2_match)
closest_discharge_index = discharge.index[(discharge['voltage'] - voltage_check).abs().min() ==
(discharge['voltage'] - voltage_check).abs()]
closest_discharge_match = discharge.loc[closest_discharge_index]
print(closest_discharge_match)
for column_check in columns_to_check:
off_by = (closest_interp2_match.iloc[0][column_check] - closest_discharge_match.iloc[0][column_check])
print(column_check)
print(np.abs(off_by))
print(np.abs(closest_interp2_match.iloc[0][column_check]) * acceptable_error)
assert np.abs(off_by) <= (np.abs(closest_interp2_match.iloc[0][column_check]) *
                                           acceptable_error + acceptable_error_offset)
def test_get_summary(self):
cycler_run = RawCyclerRun.from_file(self.maccor_file_w_diagnostics)
summary = cycler_run.get_summary(nominal_capacity=4.7, full_fast_charge=0.8)
self.assertTrue(set.issubset({'discharge_capacity', 'charge_capacity', 'dc_internal_resistance',
'temperature_maximum', 'temperature_average', 'temperature_minimum',
'date_time_iso', 'charge_throughput', 'energy_throughput',
'charge_energy', 'discharge_energy', 'energy_efficiency'}, set(summary.columns)))
self.assertEqual(len(summary.index), len(summary['date_time_iso']))
self.assertFalse(summary['paused'].any())
def test_get_energy(self):
cycler_run = RawCyclerRun.from_file(self.arbin_file)
summary = cycler_run.get_summary(nominal_capacity=4.7, full_fast_charge=0.8)
self.assertEqual(summary['charge_energy'][5], 3.7134638)
self.assertEqual(summary['energy_efficiency'][5], 0.872866405753033)
def test_get_charge_throughput(self):
cycler_run = RawCyclerRun.from_file(self.arbin_file)
summary = cycler_run.get_summary(nominal_capacity=4.7, full_fast_charge=0.8)
self.assertEqual(summary['charge_throughput'][5], 6.7614094)
self.assertEqual(summary['energy_throughput'][5], 23.2752363)
def test_ingestion_indigo(self):
# specific
raw_cycler_run = RawCyclerRun.from_indigo_file(self.indigo_file)
self.assertTrue({"data_point", "cycle_index", "step_index", "voltage", "temperature",
"current", "charge_capacity", "discharge_capacity"} < set(raw_cycler_run.data.columns))
self.assertEqual(set(raw_cycler_run.metadata.keys()),
set({"indigo_cell_id", "_today_datetime", "start_datetime","filename"}))
# general
raw_cycler_run = RawCyclerRun.from_file(self.indigo_file)
self.assertTrue({"data_point", "cycle_index", "step_index", "voltage", "temperature",
"current", "charge_capacity", "discharge_capacity"} < set(raw_cycler_run.data.columns))
self.assertEqual(set(raw_cycler_run.metadata.keys()),
set({"indigo_cell_id", "_today_datetime", "start_datetime","filename"}))
def test_ingestion_biologic(self):
# specific
raw_cycler_run = RawCyclerRun.from_biologic_file(self.biologic_file)
self.assertEqual({"cycle_index", "step_index", "voltage", "current",
"discharge_capacity", "charge_capacity", "data_point",
"charge_energy", "discharge_energy"},
set(raw_cycler_run.data.columns))
self.assertEqual(set({"_today_datetime", "filename"}),
set(raw_cycler_run.metadata.keys()))
# general
raw_cycler_run = RawCyclerRun.from_file(self.biologic_file)
self.assertEqual({"cycle_index", "step_index", "voltage", "current",
"discharge_capacity", "charge_capacity", "data_point",
"charge_energy", "discharge_energy"},
set(raw_cycler_run.data.columns))
self.assertEqual(set({"_today_datetime", "filename"}),
set(raw_cycler_run.metadata.keys()))
def test_get_project_name(self):
project_name_parts = get_project_sequence(os.path.join(TEST_FILE_DIR,
"PredictionDiagnostics_000109_tztest.010"))
project_name = project_name_parts[0]
self.assertEqual(project_name, "PredictionDiagnostics")
def test_get_protocol_parameters(self):
os.environ['BEEP_ROOT'] = TEST_FILE_DIR
filepath = os.path.join(TEST_FILE_DIR, "PredictionDiagnostics_000109_tztest.010")
test_path = os.path.join('data-share', 'raw', 'parameters')
parameters, _ = get_protocol_parameters(filepath, parameters_path=test_path)
self.assertEqual(parameters['diagnostic_type'].iloc[0], 'HPPC+RPT')
self.assertEqual(parameters['diagnostic_parameter_set'].iloc[0], 'Tesla21700')
self.assertEqual(parameters['seq_num'].iloc[0], 109)
self.assertEqual(len(parameters.index), 1)
parameters_missing, project_missing = get_protocol_parameters('Fake', parameters_path=test_path)
self.assertEqual(parameters_missing, None)
self.assertEqual(project_missing, None)
filepath = os.path.join(TEST_FILE_DIR, "PreDiag_000292_tztest.010")
parameters, _ = get_protocol_parameters(filepath, parameters_path=test_path)
self.assertIsNone(parameters)
    def test_determine_structuring_parameters(self):
os.environ['BEEP_ROOT'] = TEST_FILE_DIR
raw_cycler_run = RawCyclerRun.from_file(self.maccor_file_timestamp)
v_range, resolution, nominal_capacity, full_fast_charge, diagnostic_available = \
raw_cycler_run.determine_structuring_parameters()
diagnostic_available_test = {'parameter_set': 'Tesla21700',
'cycle_type': ['reset', 'hppc', 'rpt_0.2C', 'rpt_1C', 'rpt_2C'],
'length': 5,
'diagnostic_starts_at': [1, 36, 141, 246, 351, 456, 561, 666, 771, 876, 981,
1086, 1191, 1296, 1401, 1506, 1611, 1716, 1821, 1926,
2031, 2136, 2241, 2346, 2451, 2556, 2661, 2766, 2871,
2976, 3081, 3186, 3291, 3396, 3501, 3606, 3711, 3816,
3921, 4026, 4131, 4236, 4341, 4446, 4551, 4656, 4761,
4866, 4971, 5076, 5181, 5286, 5391, 5496, 5601, 5706,
5811, 5916, 6021, 6126, 6231, 6336, 6441, 6546, 6651,
6756, 6861, 6966, 7071, 7176, 7281, 7386, 7491, 7596,
7701, 7806, 7911, 8016, 8121, 8226, 8331, 8436, 8541,
8646, 8751, 8856, 8961, 9066, 9171, 9276, 9381, 9486,
9591, 9696, 9801, 9906, 10011, 10116, 10221, 10326,
10431]
}
self.assertEqual(v_range, [2.7, 4.2])
self.assertEqual(resolution, 1000)
self.assertEqual(nominal_capacity, 4.84)
self.assertEqual(full_fast_charge, 0.8)
self.assertEqual(diagnostic_available, diagnostic_available_test)
def test_get_diagnostic_parameters(self):
os.environ['BEEP_ROOT'] = TEST_FILE_DIR
diagnostic_available = {'parameter_set': 'Tesla21700',
'cycle_type': ['reset', 'hppc', 'rpt_0.2C', 'rpt_1C', 'rpt_2C'],
'length': 5,
'diagnostic_starts_at': [1, 36, 141]
}
diagnostic_parameter_path = os.path.join(MODULE_DIR, 'procedure_templates')
project_name = 'PreDiag'
v_range = get_diagnostic_parameters(
diagnostic_available, diagnostic_parameter_path, project_name)
self.assertEqual(v_range, [2.7, 4.2])
def test_get_interpolated_diagnostic_cycles(self):
cycler_run = RawCyclerRun.from_file(self.maccor_file_w_diagnostics)
diagnostic_available = {'type': 'HPPC',
'cycle_type': ['hppc'],
'length': 1,
'diagnostic_starts_at': [1]
}
d_interp = \
cycler_run.get_interpolated_diagnostic_cycles(
diagnostic_available, resolution=500)
self.assertGreaterEqual(
len(d_interp.cycle_index.unique()), 1)
# Ensure step indices are partitioned and processed separately
self.assertEqual(len(d_interp.step_index.unique()), 9)
first_step = d_interp[(d_interp.step_index == 7) & (d_interp.step_index_counter == 1)]
second_step = d_interp[(d_interp.step_index == 7) & (d_interp.step_index_counter == 4)]
self.assertLess(first_step.voltage.diff().max(), 0.001)
self.assertLess(second_step.voltage.diff().max(), 0.001)
self.assertTrue('date_time_iso' in d_interp.columns)
self.assertFalse(d_interp.date_time_iso.isna().all())
def test_get_diagnostic_summary(self):
cycler_run = RawCyclerRun.from_file(self.maccor_file_w_diagnostics)
diagnostic_available = | |
the Astrophysical factor (GeV**2/cm**5)
to compute the dm flux
Parameters
----------
jfactor : Astrophysical factor J (GeV**2/cm**5)
"""
if jfactor < 1.e-40 :
raise ValueError('\nValue of jfactor must be greater than 1.e-40.')
# Set the jfactor
self._jfactor = jfactor
# Return
return
@property
def dfactor(self) :
"""
Return the value of the Astrophysical factor
used to compute the flux
"""
# Return
return self._dfactor
@dfactor.setter
def dfactor(self, dfactor) :
"""
Set the value of the Astrophysical factor (GeV/cm**2)
to compute the dm flux
Parameters
----------
dfactor : Astrophysical factor D (GeV/cm**2)
"""
        # Set the dfactor
self._dfactor = dfactor
# Return
return
@property
def mmin(self) :
"""
        Return the minimum mass (GeV) used to compute
the dm flux
"""
# Return
return self._mmin
@mmin.setter
def mmin(self, m_min) :
"""
Set the value of minimum mass (GeV) used to compute
the dm flux
"""
# Just check that the minimum mass is greater than
# 10.0 GeV.
if m_min < 10. :
raise ValueError(('\nMinimum mass {0} GeV '.format(m_min) +
'is below the allowed value (10GeV)'))
        # Set the minimum mass
self._mmin = m_min
# Update masses
mvalues = self._marray(self._mmin, self._mmax, self._mpoints)
self._masses = mvalues
# Return
return
@property
def mmax(self) :
"""
        Return the maximum mass (GeV) used to compute
the dm flux
"""
# Return
return self._mmax
@mmax.setter
def mmax(self, m_max) :
"""
        Set the value of the maximum mass (GeV) used to compute
the dm flux
"""
if m_max > 1.e+5 :
raise ValueError(('\nMaximum mass {0} GeV '.format(m_max) +
'is above the allowed value (1.e+5GeV)'))
        # Set the maximum mass
self._mmax = m_max
# Update masses
mvalues = self._marray(self._mmin, self._mmax, self._mpoints)
self._masses = mvalues
# Return
return
@property
def masses(self) :
"""
Return the values of the energy array used to compute the spectrum
"""
# Return
return self._masses
@masses.setter
def masses(self, m_vals) :
"""
Set the masses used to compute the spectrum
Parameters
----------
        - m_vals : tuple with:
- mmin : Minimum mass (GeV)
- mmax : Maximum mass (GeV)
- mpoints : Number of points to create the array
"""
mmin, mmax, mpoints = m_vals
# Check if emin and emax are valid
if mmin < 10.0 :
raise ValueError(('Mass {0} '.format(mmin) +
'is lower than the allowed value 10.0'))
if mmax > 1.e+5 :
raise ValueError(('Mass {0} '.format(mmax) +
'is greater than the allowed value 1.e+5'))
# Create energy array
mvalues = self._marray(mmin, mmax, mpoints)
self._masses = mvalues
# Return
return
@staticmethod
def _marray(mmin, mmax, mpoints) :
"""
Create list of masses to generate the fits table.
The calculation is based in the number of points
The masses are computed assuming logarithmic distance
"""
logmmin = np.log10(mmin)
logmmax = np.log10(mmax)
width = (logmmax - logmmin)/(mpoints-1)
masses = []
for index in range(mpoints) :
masses.append(math.pow(10., logmmin+index*width))
# Return
return masses
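    # Note (illustrative, not from the original source): for this log spacing the
    # result matches numpy's logspace, e.g.
    #   masses = np.logspace(np.log10(mmin), np.log10(mmax), mpoints).tolist()
    # which can be used to sanity-check _marray.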
@property
def delta(self) :
"""
Return what kind of dark matter particle is
used to compute the dm flux
"""
# Return
return self._delta
@delta.setter
def delta(self, delta) :
"""
Set the value of delta to describe what kind of
dark matter particle is used to compute the
dm flux.
Parameters
----------
delta : String, either Majorana or Dirac
"""
# Just to check that delta is valid
if delta not in ALLOWED_FERMIONS :
raise ValueError(('\nKind of Dark matter particle not ' +
'supported.\nOptions are:{0}'.format(ALLOWED_FERMIONS)))
        # Set delta
self._delta = delta
# Return
return
@property
def hasEW(self) :
"""
Return whether EW corrections are included or not
"""
# Return
return self._dminterp.hasEW
@hasEW.setter
def hasEW(self, has_EW) :
"""
Include EW corrections in computation of DM spectra
"""
self._dminterp.hasEW = has_EW
# Update the tuple of allowed channels
if has_EW :
self._allowed_channels = ALLOWED_CHANNELS
else :
self._allowed_channels = ALLOWED_CHANNELSNOEW
# Return
return
@property
def allowed_channels(self) :
"""
Return tuple of allowed channels according to
whether or not to include EW corrections in spectra
"""
# Return
return self._allowed_channels
@property
def channel(self) :
'''
Return channel used to compute the gamma-ray flux
'''
# Return
return self._channel
@channel.setter
def channel(self, ch) :
'''
Set channel used to compute the dmspectrum.
Also updates the channel parameter of the
spectrum interpolator dminterp
If channel is not valid, raise value error
'''
# Check if channel is valid
if ch not in self._allowed_channels :
            msg = ('\nChannel {0} not found in '.format(ch) +
                   'allowed channels. Options are: {0}'.format(self._allowed_channels))
raise ValueError(msg)
# Set channel
self._channel = ch
# Update dminterp instance
self._dminterp.channel = ch
# Return
return
@property
def tablemodel(self) :
"""
Return GModelSpectralTable
"""
# Return
return self._model
@property
def process(self) :
"""
Return dm process
"""
# Return
return self._dminterp.process
@process.setter
def process(self, process_vals) :
"""
Set annihilation (anna) or decay process in dminterp
Also update the properties jfactor and sigmav for anna
or dfactor and lifetime for decay
"""
# Extract values
dmprocess = process_vals[0]
astfactor = process_vals[1]
paroi = process_vals[2]
# Check that process is valid
VALID_PROCESSES = ['anna', 'decay']
if dmprocess not in VALID_PROCESSES :
msg = 'Valid options are: {0}'.format(VALID_PROCESSES)
raise ValueError(msg)
if astfactor < 1.e-40 or paroi < 1.e-40 :
raise ValueError('\nParameters must be greater than 1.e-40.')
# Update properties
if dmprocess == 'anna' :
self._jfactor = astfactor
self._sigmav = paroi
elif dmprocess == 'decay' :
self._dfactor = astfactor
self._lifetime = paroi
self._dminterp.process = dmprocess
# Update
# Return
return
@property
def elist(self) :
"""
Return list of energy values used to compute the spectrum
"""
# Return
return self._dminterp.energy
@elist.setter
def elist(self, evals) :
"""
Update energy values used to compute the spectrum
evals[0] --> emin
evals[1] --> emax
evals[2] --> epoints
"""
# Check that emin and emax are ok
        # The minimum allowed energy here is 5.0e-3 GeV (5 MeV)
if evals[0] < 5.0e-3 or evals[1] > 1.e+5 :
raise ValueError('\nParameters outside of range')
# Update properties
self._dminterp.energy = evals
# Return
return
@staticmethod
def _norm_anna(sigmav, mass, delta, jfactor) :
"""
Compute normalization of the dm flux compatible with gammalib
Parameters
----------
sigmav : Value of annihilation cross-section (cm**3/s)
mass : Mass of dark matter particles (GeV)
delta : String to indicate if dark matter is a
Majorana or Dirac fermion
        jfactor : Astrophysical factor for annihilation
Return
------
norm : (1/[MeV* cm^2 * s])
"""
d = 0.
# Check delta
if delta == 'Majorana' :
d = 2.
elif delta == 'Dirac' :
d = 4.
# Compute ppfactor
ppfactor = sigmav / (d*4.*gammalib.pi*mass*mass)
norm = ppfactor * jfactor
return norm * 1.0e-3
@staticmethod
def _norm_decay(lifetime, mass, dfactor) :
"""
Compute normalization of the dm flux compatible with gammalib
Parameters
----------
lifetime : Value of decay lifetime (s)
mass : Mass of dark matter particles (GeV)
        dfactor : Astrophysical factor for decay
Return
------
norm : (1/[MeV* cm^2 * s])
"""
# Compute ppfactor
ppfactor = 1 / (4.*gammalib.pi*mass*lifetime)
norm = ppfactor * dfactor
return norm * 1.0e-3
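    # Worked example (illustrative numbers, not from the original source): for
    # sigmav = 3e-26 cm**3/s, a 100 GeV Majorana candidate (d = 2) and
    # J = 1e19 GeV**2/cm**5, _norm_anna gives
    #   3e-26 / (2 * 4 * pi * 100**2) * 1e19 * 1e-3 ~ 1.2e-15  [1/(MeV cm**2 s)]
    # i.e. the prefactor that multiplies the per-annihilation spectrum.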
def create_modeltable(self) :
"""
Create fits table with spectrum for specific channel
"""
# Number of points in energy array
n_eng = len(self._dminterp.energy)
# Array with definitions of energy bins
# The min and max values are encapsulated in the
# dm spectrum interpolator dminterp
gemin = gammalib.GEnergy(self._dminterp.emin, 'GeV')
gemax = gammalib.GEnergy(self._dminterp.emax, 'GeV')
ebins = gammalib.GEbounds(n_eng, gemin, gemax)
# Then create the GModelPar objects for mass
dmmass = gammalib.GModelPar('Mass', self._mmin, 1.0)
dmmass.unit('GeV')
# Create the GSpectralTablePar objects
par_mass = gammalib.GModelSpectralTablePar(dmmass, self._masses)
# Create the container GSpectralTablePars and append the pars
pars = gammalib.GModelSpectralTablePars()
pars.append(par_mass)
# GNdarray to save the spectra
spectra = gammalib.GNdarray(self._mpoints,n_eng)
# filling the spectrum
        desc = 'Computing {}-spectrum'.format(self._dminterp.process)
for index, mass in tqdm(enumerate(self._masses),desc=desc,leave=False):
# Change the value of the mass
self._dminterp.mass = mass
dmspec = self._dminterp.spectra()
for eindex in range(n_eng):
spectra[index, eindex] = dmspec[eindex]
# Get ppfactor and normalization
# This normalization computed here
        # is not necessary. You can change the normalization
| |
= 60 + coef
if intelligence == 3:
x = 40 + coef
if intelligence == 4:
x = 20 + coef
if x >= 90:
x = 90
if random.randint(1, 100) <= x:
cardplayers.remove(ids[1])
else:
cardplayers.remove(ids[0])
i = 10
except:
try:
print('try3')
int(ids[1])
index = 0
coef = 0
user = users.find_one({'id': ids[1]})
if user != None:
coef += user['intelligence']
if ids[index] == 'miku':
intelligence = mikustats['intelligence']
if ids[index] == 'alisa':
intelligence = alisastats['intelligence']
if ids[index] == 'lena':
intelligence = lenastats['intelligence']
if ids[index] == 'slavya':
intelligence = slavyastats['intelligence']
if ids[index] == 'zhenya':
intelligence = zhenyastats['intelligence']
if ids[index] == 'uliana':
intelligence = ulianastats['intelligence']
if intelligence == 1:
x = 75 + coef
if intelligence == 2:
x = 60 + coef
if intelligence == 3:
x = 40 + coef
if intelligence == 4:
x = 20 + coef
if x >= 90:
x = 90
if random.randint(1, 100) <= x:
cardplayers.remove(ids[0])
else:
cardplayers.remove(ids[1])
i = 10
except:
print('try4')
if ids[0] == 'miku':
intelligence1 = mikustats['intelligence']
if ids[0] == 'alisa':
intelligence1 = alisastats['intelligence']
if ids[0] == 'lena':
intelligence1 = lenastats['intelligence']
if ids[0] == 'slavya':
intelligence1 = slavyastats['intelligence']
if ids[0] == 'zhenya':
intelligence1 = zhenyastats['intelligence']
if ids[0] == 'uliana':
intelligence1 = ulianastats['intelligence']
if ids[1] == 'miku':
intelligence2 = mikustats['intelligence']
if ids[1] == 'alisa':
intelligence2 = alisastats['intelligence']
if ids[1] == 'lena':
intelligence2 = lenastats['intelligence']
if ids[1] == 'slavya':
intelligence2 = slavyastats['intelligence']
if ids[1] == 'zhenya':
intelligence2 = zhenyastats['intelligence']
if ids[1] == 'uliana':
intelligence2 = ulianastats['intelligence']
z = intelligence1 - intelligence2
if z == 0:
x = 50
elif z == 1:
x = 60
elif z == 2:
x = 75
elif z == 3:
x = 85
elif z == -1:
x = 40
elif z == -2:
x = 25
elif z == -3:
x = 15
if random.randint(1, 100) <= x:
cardplayers.remove(ids[1])
else:
cardplayers.remove(ids[0])
i = 10
text = ''
x = 0
for dd in cardplayers:
x += 1
try:
int(dd)
text += users.find_one({'id': dd})['pionername'] + '\n'
except:
text += nametopioner(dd) + '\n'
text1 = ''
text3 = ''
if electronicstats['cardsturn'] == 1:
text1 = 'Завершился первый этап турнира! А вот и наши победители:\n\n'
elif electronicstats['cardsturn'] == 2:
if x > 1:
text1 = 'Второй этап турнира подошёл к концу! Встречайте победителей:\n\n'
else:
text1 = 'Финал подошёл к концу! И наш победитель:\n\n'
elif electronicstats['cardsturn'] == 3:
if x == 2:
text1 = 'Полуфинал завершён! В финале встретятся:\n\n'
else:
text1 = 'Встречайте победителя турнира:\n\n'
elif electronicstats['cardsturn'] == 4:
text1 = 'Турнир завершён! И наш победитель:\n\n'
if x == 2:
text3 = 'Настало время для финала! Кто же станет победителем на этот раз?'
elif x == 4:
text3 = 'На очереди у нас полуфинал. Кто же из четырёх оставшихся игроков попадёт в финал?'
elif x == 8:
text3 = 'Скоро начнётся раунд 2. Игроки, приготовьтесь!'
electronicstats['cardsturn'] += 1
electronic.send_message(-1001351496983, text1 + text + '\n' + text3, parse_mode='markdown')
setka = []
i = 0
if len(cardplayers) > 1:
x = len(cardplayers) / 2
while i < x:
player1 = random.choice(cardplayers)
cardplayers.remove(player1)
player2 = random.choice(cardplayers)
cardplayers.remove(player2)
lst = [player1, player2]
setka.append(lst)
i += 1
t = threading.Timer(120, cards_nextturn)
t.start()
else:
time.sleep(2)
bot.send_chat_action(-1001351496983, 'typing')
time.sleep(5)
try:
name = users.find_one({'id': cardplayers[0]})['pionername']
except:
name = nametopioner(cardplayers[0])
bot.send_message(-1001351496983,
'Отлично! Поздравляю, ' + name + '! А теперь приберитесь тут, скоро ужин.',
parse_mode='markdown')
bot.send_sticker(-1001351496983, 'CAADAgADqwADgi0zDzm_zSmMbMmiAg')
setka = []
cardplayers = []
electronicstats['waitingplayers'] = 0
electronicstats['playingcards'] = 0
electronicstats['cardsturn'] = 0
else:
electronic.send_message(-1001351496983,
'К сожалению, игроков для турнира сегодня не набралось. Ну ничего, в следующий раз попробуем!')
setka = []
cardplayers = []
electronicstats['waitingplayers'] = 0
electronicstats['playingcards'] = 0
electronicstats['cardsturn'] = 0
except:
setka = []
cardplayers = []
electronicstats['waitingplayers'] = 0
electronicstats['playingcards'] = 0
electronicstats['cardsturn'] = 0
electronic.send_message(-1001351496983, 'Непредвиденные обстоятельства! Турнир придётся отменить!')
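# Sketch (not part of the original bot): the win-probability rule used inline in the
# tournament branches above, extracted as a helper for readability. The base chance
# depends on the bot pioneer's intelligence (1-4), the human player's intelligence
# adds a bonus, and the result is capped at 90. Name, signature and the fallback of
# 50 for unknown levels are illustrative assumptions.
def player_win_chance(pioner_intelligence, player_bonus=0):
    base = {1: 75, 2: 60, 3: 40, 4: 20}.get(pioner_intelligence, 50)
    return min(base + player_bonus, 90)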
def talkwithplayer(player, pioner):
if pioner == 'miku':
t = threading.Timer(random.randint(10, 90), sayto, args=[miku, 'miku', player, cards_startround_mikutexts])
t.start()
if pioner == 'alisa':
t = threading.Timer(random.randint(10, 90), sayto, args=[alisa, 'alisa', player, cards_startround_alisatexts])
t.start()
if pioner == 'zhenya':
t = threading.Timer(random.randint(10, 90), sayto,
args=[zhenya, 'zhenya', player, cards_startround_zhenyatexts])
t.start()
if pioner == 'uliana':
t = threading.Timer(random.randint(10, 90), sayto,
args=[uliana, 'uliana', player, cards_startround_ulianatexts])
t.start()
if pioner == 'slavya':
t = threading.Timer(random.randint(10, 90), sayto,
args=[slavya, 'slavya', player, cards_startround_slavyatexts])
t.start()
if pioner == 'lena':
t = threading.Timer(random.randint(10, 90), sayto, args=[lena, 'lena', player, cards_startround_lenatexts])
t.start()
cards_startround_mikutexts = ['Ой, Привет! Если не помнишь, то меня Мику зовут. Мы сейчас с тобой ' + \
'играем! Ты хорошо играешь? Я не очень...',
'Привет! Мы с тобой уже знакомы, если помнишь... ' + \
'Удачи на турнире!']
cards_startround_alisatexts = ['Ну привет. Готовься проиграть!']
cards_startround_slavyatexts = ['Привет! Интересно, кто победит в турнире в этот раз...']
cards_startround_ulianatexts = ['Привет-привет! Я сегодня настроена на победу, так что советую сразу сдаться!']
cards_startround_lenatexts = ['Привет. Удачи на сегодняшнем турнире!']
cards_startround_zhenyatexts = ['Выходит, мы с тобой сегодня играем. Давай сразу к игре, без лишних разговоров!']
def sayto(pioner, pionername, id, texts):
x = users.find_one({'id': id})
if x['gender'] == 'female':
gndr = 'а'
else:
gndr = ''
if pionername == 'miku':
textstochat = ['Привет, ' + x['pionername'] + '! <NAME> зовут! Мы ещё не знакомы, можем ' + \
'[поговорить](https://t.me/ES_MikuBot) после турнира... А сейчас - удачи!']
elif pionername == 'alisa':
textstochat = ['Ну привет, ' + x['pionername'] + '! Думаешь победить в турнире? Даже не надейся! Меня тебе ' + \
'точно не обыграть!']
elif pionername == 'slavya':
textstochat = ['Привет, ' + x['pionername'] + '! Чего-то я тебя не видела раньше... <NAME> зовут! Можем ' + \
'[познакомиться](https://t.me/SlavyaBot) на досуге. Ну а сейчас готовься к игре!']
elif pionername == 'uliana':
textstochat = ['Привет! Тебя ведь ' + x['pionername'] + ' зовут? Я Ульяна! Готов' + gndr + ' проиграть?']
elif pionername == 'lena':
textstochat = ['Привет, ' + x[
'pionername'] + '. <NAME> зовут... Хотя ты наверняка уже знаешь, ведь в турнирной сетке написано. ' + \
'Удачи!']
elif pionername == 'zhenya':
textstochat = ['Ну привет, ' + x['pionername'] + '. Не знаю, зачем я вообще играю, но уже поздно передумывать.']
try:
pioner.send_chat_action(id, 'typing')
time.sleep(5)
pioner.send_message(id, random.choice(texts))
except:
pioner.send_chat_action(-1001351496983, 'typing')
time.sleep(5)
pioner.send_message(-1001351496983, random.choice(textstochat), parse_mode='markdown')
def nametopioner(pioner):
if pioner == 'miku':
return '[Мику](https://t.me/ES_MikuBot)'
if pioner == 'alisa':
return '[Алиса](https://t.me/ES_AlisaBot)'
if pioner == 'zhenya':
return '[Женя](https://t.me/ES_ZhenyaBot)'
if pioner == 'uliana':
return '[Ульяна](https://t.me/ES_UlianaBot)'
if pioner == 'slavya':
return '[Славя](https://t.me/SlavyaBot)'
if pioner == 'lena':
return '[Лена](https://t.me/ES_LenaBot)'
if pioner == 'electronic':
return '[Электроник](https://t.me/ES_ElectronicBot)'
if pioner == 'shurik':
return '[Шурик](https://t.me/ES_Shurikbot)'
def addtogame(name, game):
game.append(name)
def sendmes(sender, text, parse_mode):
sender.send_message(-1001351496983, text, parse_mode=parse_mode)
def sendstick(sender, stick):
sender.send_sticker(-1001351496983, stick)
####################################### ELECTRONIC ##############################################
@electronic.message_handler(commands=['control'])
def electroniccontrol(m):
config.about(m, electronic)
adm=admins.find_one({'name':'el_admins'})
if m.from_user.id in adm['el_admins']:
if adm['controller'] == None:
admins.update_one({'name':'el_admins'},{'$set':{'controller': {'id': m.from_user.id,
'name': m.from_user.first_name}}})
electronic.send_message(m.from_user.id, 'Привет! надеюсь ты знаешь, как управлять мной.')
@electronic.message_handler(commands=['stopcontrol'])
def electronicstopcontrol(m):
config.about(m, electronic)
x='el_admins'
adm=admins.find_one({'name':x})
if adm['controller'] != None:
if adm['controller']['id'] == m.from_user.id:
admins.update_one({'name':x},{'$set':{'controller':None}})
electronic.send_message(m.from_user.id, 'Ты больше не управляешь мной!')
@electronic.message_handler(content_types=['sticker'])
def stickercatchelectronic(m):
stickhandler(m, electronic)
@electronic.message_handler(content_types=['audio'])
@electronic.message_handler(content_types=['voice'])
def audiocatchelectronic(m):
audiohandler(m, electronic)
@electronic.message_handler(content_types = ['document'])
def docsss(m):
dochandler(m, electronic)
@electronic.message_handler(content_types=['photo'])
def photocatchel(m):
pichandler(m, electronic)
@electronic.message_handler()
def electronichandler(m):
try:
if ban.find_one({'id': m.from_user.id}) == None:
if electronicstats['waitingplayers'] == 1:
if m.text.lower() == 'хочу принять участие в турнире!':
x = users.find_one({'id': m.from_user.id})
if x['gender'] == 'female':
gndr = 'а'
else:
gndr = ''
if x['id'] not in cardplayers:
if m.from_user.id == m.chat.id:
texts = ['Привет! Записал тебя в список участников. Жди начала турнира!',
'Хорошо. Записал тебя!',
'Рад, что тебя заинтересовала моя игра. Теперь ты тоже в списке участников!']
text = random.choice(texts)
electronic.send_message(m.chat.id, text)
cardplayers.append(x['id'])
else:
if m.reply_to_message != None:
if m.reply_to_message.from_user.id == 609648686:
texts = ['Привет, [' + x['pionername'] + '](tg://user?id=' + str(
x['id']) + ')! Записал тебя в список участников. Жди начала турнира!',
'Хорошо, [' + x['pionername'] + '](tg://user?id=' + str(
x['id']) + '). Записал тебя!',
'Рад, что тебя заинтересовала моя игра. Теперь ты тоже в списке участников!']
text = random.choice(texts)
electronic.send_message(m.chat.id, text, parse_mode='markdown',
reply_to_message_id=m.message_id)
cardplayers.append(x['id'])
else:
if m.from_user.id == m.chat.id:
reply_to_message_id = None
else:
reply_to_message_id = m.message_id
electronic.send_message(m.chat.id, '[' + x['pionername'] + '](tg://user?id=' + str(x['id']) + \
'), ты уже записан' + gndr + ' на турнир!', parse_mode='markdown',
reply_to_message_id=reply_to_message_id)
else:
# Source: repository Haiiliin/PyAbaqus
import typing
from abaqusConstants import *
from ..BasicGeometry.Cell import Cell
from ..BasicGeometry.Edge import Edge
from ..BasicGeometry.Face import Face
from ..BasicGeometry.InterestingPoint import InterestingPoint
from ..BasicGeometry.Vertex import Vertex
from ..Datum.Datum import Datum
from ..Datum.DatumPlane import DatumPlane
from ..Mesh.MeshFace import MeshFace
from ..Mesh.MeshNode import MeshNode
from ..Sketcher.ConstrainedSketch import ConstrainedSketch
class Feature:
"""Abaqus/CAE is a feature-based modeling system, and features are stored in the Feature
object. The user defines the parameters of the feature, and Abaqus/CAE modifies the
model based on the value of the parameters. This evaluation of the parameters is called
regeneration of the feature. Feature objects contain both the parameters and the
resulting model modification.
Attributes
----------
name: str
A String specifying the repository key.
id: int
An Int specifying the ID of the feature.
Notes
-----
This object can be accessed by:
.. code-block:: python
import part
mdb.models[name].parts[name].features[name]
mdb.models[name].parts[name].featuresById[i]
import assembly
mdb.models[name].rootAssembly.features[name]
mdb.models[name].rootAssembly.featuresById[i]
"""
# A String specifying the repository key.
name: str = ''
# An Int specifying the ID of the feature.
id: int = None
def AttachmentPoints(self, name: str, points: float, projectionMethod: SymbolicConstant = PROJECT_BY_PROXIMITY,
projectOnFaces: tuple[Face] = (), projectOnElementFaces: tuple[MeshFace] = (),
projectionDirStartPt: float = None, projectionDirEndPt: float = None, setName: str = ''):
"""This method creates an attachment points Feature. Attachment points may be created using
datum points, vertices, reference points, attachment points, interesting points, orphan
mesh nodes or coordinates. Optionally, the attachment points can be projected on
geometric faces or element faces.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].parts[*name*].AttachmentPoints
mdb.models[name].rootAssembly.AttachmentPoints
Parameters
----------
name
A String specifying a unique Feature name.
points
A tuple of points. Each point can be a ConstrainedSketchVertex, Datum point, Reference point, Attachment
point, orphan mesh Node, Interesting point object, or a tuple of Floats representing the
coordinates of a point.
projectionMethod
A SymbolicConstant specifying the projection method. Possible values are
PROJECT_BY_PROXIMITY and PROJECT_BY_DIRECTION. The default value is
PROJECT_BY_PROXIMITY.
projectOnFaces
A sequence of Face objects specifying the geometry faces onto which the points are to be
projected.
projectOnElementFaces
A sequence of MeshFace objects specifying the orphan mesh element faces onto which the
points are to be projected.
projectionDirStartPt
A point specifying the start point of the projection direction. The point can be a
ConstrainedSketchVertex, Datum point, Reference point, Attachment point, orphan mesh Node, Interesting
point object, or a tuple of Floats representing the coordinates of a point.
projectionDirEndPt
A point specifying the end point of the projection direction. The point can be a ConstrainedSketchVertex,
Datum point, Reference point, Attachment point, orphan mesh Node, Interesting point
object, or a tuple of Floats representing the coordinates of a point.
setName
A String specifying a unique set name.
Returns
-------
feature: Feature
A Feature object
"""
pass
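    # Hedged usage sketch, not part of the original stubs; the model and feature
    # names below ('Model-1', 'Attach-1', 'AttachmentSet-1') and the coordinates
    # are illustrative assumptions. Attachment points can be created on the root
    # assembly directly from coordinate tuples:
    #
    #   feature = mdb.models['Model-1'].rootAssembly.AttachmentPoints(
    #       name='Attach-1',
    #       points=((0.0, 0.0, 0.0), (10.0, 0.0, 0.0)),
    #       setName='AttachmentSet-1')
    #
    # The projection arguments are left at their defaults, so the points stay
    # where they are defined instead of being projected onto faces.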
def AttachmentPointsAlongDirection(self, name: str, startPoint: float, pointCreationMethod: SymbolicConstant,
endPoint: float = None, direction: str = '', spacing: str = '',
numPtsAlongDir: str = '', numPtsBetweenPts: str = '',
createPtAtStartPt: Boolean = True,
createPtAtEndPt: Boolean = True,
projectionMethod: SymbolicConstant = PROJECT_BY_PROXIMITY,
projectOnFaces: tuple[Face] = (), projectOnElementFaces: tuple[MeshFace] = (),
projectionDirStartPt: float = None, projectionDirEndPt: float = None,
flipDirection: Boolean = OFF, setName: str = ''):
"""This method creates a Feature object by creating attachment points along a direction or
between two points. A Datum point, a ConstrainedSketchVertex, a Reference point, an Attachment point, an
Interesting point, or an orphan mesh Node can be specified as the start or end point.
The direction can be specified using a straight edge or a datum axis.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].parts[*name*].AttachmentPoints
mdb.models[name].rootAssembly.AttachmentPoints
Parameters
----------
name
A String specifying a unique Feature name.
startPoint
A point specifying the start point of the direction along which to create points. The
point can be a ConstrainedSketchVertex, Datum point, Reference point, Attachment point, orphan mesh Node,
Interesting point object, or a tuple of Floats representing the coordinates of a point.
pointCreationMethod
A SymbolicConstant specifying the point creation method. Possible values are AUTO_FIT,
NUM_PTS_ALONG_DIR, and NUM_PTS_BETWEEN_PTS.
endPoint
A point specifying the end point if creating points between two points. The point can be
a ConstrainedSketchVertex, Datum point, Reference point, Attachment point, orphan mesh Node, Interesting
point object, or a tuple of Floats representing the coordinates of a point.
direction
The direction can be specified by a straight edge or a datum axis.
spacing
A float specifying the spacing to be used between two points.
numPtsAlongDir
An integer specifying the number of points to be created along the specified direction.
numPtsBetweenPts
An integer specifying the number of points to be created between the start and end
points.
createPtAtStartPt
A Boolean specifying whether to create an attachment point at the start point. The
default value is True.
createPtAtEndPt
A Boolean specifying whether to create an attachment point at the end point. The default
value is True.
projectionMethod
A SymbolicConstant specifying the projection method. Possible values are
PROJECT_BY_PROXIMITY and PROJECT_BY_DIRECTION. The default value is
PROJECT_BY_PROXIMITY.
projectOnFaces
A sequence of Face objects specifying the geometry faces onto which the points are to be
projected.
projectOnElementFaces
A sequence of MeshFace objects specifying the orphan mesh element faces onto which the
points are to be projected.
projectionDirStartPt
A point specifying the start point of the projection direction. The point can be a
ConstrainedSketchVertex, Datum point, Reference point, Attachment point, orphan mesh Node, Interesting
point object, or a tuple of Floats representing the coordinates of a point.
projectionDirEndPt
A point specifying the end point of the projection direction. The point can be a ConstrainedSketchVertex,
Datum point, Reference point, Attachment point, orphan mesh Node, Interesting point
object, or a tuple of Floats representing the coordinates of a point.
flipDirection
A Boolean specifying if the direction along which the attachment points are created
should be reversed. This argument is valid only when
*pointCreationMethod*=NUM_PTS_ALONG_DIR.
setName
A String specifying a unique set name.
Returns
-------
feature: Feature
A Feature object
"""
pass
def AttachmentPointsOffsetFromEdges(self, name: str, edges: tuple, startPoint: str = '', flipDirection: str = '',
pointCreationMethod: SymbolicConstant = None, numberOfPoints: str = '',
spacingBetweenPoints: str = '', offsetFromStartPoint: str = 0,
offsetFromEndPoint: str = 0, spacingMethod: SymbolicConstant = AUTO_FIT_PTS,
patterningMethod: SymbolicConstant = None, referenceFace: str = '',
startPointForPatternDirection: float = None,
endPointForPatternDirection: float = None,
offsetFromEdges: str = '', numberOfRows: str = 1, spacingBetweenRows: str = '',
projectionMethod: SymbolicConstant = PROJECT_BY_PROXIMITY,
projectOnFaces: tuple[Face] = (), projectOnElementFaces: tuple[MeshFace] = (),
projectionDirStartPt: float = None, projectionDirEndPt: float = None,
setName: str = ''):
"""This method creates a Feature object by creating attachment points along or offset from
one or more connected edges.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].parts[*name*].AttachmentPoints
mdb.models[name].rootAssembly.AttachmentPoints
Parameters
----------
name
A String specifying a unique Feature name.
edges
A sequence of connected Edge objects specifying the geometry edges from which to offset
the points.
startPoint
A ConstrainedSketchVertex of the selected edges that specifies the point from which to create points.
This point can be one of the two end vertices of the connected edges. In case of edges
forming a closed loop and having multiple vertices, this point can be any one of the
vertices on the edges.
flipDirection
This parameter is required to indicate the direction in which to create the points. This
parameter is required only in case of edges forming a closed loop.
pointCreationMethod
A SymbolicConstant specifying the point creation method. Possible values are BY_NUMBER
or BY_SPACING.
numberOfPoints
            An integer specifying the number of points to be created along the selected edges.
# Source: repository shivaathreya/ibis, file lib/ingest/tests/test_parquet_opt_ddl_time.py
"""Tests for parquet_opt_ddl_time.py"""
import difflib
import os
import unittest
from mock import patch, MagicMock
from lib.ingest.parquet_opt_ddl_time import ConnectionManager, DDLTypes, main
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
class ParquetOptTimeFunctionsTest(unittest.TestCase):
"""Tests."""
def setUp(self):
pass
def tearDown(self):
self.conn_mgr = None
self.ddl_types = None
def compare_xml(self, test_xml, expected_xml):
"""Compare two xml files."""
same = True
test_xml = [xml.strip().replace('\t', '') for xml in
test_xml.splitlines()]
expected_xml = [xml.strip().replace('\t', '') for xml in
expected_xml.splitlines()]
if "".join(expected_xml) != "".join(test_xml):
same = False
print ""
print test_xml
print expected_xml
print "XML strings don't match."
diff = difflib.unified_diff(test_xml, expected_xml)
print '\n'.join(list(diff))
return same
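    # Hedged illustration with invented strings: the comparison strips leading and
    # trailing whitespace and tabs from every line before joining, so
    #     self.compare_xml("SELECT a,\n\tb", "SELECT a,\n  b   ")
    # treats the two HQL snippets as equal, while any real token difference prints
    # a unified diff and makes the method return False.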
@patch('lib.ingest.parquet_opt_ddl_time.ConnectionManager', autospec=True)
@patch('lib.ingest.parquet_opt_ddl_time.sys', autospec=True)
@patch('lib.ingest.parquet_opt_ddl_time.os', autospec=True)
@patch('lib.ingest.parquet_opt_ddl_time.open')
def test_main_full_load(self, m_open, m_os, m_sys, m_cm):
"""test main method"""
m_sys.argv = ['test_arg0', 'test_arg1', 'test_arg2', 'test_arg3',
'test_arg4', 'full_load', 'test_arg6']
cm_methods = MagicMock()
cm_methods.create_ingest_table.return_value = 'test_ingest_hql'
cm_methods.create_parquet_live.return_value = 'test_live_hql'
cm_methods.get_hql.return_value = 'test_hql'
cm_methods.get_incremental_hql.return_value = 'test_incr_hql'
cm_methods.create_externaltable.return_value = \
('views', 'invalidate', 'info')
m_cm.return_value = cm_methods
main()
self.assertEquals(m_open.call_count, 5)
cm_methods.create_externaltable.return_value = ('', '', '')
m_cm.return_value = cm_methods
main()
self.assertEquals(m_open.call_count, 7)
@patch('lib.ingest.parquet_opt_ddl_time.ConnectionManager', autospec=True)
@patch('lib.ingest.parquet_opt_ddl_time.sys', autospec=True)
@patch('lib.ingest.parquet_opt_ddl_time.os', autospec=True)
@patch('lib.ingest.parquet_opt_ddl_time.open')
def test_main_incr(self, m_open, m_os, m_sys, m_cm):
"""test main method"""
m_sys.argv = ['test_arg0', 'test_arg1', 'test_arg2', 'test_arg3',
'test_arg4', 'incremental', 'test_arg6']
cm_methods = MagicMock()
cm_methods.create_ingest_table.return_value = 'test_ingest_hql'
cm_methods.create_parquet_live.return_value = 'test_live_hql'
cm_methods.get_hql.return_value = 'test_hql'
cm_methods.get_incremental_hql.return_value = 'test_incr_hql'
cm_methods.create_externaltable.return_value = \
('views', 'invalidate', 'info')
m_cm.return_value = cm_methods
main()
self.assertEquals(m_open.call_count, 5)
@patch.object(ConnectionManager, 'sqoop_eval', autospec=True)
@patch.object(ConnectionManager, 'get_schema', autospec=True)
def test_get_full_hql(self, m_schema, m_sqoop_eval):
"""test full hql"""
with open(BASE_DIR + '/fixtures/ddl_td_avro_parquet.txt',
'r') as file_h:
sqoop_eval_output = file_h.read()
m_sqoop_eval.return_value = sqoop_eval_output
m_schema.return_value = DDLTypes(
input_mapping=sqoop_eval_output, data_source="td",
ingest_timestamp="2016-01-01 16:47:56")
self.conn_mgr = ConnectionManager(
'database_test', 'table_test', '', 'domain', 'jdbc_url_conn_test',
'connect_factories', 'username', 'password', 'view', 'int',
'domain', 'impala_host_name', '2016-01-01 16:47:56', 'hdfs_test',
'jars_test', 'jdbc_test', 'ingestion')
test_create_hql = self.conn_mgr.get_hql()
with open(BASE_DIR + '/expected/full_create_table.hql', 'r') as file_h:
expected_create_table_hql = file_h.read()
self.assertTrue(self.compare_xml(expected_create_table_hql,
test_create_hql))
@patch.object(ConnectionManager, 'sqoop_eval', autospec=True)
@patch.object(ConnectionManager, 'get_schema', autospec=True)
def test_get_full_hql_for_incremental(self, m_schema, m_sqoop_eval):
"""test incremental hql"""
with open(BASE_DIR + '/fixtures/ddl_td_avro_parquet.txt',
'r') as file_h:
sqoop_eval_output = file_h.read()
m_sqoop_eval.return_value = sqoop_eval_output
m_schema.return_value = DDLTypes(
input_mapping=sqoop_eval_output, data_source="td",
ingest_timestamp="2016-01-01 16:47:56")
self.conn_mgr = ConnectionManager(
'database_test', 'table_test', '', 'domain', 'jdbc_url_conn_test',
'connect_factories', 'username', 'password', 'view', 'int',
'domain', 'impala_host_name', '2016-01-01 16:47:56', 'hdfs_test',
'jars_test', 'jdbc_test', 'ingestion')
test_create_hql = self.conn_mgr.get_hql("incremental")
with open(BASE_DIR + '/expected/incremental_full_create_table.hql',
'r') as file_h:
expected_create_table_hql = file_h.read()
self.assertTrue(
self.compare_xml(expected_create_table_hql, test_create_hql))
@patch.object(ConnectionManager, 'sqoop_eval', autospec=True)
@patch.object(ConnectionManager, 'get_schema', autospec=True)
def test_get_incremental_hql(self, m_schema, m_sqoop_eval):
"""test incremental hql"""
with open(BASE_DIR + '/fixtures/ddl_td_avro_parquet.txt',
'r') as file_h:
sqoop_eval_output = file_h.read()
m_sqoop_eval.return_value = sqoop_eval_output
m_schema.return_value = DDLTypes(
input_mapping=sqoop_eval_output, data_source="td",
ingest_timestamp="2016-01-01 16:47:56")
self.conn_mgr = ConnectionManager(
'database_test', 'table_test', '', 'domain', 'jdbc_url_conn_test',
'connect_factories', 'username', 'password', 'view', 'int',
'domain', 'impala_host_name', '2016-01-01 16:47:56', 'hdfs_test',
'jars_test', 'jdbc_test', 'ingestion')
test_create_hql = self.conn_mgr.get_incremental_hql()
with open(BASE_DIR + '/expected/incr_create_table.hql', 'r') as file_h:
expected_create_table_hql = file_h.read()
self.assertTrue(
self.compare_xml(expected_create_table_hql, test_create_hql))
def test_get_types_schema_mysql(self):
"""test avro parquet hql for mysql"""
with open(BASE_DIR + '/fixtures/ddl_mysql.txt',
'r') as file_h:
sqoop_eval_output = file_h.read()
self.ddl_types = DDLTypes(input_mapping=sqoop_eval_output,
data_source="mysql",
ingest_timestamp="2016-01-01 16:47:56")
with open(BASE_DIR + '/expected/mysql_avro_parquet_select.txt',
'r') as file_h:
expected_select_hql = file_h.read()
with open(BASE_DIR + '/expected/mysql_avro_parquet_create.txt',
'r') as file_h:
expected_create_hql = file_h.read()
test_select_hql, test_create_hql = self.ddl_types.create_ddl_mappings()
self.assertTrue(self.compare_xml(test_select_hql, expected_select_hql))
self.assertTrue(self.compare_xml(test_create_hql, expected_create_hql))
def test_get_types_schema_ora(self):
"""test avro parquet hql for oracle"""
with open(BASE_DIR + '/fixtures/ddl_ora_avro_parquet.txt',
'r') as file_h:
sqoop_eval_output = file_h.read()
self.ddl_types = DDLTypes(input_mapping=sqoop_eval_output,
data_source="oracle",
ingest_timestamp="2016-01-01 16:47:56")
with open(BASE_DIR + '/expected/ora_avro_parquet_hql.txt',
'r') as file_h:
expected_select_hql = file_h.read()
with open(BASE_DIR + '/expected/ora_avro_parquet_create.txt',
'r') as file_h:
expected_create_hql = file_h.read()
test_select_hql, test_create_hql = self.ddl_types.create_ddl_mappings()
self.assertTrue(self.compare_xml(expected_select_hql, test_select_hql))
self.assertTrue(self.compare_xml(expected_create_hql, test_create_hql))
def test_get_types_schema_db2(self):
"""test avro parquet hql for db2"""
with open(BASE_DIR + '/fixtures/ddl_db2_avro_parquet.txt',
'r') as file_h:
sqoop_eval_output = file_h.read()
self.ddl_types = DDLTypes(input_mapping=sqoop_eval_output,
data_source="db2",
ingest_timestamp="2016-01-01 16:47:56")
with open(BASE_DIR + '/expected/db2_avro_parquet_hql.txt',
'r') as file_h:
expected_select_hql = file_h.read()
with open(BASE_DIR + '/expected/db2_avro_parquet_create.txt',
'r') as file_h:
expected_create_hql = file_h.read()
test_select_hql, test_create_hql = self.ddl_types.create_ddl_mappings()
self.assertTrue(self.compare_xml(test_select_hql, expected_select_hql))
self.assertTrue(self.compare_xml(test_create_hql, expected_create_hql))
def test_get_types_schema_td(self):
"""test avro parquet hql for td"""
with open(BASE_DIR + '/fixtures/ddl_td_avro_parquet.txt',
'r') as file_h:
sqoop_eval_output = file_h.read()
self.ddl_types = DDLTypes(input_mapping=sqoop_eval_output,
data_source="td",
ingest_timestamp="2016-01-01 16:47:56")
with open(BASE_DIR + '/expected/td_avro_parquet_hql.txt',
'r') as file_h:
expected_select_hql = file_h.read()
with open(BASE_DIR + '/expected/td_avro_parquet_create.txt',
'r') as file_h:
expected_create_hql = file_h.read()
test_select_hql, test_create_hql = self.ddl_types.create_ddl_mappings()
self.assertTrue(self.compare_xml(expected_select_hql, test_select_hql))
self.assertTrue(self.compare_xml(expected_create_hql, test_create_hql))
def test_get_types_schema_mapping(self):
with open(BASE_DIR + '/fixtures/ddl_td_avro_parquet.txt',
'r') as file_h:
sqoop_eval_output = file_h.read()
self.ddl_types = DDLTypes(input_mapping=sqoop_eval_output,
data_source="td",
ingest_timestamp="2016-01-01 16:47:56")
td_column_type_1 = self.ddl_types.get_types_schema('A1')
self.assertTrue(self.compare_xml(td_column_type_1, 'STRING'))
td_column_type_2 = self.ddl_types.get_types_schema('TS')
self.assertTrue(self.compare_xml(td_column_type_2, 'TIMESTAMP'))
td_column_type_3 = self.ddl_types.get_types_schema('N')
self.assertTrue(self.compare_xml(td_column_type_3, 'INT'))
ora_column_type_1 = self.ddl_types.get_types_schema(
'INTERVAL DAY(2) TO SECOND(6)')
self.assertTrue(self.compare_xml(ora_column_type_1, 'STRING'))
ora_column_type_2 = self.ddl_types.get_types_schema('URITYPE')
self.assertTrue(self.compare_xml(ora_column_type_2, 'STRING'))
def test_format_select(self):
"""test avro parquet hql for db2"""
with open(BASE_DIR + '/fixtures/ddl_ora_avro_underscore.txt',
'r') as file_h:
sqoop_eval_output = file_h.read()
self.ddl_types = DDLTypes(input_mapping=sqoop_eval_output,
data_source="oracle",
ingest_timestamp="2016-01-01 16:47:56")
row_with = "_testColumnName"
row_without = "testColumnName"
row_with_spaces = "row with spaces"
int_expected = 'CAST(`i_testColumnName` AS INT) AS `_testColumnName`'
string_expected = ('CAST(`testColumnName` AS STRING)'
' AS `testColumnName`')
timestamp_expected = "CAST(from_unixtime(unix_timestamp" \
"(`testColumnName`, " \
"'yyyy-MM-dd')) " \
"AS TIMESTAMP) " \
"AS `testColumnName`"
string_spaces_exepected = "CAST(`rowwithspaces` AS STRING) " \
"AS `rowwithspaces`"
self.assertEqual(self.ddl_types.format_select(row_with, 'INT'),
int_expected)
self.assertEqual(self.ddl_types.format_select(row_without, 'STRING'),
string_expected)
self.assertEqual(
self.ddl_types.format_select(row_without, 'TIMESTAMP',
timestamp_format='yyyy-MM-dd'),
timestamp_expected)
self.assertEqual(
self.ddl_types.format_select(row_with_spaces, 'STRING'),
string_spaces_exepected)
def test_remove_6_in_timestamp_columns(self):
with open(BASE_DIR + '/fixtures/ddl_td_avro_parquet.txt',
'r') as file_h:
sqoop_eval_output = file_h.read()
self.ddl_types = DDLTypes(input_mapping=sqoop_eval_output,
data_source="td",
ingest_timestamp="2016-01-01 16:47:56")
timestamp_column = "TIMESTAMP(6)"
timestamp_column_standard = "TIMESTAMP"
self.assertEqual(
self.ddl_types.remove_6_in_timestamp_columns(timestamp_column),
['TIMESTAMP', '6'])
self.assertEqual(self.ddl_types.remove_6_in_timestamp_columns(
timestamp_column_standard), ['TIMESTAMP'])
def test_func_of_all_special_types(self):
with open(BASE_DIR + '/fixtures/ddl_td_avro_parquet.txt',
'r') as file_h:
sqoop_eval_output = file_h.read()
self.ddl_types = DDLTypes(input_mapping=sqoop_eval_output,
data_source="td",
ingest_timestamp="2016-01-01 16:47:56")
function_names = self.ddl_types.func_of_all_special_types().keys()
self.assertEqual(function_names,
['CHAR', 'TIMESTAMP', 'DECIMAL', 'VARCHAR',
'TIMESTAMP_DATE'])
def test_timestamp_date_td(self):
with open(BASE_DIR + '/fixtures/ddl_td_avro_parquet.txt',
'r') as file_h:
sqoop_eval_output = file_h.read()
self.ddl_types = DDLTypes(input_mapping=sqoop_eval_output,
data_source="td",
ingest_timestamp="2016-01-01 16:47:56")
timestamp_output = self.ddl_types.timestamp_date(
['', 'TEST_TIMESTAMP', 'AT', ''])
self.assertEqual(timestamp_output,
"CAST(from_unixtime(unix_timestamp"
"(`TEST_TIMESTAMP`, 'yyyy-MM-dd')) "
"AS TIMESTAMP) AS `TEST_TIMESTAMP`")
def test_timestamp_date_ora(self):
with open(BASE_DIR + '/fixtures/ddl_td_avro_parquet.txt',
'r') as file_h:
sqoop_eval_output = file_h.read()
self.ddl_types = DDLTypes(input_mapping=sqoop_eval_output,
data_source="oracle",
ingest_timestamp="2016-01-01 16:47:56")
timestamp_output = self.ddl_types.timestamp_date(
['', '_TEST_TIMESTAMP', 'TIMESTAMP', ''])
timestamp_result = "CAST(from_unixtime(unix_timestamp" \
"(`i_TEST_TIMESTAMP`, 'yyyy-MM-dd HH:mm:ss')) " \
"AS TIMESTAMP) AS `_TEST_TIMESTAMP`"
self.assertEqual(timestamp_output, timestamp_result)
def test_timestamp_t_td(self):
with open(BASE_DIR + '/fixtures/ddl_td_avro_parquet.txt',
'r') as file_h:
sqoop_eval_output = file_h.read()
self.ddl_types = DDLTypes(input_mapping=sqoop_eval_output,
data_source="td",
ingest_timestamp="2016-01-01 16:47:56")
timestamp_output = self.ddl_types.timestamp_t(
['', 'TEST_TIMESTAMP', 'AT', ''])
self.assertEqual(timestamp_output,
'CAST(`TEST_TIMESTAMP` AS TIMESTAMP) '
'AS `TEST_TIMESTAMP`')
def test_char_t_td(self):
with open(BASE_DIR + '/fixtures/ddl_td_avro_parquet.txt',
'r') as file_h:
sqoop_eval_output = file_h.read()
self.ddl_types = DDLTypes(input_mapping=sqoop_eval_output,
data_source="td",
ingest_timestamp="2016-01-01 16:47:56")
char_output = self.ddl_types.char_t(
['', 'TEST_CHAR', 'CF', '', '', '5'])
self.assertEqual(char_output,
'CAST(`TEST_CHAR` AS CHAR(5)) AS `TEST_CHAR`')
def test_varchar_t_td(self):
with open(BASE_DIR + '/fixtures/ddl_td_avro_parquet.txt',
'r') as file_h:
sqoop_eval_output = file_h.read()
self.ddl_types = DDLTypes(input_mapping=sqoop_eval_output,
data_source="td",
ingest_timestamp="2016-01-01 16:47:56")
varchar_output = self.ddl_types.varchar_t(
['', 'TEST_VARCHAR', 'CV', '', '', '5'])
self.assertEqual(
varchar_output, 'CAST(`TEST_VARCHAR` AS VARCHAR(5))'
' AS `TEST_VARCHAR`')
def test_decimal_t_td(self):
with open(BASE_DIR + '/fixtures/ddl_td_avro_parquet.txt',
'r') as file_h:
sqoop_eval_output = file_h.read()
self.ddl_types = DDLTypes(input_mapping=sqoop_eval_output,
data_source="td",
ingest_timestamp="2016-01-01 16:47:56")
decimal_output = self.ddl_types.decimal_t(
['', 'TEST_DECIMAL', 'D', '', '', '5', '1', '2.5'])
self.assertEqual(decimal_output,
'CAST(`TEST_DECIMAL` AS DECIMAL(1,2.5)) '
'AS `TEST_DECIMAL`')
def test_convert_spl_chars(self):
with open(BASE_DIR + '/fixtures/ddl_td_avro_parquet.txt',
'r') as file_h:
sqoop_eval_output = file_h.read()
self.ddl_types = DDLTypes(input_mapping=sqoop_eval_output,
data_source="td",
ingest_timestamp="2016-01-01 16:47:56")
column_with_spaces = 'column with spaces'
column_without_spaces = 'columnWithoutSpaces'
removed_spaces = self.ddl_types.convert_spl_chars(column_with_spaces)
not_removed = self.ddl_types.convert_spl_chars(column_without_spaces)
self.assertEqual(removed_spaces, 'columnwithspaces')
self.assertEqual(not_removed, column_without_spaces)
@patch.object(ConnectionManager, 'get_schema', autospec=True)
def test_create_externaltable(self, m_get_schema):
"""test incremental views"""
jdbcurl = ('jdbc:sqlserver://fake.sqlserver:1433;database=fake_database;encrypt=true;'
'trustServerCertificate=true')
m_get_schema.return_value = MagicMock(spec=DDLTypes)
conn_mgr = ConnectionManager(
'fake_database', 'mock_table', 'dbo', 'mock_domain', jdbcurl,
'mock_connection_factories', 'mock_db_username',
'mock_password_file', 'fake_view_open|fake_view_im', 'int', 'domain',
'impala_host_name',
'2016-01-01 16:47:56',
'mock_hdfs_loc', 'jars_test', 'hive_jdbc_url', 'ingestion')
conn_mgr.ddl_types.get_create_hql.return_value = 'mock_create_hql'
view_hql, impl_txt, views_info_txt = \
conn_mgr.create_externaltable('incremental')
with open(BASE_DIR + '/expected/incremental_views_sqlserver.hql',
'r') as file_h:
expected_view_hql = file_h.read()
with open(BASE_DIR + '/expected/views_invalidate_sqlserver.txt',
'r') as file_h:
expected_impl_txt = file_h.read()
with open(BASE_DIR + '/expected/views_info_txt.txt', 'r') as file_h:
expected_views_info_txt = file_h.read()
self.assertTrue(self.compare_xml(expected_view_hql, view_hql))
self.assertTrue(self.compare_xml(expected_impl_txt, impl_txt))
self.assertTrue(self.compare_xml(
expected_views_info_txt, views_info_txt))
@patch.object(ConnectionManager, 'get_schema', autospec=True)
@patch('lib.ingest.parquet_opt_ddl_time.ImpalaConnect', autospec=True)
def test_create_externaltable_perf(self, mock_impala_conn, m_get_schema):
"""test perf views with domain"""
jdbcurl = ('jdbc:sqlserver://fake.sqlserver:1433;database=fake_database;encrypt=true;'
'trustServerCertificate=true')
m_get_schema.return_value = MagicMock(spec=DDLTypes)
mock_impala_conn.run_query.side_effect = [[['test0']],
None,
[['test0']],
None,
[['test0']],
None,
[['test0']],
None]
conn_mgr = ConnectionManager(
'fake_database', 'mock_table', 'dbo', 'mock_domain', jdbcurl,
'mock_connection_factories', 'mock_db_username',
'mock_password_file', 'fake_view_open|fake_view_im|pharmacy', 'PERF',
'pharmacy', 'impala_host_name', '2016-01-01 16:47:56',
'mock_hdfs_loc', 'jars_test', 'hive_jdbc_url', 'ingestion')
conn_mgr.ddl_types.get_create_hql.return_value = 'mock_create_hql'
view_hql, impl_txt, views_info_txt = \
conn_mgr.create_externaltable('incremental')
with open(BASE_DIR + '/expected/views_sqlserver_perf.hql',
'r') as file_h:
expected_view_hql = file_h.read()
with open(BASE_DIR + '/expected/views_invalidate_sqlserver_domain.txt',
'r') as file_h:
expected_impl_txt = file_h.read()
with open(BASE_DIR + '/expected/views_info_txt_domain.txt', 'r') as \
file_h:
expected_views_info_txt = file_h.read()
self.assertTrue(self.compare_xml(expected_view_hql, view_hql))
self.assertTrue(self.compare_xml(expected_impl_txt, impl_txt))
self.assertTrue(self.compare_xml(
expected_views_info_txt, views_info_txt))
@patch.object(ConnectionManager, 'get_schema', autospec=True)
@patch('lib.ingest.parquet_opt_ddl_time.ImpalaConnect', autospec=True)
def test_create_externaltable_perf_wd(self, mock_impala_conn, m_get_schema):
"""test perf views without domain """
jdbcurl = ('jdbc:sqlserver://fake.sqlserver:1433;database=fake_database;encrypt=true;'
'trustServerCertificate=true')
m_get_schema.return_value = MagicMock(spec=DDLTypes)
mock_impala_conn.run_query.side_effect = [[['test0']],
None,
[['test0']],
None,
[['test0']],
None,
[['test0']],
None]
conn_mgr = ConnectionManager(
'fake_database', 'mock_table', 'dbo', 'mock_domain', jdbcurl,
'mock_connection_factories', 'mock_db_username',
scores = (D[action,q[te]] - D[action,q[ts]]) / (te - ts)
label = [action for _ in range(len(ts))]
total_score = np.ones(len(ts)) * D[p[-1],q[-1]] / len(p)
return ts,te,scores,label,total_score
def actionPathSmoother4oneVideo(video_paths,alpha,num_action):
final_tubes = {}
final_tubes['starts'] = {}
final_tubes['ts'] = {}
final_tubes['te'] = {}
final_tubes['label'] = {}
final_tubes['path_total_score'] = {}
final_tubes['dpActionScore'] = {}
final_tubes['dpPathScore'] = {}
final_tubes['path_boxes'] = {}
final_tubes['path_scores'] = {}
action_count = 0
if len(video_paths) > 0:
for a in range(1,num_action+1):
action_paths = video_paths[a]
num_act_paths = getPathCount(action_paths)
for p in range(num_act_paths):
M = action_paths[p]['allscores'].transpose(1,0)
assert(len(M.shape) == 2)
M += 20
# refine the path
pred_path,time,D = dpEM_max(M,alpha)
Ts, Te, Scores, Label, DpPathScore = extract_action(pred_path,time,D,a)
# print("Num tubes for action",a,len(Ts))
for k in range(len(Ts)):
final_tubes['starts'][action_count] = action_paths[p]['start']
final_tubes['ts'][action_count] = Ts[k]
final_tubes['te'][action_count] = Te[k]
final_tubes['dpActionScore'][action_count] = Scores[k]
final_tubes['label'][action_count] = Label[k]
final_tubes['dpPathScore'][action_count] = DpPathScore[k]
final_tubes['path_total_score'][action_count] = torch.mean(action_paths[p]['scores'])
final_tubes['path_boxes'][action_count] = action_paths[p]['boxes']
final_tubes['path_scores'][action_count] = action_paths[p]['scores']
action_count += 1
del video_paths[a]
return final_tubes
def actionPathSmoother(allPath,alpha,num_action):
final_tubes = actionPathSmoother4oneVideo(allPath,alpha,num_action)
return final_tubes
def convert2eval(final_tubes,min_num_frames,topk):
xmld = {}
xmld['score'] = {}
xmld['nr'] = {}
xmld['class'] = {}
xmld['framenr'] = {}
xmld['boxes'] = {}
action_score = final_tubes['dpActionScore']
path_score = final_tubes['path_scores']
ts = final_tubes['ts']
starts = final_tubes['starts']
te = final_tubes['te']
act_nr = 0
for a in range(len(ts)):
act_ts = ts[a]
act_te = te[a]
act_path_scores = path_score[a]
act_scores,_ = torch.sort(act_path_scores[act_ts:act_te+1],descending=True)
topk_mean = torch.mean(act_scores[:min(topk,len(act_scores))])
bxs = final_tubes['path_boxes'][a][act_ts:act_te+1,:]
label = final_tubes['label'][a]
if topk_mean > 0 and (act_te-act_ts) > min_num_frames:
xmld['score'][act_nr] = topk_mean
xmld['nr'][act_nr] = act_nr
xmld['class'][act_nr] = label
xmld['framenr'][act_nr] = {'fnr':np.array(range(act_ts,act_te+1)) + starts[a]}
xmld['boxes'][act_nr] = {'bxs':bxs}
act_nr += 1
return xmld
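# For orientation, a hedged sketch of the dictionary convert2eval() returns; the
# field names come from the code above, the concrete values are invented:
#
#   xmld = {
#       'score':   {0: 0.87},                             # top-k mean of the path scores
#       'nr':      {0: 0},                                # running tube index
#       'class':   {0: 3},                                # predicted action label
#       'framenr': {0: {'fnr': np.array([12, 13, 14])}},  # absolute frame numbers
#       'boxes':   {0: {'bxs': boxes_12_to_14}},          # per-frame boxes of the tube
#   }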
def sort_detection(dt_tubes):
sorted_tubes = {}
sorted_tubes['score'] = {}
sorted_tubes['nr'] = {}
sorted_tubes['class'] = {}
sorted_tubes['framenr'] = {}
sorted_tubes['boxes'] = {}
num_detection = len(dt_tubes['class'])
if num_detection > 0:
scores = dt_tubes['score']
indexes = [k for k, _ in sorted(scores.items(), key=lambda item: -item[1])]
for dt in range(num_detection):
dtind = indexes[dt]
sorted_tubes['framenr'][dt] = {'fnr':dt_tubes['framenr'][dtind]['fnr']}
sorted_tubes['boxes'][dt] = {'bxs':dt_tubes['boxes'][dtind]['bxs']}
sorted_tubes['class'][dt] = dt_tubes['class'][dtind]
sorted_tubes['score'][dt] = dt_tubes['score'][dtind]
sorted_tubes['nr'][dt] = dt
return sorted_tubes
def inters_union(bounds1,bounds2):
box_a = torch.Tensor(bounds1)
box_b = torch.Tensor(bounds2)
max_xy = torch.min(box_a[2:],box_b[2:])
min_xy = torch.max(box_a[:2],box_b[:2])
inter = torch.clamp((max_xy - min_xy), min=0)
inter = inter[0] * inter[1]
area_a = ((box_a[2]-box_a[0])*(box_a[3]-box_a[1]))
area_b = ((box_b[2]-box_b[0])*(box_b[3]-box_b[1]))
union = area_a + area_b - inter
return inter / union # [A,B]
def compute_spatial_temporal_iou(gt_fnr,gt_bb,dt_fnr,dt_bb):
tgb = gt_fnr[0]
tge = gt_fnr[-1]
tdb = dt_fnr[0]
tde = dt_fnr[-1]
T_i = max(0,min(tge,tde)-max(tgb,tdb))
if T_i > 0:
T_i += 1
T_u = max(tge,tde) - min(tgb,tdb) + 1
T_iou = T_i/T_u
int_fnr = range(max(tgb,tdb),min(tge,tde)+1)
int_find_dt = []
for i in range(len(dt_fnr)):
if dt_fnr[i] in int_fnr:
int_find_dt.append(i)
int_find_gt = []
for i in range(len(gt_fnr)):
if gt_fnr[i] in int_fnr:
int_find_gt.append(i)
assert(len(int_find_gt) == len(int_find_dt))
iou = np.zeros(len(int_find_dt))
for i in range(len(int_find_dt)):
gt_bound = gt_bb[int_find_gt[i],:]
dt_bound = dt_bb[int_find_dt[i],:]
iou[i] = inters_union(gt_bound,dt_bound)
st_iou = T_iou*np.mean(iou)
else:
st_iou = 0
return st_iou
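# Hedged toy check, not part of the original file: a ground-truth tube covering
# frames 0..9 and a detection covering frames 5..15 share 5 frames over a union
# span of 16, so the temporal IoU is 5/16; with identical boxes on the shared
# frames the spatial IoU is 1, giving a spatio-temporal IoU of 0.3125.
def _st_iou_toy_example():
    gt_fnr = np.arange(0, 10)                 # ground-truth tube, frames 0..9
    dt_fnr = np.arange(5, 16)                 # detected tube, frames 5..15
    gt_bb = np.tile([0, 0, 10, 10], (10, 1))  # one identical box per frame
    dt_bb = np.tile([0, 0, 10, 10], (11, 1))
    return compute_spatial_temporal_iou(gt_fnr, gt_bb, dt_fnr, dt_bb)  # 0.3125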
def auc(fp,tp):
mfp = np.concatenate(([0.], fp, [1.]))
mtp = np.concatenate(([0.], tp, [0.]))
for i in range(mtp.size - 1, 0, -1):
mtp[i - 1] = np.maximum(mtp[i - 1], mtp[i])
i = np.where(mfp[1:] != mfp[:-1])[0]
res = np.sum((mfp[i + 1] - mfp[i]) * mtp[i + 1])
return res
def voc_ap(rec, prec, use_07_metric=False):
""" ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:False).
"""
# print('voc_ap() - use_07_metric:=' + str(use_07_metric))
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
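# Hedged usage sketch, not part of the original file: exercises voc_ap() on
# invented recall/precision values. With the default interpolation the precision
# envelope is [1.0, 1.0, 0.8, 0.6, 0.0], so the expected result is
# 0.1*1.0 + 0.3*0.8 + 0.4*0.6 + 0.2*0.0 = 0.58.
def _voc_ap_toy_example():
    rec = np.array([0.1, 0.4, 0.8])
    prec = np.array([1.0, 0.8, 0.6])
    return voc_ap(rec, prec)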
# count of detected tubes per class
cc = [0 for _ in range(len(CLASSES))]
total_num_gt_tubes = [0 for _ in range(len(CLASSES))]
IoUTHs = [0.2] + [0.5 + 0.05*i for i in range(10)]
allscore = {}
averageIoU = {}
for iouth in IoUTHs:
allscore[iouth] = {}
for a in range(len(CLASSES)):
allscore[iouth][a] = np.zeros((10000,2))
averageIoU[iouth] = np.zeros(len(CLASSES))
allscore_05_portion = {}
allscore_02_portion = {}
cc_portion = {}
for i in range(10):
allscore_05_portion[i] = {}
allscore_02_portion[i] = {}
for a in range(len(CLASSES)):
allscore_05_portion[i][a] = np.zeros((10000,2))
allscore_02_portion[i][a] = np.zeros((10000,2))
cc_portion[i] = [0 for _ in range(len(CLASSES))]
preds = {}
gts = {}
for i in range(10):
preds[i] = []
gts[i] = []
tubeGenTime = []
frameLevelTime = []
def get_PR_curve(annot, xmldata,checkpoint):
numActions = len(CLASSES)
maxscore = -10000
# annotName = annot[1][0]
action_id = annot[2][0][0][2] - 1
gt_tubes = annot[2][0]
dt_tubes = sort_detection(xmldata)
xmldata = None
num_detection = len(dt_tubes['class'])
num_gt_tubes = len(gt_tubes)
pred = -1
gt = action_id[0][0]
dt_labels = dt_tubes['class']
covered_gt_tubes = np.zeros(num_gt_tubes)
covered_gt_tubes_portion = np.zeros(num_gt_tubes)
for dtind in range(num_detection):
# frame number range
dt_fnr = dt_tubes['framenr'][dtind]['fnr']
# bounding boxes
dt_bb = dt_tubes['boxes'][dtind]['bxs']
# class label
dt_label = dt_labels[dtind] - 1
# the tube having the max score decides
# the label of the video
if dt_tubes['score'][dtind] > maxscore:
pred = dt_label
maxscore = dt_tubes['score'][dtind]
# for portion
cc_portion[checkpoint][dt_label] += 1
ioumax = -10000
maxgtind = 0
for gtind in range(num_gt_tubes):
action_id = gt_tubes[gtind][2] - 1#class
# if this gt tube is not covered and has the same label as this detected tube
if (not covered_gt_tubes_portion[gtind]) and dt_label == action_id:
gt_fnr = range(gt_tubes[gtind][0][0][0] - gt_tubes[gtind][1][0][0] + 1)
gt_bb = gt_tubes[gtind][3]
iou = compute_spatial_temporal_iou(gt_fnr,gt_bb,dt_fnr,dt_bb)
if iou > ioumax:
# find the best possible gttube based on stiou
ioumax = iou
maxgtind = gtind
if ioumax > 0.2:
covered_gt_tubes_portion[gtind] = 1
# records the score,T/F of each dt tube at every step for every class
allscore_02_portion[checkpoint][dt_label][cc_portion[checkpoint][dt_label],:] = [dt_tubes['score'][dtind],1]
else:
allscore_02_portion[checkpoint][dt_label][cc_portion[checkpoint][dt_label],:] = [dt_tubes['score'][dtind],0]
if ioumax > 0.5:
covered_gt_tubes_portion[gtind] = 1
# records the score,T/F of each dt tube at every step for every class
allscore_05_portion[checkpoint][dt_label][cc_portion[checkpoint][dt_label],:] = [dt_tubes['score'][dtind],1]
else:
allscore_05_portion[checkpoint][dt_label][cc_portion[checkpoint][dt_label],:] = [dt_tubes['score'][dtind],0]
# for portion
if checkpoint == 9:
# cc counts the number of detections per class
cc[dt_label] += 1
assert(cc[dt_label]<10000)
ioumax = -10000
maxgtind = 0
for gtind in range(num_gt_tubes):
action_id = gt_tubes[gtind][2] - 1#class
# if this gt tube is not covered and has the same label as this detected tube
if (not covered_gt_tubes[gtind]) and dt_label == action_id:
gt_fnr = range(gt_tubes[gtind][0][0][0] - gt_tubes[gtind][1][0][0] + 1)
gt_bb = gt_tubes[gtind][3]
iou = compute_spatial_temporal_iou(gt_fnr,gt_bb,dt_fnr,dt_bb)
if iou > ioumax:
# find the best possible gttube based on stiou
ioumax = iou
maxgtind = gtind
for iouth in IoUTHs:
if ioumax > iouth:
covered_gt_tubes[gtind] = 1
# records the score,T/F of each dt tube at every step for every class
allscore[iouth][dt_label][cc[dt_label],:] = [dt_tubes['score'][dtind],1]
# max iou with rest gt tubes
averageIoU[iouth][dt_label] += ioumax
else:
allscore[iouth][dt_label][cc[dt_label],:] = [dt_tubes['score'][dtind],0]
preds[checkpoint].append(pred)
gts[checkpoint].append(gt)
return int(pred==gt)
def evaluate_tubes(outfile):
actions = CLASSES
numActions = len(CLASSES)
AP = np.zeros(numActions)
AIoU = np.zeros(numActions)
AUC = np.zeros(numActions)
mAPs = []
for iouth in IoUTHs:
for a in range(numActions):
tmpscore = allscore[iouth][a][:cc[a],:].copy()
scores = tmpscore[:,0]
result = tmpscore[:,1]
si = np.argsort(-scores)
result = result[si]
fp = np.cumsum(result == 0)
tp = np.cumsum(result == 1)
fp = fp.astype(np.float64)
tp = tp.astype(np.float64)
recall = tp/float(total_num_gt_tubes[a]+1)
precision = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
AP[a] = voc_ap(recall,precision)
mAP = np.mean(AP)
mAPs.append(mAP)
ptr_str = 'iouth=0.2->0.95:' + str(mAPs) + '\n'
print(ptr_str)
outfile.write(ptr_str)
# add for 0.2,0.5 portion....todo
mAPs = []
for i in range(10):
for a in range(numActions):
tmpscore = allscore_05_portion[i][a][:cc_portion[i][a],:].copy()
scores = tmpscore[:,0]
result = tmpscore[:,1]
si = np.argsort(-scores)
result = result[si]
fp = np.cumsum(result == 0)
tp = np.cumsum(result == 1)
fp = fp.astype(np.float64)
tp = tp.astype(np.float64)
recall = tp/float(total_num_gt_tubes[a]+1)
precision = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
AP[a] = voc_ap(recall,precision)
mAP = np.mean(AP)
mAPs.append(mAP)
ptr_str = 'iouth=0.5,' + str(mAPs) + '\n'
print(ptr_str)
outfile.write(ptr_str)
mAPs = []
for i in range(10):
for a in range(numActions):
tmpscore = allscore_02_portion[i][a][:cc_portion[i][a],:].copy()
            scores = tmpscore[:,0]
# Every crownstone creates its own result map and publishes no result, so that the way the
# test data is created for each crownstone is not influenced. A confidence map holding the
# probability associated with each predicted room label is created as well.
from simulator.simulatorBases.CrownstoneCore import CrownstoneCore
from simulator.simulatorBases.GuiCrownstoneCore import GuiCrownstoneCore
import math
import operator
import string
class SimulatorCrownstone(GuiCrownstoneCore):
"""
Class variables are created here.
"""
# myValue = False
def __init__(self, id, x, y):
super().__init__(id=id, x=x, y=y)
# self.debugPrint = False
self.flag, self.label, self.counter, self.param, self.value = 0, 0, 0, 0, 0
self.radiomap, self.predictions, self.testSet, self.probabilities, self.parameters = {}, {}, {}, {}, {}
self.w, self.p, self.publish, self.resetTrainingData, self.timelimit = 0, 0, 0, 0, 0
self.Map, self.confidence_Map = {}, {}
self.nodes = 20
# def print(self, data):
# if self.debugPrint:
# print(data)
def resetState(self, resetTrainingData):
# This is an important method to reset any state the Crownstone may have so the simulation can be restarted.
# If resetTrainingData is False, you should clear all state data except that referring to the training sets.
if resetTrainingData:
self.flag, self.label, self.counter = 0, 0, 0
self.radiomap = {}
self.w, self.p, self.publish = 0, 0, 0
self.param = 1
else:
self.counter, self.param, self.value = 0, 0, 0
self.predictions, self.testSet, self.probabilities, self.parameters = {}, {}, {}, {}
self.w, self.p = 0, 0
self.publish, self.resetTrainingData = 1, 1
self.timelimit = self.time
self.flag = 2
self.nodes = 20
def tick(self, time):
roomId = 'None'
# make predictions after 0.5 sec
# the result map of each crownstone should be created after the crownstones have received the info from their neighbors
if (self.time > self.timelimit + 0.5 and self.resetTrainingData == 1):
self.timelimit = self.timelimit + 500
if len(self.testSet) != 0 and self.param == 1:
self.probabilities, self.predictions = self.Predictions_norm(self.testSet)
if self.predictions[0] == 1:
roomId = "Room 1"
elif self.predictions[0] == 2:
roomId = "Room 2"
elif self.predictions[0] == 3:
roomId = "Room 3"
elif self.predictions[0] == 4:
roomId = "Room 4"
elif self.predictions[0] == 5:
roomId = "Room 5"
elif self.predictions[0] == 6:
roomId = "Room 6"
elif self.predictions[0] == 7:
roomId = "Room 7"
if 'x' in self.debugInformation:
x = int(self.debugInformation['x'])
y = int(self.debugInformation['y'])
# construct the result map of every crownstone
for key, values in self.Map.items():
if key == x:
for ck in values.keys():
if ck == y:
self.Map[key][ck] = roomId
# construct the confidence map for every crownstone. store the probability with which the room was predicted.
for key, values in self.confidence_Map.items():
if key == x:
for ck in values.keys():
if ck == y:
self.confidence_Map[key][ck] = self.probabilities[0]
# accuracy = self.Accuracy(self.predictions)
def receiveMessage(self, data, rssi):
# print(self.time, "Crownstone", self.id, "received from crownstone", data["sender"], "with payload", data["payload"], " and rssi:", rssi)
if data["payload"] == "StartTraining":
self.label = self.label + 1
self.radiomap[self.label] = {}
self.flag = 1
# When I receive "Start training" a flag informs the crownstones to start constructing their radio maps.
if data["payload"] == "StopTraining":
self.flag = 0
if data["payload"] == "StartLocalizing":
# the parameters (mean & standard deviation) to be calculated only once. flag: self.param
self.param = 1
self.flag = 2
# both the radio map construction and the testSet construction are held in both receiveMessage and newMeasurement functions
# as the radio map of each crownstone contains information (RSSI values) received from other crownstones. Either from all the crownstones
# in the mesh network (all crownstones have the same data - highest ttl - fully connected graph) or from only their neighbours (ttl=1 - not fully connected graph).
if (self.flag == 1):
# Construction of radiomap.
if 'rssi' in data['payload']:
if data['payload']['originalSender'] not in self.radiomap[self.label]:
self.radiomap[self.label][data['payload']['originalSender']] = []
self.radiomap[self.label][data['payload']['originalSender']].append(data['payload']['rssi'])
if (self.flag == 2):
# The parameters (mean & standard deviation) of each crownstone for each room are calculated only once after the end of the training phase.
if self.param == 1:
self.parameters = self.crownParameters(self.radiomap)
# Initialization of result map for every crownstone
self.Map = {}
self.confidence_Map = {}
for self.x in range(85, 735, 10):
self.Map[self.x] = {}
self.confidence_Map[self.x] = {}
for self.y in range(85, 855, 10):
self.Map[self.x][self.y] = None
self.confidence_Map[self.x][self.y] = None
self.param = 0
if self.w == 0:
self.counter = self.counter + 1
# self.testSet[self.counter]={}
# for a complete testSet if a crownstone doesn't even scan the user, set RSSI to a really small value.
# self.testSet[self.counter][self.id]=[-100]
# Construction of testSet, the original sender of the packet is received and saved to the set.
if 'rssi' in data['payload']:
if self.counter not in self.testSet:
self.testSet[self.counter] = {}
if data['payload']['originalSender'] not in self.testSet[self.counter]:
self.testSet[self.counter][data['payload']['originalSender']] = []
self.testSet[self.counter][data['payload']['originalSender']].append(data['payload']['rssi'])
# check if the testSet is complete. The lenght of each row should be equal to the number of nodes in the mesh network.
if len(self.testSet[self.counter]) == self.nodes:
self.w = 0
else:
self.w = 1
def newMeasurement(self, data, rssi):
# print(self.time, self.id, "scans", data["address"], " with payload ", data["payload"], " and rssi:", rssi)
if (self.flag == 1):
self.sendMessage({"rssi": rssi, "originalSender": self.id}, 1)
# Construction of radio map
if self.id not in self.radiomap[self.label]:
self.radiomap[self.label][self.id] = []
self.radiomap[self.label][self.id].append(rssi)
if (self.flag == 2):
# Construction of testSet. If the crownstone is able to scan the user the flag w is set to 1, otherwise it remains 0.
# flag p is used to avoid retransmissions
if self.p == 0:
self.counter = self.counter + 1
self.w = 1
if self.counter not in self.testSet:
self.testSet[self.counter] = {}
if self.id not in self.testSet[self.counter]:
self.testSet[self.counter][self.id] = []
self.testSet[self.counter][self.id].append(rssi)
self.value = self.testSet[self.counter][self.id][0]
self.sendMessage({"rssi": rssi, "originalSender": self.id}, 1)
self.p = 1
if rssi != self.value:
self.counter = self.counter + 1
self.w = 1
if self.counter not in self.testSet:
self.testSet[self.counter] = {}
if self.id not in self.testSet[self.counter]:
self.testSet[self.counter][self.id] = []
self.testSet[self.counter][self.id].append(rssi)
self.sendMessage({"rssi": rssi, "originalSender": self.id}, 1)
def crownParameters(self, radiomap):
parameters = {}
for self.label, crowns in radiomap.items():
if self.label not in parameters:
parameters[self.label] = {}
sorted_crowns = sorted(crowns.items(), key=operator.itemgetter(0))
for crown, RSSI in sorted_crowns:
if len(RSSI) != 1:
if crown not in parameters[self.label]:
parameters[self.label][crown] = []
parameters[self.label][crown] = self.Statistics(RSSI)
return parameters
def Statistics(self, RSSI):
parameters = [self.MeanValue(RSSI), self.StandardDeviation(RSSI)]
return parameters
def MeanValue(self, rss):
mean = sum(rss) / float(len(rss))
return mean
def StandardDeviation(self, rss):
average = self.MeanValue(rss)
variance = sum([pow(RSSI - average, 2) for RSSI in rss]) / float(len(rss) - 1)
standarddev = math.sqrt(variance)
return standarddev
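    # In formula form, StandardDeviation() is the sample standard deviation with
    # Bessel's correction, std = sqrt( sum_i (RSSI_i - mean)^2 / (n - 1) ), so it
    # needs at least two RSSI samples; that is why crownParameters() above skips
    # crownstones that contributed only a single reading to a room.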
def Predictions_norm(self, testSet):
predictions = []
probabilities = []
for counter in testSet:
best_probability, room_label = self.PredictRoom_norm(testSet[counter])
# if self.publish == 1:
# if room_label == 1:
# self.publishResult("Room 1")
# elif room_label == 2:
# self.publishResult("Room 2")
# elif room_label == 3:
# self.publishResult("Room 3")
# elif room_label == 4:
# self.publishResult("Room 4")
# elif room_label == 5:
# self.publishResult("Room 5")
# elif room_label == 6:
# self.publishResult("Room 6")
# elif room_label == 7:
# self.publishResult("Room 7")
probabilities.append(best_probability)
predictions.append(room_label)
return probabilities, predictions
def PredictRoom_norm(self, testSet):
probabilities = self.RoomProbabilities_norm(testSet)
room_predicted, best_probability = None, -1
for room_label, probability in probabilities.items():
if room_predicted is None or probability > best_probability:
best_probability = probability
room_predicted = room_label
return best_probability, room_predicted
def RoomProbabilities_norm(self, testSet):
probabilities1 = {}
norm_factor = {}
norm_probabilities = {}
        for self.label, room_parameters in self.parameters.items():
probabilities1[self.label] = {}
for crown in room_parameters.items():
if crown[0] not in probabilities1[self.label]:
probabilities1[self.label][crown[0]] = []
probabilities1[self.label][crown[0]].append(1)
for node, rssi in testSet.items():
if crown[0] == node:
mean = crown[1][0]
standardev = crown[1][1]
exponent_numerator = math.pow(rssi[0] - mean, 2)
exponent_denominator = 2 * math.pow(standardev, 2)
exponent_result = math.exp((-exponent_numerator) / exponent_denominator)
prob_density = (1 / (math.sqrt(2 * math.pi) * standardev)) * exponent_result
# non-normalized probabilities
# product of our prior distribution
probabilities1[self.label][node][0] *= prob_density
# normalization_factor one for each crownstone, sum of non-normalized probabilities for all rooms
n = 1
for self.label, prob in probabilities1.items():
for node in prob.items():
if n <= len(prob):
norm_factor[node[0]] = node[1][0]
n = n + 1
else:
norm_factor[node[0]] += node[1][0]
for self.label, prob in probabilities1.items():
norm_probabilities[self.label] = 1
for node in prob.items():
norm_probabilities[self.label] *= (1 / norm_factor[node[0]]) * node[1][0]
return norm_probabilities
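    # Sketch of what the loops above compute: for a test vector of readings
    # r_1..r_N from crownstones 1..N, each room label L gets the product of the
    # per-crownstone Gaussian densities N(r_i; mean_{L,i}, std_{L,i}), and every
    # factor is divided by that crownstone's normalisation factor (the sum of its
    # densities over all rooms). PredictRoom_norm() then returns the room with
    # the largest normalised product together with that probability.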
    def Accuracy(self,
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# CODE NAME HERE
# CODE DESCRIPTION HERE
Created on 2019-08-21 at 12:28
@author: cook
"""
from astropy.table import Table
from astropy import constants as cc
from astropy import units as uu
import numpy as np
import os
from scipy.optimize import curve_fit
import warnings
from apero import core
from apero import lang
from apero.core import constants
from apero.core import math as mp
from apero.core.core import drs_log
from apero.core.core import drs_file
from apero.io import drs_data
# =============================================================================
# Define variables
# =============================================================================
__NAME__ = 'science.rv.general.py'
__INSTRUMENT__ = 'None'
# Get constants
Constants = constants.load(__INSTRUMENT__)
# Get version and author
__version__ = Constants['DRS_VERSION']
__author__ = Constants['AUTHORS']
__date__ = Constants['DRS_DATE']
__release__ = Constants['DRS_RELEASE']
# get param dict
ParamDict = constants.ParamDict
DrsFitsFile = drs_file.DrsFitsFile
# Get function string
display_func = drs_log.display_func
# Get Logging function
WLOG = drs_log.wlog
# Get the text types
TextEntry = lang.drs_text.TextEntry
TextDict = lang.drs_text.TextDict
# alias pcheck
pcheck = core.pcheck
# Speed of light
# noinspection PyUnresolvedReferences
speed_of_light_ms = cc.c.to(uu.m / uu.s).value
# noinspection PyUnresolvedReferences
speed_of_light = cc.c.to(uu.km / uu.s).value
# =============================================================================
# Define functions
# =============================================================================
def measure_fp_peaks(params, props, limit, normpercent):
"""
Measure the positions of the FP peaks
Returns the pixels positions and Nth order of each FP peak
    :param params: parameter dictionary, ParamDict containing constants
Must contain at least:
drift_peak_border_size: int, the border size (edges in
x-direction) for the FP fitting
algorithm
drift_peak_fpbox_size: int, the box half-size (in pixels) to
fit an individual FP peak to - a
gaussian will be fit to +/- this size
from the center of the FP peak
drift_peak_peak_sig_lim: dictionary, the sigma above the median
that a peak must have to be recognised
as a valid peak (before fitting a
gaussian) dictionary must have keys
equal to the lamp types (hc, fp)
drift_peak_inter_peak_spacing: int, the minimum spacing between
peaks in order to be recognised
as a valid peak (before fitting
a gaussian)
log_opt: string, log option, normally the program name
    :param props: parameter dictionary, ParamDict containing data
Must contain at least:
speref: numpy array (2D), the reference spectrum
wave: numpy array (2D), the wave solution image
lamp: string, the lamp type (either 'hc' or 'fp')
    :return props: parameter dictionary, the updated parameter dictionary
Adds/updates the following:
ordpeak: numpy array (1D), the order number for each valid FP
peak
xpeak: numpy array (1D), the central position each gaussain fit
to valid FP peak
ewpeak: numpy array (1D), the FWHM of each gaussain fit
to valid FP peak
vrpeak: numpy array (1D), the radial velocity drift for each
valid FP peak
llpeak: numpy array (1D), the delta wavelength for each valid
FP peak
amppeak: numpy array (1D), the amplitude for each valid FP peak
"""
func_name = __NAME__ + '.create_drift_file()'
# get the reference data and the wave data
speref = np.array(props['SPEREF'])
wave = props['WAVE']
# storage for order of peaks
allpeaksize = []
allordpeak = []
allxpeak = []
allewpeak = []
allvrpeak = []
allllpeak = []
allamppeak = []
alldcpeak = []
allshapepeak = []
# loop through the orders
for order_num in range(speref.shape[0]):
# storage for order of peaks
ordpeak = []
xpeak = []
ewpeak = []
vrpeak = []
llpeak = []
amppeak = []
dcpeak = []
shapepeak = []
# storage of warnings
warn_dict = dict()
# set number of peaks rejected to zero
nreject = 0
# set a counter for total number of peaks
ipeak = 0
# get the pixels for this order
tmp = np.array(speref[order_num, :])
# define indices
index = np.arange(len(tmp))
# ------------------------------------------------------------------
# normalize the spectrum
tmp = tmp / np.nanpercentile(tmp, normpercent)
# ------------------------------------------------------------------
# find the peaks
with warnings.catch_warnings(record=True) as w:
peakmask = (tmp[1:-1] > tmp[2:]) & (tmp[1:-1] > tmp[:-2])
peakpos = np.where(peakmask)[0]
# work out the FP width for this order
size = int(np.nanmedian(peakpos[1:] - peakpos[:-1]))
# ------------------------------------------------------------------
# mask for finding maximum peak
mask = np.ones_like(tmp)
# mask out the edges
mask[:size + 1] = 0
mask[-(size + 1):] = 0
# ------------------------------------------------------------------
# loop for peaks that are above a value of limit
while mp.nanmax(mask * tmp) > limit:
# --------------------------------------------------------------
# find peak along the order
maxpos = np.nanargmax(mask * tmp)
maxtmp = tmp[maxpos]
# --------------------------------------------------------------
# get the values around the max position
index_peak = index[maxpos - size: maxpos + size]
tmp_peak = tmp[maxpos - size: maxpos + size]
# --------------------------------------------------------------
# mask out this peak for next iteration of while loop
mask[maxpos - (size // 2):maxpos + (size // 2) + 1] = 0
# --------------------------------------------------------------
# return the initial guess and the best fit
p0, gg, _, warns = fit_fp_peaks(index_peak, tmp_peak, size)
# --------------------------------------------------------------
# only keep peaks within +/- 1 pixel of original peak
# (gaussian fit is to find sub-pixel value)
cond = np.abs(maxpos - gg[1]) < 1
if cond:
# work out the radial velocity of the peak
lambefore = wave[order_num, maxpos - 1]
lamafter = wave[order_num, maxpos + 1]
deltalam = lamafter - lambefore
# get the radial velocity
waveomax = wave[order_num, maxpos]
radvel = speed_of_light_ms * deltalam / (2.0 * waveomax)
# add to storage
ordpeak.append(order_num)
xpeak.append(gg[1])
ewpeak.append(gg[2])
vrpeak.append(radvel)
llpeak.append(deltalam)
amppeak.append(maxtmp)
shapepeak.append(gg[3])
dcpeak.append(gg[4])
else:
# add to rejected
nreject += 1
# iterator
ipeak += 1
# --------------------------------------------------------------
# deal with warnings
if warns is not None:
if warns in warn_dict:
warn_dict[warns] += 1
else:
warn_dict[warns] = 1
# --------------------------------------------------------------
# log how many FPs were found and how many rejected
wargs = [order_num, ipeak, nreject]
WLOG(params, '', TextEntry('40-018-00001', args=wargs))
# ------------------------------------------------------------------
# print warnings
for key in list(warn_dict.keys()):
wargs = [warn_dict[key], key]
WLOG(params, 'warning', TextEntry('00-018-00001', args=wargs))
# ------------------------------------------------------------------
# add values to all storage (and sort by xpeak)
indsort = np.argsort(xpeak)
allordpeak.append(np.array(ordpeak)[indsort])
allxpeak.append(np.array(xpeak)[indsort])
allewpeak.append(np.array(ewpeak)[indsort])
allvrpeak.append(np.array(vrpeak)[indsort])
allllpeak.append(np.array(llpeak)[indsort])
allamppeak.append(np.array(amppeak)[indsort])
allshapepeak.append(np.array(shapepeak)[indsort])
alldcpeak.append(np.array(dcpeak)[indsort])
allpeaksize.append(size)
# store values in loc
props['ORDPEAK'] = np.concatenate(allordpeak).astype(int)
props['XPEAK'] = np.concatenate(allxpeak)
props['PEAK2PEAK'] = np.concatenate(allewpeak)
props['VRPEAK'] = np.concatenate(allvrpeak)
props['LLPEAK'] = np.concatenate(allllpeak)
props['AMPPEAK'] = np.concatenate(allamppeak)
props['DCPEAK'] = np.concatenate(alldcpeak)
props['SHAPEPEAK'] = np.concatenate(allshapepeak)
props['PEAKSIZE'] = np.array(allpeaksize)
# set source
keys = ['ORDPEAK', 'XPEAK', 'PEAK2PEAK', 'VRPEAK', 'LLPEAK', 'AMPPEAK',
'DCPEAK', 'SHAPEPEAK', 'PEAKSIZE']
props.set_sources(keys, func_name)
# Log the total number of FP lines found
wargs = [len(props['XPEAK'])]
WLOG(params, 'info', TextEntry('40-018-00002', args=wargs))
# return the property parameter dictionary
return props
def fit_fp_peaks(x, y, size, return_model=False):
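    """
    Fit a single FP peak with the e.a. airy function

    :param x: numpy array (1D), pixel positions around the peak
    :param y: numpy array (1D), normalized flux values around the peak
    :param size: int, typical FP peak spacing (in pixels), used to seed the
                 period guess and its bounds
    :param return_model: bool, if True also return the evaluated model

    :return p0: list, the initial guess [amp, pos, period, shape, dc]
    :return popt: list/array, the best fit parameters (NaNs if the fit failed)
    :return pcov: array or None, the covariance matrix from curve_fit
    :return warns: string or None, any warnings raised while fitting
    :return model: numpy array (1D), the model evaluated at x (only returned
                   if return_model=True)
    """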
# storage of warnings
warns = None
    # get the airy function used for fitting FP peaks
ea_airy = mp.ea_airy_function
# set up initial guess
pnames = ['amp', 'pos', 'period', 'shape', 'dc']
# [amp, position, period, exponent, zero point]
p0 = [np.max(y) - np.min(y), np.median(x), size, 1.5,
np.max([0, np.min(y)])]
# set up the bounds
lowerbounds = [0.5 * p0[0], p0[1] - 2, 0.7 * p0[2], 1.0, 0.0]
upperbounds = [2.0 * p0[0], p0[1] + 2, 1.3 * p0[2], 10.0, 0.5 * p0[0]]
bounds = [lowerbounds, upperbounds]
# test bounds make sense
for p_it in range(len(lowerbounds)):
if lowerbounds[p_it] >= upperbounds[p_it]:
if warns is None:
warns = ''
warns += ('\nBoundError: Lower bound {0} incorrect (lower={1} '
'upper={2})'.format(pnames[p_it], lowerbounds[p_it],
upperbounds[p_it]))
if p0[p_it] < lowerbounds[p_it] or p0[p_it] > upperbounds[p_it]:
if warns is None:
warns = ''
            warns += ('\nBoundError: Initial guess for {0} out of bounds '
'(guess={1} lower={2} upper={3})'
''.format(pnames[p_it], p0[p_it],
lowerbounds[p_it], upperbounds[p_it]))
# deal with bad bounds
if warns is not None:
popt = [np.nan, np.nan, np.nan, np.nan, np.nan]
pcov = None
model = np.repeat([np.nan], len(x))
else:
        # try to fit Etienne's airy function
try:
with warnings.catch_warnings(record=True) as _:
popt, pcov = curve_fit(ea_airy, x, y, p0=p0, bounds=bounds)
model = ea_airy(x, *popt)
except ValueError as e:
# log that ydata or xdata contains NaNs
popt = [np.nan, np.nan, np.nan, np.nan, np.nan]
pcov = None
warns = '{0}: {1}'.format(type(e), e)
model = np.repeat([np.nan], len(x))
except RuntimeError as e:
popt = [np.nan, np.nan, np.nan, np.nan, np.nan]
pcov = None
warns = '{0}: {1}'.format(type(e), e)
model = np.repeat([np.nan], len(x))
# deal with returning model
if return_model:
return p0, popt, pcov, warns, model
else:
# return the guess and the best fit
return p0, popt, pcov, warns
def remove_wide_peaks(params, props, cutwidth):
"""
Remove peaks that are too wide
    :param params: parameter dictionary, ParamDict containing constants
    :param props: parameter dictionary, ParamDict containing data
            Must contain at least:
                ordpeak: numpy array (1D), the order for each valid FP peak
TimeDistributed(Dense(4096, activation="relu", kernel_regularizer=l2_reg), name="frcnn_fc1")(output)
output = TimeDistributed(Dense(4096, activation="relu", kernel_regularizer=l2_reg), name="frcnn_fc2")(output)
frcnn_cls_predictions = TimeDistributed(Dense(cfg.NUM_CLASSES, activation="softmax", kernel_regularizer=l2_reg),
name="frcnn_cls")(output)
frcnn_reg_predictions = TimeDistributed(Dense(cfg.NUM_CLASSES * 4, activation="linear", kernel_regularizer=l2_reg),
name="frcnn_reg")(output)
input_gt_boxes = Input(shape=(None, 4), name="input_gt_boxes", dtype=tf.float32)
input_gt_labels = Input(shape=(None,), name="input_gt_labels", dtype=tf.int32)
rpn_cls_actuals = Input(shape=(None, None, cfg.ANCHOR_COUNT), name="input_rpn_cls_actuals", dtype=tf.float32)
rpn_reg_actuals = Input(shape=(None, 4), name="input_rpn_reg_actuals", dtype=tf.float32)
input_gt_masks = KL.Input(shape=(None, cfg.IMG_SIZE_HEIGHT, cfg.IMG_SIZE_WIDTH), name="input_gt_masks",
dtype=tf.int32)
input_gt_seg_mask_inds = KL.Input(shape=(None,), name="input_gt_seg_mask_inds", dtype=tf.int32)
    frcnn_reg_actuals, frcnn_cls_actuals, target_masks, rois_pos = ProposalTargetLayer(cfg, name="roi_deltas")(
[roi_bboxes, input_gt_boxes, input_gt_labels, input_gt_masks, input_gt_seg_mask_inds])
# only for positive rois
roi_align_mask = RoiAlign(cfg, name="roi_align_mask")([feature_extractor.output, rois_pos]) # rois_pos])
pool5_2_conv = KL.TimeDistributed(KL.Conv2D(512, (1, 1), activation="relu", padding="valid",
kernel_initializer=KI.RandomNormal(stddev=0.01),
bias_initializer="zeros",
kernel_regularizer=l2_reg),
name="mask_conv_1")(roi_align_mask)
pool5_2_conv2 = KL.TimeDistributed(KL.Conv2D(512, (3, 3), activation="relu", padding="same",
kernel_initializer=KI.RandomNormal(stddev=0.01),
bias_initializer="zeros",
kernel_regularizer=l2_reg),
name="mask_conv_2")(pool5_2_conv)
mask_deconv1 = KL.TimeDistributed(KL.Conv2DTranspose(256, (8, 8), padding="same", strides=(4, 4)), # , groups=256),
# kernel_initializer=BilinearInitializer(filter_size=8, num_channels_in=512, num_channels_out=256)),
name='mask_deconv_1')(pool5_2_conv2)
pool5_2_conv3 = KL.TimeDistributed(KL.Conv2D(512, (3, 3), activation="relu", padding="same",
kernel_initializer=KI.RandomNormal(stddev=0.01),
bias_initializer="zeros",
kernel_regularizer=l2_reg),
name="mask_conv_3")(mask_deconv1)
pool5_2_conv4 = KL.TimeDistributed(KL.Conv2D(512, (3, 3), activation="relu", padding="same",
kernel_initializer=KI.RandomNormal(stddev=0.01),
bias_initializer="zeros",
kernel_regularizer=l2_reg),
name="mask_conv_4")(pool5_2_conv3)
mask_deconv2 = KL.TimeDistributed(KL.Conv2DTranspose(256, (8, 8), padding="same", strides=(4, 4)), # , groups=256),
# kernel_initializer=BilinearInitializer(filter_size=8, num_channels_in=512, num_channels_out=256)),
name='mask_deconv_2')(pool5_2_conv4)
pool5_2_conv5 = KL.TimeDistributed(KL.Conv2D(512, (3, 3), activation="relu", padding="same",
kernel_initializer=KI.RandomNormal(stddev=0.01),
bias_initializer="zeros",
kernel_regularizer=l2_reg),
name="mask_conv_5")(mask_deconv2)
pool5_2_conv6 = KL.TimeDistributed(KL.Conv2D(512, (3, 3), activation="relu", padding="same",
kernel_initializer=KI.RandomNormal(stddev=0.01),
bias_initializer="zeros",
kernel_regularizer=l2_reg),
name="mask_conv_6")(pool5_2_conv5)
mask_deconv3 = KL.TimeDistributed(KL.Conv2DTranspose(256, (4, 4), padding="same", strides=(2, 2)), # , groups=256),
# kernel_initializer=BilinearInitializer(filter_size=4, num_channels_in=512, num_channels_out=256)),
name='mask_deconv_3')(pool5_2_conv6)
mask_prob_output = KL.TimeDistributed(
KL.Conv2D(cfg.NUM_AFFORDANCE_CLASSES, (1, 1), padding="valid", activation="softmax",
kernel_initializer=KI.RandomNormal(stddev=0.01),
bias_initializer="zeros",
kernel_regularizer=l2_reg),
name="mask_score")(mask_deconv3)
frcnn_model = Model(inputs=[input_img, input_gt_boxes, input_gt_labels, rpn_reg_actuals, rpn_cls_actuals, input_gt_masks, input_gt_seg_mask_inds],
                        outputs=[frcnn_reg_predictions, frcnn_cls_predictions, frcnn_reg_actuals, frcnn_cls_actuals, roi_bboxes, target_masks, rois_pos])
return frcnn_model
# WITH RESIZING
# class ProposalLayer(Layer):
# """Generating bounding boxes from rpn predictions.
# First calculating the boxes from predicted deltas and label probs.
# Then applied non max suppression and selecting "train or test nms_topn" boxes.
# inputs:
# rpn_bbox_deltas = (batch_size, img_output_height, img_output_width, anchor_count * [delta_y, delta_x, delta_h, delta_w])
# img_output_height and img_output_width are calculated to the base model feature map
# rpn_labels = (batch_size, img_output_height, img_output_width, anchor_count)
#
# outputs:
# roi_bboxes = (batch_size, train/test_nms_topn, [y1, x1, y2, x2])
# """
#
# def __init__(self, anchors, mode, cfg, **kwargs):
# super(ProposalLayer, self).__init__(**kwargs)
# self.cfg = cfg
# self.mode = mode
# self.anchors = tf.constant(anchors, dtype=tf.float32)
#
# def get_config(self):
# config = super(ProposalLayer, self).get_config()
# config.update({"cfg": self.cfg, "anchors": self.anchors, "mode": self.mode})
# return config
#
# def call(self, inputs):
# rpn_bbox_deltas = inputs[0]
# rpn_labels = inputs[1]
# anchors = self.anchors
#
# pre_nms_topn = self.cfg.PRE_NMS_TOPN
# post_nms_topn = self.cfg.TRAIN_NMS_TOPN if self.mode == "training" else self.cfg.TEST_NMS_TOPN
# nms_iou_threshold = self.cfg.NMS_IOU_THRESHOLD
# variances = self.cfg.VARIANCES
# total_anchors = anchors.shape[0]
# batch_size = tf.shape(rpn_bbox_deltas)[0]
# rpn_bbox_deltas = tf.reshape(rpn_bbox_deltas, (batch_size, total_anchors, 4))
# rpn_labels = tf.reshape(rpn_labels, (batch_size, total_anchors))
#
# rpn_bbox_deltas *= variances
# rpn_bboxes = bbox_utils.get_bboxes_from_deltas(anchors, rpn_bbox_deltas)
#
# _, pre_indices = tf.nn.top_k(rpn_labels, pre_nms_topn)
#
# pre_roi_bboxes = tf.gather(rpn_bboxes, pre_indices, batch_dims=1)
# pre_roi_labels = tf.gather(rpn_labels, pre_indices, batch_dims=1)
#
# pre_roi_bboxes = tf.reshape(pre_roi_bboxes, (batch_size, pre_nms_topn, 1, 4))
# pre_roi_labels = tf.reshape(pre_roi_labels, (batch_size, pre_nms_topn, 1))
#
# roi_bboxes, _, _, _ = bbox_utils.non_max_suppression(pre_roi_bboxes, pre_roi_labels,
# max_output_size_per_class=post_nms_topn,
# max_total_size=post_nms_topn,
# iou_threshold=nms_iou_threshold)
# return tf.stop_gradient(roi_bboxes)
# WITH RESIZING
# class ProposalTargetLayer(Layer):
# """Calculating faster rcnn actual bounding box deltas and labels.
# This layer only running on the training phase.
# inputs:
# roi_bboxes = (batch_size, nms_topn, [y1, x1, y2, x2])
# gt_boxes = (batch_size, padded_gt_boxes_size, [y1, x1, y2, x2])
# gt_labels = (batch_size, padded_gt_boxes_size)
# gt_masks = (batch_size, num_masks, img_height, img_width)
#
# outputs:
# roi_bbox_deltas = (batch_size, train_nms_topn * total_labels, [delta_y, delta_x, delta_h, delta_w])
# roi_bbox_labels = (batch_size, train_nms_topn, total_labels)
# """
#
# def __init__(self, cfg, **kwargs):
# super(ProposalTargetLayer, self).__init__(**kwargs)
# self.cfg = cfg
# # self.img_height = tf.cast(img_height, tf.float32)
# # self.img_width = tf.cast(img_width, tf.float32)
#
# def get_config(self):
# config = super(ProposalTargetLayer, self).get_config()
# config.update({"cfg": self.cfg, "img_height": self.img_height, "img_width": self.img_width})
# return config
#
# def call(self, inputs):
# roi_bboxes = inputs[0]
# gt_boxes = inputs[1]
# gt_labels = inputs[2]
# if self.cfg.MASK_REG:
# gt_masks = inputs[3]
# gt_seg_mask_inds = inputs[4]
#
# total_labels = self.cfg.NUM_CLASSES
# # total_pos_bboxes = int(self.cfg.RPN_BATCHSIZE * self.cfg.RPN_FG_FRACTION)
# # total_neg_bboxes = self.cfg.RPN_BATCHSIZE - total_pos_bboxes
# # TODO: try to increment number of positive rois
# # Negative ROIs. Add enough to maintain positive:negative ratio.
# r = 1.0 / self.cfg.ROI_POSITIVE_RATIO
# total_pos_bboxes = int(self.cfg.TRAIN_ROIS_PER_IMAGE * self.cfg.ROI_POSITIVE_RATIO)
# total_neg_bboxes = int(r * float(total_pos_bboxes)) - total_pos_bboxes
#
# variances = self.cfg.VARIANCES
# # batch_size, total_bboxes = tf.shape(roi_bboxes)[0], tf.shape(roi_bboxes)[1]
#
# # Calculate iou values between each bboxes and ground truth boxes
# iou_map, _ = bbox_utils.generate_iou_map(roi_bboxes, gt_boxes)
# # Get max index value for each row
# max_indices_each_gt_box = tf.argmax(iou_map, axis=2, output_type=tf.int32)
# # IoU map has iou values for every gt boxes and we merge these values column wise
# merged_iou_map = tf.reduce_max(iou_map, axis=2)
# #
# pos_mask = tf.greater(merged_iou_map, self.cfg.TRAIN_FG_THRES)
# pos_mask = train_utils.randomly_select_xyz_mask(pos_mask, tf.constant([total_pos_bboxes], dtype=tf.int32))
#
# neg_mask = tf.logical_and(tf.less(merged_iou_map, self.cfg.TRAIN_BG_THRESH_HI), tf.greater(merged_iou_map, self.cfg.TRAIN_BG_THRESH_LO))
# neg_mask = train_utils.randomly_select_xyz_mask(neg_mask, tf.constant([total_neg_bboxes], dtype=tf.int32))
#
# # take corresponding gt boxes and gt labels to rois
# gt_boxes_map = tf.gather(gt_boxes, max_indices_each_gt_box, batch_dims=1)
# expanded_gt_boxes = tf.where(tf.expand_dims(pos_mask, axis=-1), gt_boxes_map, tf.zeros_like(gt_boxes_map))
#
# gt_labels_map = tf.gather(gt_labels, max_indices_each_gt_box, batch_dims=1)
# pos_gt_labels = tf.where(pos_mask, gt_labels_map, tf.constant(-1, dtype=tf.int32))
# neg_gt_labels = tf.cast(neg_mask, dtype=tf.int32)
# expanded_gt_labels = tf.cast(pos_gt_labels + neg_gt_labels, dtype=tf.int32)
# # (batch_size, num_rois, 4)
# roi_bbox_deltas = bbox_utils.get_deltas_from_bboxes(roi_bboxes, expanded_gt_boxes) / variances
#
# # roi_bbox_labels = expanded_gt_labels
#
# # Transform to one hot representation (batch_size, num_rois, num_classes)
# roi_bbox_labels = tf.one_hot(expanded_gt_labels, total_labels)
# # scatter_indices = tf.tile(tf.expand_dims(roi_bbox_labels, -1), (1, 1, 1, 4))
# # roi_bbox_deltas = scatter_indices * tf.expand_dims(roi_bbox_deltas, -2)
# # roi_bbox_deltas = tf.reshape(roi_bbox_deltas, (batch_size, total_bboxes * total_labels, 4))
#
# if self.cfg.MASK_REG:
# # Take only positive rois for mask training and corresponding roi_gt_boxes
# pos_indices = tf.where(pos_mask)
# positive_count = tf.shape(pos_indices)[0]
# positive_rois = tf.gather_nd(roi_bboxes, pos_indices)
# roi_gt_boxes = tf.gather_nd(gt_boxes_map, pos_indices)
#
# y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)
# y1t, x1t, y2t, x2t = tf.split(roi_gt_boxes, 4, axis=1)
#
# img_w = float(self.cfg.IMG_SIZE_WIDTH)
# img_h = float(self.cfg.IMG_SIZE_HEIGHT)
# # sanity check
# x1 = tf.minimum(img_w - 1, tf.maximum(0.0, x1))
# y1 = tf.minimum(img_h - 1, tf.maximum(0.0, y1))
# x2 = tf.minimum(img_w - 1, tf.maximum(0.0, x2))
# y2 = tf.minimum(img_h - 1, tf.maximum(0.0, y2))
# x1t = tf.minimum(img_w - 1, tf.maximum(0.0, x1t))
# y1t = tf.minimum(img_h - 1, tf.maximum(0.0, y1t))
# x2t = tf.minimum(img_w - 1, tf.maximum(0.0, x2t))
# y2t = tf.minimum(img_h - 1, tf.maximum(0.0, y2t))
#
# w = (x2 - x1) + 1
# h = (y2 - y1) + 1
#
# # compute overlap between roi coordinate and gt_roi coordinate TODO: use overlap function?
# x1o = tf.maximum(x1, x1t)
# y1o = tf.maximum(y1, y1t)
# x2o = tf.minimum(x2, x2t)
# y2o = tf.minimum(y2, y2t)
#
# if positive_count != 0:
# # Calculate labels in original mask -> gt_masks=(batch_size, num_masks, img_height, img_width)
# original_affordance_labels = tf.unique(tf.reshape(gt_masks, [-1]))
# original_affordance_labels = tf.sort(original_affordance_labels.y)
#
# # filter indices of gt boxes
# indices_pos_gt_boxes = tf.boolean_mask(max_indices_each_gt_box, pos_mask)
#
# # mask associated wrt to true bbox (batch_size, positive_rois, mask_size, mask_size)
# gt_mask = tf.gather(gt_masks, indices_pos_gt_boxes, axis=1)
#
# gt_mask = tf.cast(tf.expand_dims(gt_mask, axis=4), tf.float32)
# y1o = tf.squeeze(y1o, axis=1)
# x1o = tf.squeeze(x1o, axis=1)
# y2o = tf.squeeze(y2o, axis=1)
# x2o = tf.squeeze(x2o, axis=1)
#
# # create boxes to crop and indexes where each mask has its own box
# boxes = tf.cast(tf.stack([y1o, x1o, y2o, x2o], axis=1), tf.float32)
# box_index = tf.range(positive_count)
#
# # remove batch dim -> needed for crop and resize op
# gt_mask = tf.squeeze(gt_mask, axis=0)
#
# # crop and resize the masks individually
# positive_masks = self._crop_and_resize_masks(gt_mask, boxes, positive_rois, positive_count, original_affordance_labels)
#
# # Add batch dim
# positive_masks = tf.expand_dims(positive_masks, axis=0)
# positive_rois = tf.expand_dims(positive_rois, axis=0)
# masks = positive_masks # tf.concat([positive_masks, negative_masks], axis=0)
# else:
# positive_rois = tf.expand_dims(positive_rois, axis=0)
# masks = tf.constant(0, dtype=tf.int32, shape=[1, 0, self.cfg.TRAIN_MASK_SIZE, self.cfg.TRAIN_MASK_SIZE])
#
# return tf.stop_gradient(roi_bbox_deltas), tf.stop_gradient(roi_bbox_labels), tf.stop_gradient(masks), \
# tf.stop_gradient(positive_rois)
#
# return tf.stop_gradient(roi_bbox_deltas), tf.stop_gradient(roi_bbox_labels)
#
# def _crop_and_resize_masks(self, masks, overlapping_boxes, rois, positive_count, original_aff_labels):
# # overlapping_boxes = tf.cast(bbox_utils.denormalize_bboxes(overlapping_boxes, self.img_height, self.img_width), tf.int32)
# # rois = tf.cast(bbox_utils.denormalize_bboxes(rois, self.img_height, self.img_width), tf.int32)
# overlapping_boxes = tf.cast(bbox_utils.denormalize_bboxes(overlapping_boxes, self.cfg.IMG_SIZE_HEIGHT, self.cfg.IMG_SIZE_WIDTH), tf.int32)
# rois = tf.cast(bbox_utils.denormalize_bboxes(rois, self.cfg.IMG_SIZE_HEIGHT, self.cfg.IMG_SIZE_WIDTH), tf.int32)
#
# # overlapping_boxes = tf.cast(overlapping_boxes, tf.int32)
# # rois = tf.cast(rois, tf.int32)
#
# num_masks = tf.shape(masks)[0]
# final_masks = tf.zeros((num_masks, self.cfg.TRAIN_MASK_SIZE, self.cfg.TRAIN_MASK_SIZE))
# for i in range(num_masks):
# mask = masks[i]
#
# # get roi and overlap area coordinates
# y1, x1, y2, x2 = tf.split(rois[i], 4, axis=0)
# y1, x1, y2, x2 = tf.squeeze(y1), tf.squeeze(x1), tf.squeeze(y2), tf.squeeze(x2)
#
# y1o, x1o, y2o, x2o = tf.split(overlapping_boxes[i], 4, axis=0)
# y1o, x1o, y2o, x2o = tf.squeeze(y1o), tf.squeeze(x1o), tf.squeeze(y2o), tf.squeeze(x2o)
#
# # take overlap area between gt_bbox and roi
# overlapping_mask_area = mask[y1o:y2o, x1o:x2o]
#
# # calculate offsets with 0 above and in the left of the overlapping area
# offset_height = y1o - y1
# offset_width = x1o - x1
#
# # calculate roi height and width
# target_height = y2 - y1 + 1
# target_width = x2 - x1 + 1
#
# roi_mask = tf.image.pad_to_bounding_box(overlapping_mask_area, offset_height, offset_width, target_height, target_width)
#
# # # add overlapping area inside the roi and resize to mask size
# # roi_mask[(y1o - y1):(y2o - y1), (x1o - x1):(x2o - x1)] = overlapping_mask_area
# roi_mask = tf.image.resize(roi_mask, [self.cfg.TRAIN_MASK_SIZE, self.cfg.TRAIN_MASK_SIZE], method='bilinear')
#
# # Create a structure
* square_sum) ** beta)
expect(expected, onp.nn.lrn(onp.array(x), size=nsize))
@pytest.mark.parametrize("type_a", [np.float32])
def test_lrn(type_a):
alpha = 0.0002
beta = 0.5
bias = 2.0
nsize = 3
x = np.random.randn(5, 5, 5, 5).astype(type_a)
square_sum = np.zeros((5, 5, 5, 5)).astype(type_a)
for n, c, h, w in np.ndindex(x.shape):
        square_sum[n, c, h, w] = sum(
            x[n,
              max(0, c - int(math.floor((nsize - 1) / 2))):
              min(5, c + int(math.ceil((nsize - 1) / 2)) + 1),
              h, w] ** 2)
expected = x / ((bias + (alpha / nsize) * square_sum) ** beta)
expect(
expected, onp.nn.lrn(
onp.array(x),
size=nsize, alpha=alpha, beta=beta, bias=bias))
@pytest.mark.parametrize("type_a", [np.float32])
def test_lstm(type_a):
x = np.array([[[1., 2.]], [[3., 4.]], [[5., 6.]]]).astype(type_a)
input_size = 2
hidden_size = 3
weight_scale = 0.1
number_of_gates = 4
W = weight_scale * np.ones((1, number_of_gates * hidden_size,
input_size)).astype(type_a)
R = weight_scale * np.ones((1, number_of_gates * hidden_size,
hidden_size)).astype(type_a)
expected_y, expected_yh = LSTM_Helper(X=x, W=W, R=R).step()
y, yh, _ = onp.nn.lstm(onp.array(x), onp.array(W), onp.array(R),
hidden_size=hidden_size)
expect(expected_y.astype(type_a), y.numpy())
expect(expected_yh.astype(type_a), yh.numpy())
@pytest.mark.parametrize("type_a", [np.float32])
def test_lstm_initial_bias(type_a):
x = np.array([[[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]]).astype(type_a)
input_size = 3
hidden_size = 4
weight_scale = 0.1
custom_bias = 0.1
number_of_gates = 4
W = weight_scale * np.ones((1, number_of_gates * hidden_size,
input_size)).astype(type_a)
R = weight_scale * np.ones((1, number_of_gates * hidden_size,
hidden_size)).astype(type_a)
W_B = custom_bias * np.ones((1,
number_of_gates * hidden_size)).astype(type_a)
R_B = np.zeros((1, number_of_gates * hidden_size)).astype(type_a)
B = np.concatenate((W_B, R_B), 1)
expected_y, expected_yh = LSTM_Helper(X=x, W=W, R=R, B=B).step()
y, yh, _ = onp.nn.lstm(onp.array(x), onp.array(W), onp.array(R),
b=onp.array(B), hidden_size=hidden_size)
expect(expected_y.astype(type_a), y.numpy())
expect(expected_yh.astype(type_a), yh.numpy())
@pytest.mark.parametrize("type_a", [np.float32])
@pytest.mark.parametrize("type_b", [np.int32])
def test_lstm_peepholes(type_a, type_b):
x = np.array([[[1., 2., 3., 4.], [5., 6., 7., 8.]]]).astype(type_a)
input_size = 4
hidden_size = 3
weight_scale = 0.1
number_of_gates = 4
number_of_peepholes = 3
W = weight_scale * np.ones((1, number_of_gates * hidden_size,
input_size)).astype(type_a)
R = weight_scale * np.ones((1, number_of_gates * hidden_size,
hidden_size)).astype(type_a)
B = np.zeros((1, 2 * number_of_gates * hidden_size)).astype(type_a)
seq_lens = np.repeat(x.shape[0], x.shape[1]).astype(type_b)
init_h = np.zeros((1, x.shape[1], hidden_size)).astype(type_a)
init_c = np.zeros((1, x.shape[1], hidden_size)).astype(type_a)
P = weight_scale * np.ones((1,
number_of_peepholes * hidden_size)).astype(type_a)
expected_y, expected_yh = LSTM_Helper(
X=x, W=W, R=R, B=B, P=P, initial_c=init_c, initial_h=init_h).step()
y, yh, _ = onp.nn.lstm(onp.array(x), onp.array(W), onp.array(R),
b=onp.array(B), P=onp.array(P),
sequence_lengths=onp.array(seq_lens),
hidden_size=hidden_size)
expect(expected_y.astype(np.float32), y.numpy())
expect(expected_yh.astype(np.float32), yh.numpy())
@pytest.mark.parametrize("type_a", [np.float32])
def test_leakyrelu(type_a):
x = np.array([-1, 0, 1], dtype=type_a)
expected = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * 0.1
result = onp.nn.leakyrelu(onp.array(x), alpha=0.1)
expect(expected, result.numpy())
x = np.random.randn(3, 4, 5).astype(type_a)
expected = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * 0.1
result = onp.nn.leakyrelu(onp.array(x), alpha=0.1)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [np.float32])
def test_leakyrelu_default(type_a):
x = np.random.randn(3, 4, 5).astype(type_a)
expected = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * 0.01
result = onp.nn.leakyrelu(onp.array(x))
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", float_types)
def test_logsoftmax(type_a):
x = np.array([[-1, 0, 1]]).astype(type_a)
expected = np.array([[-2.4076061, -1.407606, -0.407606]]).astype(type_a)
result = onp.nn.logsoftmax(onp.array(x))
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", float_types)
def test_logsoftmax_axis(type_a):
def logsoftmax(x, axis=-1):
x_max = np.max(x, axis=axis, keepdims=True)
tmp = np.exp(x - x_max)
s = np.sum(tmp, axis=axis, keepdims=True)
return (x - x_max) - np.log(s)
x = np.array([[0, 1, 2, 3], [10000, 10001, 10002, 10003]]
).astype(type_a)
expected = logsoftmax(x)
result = onp.nn.logsoftmax(onp.array(x))
expect(expected, result.numpy())
x = np.abs(np.random.randn(3, 4, 5).astype(type_a))
expected = logsoftmax(x, axis=0)
result = onp.nn.logsoftmax(onp.array(x), axis=0)
expect(expected, result.numpy())
expected = logsoftmax(x, axis=1)
result = onp.nn.logsoftmax(onp.array(x), axis=1)
expect(expected, result.numpy())
expected = logsoftmax(x, axis=2)
result = onp.nn.logsoftmax(onp.array(x), axis=2)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int8, np.uint8])
def test_maxpool_1d_default(type_a):
x = np.random.randn(1, 3, 32).astype(type_a)
x_shape = np.shape(x)
kernel_shape = [2]
strides = [1]
out_shape = get_pool_output_shape(
'VALID', x_shape[2:],
kernel_shape, strides)
padded = x
expected = pool_reference(
padded, x_shape, kernel_shape, strides, out_shape, [0],
'MAX').astype(type_a)
result, _ = onp.nn.maxpool(onp.array(x), kernel_shape=kernel_shape)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int8, np.uint8])
def test_maxpool_2d_ceil(type_a):
x = np.array([[[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
]]]).astype(type_a)
expected = np.array([[[
[11, 12],
[15, 16]]]]).astype(type_a)
result, _ = onp.nn.maxpool(
onp.array(x),
kernel_shape=(3, 3),
strides=(2, 2),
ceil_mode=True)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int8, np.uint8])
def test_maxpool_2d_default(type_a):
x = np.random.randn(1, 3, 32, 32).astype(type_a)
x_shape = np.shape(x)
kernel_shape = (2, 2)
strides = (1, 1)
out_shape = get_pool_output_shape(
'VALID', x_shape[2:],
kernel_shape, strides)
padded = x
expected = pool_reference(padded, x_shape, kernel_shape,
strides, out_shape, (0, 0), 'MAX').astype(type_a)
result, _ = onp.nn.maxpool(onp.array(x), kernel_shape=kernel_shape)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int8, np.uint8])
def test_maxpool_2d_dilations(type_a):
x = np.array([[[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
]]]).astype(type_a)
expected = np.array([[[
[11, 12],
[15, 16]]]]).astype(type_a)
result, _ = onp.nn.maxpool(
onp.array(x),
kernel_shape=(2, 2),
strides=(1, 1),
dilations=(2, 2))
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int8, np.uint8])
def test_maxpool_2d_pads(type_a):
x = np.random.randn(1, 3, 28, 28).astype(type_a)
x_shape = np.shape(x)
kernel_shape = (3, 3)
strides = (1, 1)
pad_bottom = pad_top = pad_right = pad_left = 2
pad_shape = [pad_top + pad_bottom, pad_left + pad_right]
out_shape = get_pool_output_shape('VALID', np.add(
x_shape[2:], pad_shape), kernel_shape, strides)
padded = np.pad(
x.astype(np.float64), ((0, 0),
(0, 0),
(pad_top, pad_bottom),
(pad_left, pad_right)),
mode='constant', constant_values=np.nan)
expected = pool_reference(padded, x_shape, kernel_shape,
strides, out_shape, pad_shape, 'MAX').astype(type_a)
result, _ = onp.nn.maxpool(
onp.array(x),
kernel_shape=kernel_shape, strides=strides, pads=(2, 2, 2, 2))
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int8, np.uint8])
def test_maxpool_2d_precomputed_pads(type_a):
x = np.array([[[
[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15],
[16, 17, 18, 19, 20],
[21, 22, 23, 24, 25],
]]]).astype(type_a)
expected = np.array([[[
[13, 14, 15, 15, 15],
[18, 19, 20, 20, 20],
[23, 24, 25, 25, 25],
[23, 24, 25, 25, 25],
[23, 24, 25, 25, 25]]]]).astype(type_a)
result, _ = onp.nn.maxpool(
onp.array(x),
kernel_shape=(5, 5),
pads=(2, 2, 2, 2))
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int8, np.uint8])
def test_maxpool_2d_precomputed_same_upper(type_a):
x = np.array([[[
[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15],
[16, 17, 18, 19, 20],
[21, 22, 23, 24, 25],
]]]).astype(type_a)
expected = np.array([[[[7, 9, 10],
[17, 19, 20],
[22, 24, 25]]]]).astype(type_a)
result, _ = onp.nn.maxpool(
onp.array(x),
kernel_shape=(3, 3),
strides=(2, 2),
auto_pad='SAME_UPPER')
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int8, np.uint8])
def test_maxpool_2d_precomputed_strides(type_a):
x = np.array([[[
[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15],
[16, 17, 18, 19, 20],
[21, 22, 23, 24, 25],
]]]).astype(type_a)
expected = np.array([[[[7, 9],
[17, 19]]]]).astype(type_a)
result, _ = onp.nn.maxpool(
onp.array(x),
kernel_shape=(2, 2),
strides=(2, 2))
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int8, np.uint8])
def test_maxpool_2d_same_lower(type_a):
x = np.random.randn(1, 3, 32, 32).astype(type_a)
x_shape = np.shape(x)
kernel_shape = (2, 2)
strides = (1, 1)
out_shape = get_pool_output_shape(
'SAME_LOWER', x_shape[2:],
kernel_shape, strides)
pad_shape = get_pool_pad_shape(
'SAME_LOWER', x_shape[2:],
kernel_shape, strides, out_shape)
pad_bottom = pad_shape[0] // 2
pad_top = pad_shape[0] - pad_bottom
pad_right = pad_shape[1] // 2
pad_left = pad_shape[1] - pad_right
padded = np.pad(
x.astype(np.float64), ((0, 0),
(0, 0),
(pad_top, pad_bottom),
(pad_left, pad_right)),
mode='constant', constant_values=np.nan)
expected = pool_reference(padded, x_shape, kernel_shape,
strides, out_shape, pad_shape, 'MAX').astype(type_a)
result, _ = onp.nn.maxpool(
onp.array(x),
kernel_shape=kernel_shape, auto_pad="SAME_LOWER")
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int8, np.uint8])
def test_maxpool_2d_same_upper(type_a):
x = np.random.randn(1, 3, 32, 32).astype(type_a)
x_shape = np.shape(x)
kernel_shape = (2, 2)
strides = (1, 1)
out_shape = get_pool_output_shape(
'SAME_UPPER', x_shape[2:],
kernel_shape, strides)
pad_shape = get_pool_pad_shape(
'SAME_UPPER', x_shape[2:],
kernel_shape, strides, out_shape)
pad_top = pad_shape[0] // 2
pad_bottom = pad_shape[0] - pad_top
pad_left = pad_shape[1] // 2
pad_right = pad_shape[1] - pad_left
padded = np.pad(
x.astype(np.float64), ((0, 0),
(0, 0),
(pad_top, pad_bottom),
(pad_left, pad_right)),
mode='constant', constant_values=np.nan)
expected = pool_reference(padded, x_shape, kernel_shape,
strides, out_shape, pad_shape, 'MAX').astype(type_a)
result, _ = onp.nn.maxpool(
onp.array(x),
kernel_shape=kernel_shape, auto_pad="SAME_UPPER")
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int8, np.uint8])
def test_maxpool_2d_strides(type_a):
x = np.random.randn(1, 3, 32, 32).astype(type_a)
x_shape = np.shape(x)
kernel_shape = (5, 5)
strides = (3, 3)
out_shape = get_pool_output_shape(
'VALID', x_shape[2:],
kernel_shape, strides)
padded = x
expected = pool_reference(
padded, x_shape, kernel_shape, strides, out_shape, (0, 0),
'MAX').astype(type_a)
result, _ = onp.nn.maxpool(
onp.array(x), kernel_shape=kernel_shape, strides=strides)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int8, np.uint8])
def test_maxpool_3d_default(type_a):
x = np.random.randn(1, 3, 32, 32, 32).astype(type_a)
x_shape = np.shape(x)
kernel_shape = (2, 2, 2)
strides = [1, 1, 1]
out_shape = get_pool_output_shape(
'VALID', x_shape[2:],
kernel_shape, strides)
padded = x
expected = pool_reference(
padded, x_shape, kernel_shape, strides, out_shape, (0, 0, 0),
'MAX').astype(type_a)
result, _ = onp.nn.maxpool(
onp.array(x), kernel_shape=kernel_shape)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int8, np.uint8])
def test_maxpool_with_argmax_2d_precomputed_pads(type_a):
x = np.array([[[
[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15],
[16, 17, 18, 19, 20],
[21, 22, 23, 24, 25],
]]]).astype(type_a)
y_expected = np.array([[[
[13, 14, 15, 15, 15],
[18, 19, 20, 20, 20],
[23, 24, 25, 25, 25],
        [23, 24,
    Animation230 = 0x81E6
Animation231 = 0x81E7
Animation232 = 0x81E8
Animation233 = 0x81E9
Animation234 = 0x81EA
Animation235 = 0x81EB
Animation236 = 0x81EC
Animation237 = 0x81ED
Animation238 = 0x81EE
Animation239 = 0x81EF
Animation240 = 0x81F0
Animation241 = 0x81F1
Animation242 = 0x81F2
Animation243 = 0x81F3
Animation244 = 0x81F4
Animation245 = 0x81F5
Animation246 = 0x81F6
Animation247 = 0x81F7
Animation248 = 0x81F8
Animation249 = 0x81F9
Animation250 = 0x81FA
Animation251 = 0x81FB
Animation252 = 0x81FC
Animation253 = 0x81FD
Animation254 = 0x81FE
Animation255 = 0x81FF
# FancyLedA List
FancyLedA0 = 0x8200
FancyLedA1 = 0x8201
FancyLedA2 = 0x8202
FancyLedA3 = 0x8203
FancyLedA4 = 0x8204
FancyLedA5 = 0x8205
FancyLedA6 = 0x8206
FancyLedA7 = 0x8207
FancyLedA8 = 0x8208
FancyLedA9 = 0x8209
FancyLedA10 = 0x820A
FancyLedA11 = 0x820B
FancyLedA12 = 0x820C
FancyLedA13 = 0x820D
FancyLedA14 = 0x820E
FancyLedA15 = 0x820F
FancyLedA16 = 0x8210
FancyLedA17 = 0x8211
FancyLedA18 = 0x8212
FancyLedA19 = 0x8213
FancyLedA20 = 0x8214
FancyLedA21 = 0x8215
FancyLedA22 = 0x8216
FancyLedA23 = 0x8217
FancyLedA24 = 0x8218
FancyLedA25 = 0x8219
FancyLedA26 = 0x821A
FancyLedA27 = 0x821B
FancyLedA28 = 0x821C
FancyLedA29 = 0x821D
FancyLedA30 = 0x821E
FancyLedA31 = 0x821F
FancyLedA32 = 0x8220
FancyLedA33 = 0x8221
FancyLedA34 = 0x8222
FancyLedA35 = 0x8223
FancyLedA36 = 0x8224
FancyLedA37 = 0x8225
FancyLedA38 = 0x8226
FancyLedA39 = 0x8227
FancyLedA40 = 0x8228
FancyLedA41 = 0x8229
FancyLedA42 = 0x822A
FancyLedA43 = 0x822B
FancyLedA44 = 0x822C
FancyLedA45 = 0x822D
FancyLedA46 = 0x822E
FancyLedA47 = 0x822F
FancyLedA48 = 0x8230
FancyLedA49 = 0x8231
FancyLedA50 = 0x8232
FancyLedA51 = 0x8233
FancyLedA52 = 0x8234
FancyLedA53 = 0x8235
FancyLedA54 = 0x8236
FancyLedA55 = 0x8237
FancyLedA56 = 0x8238
FancyLedA57 = 0x8239
FancyLedA58 = 0x823A
FancyLedA59 = 0x823B
FancyLedA60 = 0x823C
FancyLedA61 = 0x823D
FancyLedA62 = 0x823E
FancyLedA63 = 0x823F
FancyLedA64 = 0x8240
FancyLedA65 = 0x8241
FancyLedA66 = 0x8242
FancyLedA67 = 0x8243
FancyLedA68 = 0x8244
FancyLedA69 = 0x8245
FancyLedA70 = 0x8246
FancyLedA71 = 0x8247
FancyLedA72 = 0x8248
FancyLedA73 = 0x8249
FancyLedA74 = 0x824A
FancyLedA75 = 0x824B
FancyLedA76 = 0x824C
FancyLedA77 = 0x824D
FancyLedA78 = 0x824E
FancyLedA79 = 0x824F
FancyLedA80 = 0x8250
FancyLedA81 = 0x8251
FancyLedA82 = 0x8252
FancyLedA83 = 0x8253
FancyLedA84 = 0x8254
FancyLedA85 = 0x8255
FancyLedA86 = 0x8256
FancyLedA87 = 0x8257
FancyLedA88 = 0x8258
FancyLedA89 = 0x8259
FancyLedA90 = 0x825A
FancyLedA91 = 0x825B
FancyLedA92 = 0x825C
FancyLedA93 = 0x825D
FancyLedA94 = 0x825E
FancyLedA95 = 0x825F
FancyLedA96 = 0x8260
FancyLedA97 = 0x8261
FancyLedA98 = 0x8262
FancyLedA99 = 0x8263
FancyLedA100 = 0x8264
FancyLedA101 = 0x8265
FancyLedA102 = 0x8266
FancyLedA103 = 0x8267
FancyLedA104 = 0x8268
FancyLedA105 = 0x8269
FancyLedA106 = 0x826A
FancyLedA107 = 0x826B
FancyLedA108 = 0x826C
FancyLedA109 = 0x826D
FancyLedA110 = 0x826E
FancyLedA111 = 0x826F
FancyLedA112 = 0x8270
FancyLedA113 = 0x8271
FancyLedA114 = 0x8272
FancyLedA115 = 0x8273
FancyLedA116 = 0x8274
FancyLedA117 = 0x8275
FancyLedA118 = 0x8276
FancyLedA119 = 0x8277
FancyLedA120 = 0x8278
FancyLedA121 = 0x8279
FancyLedA122 = 0x827A
FancyLedA123 = 0x827B
FancyLedA124 = 0x827C
FancyLedA125 = 0x827D
FancyLedA126 = 0x827E
FancyLedA127 = 0x827F
FancyLedA128 = 0x8280
FancyLedA129 = 0x8281
FancyLedA130 = 0x8282
FancyLedA131 = 0x8283
FancyLedA132 = 0x8284
FancyLedA133 = 0x8285
FancyLedA134 = 0x8286
FancyLedA135 = 0x8287
FancyLedA136 = 0x8288
FancyLedA137 = 0x8289
FancyLedA138 = 0x828A
FancyLedA139 = 0x828B
FancyLedA140 = 0x828C
FancyLedA141 = 0x828D
FancyLedA142 = 0x828E
FancyLedA143 = 0x828F
FancyLedA144 = 0x8290
FancyLedA145 = 0x8291
FancyLedA146 = 0x8292
FancyLedA147 = 0x8293
FancyLedA148 = 0x8294
FancyLedA149 = 0x8295
FancyLedA150 = 0x8296
FancyLedA151 = 0x8297
FancyLedA152 = 0x8298
FancyLedA153 = 0x8299
FancyLedA154 = 0x829A
FancyLedA155 = 0x829B
FancyLedA156 = 0x829C
FancyLedA157 = 0x829D
FancyLedA158 = 0x829E
FancyLedA159 = 0x829F
FancyLedA160 = 0x82A0
FancyLedA161 = 0x82A1
FancyLedA162 = 0x82A2
FancyLedA163 = 0x82A3
FancyLedA164 = 0x82A4
FancyLedA165 = 0x82A5
FancyLedA166 = 0x82A6
FancyLedA167 = 0x82A7
FancyLedA168 = 0x82A8
FancyLedA169 = 0x82A9
FancyLedA170 = 0x82AA
FancyLedA171 = 0x82AB
FancyLedA172 = 0x82AC
FancyLedA173 = 0x82AD
FancyLedA174 = 0x82AE
FancyLedA175 = 0x82AF
FancyLedA176 = 0x82B0
FancyLedA177 = 0x82B1
FancyLedA178 = 0x82B2
FancyLedA179 = 0x82B3
FancyLedA180 = 0x82B4
FancyLedA181 = 0x82B5
FancyLedA182 = 0x82B6
FancyLedA183 = 0x82B7
FancyLedA184 = 0x82B8
FancyLedA185 = 0x82B9
FancyLedA186 = 0x82BA
FancyLedA187 = 0x82BB
FancyLedA188 = 0x82BC
FancyLedA189 = 0x82BD
FancyLedA190 = 0x82BE
FancyLedA191 = 0x82BF
FancyLedA192 = 0x82C0
FancyLedA193 = 0x82C1
FancyLedA194 = 0x82C2
FancyLedA195 = 0x82C3
FancyLedA196 = 0x82C4
FancyLedA197 = 0x82C5
FancyLedA198 = 0x82C6
FancyLedA199 = 0x82C7
FancyLedA200 = 0x82C8
FancyLedA201 = 0x82C9
FancyLedA202 = 0x82CA
FancyLedA203 = 0x82CB
FancyLedA204 = 0x82CC
FancyLedA205 = 0x82CD
FancyLedA206 = 0x82CE
FancyLedA207 = 0x82CF
FancyLedA208 = 0x82D0
FancyLedA209 = 0x82D1
FancyLedA210 = 0x82D2
FancyLedA211 = 0x82D3
FancyLedA212 = 0x82D4
FancyLedA213 = 0x82D5
FancyLedA214 = 0x82D6
FancyLedA215 = 0x82D7
FancyLedA216 = 0x82D8
FancyLedA217 = 0x82D9
FancyLedA218 = 0x82DA
FancyLedA219 = 0x82DB
FancyLedA220 = 0x82DC
FancyLedA221 = 0x82DD
FancyLedA222 = 0x82DE
FancyLedA223 = 0x82DF
FancyLedA224 = 0x82E0
FancyLedA225 = 0x82E1
FancyLedA226 = 0x82E2
FancyLedA227 = 0x82E3
FancyLedA228 = 0x82E4
FancyLedA229 = 0x82E5
FancyLedA230 = 0x82E6
FancyLedA231 = 0x82E7
FancyLedA232 = 0x82E8
FancyLedA233 = 0x82E9
FancyLedA234 = 0x82EA
FancyLedA235 = 0x82EB
FancyLedA236 = 0x82EC
FancyLedA237 = 0x82ED
FancyLedA238 = 0x82EE
FancyLedA239 = 0x82EF
FancyLedA240 = 0x82F0
FancyLedA241 = 0x82F1
FancyLedA242 = 0x82F2
FancyLedA243 = 0x82F3
FancyLedA244 = 0x82F4
FancyLedA245 = 0x82F5
FancyLedA246 = 0x82F6
FancyLedA247 = 0x82F7
FancyLedA248 = 0x82F8
FancyLedA249 = 0x82F9
FancyLedA250 = 0x82FA
FancyLedA251 = 0x82FB
FancyLedA252 = 0x82FC
FancyLedA253 = 0x82FD
FancyLedA254 = 0x82FE
FancyLedA255 = 0x82FF
# FancyLedB List
FancyLedB0 = 0x8300
FancyLedB1 = 0x8301
FancyLedB2 = 0x8302
FancyLedB3 = 0x8303
FancyLedB4 = 0x8304
FancyLedB5 = 0x8305
FancyLedB6 = 0x8306
FancyLedB7 = 0x8307
FancyLedB8 = 0x8308
FancyLedB9 = 0x8309
FancyLedB10 = 0x830A
FancyLedB11 = 0x830B
FancyLedB12 = 0x830C
FancyLedB13 = 0x830D
FancyLedB14 = 0x830E
FancyLedB15 = 0x830F
FancyLedB16 = 0x8310
FancyLedB17 = 0x8311
FancyLedB18 = 0x8312
FancyLedB19 = 0x8313
FancyLedB20 = 0x8314
FancyLedB21 = 0x8315
FancyLedB22 = 0x8316
FancyLedB23 = 0x8317
FancyLedB24 = 0x8318
FancyLedB25 = 0x8319
FancyLedB26 = 0x831A
FancyLedB27 = 0x831B
FancyLedB28 = 0x831C
FancyLedB29 = 0x831D
FancyLedB30 = 0x831E
FancyLedB31 = 0x831F
FancyLedB32 = 0x8320
FancyLedB33 = 0x8321
FancyLedB34 = 0x8322
FancyLedB35 = 0x8323
FancyLedB36 = 0x8324
FancyLedB37 = 0x8325
FancyLedB38 = 0x8326
FancyLedB39 = 0x8327
FancyLedB40 = 0x8328
FancyLedB41 = 0x8329
FancyLedB42 = 0x832A
FancyLedB43 = 0x832B
FancyLedB44 = 0x832C
FancyLedB45 = 0x832D
FancyLedB46 = 0x832E
FancyLedB47 = 0x832F
FancyLedB48 = 0x8330
FancyLedB49 = 0x8331
FancyLedB50 = 0x8332
FancyLedB51 = 0x8333
FancyLedB52 = 0x8334
FancyLedB53 = 0x8335
FancyLedB54 = 0x8336
FancyLedB55 = 0x8337
FancyLedB56 = 0x8338
FancyLedB57 = 0x8339
FancyLedB58 = 0x833A
FancyLedB59 = 0x833B
FancyLedB60 = 0x833C
FancyLedB61 = 0x833D
FancyLedB62 = 0x833E
FancyLedB63 = 0x833F
FancyLedB64 = 0x8340
FancyLedB65 = 0x8341
FancyLedB66 = 0x8342
FancyLedB67 = 0x8343
FancyLedB68 = 0x8344
FancyLedB69 = 0x8345
FancyLedB70 = 0x8346
FancyLedB71 = 0x8347
FancyLedB72 = 0x8348
FancyLedB73 = 0x8349
FancyLedB74 = 0x834A
FancyLedB75 = 0x834B
FancyLedB76 = 0x834C
FancyLedB77 = 0x834D
FancyLedB78 = 0x834E
FancyLedB79 = 0x834F
FancyLedB80 = 0x8350
FancyLedB81 = 0x8351
FancyLedB82 = 0x8352
FancyLedB83 = 0x8353
FancyLedB84 = 0x8354
FancyLedB85 = 0x8355
FancyLedB86 = 0x8356
FancyLedB87 = 0x8357
FancyLedB88 = 0x8358
FancyLedB89 = 0x8359
FancyLedB90 = 0x835A
FancyLedB91 = 0x835B
FancyLedB92 = 0x835C
FancyLedB93 = 0x835D
FancyLedB94 = 0x835E
FancyLedB95 = 0x835F
FancyLedB96 = 0x8360
FancyLedB97 = 0x8361
FancyLedB98 = 0x8362
FancyLedB99 = 0x8363
FancyLedB100 = 0x8364
FancyLedB101 = 0x8365
FancyLedB102 = 0x8366
FancyLedB103 = 0x8367
FancyLedB104 = 0x8368
FancyLedB105 = 0x8369
FancyLedB106 = 0x836A
FancyLedB107 = 0x836B
FancyLedB108 = 0x836C
FancyLedB109 = 0x836D
FancyLedB110 = 0x836E
FancyLedB111 = 0x836F
FancyLedB112 = 0x8370
FancyLedB113 = 0x8371
FancyLedB114 = 0x8372
FancyLedB115 = 0x8373
FancyLedB116 = 0x8374
FancyLedB117 = 0x8375
FancyLedB118 = 0x8376
FancyLedB119 = 0x8377
FancyLedB120 = 0x8378
FancyLedB121 = 0x8379
FancyLedB122 = 0x837A
FancyLedB123 = 0x837B
FancyLedB124 = 0x837C
FancyLedB125 = 0x837D
FancyLedB126 = 0x837E
FancyLedB127 = 0x837F
FancyLedB128 = 0x8380
FancyLedB129 = 0x8381
FancyLedB130 = 0x8382
FancyLedB131 = 0x8383
FancyLedB132 = 0x8384
FancyLedB133 = 0x8385
FancyLedB134 = 0x8386
FancyLedB135 = 0x8387
FancyLedB136 = 0x8388
FancyLedB137 = 0x8389
FancyLedB138 = 0x838A
FancyLedB139 = 0x838B
FancyLedB140 = 0x838C
FancyLedB141 = 0x838D
FancyLedB142 = 0x838E
    FancyLedB143 = 0x838F
#!/usr/bin/env python
"""
Copyright (c) 2015, <NAME> <<EMAIL>>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
"""
import struct
import sys
import hashlib
import collections
import math
GLOBAL_COLOR_TABLE_SIZE = "Global Color Table Size"
COLOR_RESOLUTION = "Color Resolution"
GLOBAL_COLOR_TABLE_PRESENT = "Global Color Table Present"
GLOBAL_COLOR_TABLE_SORTED = "Global Color Table Sorted"
EXTENSION_INTRODUCER = 0x21
IMAGE_BLOCK_LABEL = 0x2c
GRAPHICAL_CONTROL_LABEL = 0xf9
BLOCK_TERMINATOR = 0x00
def parse_graphics_control_extension(content, offset):
e = collections.OrderedDict()
e["Type"] = "Graphics Control Extension"
e["Offset"] = offset
e["Size"] = struct.unpack("=B", content[2])[0]
packed_fields = struct.unpack("=B", content[3])[0]
e["Reserved Field"] = bits(7, 5, packed_fields)
e["Disposal Method"] = bits(4, 2, packed_fields)
e["User Input"] = bits(1, 1, packed_fields) == 1
e["Transparent Color"] = bits(0, 0, packed_fields) == 1
e["Delay Time"] = struct.unpack("=H", content[4:6])[0]
e["Transparent Color Index"] = struct.unpack("=B", content[6])[0]
e["Terminator"] = struct.unpack("=B", content[7])[0]
if e["Terminator"] != 0:
print bcolor.WARNING + "WARNING: Non null terminator of block" + bcolor.ENDC
return e, content[8:], offset + 8
def parse_application_extension(content, offset):
e = collections.OrderedDict()
e["Type"] = "Application Extension"
e["Offset"] = offset
e["Size"] = struct.unpack("=B", content[2])[0]
e["AppBlock"] = struct.unpack("{0}s".format(e["Size"]), content[3:3+e["Size"]])[0]
content = content[e["Size"]+3:]
offset += e["Size"] + 3
block_size = struct.unpack("=B", content[0])[0]
app_data = ""
while True:
content = content[1:]
offset += 1
app_data += content[:block_size]
content = content[block_size:]
offset += block_size
block_size = struct.unpack("=B", content[0])[0]
if block_size == 0x00:
e["AppData"] = "\n" + hexprint(app_data)
e["Entropy"] = entropy2(app_data)
return e, content[1:], offset + 1
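# Illustrative example: the common NETSCAPE2.0 looping extension is parsed by
# parse_application_extension() as Size = 11, AppBlock = "NETSCAPE2.0" and a
# single 3 byte data sub-block (0x01 followed by the loop count as a little
# endian unsigned 16 bit integer).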
def parse_image_descriptor(content, offset):
e = collections.OrderedDict()
e["Type"] = "Image Descriptor"
e["Offset"] = offset
e["Image Left"] = struct.unpack("=H", content[1:3])[0]
e["Image Top"] = struct.unpack("=H", content[3:5])[0]
e["Image Width"] = struct.unpack("=H", content[5:7])[0]
e["Image Heigth"] = struct.unpack("=H", content[7:9])[0]
packed_field = struct.unpack("=B", content[9])[0]
e["Local Color Table Flag"] = bits(7, 7, packed_field) == 1
e["Interlace Flag"] = bits(6, 6, packed_field) == 1
e["Sort Flag"] = bits(5, 5, packed_field) == 1
e["Reserved"] = bits(4, 3, packed_field)
lctValue = bits(2, 0, packed_field)
lctSize = 2**(lctValue + 1)
e["Size of Local Color Table"] = lctSize
content = content[10:]
offset += 10
if e["Local Color Table Flag"]:
ct, content, offset = get_color_table("Local", content, lctSize, offset)
else:
if lctValue > 0:
print bcolor.WARNING + "WARNING: Local Color Table Size > 0 but LCT Present == False" + bcolor.ENDC
e["Size of Local Color Table"] = bcolor.FAIL + str(lctSize) + bcolor.ENDC
ct = None
blocks, count, offset = get_image_blocks(content, offset)
e["Image Blocks"] = LocalImage(blocks, ct)
e["Entropy"] = e["Image Blocks"].entropy
return e, content[count:], offset
def get_image_blocks(content, offset):
blocks = []
count = 0
lzw_min = struct.unpack("=B", content[count])[0]
count += 1
while True:
num_bytes = struct.unpack("=B", content[count])[0]
count += 1
imagebytes = struct.unpack("={0}B".format(num_bytes), content[count:count+num_bytes])
blocks.append(ImageBlock(offset+count, lzw_min, imagebytes))
count += num_bytes
if ord(content[count]) == 0x00:
count += 1
break
return blocks, count, offset + count
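# Note on the image data layout consumed by get_image_blocks(): one LZW minimum
# code size byte, followed by data sub-blocks (a length byte then that many
# bytes of LZW compressed data), terminated by a zero length (0x00) block.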
def hexprint(mybuffer):
lines = []
while True:
line = mybuffer[:16]
mybuffer = mybuffer[16:]
if not line:
break
lines.append("{0:50}".format(" ".join("{0:02x}".format(ord(x)) for x in line)) + " " + printable(line))
return "\n".join(lines)
def printable(mybuffer):
ret = ""
for bc in mybuffer:
val = ord(bc)
if val > 31 and val < 127:
ret += chr(val)
else:
ret += "."
return ret
def parse_comment_extension(content, offset):
e = collections.OrderedDict()
e["Type"] = "Comment Extension"
e["Offset"] = offset
ascii_data = ""
bytecount = struct.unpack("=B", content[2])[0]
if bytecount == 0:
return "", content[3:]
content = content[3:]
offset += 3
while True:
ascii_data += content[:bytecount]
content = content[bytecount:]
offset += bytecount
bytecount = struct.unpack("=B", content[0])[0]
content = content[1:]
offset += 1
if bytecount == 0:
e["Comment"] = ascii_data
e["Entropy"] = entropy2(ascii_data)
print bcolor.WARNING + "INFO: File contains a comment" + bcolor.OKGREEN
md5sum = hashlib.md5(ascii_data).hexdigest()
fname = "{1}_{0}_comment.dat".format(sys.argv[1], md5sum)
print "Writing comment to {0}".format(fname) + bcolor.ENDC
open(fname, "wb").write(ascii_data)
return e, content, offset
def parse_plain_text_extension(content, offset):
e = collections.OrderedDict()
e["Type"] = "Plain Text Extension"
e["Offset"] = offset
ascii_data = ""
bytecount = struct.unpack("=B", content[2])[0]
if bytecount == 0:
return "", content[3:]
content = content[3:]
offset += 3
while True:
ascii_data += content[:bytecount]
content = content[bytecount:]
offset += bytecount
bytecount = struct.unpack("=B", content[0])[0]
content = content[1:]
offset += 1
if bytecount == 0:
e["Comment"] = ascii_data
e["Entropy"] = entropy2(ascii_data)
print bcolor.WARNING + "INFO: File contains plain text section" + bcolor.OKGREEN
md5sum = hashlib.md5(ascii_data).hexdigest()
fname = "{1}_{0}_plaintext.dat".format(sys.argv[1], md5sum)
print "Writing plaintext to {0}".format(fname) + bcolor.ENDC
open(fname, "wb").write(ascii_data)
return e, content, offset
extension = {
0xf9: parse_graphics_control_extension,
0x01: parse_plain_text_extension,
0xff: parse_application_extension,
0xfe: parse_comment_extension,
}
def get_signature(content, offset):
sig = content[:3]
if sig != "GIF":
raise BadFileFormat("No GIF signature")
return sig, content[3:], offset + 3
def get_version(content, offset):
ver = content[:3]
if ver not in ["87a", "89a"]:
raise BadFileFormat("Incorrect version signature ({0})".format(ver))
return ver, content[3:], offset + 3
def get_logical_screen(content, offset):
width = struct.unpack("=H", content[:2])[0]
height = struct.unpack("=H", content[2:4])[0]
return width, height, content[4:], offset + 4
def get_packed_fields(content, offset):
"""
<Packed Fields> = Global Color Table Flag 1 Bit
Color Resolution 3 Bits
Sort Flag 1 Bit
Size of Global Color Table 3 Bits
"""
packed_fields = struct.unpack("=B", content[0])[0]
fields = {}
fields[GLOBAL_COLOR_TABLE_PRESENT] = (bits(7, 7, packed_fields) == 1)
# Number of bits per primary color available
# to the original image, minus 1. This value represents the size of
# the entire palette from which the colors in the graphic were
# selected, not the number of colors actually used in the graphic.
# For example, if the value in this field is 3, then the palette of
# the original image had 4 bits per primary color available to create
# the image.
fields[COLOR_RESOLUTION] = bits(6, 4, packed_fields) + 1
fields[GLOBAL_COLOR_TABLE_SORTED] = (bits(3, 3, packed_fields) == 1)
# To determine that actual size of the color table,
    # raise 2 to [the value of the field + 1]
fields[GLOBAL_COLOR_TABLE_SIZE] = 2**(bits(2, 0, packed_fields)+1)
return fields, content[1:], offset + 1
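# Worked example: a packed fields byte of 0x91 (0b10010001) decodes to
#   Global Color Table Present = True   (bit 7 == 1)
#   Color Resolution           = 2      (bits 6-4 == 1, plus 1)
#   Global Color Table Sorted  = False  (bit 3 == 0)
#   Global Color Table Size    = 4      (2 ** (bits 2-0 + 1) == 2 ** 2)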
def get_background_color_index(content, offset):
return struct.unpack("=B", content[0])[0], content[1:], offset + 1
def get_pixel_asepct_ratio(content, offset):
pixel_aspect_ratio = struct.unpack("=B", content[0])[0]
# If the value of the field is not 0, this approximation of the aspect ratio
# is computed based on the formula:
# Aspect Ratio = (Pixel Aspect Ratio + 15) / 64
# The Pixel Aspect Ratio is defined to be the quotient of the pixel's
# width over its height.
if pixel_aspect_ratio != 0:
        pixel_aspect_ratio = (pixel_aspect_ratio + 15) / 64.0
return pixel_aspect_ratio, content[1:], offset + 1
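# Worked example: a stored pixel aspect ratio byte of 49 gives
# (49 + 15) / 64.0 == 1.0, i.e. square pixels.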
def pp(h):
for k, v in h.items():
if isinstance(v, dict):
pp(v)
else:
maxout = 1025 * 5
if type(v) in [list, str] and len(v) > maxout:
print "{0}: {1}...[trunkated output (total bytes: {2})]".format(k, v[:maxout], len(v))
else:
print "{0}: {1}".format(k, v)
def get_color_table(table_type, content, size, offset):
tbl = []
for i in range(size):
tbl.append(struct.unpack("=BBB", content[(i*3):(i*3)+3]))
ct = ColorTable(table_type, tbl)
return ct, content[(size*3):], offset + (size*3)
class ColorTable(object):
def __init__(self, table_type, table):
self.type = table_type
self.table = table
def __str__(self):
if len(self.table) > 3:
snip = ", ...]"
else:
snip = "]"
return "".join(["{0} Color Table: [".format(self.type),
", ".join([str(n) for n in self.table[:3]]),
snip])
def is_extension(content):
return ord(content[0]) == EXTENSION_INTRODUCER
def is_image_descriptor(content):
return ord(content[0]) == IMAGE_BLOCK_LABEL
def parse_extension(content, offset):
ext = {}
type_value = ord(content[1])
fun = extension.get(type_value, None)
if fun is None:
ext["Type"] = "UNKNOWN ({0})".format(hex(type_value))
print bcolor.FAIL + "UNKNOWN EXTENSION!!" + bcolor.ENDC
print hexprint(content[:512])
sys.exit(1)
else:
ext, content, offset = fun(content, offset)
return ext, content, offset
def bits(s, e, byte):
"""
Extract bits start, end, byte
Ex. bits(4,2,27) == 0b110 (extracting bits 4, 3 and 2)
"""
byte = byte>>e
return byte & [1, 3, 7, 15, 31, 63, 127, 255][s-e]
def parse_gif_header(data, offset):
signature, data, offset = get_signature(data, offset)
version, data, offset = get_version(data, offset)
w, h, data, offset = get_logical_screen(data, offset)
fields, data, offset = get_packed_fields(data, offset)
background_color_index, data, offset = get_background_color_index(data, | |
# htc-api/api/htc_api.py
#!flask/bin/python
from flask import Flask, jsonify, request, abort, g, send_from_directory
from flask_cors import CORS
from flask.ext.autodoc import Autodoc
from flask_cache import Cache
import logging
import re
#need simplejson to deal with Postgres Decimal types
import simplejson as json
#NOTE: may need to run on Linux: "ln -s /usr/local/pgsql/lib/libpq.so.5 /usr/lib64/libpq.so.5"
import psycopg2 as pg
import psycopg2.pool as pgp
import sys
import time
import postgis2geojson as p2g
from psycopg2.extras import RealDictCursor
app = Flask(__name__)
# define the cache config, register the cache instance, and bind it to the app
cache = Cache(app,config={'CACHE_TYPE': 'simple'})
CORS(app)
auto = Autodoc(app)
global pool
global log
#Postgres connection management
def setup_pool():
global pool
with open('htc_login.txt') as f:
#each of these is expected to appear on a separate line
host = f.readline().rstrip()
port = f.readline().rstrip()
db = f.readline().rstrip()
user = f.readline().rstrip()
pw = f.readline().rstrip()
pool = pgp.ThreadedConnectionPool(20, 100, host=host, port=port, database=db, user=user, password=pw)
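# Expected htc_login.txt layout (one value per line, in this order; the values
# below are placeholders):
#   db.example.org
#   5432
#   htc
#   htc_user
#   secret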
#get current db connection if holding one, otherwise get a new one from the pool
def get_db_con():
global pool
max_attempts = 10
con = getattr(g, '_database', None)
if con is None:
#Need to get a connection, use a try loop to handle pool depletions a bit better
#Otherwise psycopg2.pool throws exception and server returns 500 error to client
        for attempt in range(1, max_attempts + 1):
try:
con = g._database = pool.getconn()
if (attempt > 1):
log.debug("connection newly acquired from pool, attempt=%s" % attempt)
return con
except:
#On any errors, add exponentially increasing time delays.
#This seems to handle at least 30X the pool size in requests without hard errors.
e = sys.exc_info()[0]
log.error("exception during connection attempt=%s: %s" % (attempt, e))
if (attempt == max_attempts):
#give up!
raise
time.sleep(attempt**2)
else:
log.debug("connection reused from session variable.")
con.autocommit = True
return con
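# Note on get_db_con(): the retry delays grow as attempt ** 2 (1, 4, 9, ... 81
# seconds), so a fully depleted pool is retried for roughly 285 seconds in
# total before the final exception is re-raised.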
#Automatically return db connections
@app.teardown_appcontext
def return_db_con(exception):
global pool
con = getattr(g, '_database', None)
if con is not None:
pool.putconn(con)
#log.debug("connection returned to pool.")
#format simple data for return as JSON
def getData(conn, query, params=None):
"Use this for non-geometry SELECTs, produces plain json based on DB field names"
with conn.cursor(cursor_factory=RealDictCursor) as cur:
if (params):
cur.execute(query, params)
else:
cur.execute(query)
return json.dumps(cur.fetchall(), indent=2)
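# Example usage (illustrative; the fips value is a placeholder): query
# parameters are passed straight through to psycopg2, so use %s placeholders:
#   getData(con, "SELECT ct_name FROM synth_ma.county_stats WHERE ct_fips = %s",
#           ("025",))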
#removes CR LF characters from string, for safer logging
def sanitize(s):
return re.sub("[\r\n]+", " ", s)
#Get IP of client making the call, TO BE USED FOR DEBUGGING PURPOSES ONLY!
#Should handle running behind proxy, but could be subject to spoofing
#Reference: http://esd.io/blog/flask-apps-heroku-real-ip-spoofing.html
def get_ip():
if not request.headers.getlist("X-Forwarded-For"):
ip = request.remote_addr
else:
ip = request.headers.getlist("X-Forwarded-For")[0]
#be sure to remove any CRLF characters, to limit log entry spoofing
return sanitize(ip)
#
#API calls
#
#Documentation Index
@app.route('/htc/api/v1')
@cache.cached(timeout=300) # cache this view for 5 minutes
def documentation():
return auto.html(title='MA High Tech Counsel API Documentation')
#All Counties
#
#Request geojson of all counties
@app.route('/htc/api/v1/counties', methods=['GET'])
@auto.doc()
@cache.cached(timeout=300) # cache this view for 5 minutes
def get_counties_all():
"""Counties in GeoJSON"""
log.debug("entering get_counties_all() IP=%s" % get_ip())
con = get_db_con()
sql = "SELECT s.ct_fips, s.ct_name, s.sq_mi, s.pop, s.pop_male / s.pop as pct_male, s.pop_female / s.pop as pct_female, s.pop_sm, " \
"chr.hs_graduate as chr_hs_grad, chr.college as chr_college, chr.unemployed as chr_unemployed, chr.diabetes_rate as chr_diabetes, " \
"chr.adult_obesity as chr_adult_obesity, chr.adult_smoking as chr_adult_smoking, opioid.deaths as opioid_deaths, " \
"age.fact_pop_0_4 as pop_0_4,age.fact_pop_5_9 as pop_5_9,age.fact_pop_10_14 as pop_10_14,age.fact_pop_15_19 as pop_15_19, "\
"age.fact_pop_20_24 as pop_20_24,age.fact_pop_25_29 as pop_25_29,age.fact_pop_30_34 as pop_30_34,age.fact_pop_35_39 as pop_35_39, " \
"age.fact_pop_40_44 as pop_40_44,age.fact_pop_45_49 as pop_45_49,age.fact_pop_50_54 as pop_50_54,age.fact_pop_55_59 as pop_55_59, " \
"age.fact_pop_60_64 as pop_60_64,age.fact_pop_65_69 as pop_65_69,age.fact_pop_70_74 as pop_70_74,age.fact_pop_75_79 as pop_75_79, " \
"age.fact_pop_80_84 as pop_80_84,age.fact_pop_85_110 as pop_85_110, " \
"ST_AsGeoJSON(the_geom) AS geometry " \
"FROM synth_ma.county_stats s " \
"JOIN synth_ma.ma_opioid_county opioid ON opioid.countyfp = s.ct_fips AND opioid.year = '2015' " \
"JOIN tiger_cb14_500k.county g ON g.statefp = '25' AND g.countyfp = s.ct_fips " \
"JOIN synth_ma.ma_county_age age ON age.ct_fips = s.ct_fips " \
"JOIN county_health.chr ON chr.statefp = '25' AND chr.release_year = 2016 AND chr.countyfp = s.ct_fips"
data = p2g.getData(con, sql)
log.debug("leaving get_counties_all()")
return data
#Request geojson of all counties (synthetic data)
@app.route('/htc/api/v1/synth/counties', methods=['GET'])
@auto.doc()
@cache.cached(timeout=300) # cache this view for 5 minutes
def get_synth_counties_all():
"""Counties in GeoJSON synthetic"""
log.debug("entering get_synth_counties_all() IP=%s" % get_ip())
con = get_db_con()
sql = "SELECT s.ct_fips, s.ct_name, s.sq_mi, s.pop, CASE WHEN s.pop > 0 THEN s.pop_male / s.pop ELSE 0 END AS pct_male, CASE WHEN s.pop > 0 THEN s.pop_female / s.pop ELSE 0 END AS pct_female, s.pop_sm, " \
"ST_AsGeoJSON(s.ct_poly) AS geometry, " \
"dd.rate as pct_diabetes, dhd.rate as pct_heart_disease, doa.rate as pct_opioid_addiction " \
"FROM synth_ma.synth_county_pop_stats s " \
"JOIN synth_ma.synth_county_disease_stats dd ON dd.ct_fips = s.ct_fips AND dd.disease_name = 'diabetes' " \
"JOIN synth_ma.synth_county_disease_stats dhd ON dhd.ct_fips = s.ct_fips AND dhd.disease_name = 'heart_disease' " \
"JOIN synth_ma.synth_county_disease_stats doa ON doa.ct_fips = s.ct_fips AND doa.disease_name = 'opioid_addiction' "
data = p2g.getData(con, sql)
log.debug("leaving get_synth_counties_all()")
return data
#Request list of all counties
@app.route('/htc/api/v1/counties/list', methods=['GET'])
@auto.doc()
@cache.cached(timeout=300) # cache this view for 5 minutes
def get_counties():
"""Counties list in JSON"""
log.debug("entering get_counties() IP=%s" % get_ip())
con = get_db_con()
sql = "SELECT ct_name, ct_fips " \
"FROM synth_ma.county_stats"
data = getData(con, sql)
log.debug("leaving get_counties()")
return data
#Request list of all counties (synthetic)
@app.route('/htc/api/v1/synth/counties/list', methods=['GET'])
@auto.doc()
@cache.cached(timeout=300) # cache this view for 5 minutes
def get_synth_counties():
"""Counties list in JSON synthetic"""
log.debug("entering get_synth_counties() IP=%s" % get_ip())
con = get_db_con()
sql = "SELECT ct_name, ct_fips " \
"FROM synth_ma.synth_county_pop_stats"
data = getData(con, sql)
log.debug("leaving get_synth_counties()")
return data
#Request list of disease names that we have statistics for (synthetic)
@app.route('/htc/api/v1/synth/diseases/list', methods=['GET'])
@auto.doc()
@cache.cached(timeout=300)
def get_synth_diseases():
"""Disease list in JSON synthetic"""
log.debug("entering get_synth_diseases() IP=%s" %get_ip())
con = get_db_con()
sql = "SELECT DISTINCT disease_name FROM synth_ma.synth_county_disease_stats"
data = getData(con, sql)
log.debug("leaving get_synth_diseases()")
return data
#Request geojson of only the geometry of all counties
@app.route('/htc/api/v1/counties/geoms', methods=['GET'])
@auto.doc()
@cache.cached(timeout=300) # cache this view for 5 minutes
def get_counties_geom():
"""Counties in GeoJSON, geometry only"""
log.debug("entering get_counties_geom() IP=%s" % get_ip())
con = get_db_con()
sql = "SELECT countyfp AS ct_fips, ST_AsGeoJSON(the_geom) AS geometry " \
"FROM tiger_cb14_500k.county WHERE statefp='25'"
data = p2g.getData(con, sql)
log.debug("leaving get_counties_geom()")
return data
#Request only the statistics of all counties
@app.route('/htc/api/v1/counties/stats', methods=['GET'])
@auto.doc()
@cache.cached(timeout=300) # cache this view for 5 minutes
def get_counties_stats():
"""Counties in JSON, statistics only"""
log.debug("entering get_counties_stats() IP=%s" % get_ip())
con = get_db_con()
sql = "SELECT s.ct_fips, s.ct_name, s.sq_mi, s.pop, s.pop_male / s.pop as pct_male, s.pop_female / s.pop as pct_female, s.pop_sm, " \
"chr.hs_graduate / 100 as chr_hs_grad, chr.college / 100 as chr_college, chr.unemployed / 100 as chr_unemployed, chr.diabetes_rate / 100 as chr_diabetes, " \
"chr.adult_obesity / 100 as chr_adult_obesity, chr.adult_smoking / 100 as chr_adult_smoking, opioid.deaths as opioid_deaths, " \
"age.fact_pop_0_4 as pop_0_4,age.fact_pop_5_9 as pop_5_9,age.fact_pop_10_14 as pop_10_14,age.fact_pop_15_19 as pop_15_19, "\
"age.fact_pop_20_24 as pop_20_24,age.fact_pop_25_29 as pop_25_29,age.fact_pop_30_34 as pop_30_34,age.fact_pop_35_39 as pop_35_39, " \
"age.fact_pop_40_44 as pop_40_44,age.fact_pop_45_49 as pop_45_49,age.fact_pop_50_54 as pop_50_54,age.fact_pop_55_59 as pop_55_59, " \
"age.fact_pop_60_64 as pop_60_64,age.fact_pop_65_69 as pop_65_69,age.fact_pop_70_74 as pop_70_74,age.fact_pop_75_79 as pop_75_79, " \
"age.fact_pop_80_84 as pop_80_84,age.fact_pop_85_110 as pop_85_110 " \
"FROM synth_ma.county_stats s " \
"JOIN synth_ma.ma_opioid_county opioid ON opioid.countyfp = s.ct_fips AND opioid.year = '2015' " \
"JOIN synth_ma.ma_county_age age ON age.ct_fips = s.ct_fips " \
"JOIN county_health.chr ON chr.statefp = '25' AND chr.release_year = 2016 AND chr.countyfp = s.ct_fips"
data = getData(con, sql)
log.debug("leaving get_counties_stats()")
return data
#Request only the statistics of all counties (synthetic)
@app.route('/htc/api/v1/synth/counties/stats', methods=['GET'])
@auto.doc()
@cache.cached(timeout=300) # cache this view for 5 minutes
def get_synth_counties_stats():
"""Counties in JSON, statistics only synthetic"""
log.debug("entering get_synth_counties_stats() IP=%s" % get_ip())
con = get_db_con()
sql = "SELECT s.ct_fips, s.ct_name, s.sq_mi, s.pop, CASE WHEN s.pop > 0 THEN s.pop_male / s.pop ELSE 0 END AS pct_male, CASE WHEN s.pop > 0 THEN s.pop_female / s.pop ELSE 0 END AS pct_female, s.pop_sm, " \
"dd.rate as pct_diabetes, dhd.rate as pct_heart_disease, doa.rate as pct_opioid_addiction " \
"FROM synth_ma.synth_county_pop_stats s " \
"JOIN synth_ma.synth_county_disease_stats dd ON dd.ct_fips = s.ct_fips AND dd.disease_name = 'diabetes' " \
"JOIN synth_ma.synth_county_disease_stats dhd ON dhd.ct_fips = s.ct_fips AND dhd.disease_name = 'heart_disease' " \
"JOIN synth_ma.synth_county_disease_stats doa ON doa.ct_fips = s.ct_fips AND doa.disease_name = 'opioid_addiction' "
data = getData(con, sql)
log.debug("leaving get_synth_counties_stats()")
return data
#Single County
#
#Request geojson of single county by name
@app.route('/htc/api/v1/counties/name/<string:ct_name>', methods=['GET'])
@auto.doc()
@cache.memoize(timeout=300) # cache this view for 5 minutes
def get_county_by_name(ct_name):
"""County in GeoJSON, by name"""
log.debug("entering get_county_by_name() IP=%s" % get_ip())
con = get_db_con()
sql = "SELECT s.ct_fips, s.ct_name, s.sq_mi, s.pop, s.pop_male / s.pop as pct_male, s.pop_female / s.pop as pct_female, s.pop_sm, " \
"chr.hs_graduate as chr_hs_grad, chr.college as chr_college, chr.unemployed as chr_unemployed, chr.diabetes_rate as chr_diabetes, " \
"chr.adult_obesity as chr_adult_obesity, chr.adult_smoking as chr_adult_smoking, opioid.deaths as opioid_deaths, " \
"age.fact_pop_0_4 as pop_0_4,age.fact_pop_5_9 as pop_5_9,age.fact_pop_10_14 as pop_10_14,age.fact_pop_15_19 as pop_15_19, "\
"age.fact_pop_20_24 as | |
"""
return (source, target) in self._arcs
def sources(self) -> Set[Node]:
"""
Get all nodes in the graph that have no parents.
Return
------
        Set[Node]
Nodes in the graph that have no parents.
Example
-------
>>> from graphical_models import DAG
>>> g = DAG(arcs={(1, 2), (1, 3), (2, 3)})
>>> g.sources()
{1}
"""
return {node for node in self._nodes if len(self._parents[node]) == 0}
def sinks(self) -> Set[Node]:
"""
Get all nodes in the graph that have no children.
Return
------
        Set[Node]
Nodes in the graph that have no children.
Example
-------
>>> from graphical_models import DAG
>>> g = DAG(arcs={(1, 2), (1, 3), (2, 3)})
>>> g.sinks()
{3}
"""
return {node for node in self._nodes if len(self._children[node]) == 0}
def reversible_arcs(self) -> Set[DirectedEdge]:
"""
Get all reversible (aka covered) arcs in the DAG.
Return
------
Set[arc]
        Return all reversible (aka covered) arcs in the DAG. An arc i -> j is *covered* if :math:`Pa(j) = Pa(i) \cup \{i\}`.
Reversing a reversible arc results in a DAG in the same Markov equivalence class.
Example
-------
>>> from graphical_models import DAG
>>> g = DAG(arcs={(1, 2), (1, 3), (2, 3)})
>>> g.reversible_arcs()
{(1, 2), (2, 3)}
"""
reversible_arcs = set()
for i, j in self._arcs:
if self._parents[i] == (self._parents[j] - {i}):
reversible_arcs.add((i, j))
return reversible_arcs
def is_reversible(self, i: Node, j: Node) -> bool:
"""
Check if the arc ``i`` -> ``j`` is reversible (aka covered), i.e., if :math:`pa(i) = pa(j) \setminus \{i\}`
Parameters
----------
i:
source of the arc
j:
target of the arc
Returns
-------
True if the arc is reversible, otherwise False.
Example
-------
>>> from graphical_models import DAG
>>> g = DAG(arcs={(1, 2), (1, 3), (2, 3)})
>>> g.is_reversible(1, 2)
True
>>> g.is_reversible(1, 3)
False
"""
return self._parents[i] == self._parents[j] - {i}
def arcs_in_vstructures(self) -> Set[Tuple]:
"""
Get all arcs in the graph that participate in a v-structure.
Return
------
Set[arc]
Return all arcs in the graph in a v-structure (aka an immorality). A v-structure is formed when i->j<-k but
there is no arc between i and k. Arcs that participate in a v-structure are identifiable from observational
data.
Example
-------
>>> from graphical_models import DAG
>>> g = DAG(arcs={(1, 3), (2, 3)})
>>> g.arcs_in_vstructures()
        {(1, 3), (2, 3)}
"""
return {(i, j) for i, j in self._arcs if self._parents[j] - self._neighbors[i] - {i}}
def vstructures(self) -> Set[Tuple]:
"""
Get all v-structures in the graph, i.e., triples of the form (i, k, j) such that ``i``->k<-``j`` and ``i``
is not adjacent to ``j``.
Return
------
Set[Tuple]
Return all triples in the graph in a v-structure (aka an immorality). A v-structure is formed when i->j<-k but
there is no arc between i and k. Arcs that participate in a v-structure are identifiable from observational
data.
Example
-------
>>> from graphical_models import DAG
>>> g = DAG(arcs={(1, 3), (2, 3)})
>>> g.vstructures()
{(1, 3, 2)}
"""
vstructs = set()
for node in self._nodes:
for p1, p2 in itr.combinations(self._parents[node], 2):
if p1 not in self._parents[p2] and p2 not in self._parents[p1]:
vstructs.add((p1, node, p2))
return vstructs
def triples(self) -> Set[Tuple]:
"""
Return all triples of the form (``i``, ``j``, ``k``) such that ``i`` and ``k`` are both adjacent to ``j``.
Returns
-------
Set[Tuple]
Triples in the graph.
Examples
--------
>>> from graphical_models import DAG
>>> g = DAG(arcs={(1, 3), (2, 3), (1, 2)})
>>> g.triples()
{frozenset({1, 3, 2})}
"""
t = set()
for node in self._nodes:
t |= {frozenset({n1, node, n2}) for n1, n2 in itr.combinations(self._neighbors[node], 2)}
return t
def upstream_most(self, s: Set[Node]) -> Set[Node]:
"""
        Return the set of nodes in ``s`` which have no ancestors in ``s``.
Parameters
----------
s:
Set of nodes
Returns
-------
The set of nodes in ``s`` with no ancestors in ``s``.
"""
return {node for node in s if not self.ancestors_of(node) & s}
# === COMPARISON
def shd(self, other) -> int:
"""
Compute the structural Hamming distance between this DAG and the DAG ``other``.
Parameters
----------
other:
the DAG to which the SHD will be computed.
Return
------
int
The structural Hamming distance between :math:`G_1` and :math:`G_2` is the minimum number of arc additions,
deletions, and reversals required to transform :math:`G_1` into :math:`G_2` (and vice versa).
Example
-------
>>> from graphical_models import DAG
>>> g1 = DAG(arcs={(1, 2), (2, 3)})
>>> g2 = DAG(arcs={(2, 1), (2, 3)})
>>> g1.shd(g2)
1
"""
if isinstance(other, DAG):
self_arcs_reversed = {(j, i) for i, j in self._arcs}
other_arcs_reversed = {(j, i) for i, j in other._arcs}
additions = other._arcs - self._arcs - self_arcs_reversed
deletions = self._arcs - other._arcs - other_arcs_reversed
reversals = self.arcs & other_arcs_reversed
return len(additions) + len(deletions) + len(reversals)
def shd_skeleton(self, other) -> int:
"""
Compute the structure Hamming distance between the skeleton of this DAG and the skeleton of the graph ``other``.
Parameters
----------
other:
the DAG to which the SHD of the skeleton will be computed.
Return
------
int
The structural Hamming distance between :math:`G_1` and :math:`G_2` is the minimum number of arc additions,
deletions, and reversals required to transform :math:`G_1` into :math:`G_2` (and vice versa).
Example
-------
>>> from graphical_models import DAG
>>> g1 = DAG(arcs={(1, 2), (2, 3)})
>>> g2 = DAG(arcs={(2, 1), (2, 3)})
>>> g1.shd_skeleton(g2)
0
>>> g1 = DAG(arcs={(1, 2)})
>>> g2 = DAG(arcs={(1, 2), (2, 3)})
>>> g1.shd_skeleton(g2)
1
"""
return len(self.skeleton.symmetric_difference(other.skeleton))
def markov_equivalent(self, other, interventions=None) -> bool:
"""
Check if this DAG is (interventionally) Markov equivalent to the DAG ``other``.
Parameters
----------
other:
Another DAG.
interventions:
If not None, check whether the two DAGs are interventionally Markov equivalent under the interventions.
Examples
--------
>>> from graphical_models import DAG
>>> d1 = DAG(arcs={(0, 1), (1, 2)})
>>> d2 = DAG(arcs={(2, 1), (1, 0)})
>>> d3 = DAG(arcs={(0, 1), (2, 1)})
>>> d4 = DAG(arcs={(1, 0), (1, 2)})
>>> d1.markov_equivalent(d2)
True
>>> d2.markov_equivalent(d1)
True
>>> d1.markov_equivalent(d3)
False
>>> d1.markov_equivalent(d2, [{2}])
False
>>> d1.markov_equivalent(d4, [{2}])
True
"""
if interventions is None:
return self.cpdag() == other.cpdag()
else:
return self.interventional_cpdag(interventions, self.cpdag()) == other.interventional_cpdag(interventions,
other.cpdag())
def is_imap(self, other) -> bool:
"""
Check if this DAG is an IMAP of the DAG ``other``, i.e., all d-separation statements in this graph
are also d-separation statements in ``other``.
Parameters
----------
other:
Another DAG.
See Also
--------
is_minimal_imap
Returns
-------
bool
            True if this DAG is an I-MAP of ``other``, otherwise False.
Examples
--------
>>> from graphical_models import DAG
>>> g = DAG(arcs={(1, 2), (3, 2)})
>>> other = DAG(arcs={(1, 2)})
>>> g.is_imap(other)
True
>>> other = DAG(arcs={(1, 2), (2, 3)})
>>> g.is_imap(other)
False
"""
return all(other.dsep(node, nondesc, parents) for node, nondesc, parents in self.local_markov_statements())
def is_minimal_imap(self, other, certify=False, check_imap=True) -> Union[bool, Tuple[bool, Any]]:
"""
Check if this DAG is a minimal IMAP of `other`, i.e., it is an IMAP and no proper subgraph of this DAG
is an IMAP of other. Deleting the arc i->j retains IMAPness when `i` is d-separated from `j` in `other`
given the parents of `j` besides `i` in this DAG.
Parameters
----------
other:
Another DAG.
certify:
            If True and this DAG is not a minimal IMAP of ``other``, also return a certificate of
            non-minimality in the form of an arc i->j that can be deleted while retaining IMAPness.
check_imap:
If True, first check whether this DAG is an IMAP of other, if False, this DAG is assumed to be an IMAP
of other.
See Also
--------
is_imap
Returns
-------
bool
            True if this DAG is a minimal I-MAP of ``other``, otherwise False.
Examples
--------
>>> from graphical_models import DAG
>>> g = DAG(arcs={(1, 2), (3, 2)})
>>> other = DAG(arcs={(1, 2)})
>>> g.is_minimal_imap(other)
False
"""
if check_imap and not self.is_imap(other):
if certify:
return False, None
else:
return False
certificate = next(((i, j) for i, j in self._arcs if other.dsep(i, j, self._parents[j] - {i})), None)
if certify:
return certificate is None, certificate
else:
            return certificate is None
# nxsdk_modules_ncl/dnn/src/data_structures.py (repo: event-driven-robotics/models)
#
# Copyright © 2020 Intel Corporation.
#
# This software and the related documents are Intel copyrighted
# materials, and your use of them is governed by the express
# license under which they were provided to you (License). Unless
# the License provides otherwise, you may not use, modify, copy,
# publish, distribute, disclose or transmit this software or the
# related documents without Intel's prior written permission.
#
# This software and the related documents are provided as is, with
# no express or implied warranties, other than those that are
# expressly stated in the License.
"""
Data structures for CNN partitioner.
"""
import os
import pickle
from concurrent.futures import ThreadPoolExecutor
from enum import IntEnum
import numpy as np
class Layer:
"""Container for partitions in a layer and other high level information.
:param int | str layerId: The layer id, as a label or digit.
:param str layerType: The layer type, e.g. "Conv2D" or "Dense".
Obtained by calling __class__.__name__ on a Keras layer.
:param dict compartmentKwargs: Loihi compartment parameters, typical keys:
``vThMant``, ``biasExp``.
:param dict connectionKwargs: Loihi connection parameters, typical keys:
``numWeightBits``, ``weightExponent``.
:param np.ndarray coreIdMap: Integer tensor of same shape as layer. Each
element indicates which core the neuron belongs to.
:param np.ndarray multiplicityMap: Integer tensor of same shape as layer,
except that the channel dimension is removed. Each element indicates
to how many destination cores the neuron needs to send its spikes.
:param Layer | None postLayer: The post-synaptic layer. Not applicable in
output layer.
"""
def __init__(self, layerId, layerType, compartmentKwargs, connectionKwargs,
coreIdMap, multiplicityMap, postLayer=None):
assert isinstance(layerId, (int, np.integer, str))
assert isinstance(layerType, str)
assert isinstance(compartmentKwargs, dict)
assert isinstance(connectionKwargs, dict)
assert isinstance(coreIdMap, np.ndarray)
assert isinstance(multiplicityMap, np.ndarray)
if postLayer is not None:
assert isinstance(postLayer, Layer)
self.id = layerId
self.type = layerType
self.compartmentKwargs = compartmentKwargs
self.connectionKwargs = connectionKwargs
self.coreIdMap = coreIdMap
self.multiplicityMap = multiplicityMap
self.postLayer = postLayer
self.coreOccupancy = None
self._srcIdMap = {}
self._isMapped = False
self._partitions = []
# Multiplier for complex layers
multiplier = 2 if 'Complex' in layerType else 1
self._numCores = 1 if coreIdMap.size == 0 \
else multiplier * (np.max(coreIdMap) + 1)
self._numSyn = 0
self._numSynEntries = 0
self._numSynMemWords = 0
self._numInputAxons = 0
self._numOutputAxons = 0
self._numOutputAxonCfgEntries = 0
self._inputAxonCost = 0
self._outputAxonCost = 0
self._synapseCost = 0
def genCxResourceMap(self):
"""Generate a compartment resource map.
Maps from global layer-wide compartment id to its
``(chipId, coreId, cxId)`` address.
:raises AssertionError: Layer must be mapped before cxResourceMap can
be generated.
:return: cxResourceMap
:rtype: np.ndarray
"""
assert self._isMapped, \
"Layer must be mapped before cxResourceMap can be generated."
# Initialize cxResourceMap
numCx = 0
for p in self.partitions:
numCx += p.compartmentGroup.numCompartments
cxResourceMap = np.zeros((numCx, 3), int)
# Populate cxResourceMap
for p in self.partitions:
# Get global layer-wide compartment ids
cxGrp = p.compartmentGroup
globalCxIds = cxGrp.relToAbsDestCxIdxMap
cxResourceMap[globalCxIds, 0] = p.chipId
cxResourceMap[globalCxIds, 1] = p.coreId
cxResourceMap[globalCxIds, 2] = cxGrp.cxIds
return cxResourceMap
def genInputAxonResourceMap(self):
"""Generate a resource map for input axons.
Maps from global layer-wide input axon id to its
``(chipId, coreId, axonId)`` address.
:raises AssertionError: Layer must be mapped before resource map can
be generated.
:return: inputAxonResourceMap
:rtype: np.ndarray
"""
assert self._isMapped, \
"Layer must be mapped before InputAxonResourceMap can be " \
"generated."
# Initialize cxResourceMap
numCx = 0
for p in self.partitions:
numCx += p.compartmentGroup.numCompartments
cxResourceMap = np.zeros((numCx, 3), int)
# Populate cxResourceMap
for p in self.partitions:
# Get global layer-wide compartment ids
cxGrp = p.compartmentGroup
globalCxIds = cxGrp.relToAbsDestCxIdxMap
axonIds = np.concatenate([axonGroup.srcNodeIds for axonGroup
in p.inputAxonGroups])
cxResourceMap[globalCxIds, 0] = p.chipId
cxResourceMap[globalCxIds, 1] = p.coreId
cxResourceMap[globalCxIds, 2] = axonIds
return cxResourceMap
def addPartition(self, partition):
"""Add partition to layer, and update cost properties.
:param Partition partition: Partition.
"""
self._partitions.append(partition)
self._numSyn += partition.numSyn
self._numSynEntries += partition.numSynEntries
self._numSynMemWords += partition.numSynMemWords
self._numInputAxons += partition.numInputAxons
self._numOutputAxons += partition.numOutputAxons
self._numOutputAxonCfgEntries += partition.numOutputAxonCfgEntries
self._inputAxonCost += partition.inputAxonCost
self._outputAxonCost += partition.outputAxonCost
self._synapseCost += partition.synapseCost
def updateSrcIdMap(self, key, value):
"""Update source id map.
:param int key: Global source id.
:param tuple[InputAxonGroup, int] value: A tuple containing the input
axon group and the source id relative to that axon.
"""
if key not in self._srcIdMap.keys():
self._srcIdMap[key] = []
self._srcIdMap[key].append(value)
@property
def partitions(self):
"""List of layer partitions.
:return: Layer partitions.
:rtype: list[Partition]
"""
return self._partitions
@property
def srcIdMap(self):
"""Source id map.
This is a helper container that is built by layer ``L`` and used to
construct output axons in layer ``L-1``.
:return: Source id map. Dictionary mapping from global source ids to a
tuple containing the input axon group and the source id relative
to that axon.
:rtype: dict[int, tuple[InputAxonGroup, int]]
"""
return self._srcIdMap
def clearTemp(self):
"""Clean up temporary data."""
self._srcIdMap = None
@property
def numCores(self):
"""Number of cores used by layer.
:return: Number of cores used by layer.
:rtype: int
"""
return self._numCores
@property
def numSyn(self):
"""Number of synapses in layer.
:return: Number of synapses in layer.
:rtype: int
"""
return self._numSyn
@property
def numSynEntries(self):
"""Number of synEntries in layer.
:return: Number of synEntries in layer.
:rtype: int
"""
return self._numSynEntries
@property
def numSynMemWords(self):
"""Number of synMemWords used by layer.
:return: Number of synMemWords used by layer.
:rtype: int
"""
return self._numSynMemWords
@property
def numInputAxons(self):
"""Number of input axons in layer.
:return: Number of input axons in layer.
:rtype: int
"""
return self._numInputAxons
@property
def numOutputAxons(self):
"""Number of output axons in layer.
:return: Number of output axons in layer.
:rtype: int
"""
return self._numOutputAxons
@property
def numOutputAxonCfgEntries(self):
"""Number of output axon config entries in layer.
:return: Number of output axon config entries in layer.
:rtype: int
"""
return self._numOutputAxonCfgEntries
@property
def inputAxonCost(self):
"""The total input axon cost of this layer.
:return: Axon cost.
:rtype: float
"""
return self._inputAxonCost
@property
def outputAxonCost(self):
"""The total output axon cost of this layer.
:return: Axon cost.
:rtype: float
"""
return self._outputAxonCost
@property
def synapseCost(self):
"""The total synapse cost of this layer.
:return: Synapse cost.
:rtype: float
"""
return self._synapseCost
@property
def coreCost(self):
"""The total core cost of this layer.
:return: Core cost.
:rtype: int
"""
return self._numCores
@property
def cost(self):
"""The total cost of partitioning this layer.
:return: Partitioning cost of layer.
:rtype: float
"""
return (self.inputAxonCost + self.outputAxonCost +
self.synapseCost + self.coreCost)
def setMapped(self):
"""Set flag that this layer is mapped."""
self._isMapped = True
def asDict(self):
"""Return certain attributes of ``Layer`` as dict.
:return: Selection of ``Layer`` attributes as dictionary.
:rtype: dict
"""
return {'id': self.id, 'multiplicityMap': self.multiplicityMap,
'coreIdMap': self.coreIdMap,
'coreOccupancy': self.coreOccupancy}
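# Illustrative sketch: the helper below constructs a toy Layer using the
# constructor documented above. The compartment and connection kwargs are
# example settings for the "typical keys" named in the docstring, not values
# from a real partitioning run, and the helper name is hypothetical.
def _example_layer_sketch():
    coreIdMap = np.zeros((4, 4, 2), int)    # every neuron mapped to core 0
    multiplicityMap = np.ones((4, 4), int)  # each neuron feeds one dest core
    return Layer(layerId='0Conv2D',
                 layerType='Conv2D',
                 compartmentKwargs={'vThMant': 512, 'biasExp': 6},
                 connectionKwargs={'numWeightBits': 8, 'weightExponent': 0},
                 coreIdMap=coreIdMap,
                 multiplicityMap=multiplicityMap)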
def serializeLayer(layer, path):
"""Save layer as pickle file.
:param Layer layer: Layer to serialize.
:param str path: Where to save output file.
"""
postLayer = layer.postLayer
# PostLayer will be None if the last layers of the network are "virtual"
# layers like Flatten or Reshape. We do not want to serialize those.
if postLayer is None:
return
# Temporarily overwrite pointer to parent layer to avoid redundant storage.
layer.postLayer = postLayer.id
with open(os.path.join(path, layer.id + '.pickle'), 'wb') as f:
pickle.dump(layer, f)
layer.postLayer = postLayer
def deserializeLayer(path, filename):
"""Load layer from pickle file.
:param str path: Directory to saved file.
:param str filename: Name of file.
:return: Deserialized layer.
:rtype: Layer
"""
with open(os.path.join(path, filename), 'rb') as f:
return pickle.load(f)
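# Illustrative sketch: shows how serializeLayer and deserializeLayer above can
# be combined to round-trip a single compiled Layer. The helper name is
# hypothetical; it assumes layer.postLayer is not None (serializeLayer skips
# writing otherwise) and that layer.id is a string, matching the
# <layer.id>.pickle naming convention used by serializeLayer.
def _roundtrip_layer_sketch(layer, path):
    """Serialize ``layer`` into ``path`` and immediately load it back."""
    serializeLayer(layer, path)
    return deserializeLayer(path, layer.id + '.pickle')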
def saveMappableLayers(layers, path):
"""Store each partitioned and compiled layer as pickle file on disk.
:param list[Layer] layers: List of Layer objects.
:param str path: Where to save partition.
"""
path = os.path.join(path, 'compiled_partitions')
if not os.path.exists(path):
os.makedirs(path)
# Save each individual layer as pickle file.
with ThreadPoolExecutor() as executor:
futures = [executor.submit(serializeLayer, layer, path)
for layer in layers]
for future in futures:
future.result()
def loadMappableLayers(path):
"""Load compiled partitions from disk.
The partitions are stored in the subfolder ``<path>/compiled_partitions``.
The method expects one pickle file for each layer, and skips over any
non-pickle files in that folder.
    :raises FileNotFoundError: if the directory does not exist or contains no
pickle files.
:param str path: Path to stored partition files.
:return: List of compiled ``Layer`` objects.
:rtype: list[Layer]
"""
path = os.path.join(path, 'compiled_partitions')
if not os.path.exists(path):
raise FileNotFoundError
    filenames = [f for f in os.listdir(path)
                 if os.path.splitext(f)[1] == '.pickle']
    if len(filenames) == 0:
        raise FileNotFoundError
    return [deserializeLayer(path, filename) for filename in filenames]
'Sales', 53000, 2009, 53000),
('Wilkinson', 'IT', 60000, 2011, 60000),
('Johnson', 'Marketing', 40000, 2012, 40000),
('Moore', 'IT', 34000, 2013, 50000),
('Adams', 'Accounting', 50000, 2013, 50000),
], lambda row: (row.name, row.department, row.salary, row.hire_date.year, row.max_salary_year))
def test_cume_dist(self):
"""
Compute the cumulative distribution for the employees based on the
salary in increasing order. Equal to rank/total number of rows (12).
"""
qs = Employee.objects.annotate(cume_dist=Window(
expression=CumeDist(),
order_by=F('salary').asc(),
)).order_by('salary', 'name')
# Round result of cume_dist because Oracle uses greater precision.
self.assertQuerysetEqual(qs, [
('Moore', 'IT', 34000, 0.0833333333),
('Williams', 'Accounting', 37000, 0.1666666667),
('Smith', 'Marketing', 38000, 0.25),
('Johnson', 'Marketing', 40000, 0.3333333333),
('Jenson', 'Accounting', 45000, 0.5),
('Jones', 'Accounting', 45000, 0.5),
('Adams', 'Accounting', 50000, 0.5833333333),
('Brown', 'Sales', 53000, 0.6666666667),
('Smith', 'Sales', 55000, 0.75),
('Wilkinson', 'IT', 60000, 0.8333333333),
('Johnson', 'Management', 80000, 0.9166666667),
('Miller', 'Management', 100000, 1),
], lambda row: (row.name, row.department, row.salary, round(row.cume_dist, 10)))
def test_nthvalue(self):
qs = Employee.objects.annotate(
nth_value=Window(expression=NthValue(
expression='salary', nth=2),
order_by=[F('hire_date').asc(), F('name').desc()],
partition_by=F('department'),
)
).order_by('department', 'hire_date', 'name')
self.assertQuerysetEqual(qs, [
('Jones', 'Accounting', datetime.date(2005, 11, 1), 45000, None),
('Jenson', 'Accounting', datetime.date(2008, 4, 1), 45000, 45000),
('Williams', 'Accounting', datetime.date(2009, 6, 1), 37000, 45000),
('Adams', 'Accounting', datetime.date(2013, 7, 1), 50000, 45000),
('Wilkinson', 'IT', datetime.date(2011, 3, 1), 60000, None),
('Moore', 'IT', datetime.date(2013, 8, 1), 34000, 34000),
('Miller', 'Management', datetime.date(2005, 6, 1), 100000, None),
('Johnson', 'Management', datetime.date(2005, 7, 1), 80000, 80000),
('Smith', 'Marketing', datetime.date(2009, 10, 1), 38000, None),
('Johnson', 'Marketing', datetime.date(2012, 3, 1), 40000, 40000),
('Smith', 'Sales', datetime.date(2007, 6, 1), 55000, None),
('Brown', 'Sales', datetime.date(2009, 9, 1), 53000, 53000),
], lambda row: (row.name, row.department, row.hire_date, row.salary, row.nth_value))
def test_lead(self):
"""
Determine what the next person hired in the same department makes.
Because the dataset is ambiguous, the name is also part of the
ordering clause. No default is provided, so None/NULL should be
returned.
"""
qs = Employee.objects.annotate(lead=Window(
expression=Lead(expression='salary'),
order_by=[F('hire_date').asc(), F('name').desc()],
partition_by='department',
)).order_by('department', F('hire_date').asc(), F('name').desc())
self.assertQuerysetEqual(qs, [
('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 45000),
('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 37000),
('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 50000),
('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), None),
('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 34000),
('Moore', 34000, 'IT', datetime.date(2013, 8, 1), None),
('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 80000),
('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), None),
('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 40000),
('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), None),
('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 53000),
('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), None),
], transform=lambda row: (row.name, row.salary, row.department, row.hire_date, row.lead))
def test_lead_offset(self):
"""
        Determine what the person hired two positions later in the same
        department makes (offset=2), ordered by hire date.
"""
qs = Employee.objects.annotate(lead=Window(
expression=Lead('salary', offset=2),
partition_by='department',
order_by=F('hire_date').asc(),
))
self.assertQuerysetEqual(qs, [
('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 37000),
('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 50000),
('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), None),
('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), None),
('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), None),
('Moore', 34000, 'IT', datetime.date(2013, 8, 1), None),
('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), None),
('Miller', 100000, 'Management', datetime.date(2005, 6, 1), None),
('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), None),
('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), None),
('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), None),
('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), None),
], transform=lambda row: (row.name, row.salary, row.department, row.hire_date, row.lead),
ordered=False
)
@skipUnlessDBFeature('supports_default_in_lead_lag')
def test_lead_default(self):
qs = Employee.objects.annotate(lead_default=Window(
expression=Lead(expression='salary', offset=5, default=60000),
partition_by=F('department'),
order_by=F('department').asc(),
))
self.assertEqual(list(qs.values_list('lead_default', flat=True).distinct()), [60000])
def test_ntile(self):
"""
Compute the group for each of the employees across the entire company,
based on how high the salary is for them. There are twelve employees
so it divides evenly into four groups.
"""
qs = Employee.objects.annotate(ntile=Window(
expression=Ntile(num_buckets=4),
order_by=F('salary').desc(),
)).order_by('ntile', '-salary', 'name')
self.assertQuerysetEqual(qs, [
('Miller', 'Management', 100000, 1),
('Johnson', 'Management', 80000, 1),
('Wilkinson', 'IT', 60000, 1),
('Smith', 'Sales', 55000, 2),
('Brown', 'Sales', 53000, 2),
('Adams', 'Accounting', 50000, 2),
('Jenson', 'Accounting', 45000, 3),
('Jones', 'Accounting', 45000, 3),
('Johnson', 'Marketing', 40000, 3),
('Smith', 'Marketing', 38000, 4),
('Williams', 'Accounting', 37000, 4),
('Moore', 'IT', 34000, 4),
], lambda x: (x.name, x.department, x.salary, x.ntile))
def test_percent_rank(self):
"""
Calculate the percentage rank of the employees across the entire
company based on salary and name (in case of ambiguity).
"""
qs = Employee.objects.annotate(percent_rank=Window(
expression=PercentRank(),
order_by=[F('salary').asc(), F('name').asc()],
)).order_by('percent_rank')
# Round to account for precision differences among databases.
self.assertQuerysetEqual(qs, [
('Moore', 'IT', 34000, 0.0),
('Williams', 'Accounting', 37000, 0.0909090909),
('Smith', 'Marketing', 38000, 0.1818181818),
('Johnson', 'Marketing', 40000, 0.2727272727),
('Jenson', 'Accounting', 45000, 0.3636363636),
('Jones', 'Accounting', 45000, 0.4545454545),
('Adams', 'Accounting', 50000, 0.5454545455),
('Brown', 'Sales', 53000, 0.6363636364),
('Smith', 'Sales', 55000, 0.7272727273),
('Wilkinson', 'IT', 60000, 0.8181818182),
('Johnson', 'Management', 80000, 0.9090909091),
('Miller', 'Management', 100000, 1.0),
], transform=lambda row: (row.name, row.department, row.salary, round(row.percent_rank, 10)))
def test_nth_returns_null(self):
"""
Find the nth row of the data set. None is returned since there are
fewer than 20 rows in the test data.
"""
qs = Employee.objects.annotate(nth_value=Window(
expression=NthValue('salary', nth=20),
order_by=F('salary').asc()
))
self.assertEqual(list(qs.values_list('nth_value', flat=True).distinct()), [None])
def test_multiple_partitioning(self):
"""
Find the maximum salary for each department for people hired in the
same year.
"""
qs = Employee.objects.annotate(max=Window(
expression=Max('salary'),
partition_by=[F('department'), ExtractYear(F('hire_date'))],
)).order_by('department', 'hire_date', 'name')
self.assertQuerysetEqual(qs, [
('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 45000),
('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 45000),
('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 37000),
('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 50000),
('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 60000),
('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 34000),
('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 100000),
('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 100000),
('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 38000),
('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 40000),
('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 55000),
('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 53000),
], transform=lambda row: (row.name, row.salary, row.department, row.hire_date, row.max))
def test_multiple_ordering(self):
"""
Accumulate the salaries over the departments based on hire_date.
If two people were hired on the same date in the same department, the
ordering clause will render a different result for those people.
"""
qs = Employee.objects.annotate(sum=Window(
expression=Sum('salary'),
partition_by='department',
order_by=[F('hire_date').asc(), F('name').asc()],
)).order_by('department', 'sum')
self.assertQuerysetEqual(qs, [
('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 45000),
('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 90000),
('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 127000),
('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 177000),
('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 60000),
('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 94000),
('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 100000),
('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 180000),
('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 38000),
('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 78000),
('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 55000),
('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 108000),
], transform=lambda row: (row.name, row.salary, row.department, row.hire_date, row.sum))
@skipUnlessDBFeature('supports_frame_range_fixed_distance')
def test_range_n_preceding_and_following(self):
qs = Employee.objects.annotate(sum=Window(
expression=Sum('salary'),
order_by=F('salary').asc(),
partition_by='department',
frame=ValueRange(start=-2, end=2),
))
self.assertIn('RANGE BETWEEN 2 PRECEDING AND 2 FOLLOWING', str(qs.query))
self.assertQuerysetEqual(qs, [
('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 37000),
('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 90000),
('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 90000),
('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 50000),
('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 53000),
('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 55000),
('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 40000),
('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 38000),
('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 60000),
('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 34000),
('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 100000),
('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 80000),
], transform=lambda row: (row.name, row.salary, row.department, row.hire_date, row.sum), ordered=False)
def test_range_unbound(self):
"""A query with RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING."""
qs = Employee.objects.annotate(sum=Window(
expression=Sum('salary'),
partition_by='age',
order_by=[F('age').asc()],
frame=ValueRange(start=None, end=None),
)).order_by('department', 'hire_date', 'name')
self.assertIn('RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING', str(qs.query))
self.assertQuerysetEqual(qs, [
('Jones', 'Accounting', 45000, datetime.date(2005, 11, 1), 165000),
('Jenson', 'Accounting', 45000, datetime.date(2008, 4, 1), 165000),
('Williams', 'Accounting', 37000, datetime.date(2009, 6, 1), 165000),
('Adams', 'Accounting', 50000, datetime.date(2013, 7, 1), 130000),
('Wilkinson', 'IT', 60000, datetime.date(2011, 3, 1), 194000),
('Moore', 'IT', 34000, datetime.date(2013, 8, 1), 194000),
('Miller', 'Management', 100000, datetime.date(2005, 6, 1), 194000),
('Johnson', 'Management', 80000, datetime.date(2005, 7, 1), 130000),
('Smith', 'Marketing', 38000, datetime.date(2009, 10, 1), 165000),
('Johnson', 'Marketing', 40000, datetime.date(2012, 3, 1), 148000),
('Smith', 'Sales', 55000, datetime.date(2007, 6, 1), 148000),
('Brown', 'Sales', 53000, datetime.date(2009, 9, 1), 148000)
], transform=lambda row: (row.name, row.department, row.salary, row.hire_date, row.sum))
@skipIf(
connection.vendor == 'sqlite' and connection.Database.sqlite_version_info < (3, 27),
'Nondeterministic failure on SQLite < 3.27.'
)
def test_subquery_row_range_rank(self):
        qs
jointsolutions]) and len(jointsolutions)>0:
return [AST.SolverSolution(var.name,jointeval=jointsolutions,isHinge=self.isHinge(var.name))]
solutions = []
if len(eqns) > 1:
neweqns = []
listsymbols = []
symbolgen = cse_main.numbered_symbols('const')
for e in eqns:
enew, symbols = self.groupTerms(e.subs(varsym.subs),[varsym.cvar,varsym.svar,var], symbolgen)
# remove coupled equations
if any([(m[0]>0)+(m[1]>0)+(m[2]>0)>1 for m in Poly(enew,varsym.cvar,varsym.svar,var).monoms]):
continue
# ignore any equations with degree 3 or more
if Poly(enew,varsym.svar).degree > maxdegree or Poly(enew,varsym.cvar).degree > maxdegree:
                    log.debug('ignoring equation: %s', enew)
continue
if Poly(enew,varsym.svar).coeff() == S.Zero or Poly(enew,varsym.cvar) == S.Zero or Poly(enew,varsym.var) == S.Zero:
log.debug('equation %s is allowing trivial solution for variable %s, ignoring ',e,varsym.name)
continue
rank = self.codeComplexity(enew)
for s in symbols:
rank += self.codeComplexity(s[1])
neweqns.append((rank,enew))
listsymbols += symbols
# since we're solving for two variables, we only want to use two equations, so
# start trying all the equations starting from the least complicated ones to the most until a solution is found
eqcombinations = []
for eqs in combinations(neweqns,2):
eqcombinations.append((eqs[0][0]+eqs[1][0],[Eq(e[1],0) for e in eqs]))
eqcombinations.sort(lambda x, y: x[0]-y[0])
hasgoodsolution = False
for icomb,comb in enumerate(eqcombinations):
# skip if too complex
if len(solutions) > 0 and comb[0] > 200:
break
# try to solve for both sin and cos terms
if not self.has_any_symbols(comb[1],varsym.svar) or not self.has_any_symbols(comb[1], varsym.cvar):
continue
try:
s = solve(comb[1],[varsym.svar,varsym.cvar])
except PolynomialError, e:
log.debug('solveSingleVariable: failed: %s',e)
continue
if s is not None:
sollist = None
if hasattr(s,'has_key'):
if s.has_key(varsym.svar) and s.has_key(varsym.cvar):
sollist = [(s[varsym.svar],s[varsym.cvar])]
else:
sollist = []
else:
sollist = s
solversolution = AST.SolverSolution(var.name,jointeval=[],isHinge=self.isHinge(var.name))
goodsolution = 0
for svarsol,cvarsol in sollist:
# solutions cannot be trivial
if (svarsol-cvarsol).subs(listsymbols).expand() == S.Zero:
break
if svarsol.subs(listsymbols).expand() == S.Zero and abs(cvarsol.subs(listsymbols).expand()) - S.One != S.Zero:
break
if cvarsol.subs(listsymbols).expand() == S.Zero and abs(svarsol.subs(listsymbols).expand()) - S.One != S.Zero:
break
# check the numerator and denominator if solutions are the same or for possible divide by zeros
svarfrac=fraction(svarsol)
svarfrac = [svarfrac[0].subs(listsymbols), svarfrac[1].subs(listsymbols)]
cvarfrac=fraction(cvarsol)
cvarfrac = [cvarfrac[0].subs(listsymbols), cvarfrac[1].subs(listsymbols)]
if self.equal(svarfrac[0],cvarfrac[0]) and self.equal(svarfrac[1],cvarfrac[1]):
break
if not self.isValidSolution(svarfrac[0]) or not self.isValidSolution(svarfrac[1]) or not self.isValidSolution(cvarfrac[0]) or not self.isValidSolution(cvarfrac[1]):
continue
# check if there exists at least one test solution with non-zero denominators
if subs is None:
testeqs = [svarfrac[1].subs(othersubs),cvarfrac[1].subs(othersubs)]
else:
testeqs = [svarfrac[1].subs(subs).subs(othersubs),cvarfrac[1].subs(subs).subs(othersubs)]
testsuccess = False
for testconsistentvalue in self.testconsistentvalues:
if all([testeq.subs(testconsistentvalue).evalf()!=S.Zero for testeq in testeqs]):
testsuccess = True
break
if not testsuccess:
continue
scomplexity = self.codeComplexity(svarfrac[0])+self.codeComplexity(svarfrac[1])
ccomplexity = self.codeComplexity(cvarfrac[0])+self.codeComplexity(cvarfrac[1])
if scomplexity > 1200 or ccomplexity > 1200:
log.debug('equation too complex for single variable solution (%d,%d).... (probably wrong?)',scomplexity,ccomplexity)
break
if scomplexity < 500:
svarfrac[1] = simplify(svarfrac[1])
if self.chop(svarfrac[1])== 0:
break
if ccomplexity < 500:
cvarfrac[1] = simplify(cvarfrac[1])
if self.chop(cvarfrac[1])== 0:
break
# sometimes the returned simplest solution makes really gross approximations
svarfracsimp_denom = self.trigsimp(svarfrac[1],othersolvedvars)
cvarfracsimp_denom = self.trigsimp(cvarfrac[1],othersolvedvars)
# self.simplifyTransform could help in reducing denoms further...
denomsequal = False
if self.equal(svarfracsimp_denom,cvarfracsimp_denom):
denomsequal = True
elif self.equal(svarfracsimp_denom,-cvarfracsimp_denom):
cvarfrac[0] = -cvarfrac[0]
cvarfracsimp_denom = -cvarfracsimp_denom
if self.equal(svarfracsimp_denom,cvarfracsimp_denom) and not svarfracsimp_denom.is_number:
log.debug('%s solution: denominator is equal %s, doing a global substitution',var.name,svarfracsimp_denom)
denom = self.gsymbolgen.next()
solversolution.dictequations.append((denom,sign(svarfracsimp_denom)))
svarsolsimp = self.trigsimp(svarfrac[0],othersolvedvars)*denom
cvarsolsimp = self.trigsimp(cvarfrac[0],othersolvedvars)*denom
solversolution.FeasibleIsZeros = False
solversolution.presetcheckforzeros.append(svarfracsimp_denom)
expandedsol = atan2(svarsolsimp,cvarsolsimp)
else:
svarfracsimp_num = self.trigsimp(svarfrac[0],othersolvedvars)
cvarfracsimp_num = self.trigsimp(cvarfrac[0],othersolvedvars)
svarsolsimp = svarfracsimp_num/svarfracsimp_denom
cvarsolsimp = cvarfracsimp_num/cvarfracsimp_denom
if svarsolsimp.is_number and cvarsolsimp.is_number:
if abs(svarsolsimp**2+cvarsolsimp**2-S.One).evalf() > 1e-10:
log.debug('%s solution: atan2(%s,%s), sin/cos not on circle so ignoring',var.name,svarsolsimp,cvarsolsimp)
continue
expandedsol = atan2check(svarsolsimp,cvarsolsimp)
solversolution.FeasibleIsZeros = False
log.debug('%s solution: atan2 check for joint',var.name)
solversolution.jointeval.append(expandedsol)
if unknownvars is not None:
unsolvedsymbols = []
for unknownvar in unknownvars:
if unknownvar != var:
unsolvedsymbols += self.Variable(unknownvar).vars
if len(unsolvedsymbols) > 0:
solversolution.equationsused = [eq for eq in eqns if not eq.has_any_symbols(*unsolvedsymbols)]
else:
solversolution.equationsused = eqns
if len(solversolution.equationsused) > 0:
log.info('%s solution: equations used for atan2: %s',var.name, str(solversolution.equationsused))
if len(self.checkForDivideByZero(expandedsol)) == 0:
goodsolution += 1
if len(solversolution.jointeval) == len(sollist) and len(sollist) > 0:
solutions.append(solversolution)
if goodsolution > 0:
hasgoodsolution = True
if len(sollist) == goodsolution and goodsolution == 1:
break
if len(solutions) >= maxsolutions:
# probably more than enough already?
break
if len(solutions) > 0 or hasgoodsolution: # found a solution without any divides, necessary for pr2 head_torso lookat3d ik
return solutions
# solve one equation
for ieq,eq in enumerate(eqns):
symbolgen = cse_main.numbered_symbols('const')
eqnew, symbols = self.groupTerms(eq.subs(varsym.subs), [varsym.cvar,varsym.svar,varsym.var], symbolgen)
try:
# ignore any equations with degree 3 or more
ps = Poly(eqnew,varsym.svar)
pc = Poly(eqnew,varsym.cvar)
if ps.degree > maxdegree or pc.degree > maxdegree:
log.debug('cannot solve equation with high degree: %s',str(eqnew))
continue
if ps.coeff(0) == S.Zero and len(ps.monoms) > 0:
log.debug('equation %s has trivial solution, ignoring...', ps)
continue
if pc.coeff(0) == S.Zero and len(pc.monoms) > 0:
log.debug('equation %s has trivial solution, ignoring...', pc)
continue
except polys.polynomial.PolynomialError:
# might not be a polynomial, so ignore
continue
equationsused = None
if unknownvars is not None:
unsolvedsymbols = []
for unknownvar in unknownvars:
if unknownvar != var:
unsolvedsymbols += self.Variable(unknownvar).vars
if len(unsolvedsymbols) > 0:
equationsused = [eq2 for ieq2,eq2 in enumerate(eqns) if ieq2!=ieq and not eq2.has_any_symbols(*unsolvedsymbols)]
else:
equationsused = eqns[:]
equationsused.pop(ieq)
numcvar = self.countVariables(eqnew,varsym.cvar)
numsvar = self.countVariables(eqnew,varsym.svar)
if numcvar == 1 and numsvar == 1:
a = Wild('a',exclude=[varsym.svar,varsym.cvar])
b = Wild('b',exclude=[varsym.svar,varsym.cvar])
c = Wild('c',exclude=[varsym.svar,varsym.cvar])
m = eqnew.match(a*varsym.cvar+b*varsym.svar+c)
if m is not None:
symbols += [(varsym.svar,sin(var)),(varsym.cvar,cos(var))]
asinsol = trigsimp(asin(-m[c]/abs(sqrt(m[a]*m[a]+m[b]*m[b]))).subs(symbols),deep=True)
constsol = -atan2(m[a],m[b]).subs(symbols).evalf()
jointsolutions = [constsol+asinsol,constsol+pi.evalf()-asinsol]
if all([self.isValidSolution(s) and self.isValidSolution(s) for s in jointsolutions]):
solutions.append(AST.SolverSolution(var.name,jointeval=jointsolutions,isHinge=self.isHinge(var.name)))
solutions[-1].equationsused = equationsused
continue
if numcvar > 0:
try:
# substitute cos
if self.countVariables(eqnew,varsym.svar) <= 1 or (self.countVariables(eqnew,varsym.cvar) <= 2 and self.countVariables(eqnew,varsym.svar) == 0): # anything more than 1 implies quartic equation
tempsolutions = solve(eqnew.subs(varsym.svar,sqrt(1-varsym.cvar**2)),varsym.cvar)
jointsolutions = [self.trigsimp(s.subs(symbols+varsym.subsinv),othersolvedvars) for s in tempsolutions]
if all([self.isValidSolution(s) and self.isValidSolution(s) for s in jointsolutions]):
solutions.append(AST.SolverSolution(var.name,jointevalcos=jointsolutions,isHinge=self.isHinge(var.name)))
solutions[-1].equationsused = equationsused
continue
except self.CannotSolveError,e:
log.debug(e)
except NotImplementedError:
pass
if numsvar > 0:
# substitute sin
try:
if self.countVariables(eqnew,varsym.svar) <= 1 or (self.countVariables(eqnew,varsym.svar) <= 2 and self.countVariables(eqnew,varsym.cvar) == 0): # anything more than 1 implies quartic equation
tempsolutions = solve(eqnew.subs(varsym.cvar,sqrt(1-varsym.svar**2)),varsym.svar)
jointsolutions = [self.trigsimp(s.subs(symbols+varsym.subsinv),othersolvedvars) for s in tempsolutions]
if all([self.isValidSolution(s) and self.isValidSolution(s) for s in jointsolutions]):
solutions.append(AST.SolverSolution(var.name,jointevalsin=jointsolutions,isHinge=self.isHinge(var.name)))
solutions[-1].equationsused = equationsused
continue
except self.CannotSolveError,e:
log.debug(e)
except NotImplementedError:
pass
if numcvar == 0 and numsvar == 0:
tempsolutions = solve(eqnew,var)
jointsolutions = [self.trigsimp(s.subs(symbols),othersolvedvars) for s in tempsolutions]
if all([self.isValidSolution(s) and s != S.Zero for s in jointsolutions]) and len(jointsolutions) > 0:
solutions.append(AST.SolverSolution(var.name,jointeval=jointsolutions,isHinge=self.isHinge(var.name)))
solutions[-1].equationsused = equationsused
continue
try:
solution = self.solveHighDegreeEquationsHalfAngle([eqnew],varsym,symbols)
solutions.append(solution.subs(symbols))
solutions[-1].equationsused = equationsused
except self.CannotSolveError,e:
log.debug(e)
if len(solutions) > 0:
return solutions
return [self.solveHighDegreeEquationsHalfAngle(eqns,varsym)]
def solvePairVariables(self,raweqns,var0,var1,othersolvedvars,maxcomplexity=50,unknownvars=None):
# make sure both variables are hinges
if not self.isHinge(var0.name) or not self.isHinge(var1.name):
raise self.CannotSolveError('pairwise variables only supports hinge joints')
varsym0 = self.Variable(var0)
varsym1 = self.Variable(var1)
cvar0,svar0 = varsym0.cvar, varsym0.svar
cvar1,svar1 = varsym1.cvar, varsym1.svar
varsubs=varsym0.subs+varsym1.subs
varsubsinv = varsym0.subsinv+varsym1.subsinv
unknownvars=[cvar0,svar0,cvar1,svar1]
reducesubs = [(svar0**2,1-cvar0**2),(svar1**2,1-cvar1**2)]
eqns = [eq.subs(varsubs).subs(reducesubs).expand() for eq in raweqns if eq.has_any_symbols(var0,var1)]
if len(eqns) <= 1:
raise self.CannotSolveError('not enough equations')
# group equations with single variables
symbolgen = cse_main.numbered_symbols('const')
orgeqns = []
allsymbols = []
for eq in eqns:
eqnew, symbols = self.groupTerms(eq, unknownvars, symbolgen)
allsymbols += symbols
orgeqns.append([self.codeComplexity(eq),Poly(eqnew,*unknownvars)])
orgeqns.sort(lambda x, y: x[0]-y[0])
neweqns = orgeqns[:]
pairwisesubs = [(svar0*cvar1,Symbol('s0c1')),(svar0*svar1,Symbol('s0s1')),(cvar0*cvar1,Symbol('c0c1')),(cvar0*svar1,Symbol('c0s1')),(cvar0*svar0,Symbol('s0c0')),(cvar1*svar1,Symbol('c1s1'))]
pairwiseinvsubs = [(f[1],f[0]) for f in pairwisesubs]
pairwisevars = [f[1] for f in pairwisesubs]
reduceeqns = [Poly(eq.as_basic().subs(pairwisesubs),*pairwisevars) for rank,eq in orgeqns if rank < 4*maxcomplexity]
for i,eq in enumerate(reduceeqns):
if eq.TC != S.Zero and not eq.TC.is_Symbol:
n=symbolgen.next()
allsymbols.append((n,eq.TC.subs(allsymbols)))
reduceeqns[i] += n-eq.TC
# try to at least subtract as much paired variables out
eqcombs = [c for c in combinations(reduceeqns,2)]
while len(eqcombs) > 0 and len(neweqns) < 20:
eq0,eq1 = eqcombs.pop()
for i in range(6):
monom = [0,0,0,0,0,0]
monom[i] = 1
if eq0.coeff(*monom) != 0 and eq1.coeff(*monom) != 0:
tempeq = (eq0.as_basic()*eq1.coeff(*monom)-eq0.coeff(*monom)*eq1.as_basic()).subs(allsymbols+pairwiseinvsubs).expand()
if self.codeComplexity(tempeq) > 200:
continue
eq = simplify(tempeq)
if eq == S.Zero:
continue
peq = Poly(eq,*pairwisevars)
if peq.degree > 0 and self.codeComplexity(eq) > maxcomplexity:
# don't need such complex equations
continue
if not self.isExpressionUnique(eqns,eq) or not self.isExpressionUnique(eqns,-eq):
continue
if eq.has_any_symbols(*unknownvars): # be a little strict about new candidates
eqns.append(eq)
eqnew, symbols = self.groupTerms(eq, unknownvars, symbolgen)
allsymbols += symbols
neweqns.append([self.codeComplexity(eq),Poly(eqnew,*unknownvars)])
orgeqns = neweqns[:]
# try to solve for all pairwise variables
systemofequations = []
for i in range(len(reduceeqns)):
if reduceeqns[i].has_any_symbols(pairwisevars[4],pairwisevars[5]):
# The target variable in this case is given by the column **Attrition**, which contains categorical values and therefore requires numerical encoding. We numerically encode it by creating a dictionary with the mapping 'Yes' : 1 and 'No' : 0.
# In[ ]:
# Define a dictionary for the target mapping
target_map = {'Yes':1, 'No':0}
# Use the pandas apply method to numerically encode our attrition target variable
target = attrition["Attrition"].apply(lambda x: target_map[x])
target.head(3)
# However, a quick inspection of the counts of 'Yes' and 'No' in the target variable tells us that there is quite a large skew in the target, as shown below.
# In[ ]:
data = [go.Bar(x=attrition["Attrition"].value_counts().index.values,y= attrition["Attrition"].value_counts().values)]
py.iplot(data, filename='basic-bar')
# Therefore we have to keep in mind that there is quite a big imbalance in our target variable. Many statistical techniques have been put forth to treat imbalances in data (oversampling or undersampling). In this notebook, I will use an oversampling technique known as SMOTE to treat this imbalance.
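# As a quick illustrative sanity check, the relative class frequencies can also
# be printed directly with pandas' value_counts on the Attrition column loaded
# above:
# In[ ]:
# Relative frequency of each Attrition class
attrition["Attrition"].value_counts(normalize=True)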
# # 3. Implementing Machine Learning Models
#
# Having performed some exploratory data analysis and simple feature engineering, as well as having ensured that all categorical values are encoded, we are now ready to proceed to building our models.
#
# As alluded to in the introduction of this notebook, we will aim to evaluate and contrast the performances of a handful of different learning models.
#
# **Splitting Data into Train and Test sets**
#
# But before we even start training a model, we will have to partition our dataset into a training set and a test set (unlike Kaggle competitions where the train and test data are already segregated for you). To split our data we will utilise sklearn's train_test_split method.
# In[ ]:
# Import the train_test_split method
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import StratifiedShuffleSplit
# Note: sklearn.cross_validation is deprecated; newer scikit-learn versions provide these in sklearn.model_selection
# Split data into train and test sets as well as for validation and testing
train, test, target_train, target_val = train_test_split(attrition_final,target,train_size= 0.80,random_state=0);
#train, test, target_train, target_val = StratifiedShuffleSplit(attrition_final, target, random_state=0);
# **SMOTE to oversample due to the skewness in target**
#
# Since we have already noted the severe imbalance in the values within the target variable, let us implement the SMOTE method in the dealing with this skewed value via the imblearn Python package.
# In[ ]:
oversampler=SMOTE(random_state=0)
smote_train, smote_target = oversampler.fit_sample(train,target_train)
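# As a brief illustrative check that SMOTE has balanced the two classes, the
# resampled target can be tallied with the standard library's Counter:
# In[ ]:
from collections import Counter
Counter(smote_target)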
# ----
# ## A. Random Forest Classifier
#
# The Random Forest method, first introduced by Breiman in 2001, can be grouped under the category of ensemble models. Why ensemble? The building block of a Random Forest is the ubiquitous Decision Tree. The decision tree as a standalone model is often considered a "weak learner" as its predictive performance is relatively poor. However, a Random Forest gathers a group (or ensemble) of decision trees and uses their combined predictive capabilities to obtain relatively strong predictive performance - a "strong learner".
#
# This principle of using a collection of "weak learners" to come together to create a "strong learner" underpins the basis of ensemble methods which one regularly comes across in Machine learning. For a really good read that drives home the basics of the Random Forest, refer to this [CitizenNet blog][1]
#
#
# [1]: http://blog.citizennet.com/blog/2012/11/10/random-forests-ensembles-and-performance-metrics
# **Initialising Random Forest parameters**
#
# We will utilise the Scikit-learn library to construct a Random Forest model. To do so, we have to first define our set of parameters that we will feed into our Random Forest classifier as follows
# In[ ]:
seed = 0 # We set our random seed to zero for reproducibility
# Random Forest parameters
rf_params = {
    'n_jobs': -1,
    'n_estimators': 1000,
    'max_depth': 4,
    'min_samples_leaf': 2,
    'max_features': 'sqrt',
    'random_state': seed,
    'verbose': 0
}
# Having defined our parameters, we can initialise a Random Forest object by using scikit-learn's **RandomForestClassifier** and unpacking the parameters by adding the double asterisks symbols as follows
# In[ ]:
rf = RandomForestClassifier(**rf_params)
# The next step after prepping our Random Forest model would be to start building a forest of trees using our training set and fitting it to our attrition target variable. We do so by simply using the **fit** call as follows
# In[ ]:
rf.fit(smote_train, smote_target)
print("Fitting of Random Forest finished")
# Having fitted our forest of trees with our parameters to the training set against our target variable, we now have a learning model **rf** which we can make predictions out of. To use our Random Forest in predicting against our test data, we can use sklearn's **.predict** method as follows
# In[ ]:
rf_predictions = rf.predict(test)
print("Predictions finished")
# Scoring the model
# In[ ]:
print("Accuracy score: {}".format(accuracy_score(target_val, rf_predictions)))
print("="*80)
print(classification_report(target_val, rf_predictions))
# **Accuracy of the model**
#
# As observed, our Random Forest returns an accuracy of approx 88% for its predictions, and at first glance this might seem to be a pretty good performing model. However, when we remember how skewed our target variable is, with roughly 84% 'No' and 16% 'Yes', our model is only predicting slightly better than always guessing the majority class.
#
# It would be more informative to balance out the precision and recall scores, as shown in the classification report output. It ultimately comes down to business considerations as to whether one should prioritise one metric over the other - i.e. Precision vs Recall.
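# To make the precision/recall trade-off concrete, the illustrative cell below
# prints the confusion matrix and both scores for the Random Forest
# predictions, reusing target_val and rf_predictions from above.
# In[ ]:
from sklearn.metrics import confusion_matrix, precision_score, recall_score
print(confusion_matrix(target_val, rf_predictions))
print("Precision: {:.3f}".format(precision_score(target_val, rf_predictions)))
print("Recall: {:.3f}".format(recall_score(target_val, rf_predictions)))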
# ### Feature Ranking via the Random Forest
#
# The Random Forest classifier in Sklearn also contains a very convenient attribute **feature_importances_** which tells us which features within our dataset has been given most importance through the Random Forest algorithm. Shown below is an Interactive Plotly diagram of the various feature importances.
# In[ ]:
# Scatter plot
trace = go.Scatter(
    y=rf.feature_importances_,
    x=attrition_final.columns.values,
    mode='markers',
    marker=dict(
        sizemode='diameter',
        sizeref=1,
        size=13,
        color=rf.feature_importances_,
        colorscale='Portland',
        showscale=True
    ),
    text=attrition_final.columns.values
)
data = [trace]
layout = go.Layout(
    autosize=True,
    title='Random Forest Feature Importance',
    hovermode='closest',
    xaxis=dict(ticklen=5, showgrid=False, zeroline=False, showline=False),
    yaxis=dict(title='Feature Importance', showgrid=False, zeroline=False, ticklen=5, gridwidth=2),
    showlegend=False
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig,filename='scatter2010')
# ### Visualising Tree Diagram with Graphviz
#
# Let us now visualise how a single decision tree traverses the features in our data. sklearn's tree module comes with a very convenient **export_graphviz** function that exports the tree diagram to a .dot file, which we then convert into a .png that you can view from the output of this kernel.
# In[ ]:
from sklearn import tree
from IPython.display import Image as PImage
from subprocess import check_call
from PIL import Image, ImageDraw, ImageFont
import re
decision_tree = tree.DecisionTreeClassifier(max_depth = 4)
decision_tree.fit(train, target_train)
# Predicting results for test dataset
y_pred = decision_tree.predict(test)
# Export our trained model as a .dot file
with open("tree1.dot", 'w') as f:
    f = tree.export_graphviz(decision_tree,
                             out_file=f,
                             max_depth=4,
                             impurity=False,
                             feature_names=attrition_final.columns.values,
                             class_names=['No', 'Yes'],
                             rounded=True,
                             filled=True)
#Convert .dot to .png to allow display in web notebook
check_call(['dot','-Tpng','tree1.dot','-o','tree1.png'])
# Annotating chart with PIL
img = Image.open("tree1.png")
draw = ImageDraw.Draw(img)
img.save('sample-out.png')
PImage("sample-out.png", height=2000, width=1900)
# ----
#
# ## B. Gradient Boosted Classifier
#
# Gradient Boosting is also an ensemble technique, much like the Random Forest, where a combination of weak tree learners is brought together to form a relatively stronger learner. The technique involves defining some loss function that you want minimised and a method/algorithm to minimise it. As the name suggests, the algorithm used to minimise the loss function is a gradient descent method, which adds decision trees that "point" in the direction that reduces our loss function (the downward gradient).
#
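# As a small aside, the "follow the downward gradient" idea can be sketched on a toy regression problem: with a squared-error loss the negative gradient is simply the residual, so each new tree is fit to whatever the current ensemble still gets wrong. The snippet below is illustrative only and entirely separate from the attrition model (X_toy/y_toy are synthetic names used just for this demo):
# In[ ]:
import numpy as np
from sklearn.tree import DecisionTreeRegressor

rng = np.random.RandomState(0)
X_toy = np.sort(rng.uniform(0, 6, size=(200, 1)), axis=0)
y_toy = np.sin(X_toy).ravel() + rng.normal(scale=0.1, size=200)

learning_rate_toy = 0.1
prediction = np.zeros_like(y_toy)  # start from a zero model
for _ in range(100):
    residual = y_toy - prediction  # negative gradient of the squared-error loss
    stump = DecisionTreeRegressor(max_depth=2).fit(X_toy, residual)
    prediction += learning_rate_toy * stump.predict(X_toy)  # take a small step "downhill"

print("Toy boosting MSE: {:.4f}".format(np.mean((y_toy - prediction) ** 2)))
#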
# Setting up a Gradient Boosting classifier is easy enough in sklearn and involves only a handful of lines of code. Again we first set up our classifier's parameters
#
# **Initialising Gradient Boosting Parameters**
#
# In general there are a handful of key parameters when setting up tree-based or gradient boosted models. These are always going to be the number of estimators, the maximum depth to which you want your trees to grow, and the minimum number of samples per leaf
# In[ ]:
# Gradient Boosting Parameters
gb_params = {
    'n_estimators': 1500,
    'learning_rate': 0.25,
    'max_depth': 4,
    'min_samples_leaf': 2,
    'subsample': 1,
    'max_features': 'sqrt',
    'random_state': seed,
    'verbose': 0
}
# Having defined our parameters, we can now apply the usual fit and predict methods on our train and test sets respectively
# In[ ]:
gb = GradientBoostingClassifier(**gb_params)
# Fit the model to our SMOTEd train and target data
exit()
if adrs:
s = [str(c.index), effect.name]
else:
s = [str(c.index), effect.id_]
for x in metrics:
if value <= x[1]:
effect_dct[(effect, count)][x] += 1.0
s.append('1')
else:
s.append('0')
if continuous:
s.append(str(value))
else:
s.append(str(int(value)))
ss.append(s)
break
self.accuracies = effect_dct
final_accs = self.results_analysed(ra_named, metrics, effect_type())
ss = sorted(ss, key=lambda xx: xx[0])
#ss = sorted(ss, key=lambda xx: int(xx[0]))
top_pairwise = [0.0] * 10
for s in ss:
if s[2] == '1':
top_pairwise[0] += 1.0
if s[3] == '1':
top_pairwise[1] += 1.0
if s[4] == '1':
top_pairwise[2] += 1.0
if s[5] == '1':
top_pairwise[3] += 1.0
if s[6] == '1':
top_pairwise[4] += 1.0
if s[7] == '1':
top_pairwise[5] += 1.0
if s[8] == '1':
top_pairwise[6] += 1.0
if s[9] == '1':
top_pairwise[7] += 1.0
if s[10] == '1':
top_pairwise[8] += 1.0
if s[11] == '1':
top_pairwise[9] += 1.0
sj = ','.join(s)
sj += '\n'
ra_out.write(sj)
ra_out.close()
cov = [0] * 10
for effect, c in list(self.accuracies.keys()):
accs = self.accuracies[effect, c]
for m_i in range(len(metrics)):
v = accs[metrics[m_i]]
if v > 0.0:
cov[m_i] += 1
if continuous:
headers = ['0.1%ile', '.25%ile', '0.5%ile', '1%ile', '5%ile',
'10%ile', '20%ile', '33%ile', '50%ile', '100%ile']
else:
headers = ['top10', 'top25', 'top50', 'top100', 'top{}'.format(len(self.compound_pairs)),
'top1%', 'top5%', 'top10%', 'top50%', 'top100%']
# Create average indication accuracy list in percent
ia = []
for m in metrics:
ia.append(final_accs[m] * 100.0)
# Create average pairwise accuracy list in percent
pa = [(x * 100.0 / len(ss)) for x in top_pairwise]
# Indication coverage
cov = map(int, cov)
# Append 3 lists to df and write to file
with open(summ, 'w') as sf:
sf.write("\t" + '\t'.join(headers) + '\n')
ast = "\t".join(map(str, [format(x, ".3f") for x in ia]))
pst = "\t".join(map(str, [format(x, ".3f") for x in pa]))
cst = "\t".join(map(str, cov)) + '\n'
sf.write('aia\t{}\napa\t{}\nic\t{}\n'.format(ast, pst, cst))
# pretty print the average indication accuracies
cut = 0
print("\taia")
for m in metrics:
print("{}\t{:.3f}".format(headers[cut], final_accs[m] * 100.0))
cut += 1
print('\n')
def ml(self, method='rf', effect=None, benchmark=False, adrs=False, predict=[], threshold=0.5,
negative='random', seed=42, out=''):
"""!
        Create an ML classifier for a specified indication to make drug-disease predictions, or for all indications/ADRs when benchmarking
@param method str: type of machine learning algorithm to use ('rf' or 'log')
        @param effect Indication or ADR: provide a specific Indication or ADR object to train a classifier
@param benchmark bool: benchmark the ML pipeline by training a classifier with LOOCV for each Indication or ADR
@param adrs bool: if the models are trained with ADRs instead of Indications
@param predict list: provide a list of Compound objects to classify with the model (only used in
combination with effect=Indication/ADR object)
@param threshold float: decision threshold for positive vs negative classification
@param negative str: choose random negative samples (default) or 'inverse' for most opposite signatures
@param seed int: choose a seed for reproducibility
@param out str: file name extension for the output of benchmark (note: must have benchmark=True)
@return Returns None
"""
if method in ['1csvm', 'svm']:
print('SVMs are currently unsupported by this version of cando.py. Please choose "log" or "rf" - quitting.')
quit()
if out:
if not os.path.exists('./raw_results/'):
os.system('mkdir raw_results')
if not os.path.exists('./results_analysed_named/'):
os.system('mkdir results_analysed_named')
paired_negs = {}
# gather approved compound signatures for training
def split_cs(efct, cmpd=None):
mtrx = []
for cm in efct.compounds:
if cmpd:
if cm.id_ == cmpd.id_:
continue
if self.indication_proteins:
if len(efct.proteins) >= 3:
eps = []
for ep in efct.proteins:
ep_index = self.protein_id_to_index[ep.id_]
eps.append(cm.sig[ep_index])
mtrx.append(eps)
else:
mtrx.append(cm.sig)
return mtrx, [1] * len(mtrx)
def choose_negatives(efct, neg_set=negative, s=None, hold_out=None, avoid=[], test=None):
if neg_set == 'inverse':
if not self.compute_distance and not self.read_dists:
print('Please compute all compound-compound distances before using inverse_negatives().\n'
'Re-run with "compute_distance=True" or read in pre-computed distance file "read_dists="'
'in the CANDO object instantiation -- quitting.')
quit()
negatives = []
used = avoid
def pick_first_last(cmpd, s):
if neg_set == 'inverse':
r = int(len(self.compounds) / 2)
shuffled = [cx[0].id_ for cx in cmpd.similar][::-1][0:r]
else:
shuffled = [cx.id_ for cx in self.compounds]
if s:
random.seed(s)
random.shuffle(shuffled)
else:
s = random.randint(0, len(self.compounds) - 1)
random.seed(s)
random.shuffle(shuffled)
for si in range(len(shuffled)):
n = shuffled[si]
if n in used:
continue
inv = self.get_compound(n)
if inv not in efct.compounds:
if n not in used:
paired_negs[cmpd] = inv
return inv
if test:
inv = pick_first_last(c, s)
return inv
for ce in efct.compounds:
if hold_out:
if ce.id_ == hold_out.id_:
continue
inv = pick_first_last(ce, s)
if self.indication_proteins:
if len(efct.proteins) >= 3:
eps = []
for ep in efct.proteins:
ep_index = self.protein_id_to_index[ep.id_]
eps.append(inv.sig[ep_index])
negatives.append(eps)
else:
negatives.append(inv.sig)
used.append(inv.id_)
return negatives, [0] * len(negatives), used
def model(meth, samples, labels, params=None, seed=None):
if meth == 'rf':
m = RandomForestClassifier(n_estimators=100, random_state=seed)
m.fit(samples, labels)
return m
elif meth == 'svm':
m = svm.SVC(kernel='rbf', gamma='scale', degree=3, random_state=seed)
m.fit(samples, labels)
return m
elif meth == '1csvm':
keep = []
for i in range(len(samples)):
if labels[i] == 1:
keep.append(samples[i])
m = svm.OneClassSVM(kernel='poly', gamma='scale', degree=2)
m.fit(keep)
return m
elif meth == 'log':
m = LogisticRegression(penalty='l2', solver='newton-cg', random_state=seed)
m.fit(samples, labels)
return m
else:
print("Please enter valid machine learning method ('rf', '1csvm', 'log', or 'svm')")
quit()
if benchmark:
if adrs:
effects = sorted(self.adrs, key=lambda x: (len(x.compounds), x.id_))[::-1]
else:
effects = sorted(self.indications, key=lambda x: (len(x.compounds), x.id_))[::-1]
if out:
frr = open('./raw_results/raw_results_ml_{}'.format(out), 'w')
frr.write('Compound,Effect,Prob,Neg,Neg_prob\n')
fran = open('./results_analysed_named/results_analysed_named_ml_{}'.format(out), 'w')
fsum = open('summary_ml-{}'.format(out), 'w')
else:
if len(effect.compounds) < 1:
print('No compounds associated with {} ({}), quitting.'.format(effect.name, effect.id_))
quit()
            elif self.indication_proteins and len(effect.proteins) <= 2:
                print('Less than 3 proteins associated with {} ({}), quitting.'.format(effect.name, effect.id_))
                quit()
effects = [effect]
rf_scores = []
for e in effects:
if len(e.compounds) < 2:
continue
if self.indication_proteins:
if not len(e.proteins) >= 3:
continue
tp_fn = [0, 0]
fp_tn = [0, 0]
for c in e.compounds:
pos = split_cs(e, cmpd=c)
negs = choose_negatives(e, s=seed, hold_out=c, avoid=[])
already_used = negs[2]
train_samples = np.array(pos[0] + negs[0])
train_labels = np.array(pos[1] + negs[1])
mdl = model(method, train_samples, train_labels, seed=seed)
test_neg = choose_negatives(e, s=seed, avoid=already_used, test=c)
if self.indication_proteins:
eps_pos = []
eps_neg = []
for ep in e.proteins:
ep_index = self.protein_id_to_index[ep.id_]
eps_pos.append(c.sig[ep_index])
eps_neg.append(test_neg.sig[ep_index])
pred = mdl.predict_proba(np.array([eps_pos]))
pred_neg = mdl.predict_proba(np.array([eps_neg]))
else:
pred = mdl.predict_proba(np.array([c.sig]))
pred_neg = mdl.predict_proba(np.array([test_neg.sig]))
pos_class = list(mdl.classes_).index(1)
if pred[0][pos_class] > threshold:
tp_fn[0] += 1
else:
tp_fn[1] += 1
if pred_neg[0][pos_class] > threshold:
fp_tn[0] += 1
else:
fp_tn[1] += 1
if benchmark and out:
frr.write('{},{},{},{},{}\n'.format(c.id_, e.id_, pred[0][pos_class],
test_neg.id_, pred_neg[0][pos_class]))
# predict whether query drugs are associated with this indication
if predict:
print('Indication: {}'.format(e.name))
print('Leave-one-out cross validation: TP={}, FP={}, FN={}, TN={}, Acc={:0.3f}'.format(
tp_fn[0], fp_tn[0], tp_fn[1], fp_tn[1], 100 * ((tp_fn[0]+fp_tn[1]) / (float(len(e.compounds))*2))))
negs = choose_negatives(e, s=seed)
pos = split_cs(e)
train_samples = np.array(pos[0] + negs[0])
train_labels = np.array(pos[1] + negs[1])
mdl = model(method, train_samples, train_labels, seed=seed)
print('\tCompound\tProb')
for c in predict:
inv = choose_negatives(effect, s=seed, test=c, avoid=negs[2])
if self.indication_proteins:
eps_pos = []
eps_neg = []
for ep in e.proteins:
ep_index = self.protein_id_to_index[ep.id_]
eps_pos.append(c.sig[ep_index])
                            eps_neg.append(inv.sig[ep_index])
                        pred = mdl.predict_proba(np.array([eps_pos]))
                        pred_inv = mdl.predict_proba(np.array([eps_neg]))
else:
pred = mdl.predict_proba(np.array([c.sig]))
pred_inv = mdl.predict_proba(np.array([inv.sig]))
pos_class = list(mdl.classes_).index(1)
print('\t{}\t{:0.3f}'.format(c.name, pred[0][pos_class]))
#print('\t{}\t{:0.3f}\t(random negative of {})'.format(inv.name, pred_inv[0][pos_class], c.name))
# append loocv results to combined list
rf_scores.append((e, tp_fn, fp_tn))
sm = [0, 0, 0, 0]
if benchmark:
for rf_score in rf_scores:
efct = rf_score[0]
tfp = rf_score[1]
ffp = rf_score[2]
acc = (tfp[0] + ffp[1]) / (float(len(efct.compounds) * 2))
sm[0] += len(efct.compounds)
sm[1] += acc
sm[2] += (acc * len(efct.compounds))
if acc > 0.5:
sm[3] += 1
if out:
fran.write('{}\t{}\t{}\t{}\t{:0.3f}\t{}\n'.format(efct.id_, len(efct.compounds),
tfp[0], tfp[1], 100 * acc, efct.name))
if out:
fsum.write('aia\t{:0.3f}\n'.format(100 * (sm[1]/len(rf_scores))))
fsum.write('apa\t{:0.3f}\n'.format(100 * (sm[2] / sm[0])))
fsum.write('ic\t{}\n'.format(sm[3]))
print('aia\t{:0.3f}'.format(100 * (sm[1]/len(rf_scores))))
print('apa\t{:0.3f}'.format(100 * (sm[2] / sm[0])))
print('ic\t{}'.format(sm[3]))
return
def raw_results_roc(self, rr_files, labels, save='roc-raw_results.pdf'):
if len(labels) != len(rr_files):
print('Please enter a label for each input raw results file '
'({} files, {} labels).'.format(len(rr_files), len(labels)))
quit()
n_per_d = {}
dt = {}
ds = {}
metrics = {}
truth = []
scores = []
for rr_file in rr_files:
for l in open(rr_file, 'r').readlines()[1:]:
ls = l.strip().split(',')
pp = float(ls[2])
truth.append(1)
scores.append(pp)
np = float(ls[4])
truth.append(0)
scores.append(np)
if ls[1] not in n_per_d:
n_per_d[ls[1]] = 1
else:
n_per_d[ls[1]] += 1
            pr
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import glob
import os
import re
import time
from ironic_lib import disk_utils
from ironic_lib import metrics_utils
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import netutils
from oslo_utils import strutils
import six
from ironic.common import exception
from ironic.common.glance_service import service_utils
from ironic.common.i18n import _
from ironic.common import image_service
from ironic.common import keystone
from ironic.common import states
from ironic.common import utils
from ironic.conductor import utils as manager_utils
from ironic.conf import CONF
from ironic.drivers.modules import agent_client
from ironic.drivers.modules import image_cache
from ironic.drivers import utils as driver_utils
from ironic import objects
# TODO(Faizan): Move this logic to common/utils.py and deprecate
# rootwrap_config.
# This is required to set the default value of ironic_lib option
# only if rootwrap_config does not contain the default value.
if CONF.rootwrap_config != '/etc/ironic/rootwrap.conf':
root_helper = 'sudo ironic-rootwrap %s' % CONF.rootwrap_config
CONF.set_default('root_helper', root_helper, 'ironic_lib')
LOG = logging.getLogger(__name__)
METRICS = metrics_utils.get_metrics_logger(__name__)
SUPPORTED_CAPABILITIES = {
'boot_option': ('local', 'netboot'),
'boot_mode': ('bios', 'uefi'),
'secure_boot': ('true', 'false'),
'trusted_boot': ('true', 'false'),
'disk_label': ('msdos', 'gpt'),
}
DISK_LAYOUT_PARAMS = ('root_gb', 'swap_mb', 'ephemeral_gb')
# All functions are called from deploy() directly or indirectly.
# They are split for stub-out.
_IRONIC_SESSION = None
def _get_ironic_session():
global _IRONIC_SESSION
if not _IRONIC_SESSION:
auth = keystone.get_auth('service_catalog')
_IRONIC_SESSION = keystone.get_session('service_catalog',
auth=auth)
return _IRONIC_SESSION
def _wrap_ipv6(ip):
if netutils.is_valid_ipv6(ip):
return "[%s]" % ip
return ip
def get_ironic_api_url():
"""Resolve Ironic API endpoint
    either from config or from Keystone catalog.
"""
ironic_api = CONF.conductor.api_url
if not ironic_api:
try:
ironic_session = _get_ironic_session()
ironic_api = keystone.get_service_url(ironic_session)
except (exception.KeystoneFailure,
exception.CatalogNotFound,
exception.KeystoneUnauthorized) as e:
raise exception.InvalidParameterValue(_(
"Couldn't get the URL of the Ironic API service from the "
"configuration file or keystone catalog. Keystone error: "
"%s") % six.text_type(e))
# NOTE: we should strip '/' from the end because it might be used in
# hardcoded ramdisk script
ironic_api = ironic_api.rstrip('/')
return ironic_api
def discovery(portal_address, portal_port):
"""Do iSCSI discovery on portal."""
utils.execute('iscsiadm',
'-m', 'discovery',
'-t', 'st',
'-p', '%s:%s' % (_wrap_ipv6(portal_address), portal_port),
run_as_root=True,
check_exit_code=[0],
attempts=5,
delay_on_retry=True)
def login_iscsi(portal_address, portal_port, target_iqn):
"""Login to an iSCSI target."""
utils.execute('iscsiadm',
'-m', 'node',
'-p', '%s:%s' % (_wrap_ipv6(portal_address), portal_port),
'-T', target_iqn,
'--login',
run_as_root=True,
check_exit_code=[0],
attempts=5,
delay_on_retry=True)
error_occurred = False
try:
# Ensure the login complete
verify_iscsi_connection(target_iqn)
# force iSCSI initiator to re-read luns
force_iscsi_lun_update(target_iqn)
# ensure file system sees the block device
check_file_system_for_iscsi_device(portal_address,
portal_port,
target_iqn)
except (exception.InstanceDeployFailure,
processutils.ProcessExecutionError) as e:
with excutils.save_and_reraise_exception():
error_occurred = True
LOG.error("Failed to login to an iSCSI target due to %s", e)
finally:
if error_occurred:
try:
logout_iscsi(portal_address, portal_port, target_iqn)
delete_iscsi(portal_address, portal_port, target_iqn)
except processutils.ProcessExecutionError as e:
LOG.warning("An error occurred when trying to cleanup "
"failed ISCSI session error %s", e)
def check_file_system_for_iscsi_device(portal_address,
portal_port,
target_iqn):
"""Ensure the file system sees the iSCSI block device."""
check_dir = "/dev/disk/by-path/ip-%s:%s-iscsi-%s-lun-1" % (portal_address,
portal_port,
target_iqn)
total_checks = CONF.disk_utils.iscsi_verify_attempts
for attempt in range(total_checks):
if os.path.exists(check_dir):
break
time.sleep(1)
if LOG.isEnabledFor(logging.DEBUG):
existing_devs = ', '.join(glob.iglob('/dev/disk/by-path/*iscsi*'))
LOG.debug("iSCSI connection not seen by file system. Rechecking. "
"Attempt %(attempt)d out of %(total)d. Available iSCSI "
"devices: %(devs)s.",
{"attempt": attempt + 1,
"total": total_checks,
"devs": existing_devs})
else:
msg = _("iSCSI connection was not seen by the file system after "
"attempting to verify %d times.") % total_checks
LOG.error(msg)
raise exception.InstanceDeployFailure(msg)
def verify_iscsi_connection(target_iqn):
"""Verify iscsi connection."""
LOG.debug("Checking for iSCSI target to become active.")
for attempt in range(CONF.disk_utils.iscsi_verify_attempts):
out, _err = utils.execute('iscsiadm',
'-m', 'node',
'-S',
run_as_root=True,
check_exit_code=[0])
if target_iqn in out:
break
time.sleep(1)
LOG.debug("iSCSI connection not active. Rechecking. Attempt "
"%(attempt)d out of %(total)d",
{"attempt": attempt + 1,
"total": CONF.disk_utils.iscsi_verify_attempts})
else:
msg = _("iSCSI connection did not become active after attempting to "
"verify %d times.") % CONF.disk_utils.iscsi_verify_attempts
LOG.error(msg)
raise exception.InstanceDeployFailure(msg)
def force_iscsi_lun_update(target_iqn):
"""force iSCSI initiator to re-read luns."""
LOG.debug("Re-reading iSCSI luns.")
utils.execute('iscsiadm',
'-m', 'node',
'-T', target_iqn,
'-R',
run_as_root=True,
check_exit_code=[0])
def logout_iscsi(portal_address, portal_port, target_iqn):
"""Logout from an iSCSI target."""
utils.execute('iscsiadm',
'-m', 'node',
'-p', '%s:%s' % (_wrap_ipv6(portal_address), portal_port),
'-T', target_iqn,
'--logout',
run_as_root=True,
check_exit_code=[0],
attempts=5,
delay_on_retry=True)
def delete_iscsi(portal_address, portal_port, target_iqn):
"""Delete the iSCSI target."""
# Retry delete until it succeeds (exit code 0) or until there is
# no longer a target to delete (exit code 21).
utils.execute('iscsiadm',
'-m', 'node',
'-p', '%s:%s' % (_wrap_ipv6(portal_address), portal_port),
'-T', target_iqn,
'-o', 'delete',
run_as_root=True,
check_exit_code=[0, 21],
attempts=5,
delay_on_retry=True)
def _replace_lines_in_file(path, regex_pattern, replacement):
with open(path) as f:
lines = f.readlines()
compiled_pattern = re.compile(regex_pattern)
with open(path, 'w') as f:
for line in lines:
line = compiled_pattern.sub(replacement, line)
f.write(line)
def _replace_root_uuid(path, root_uuid):
root = 'UUID=%s' % root_uuid
pattern = r'(\(\(|\{\{) ROOT (\)\)|\}\})'
_replace_lines_in_file(path, pattern, root)
def _replace_boot_line(path, boot_mode, is_whole_disk_image,
trusted_boot=False, iscsi_boot=False):
if is_whole_disk_image:
boot_disk_type = 'boot_whole_disk'
elif trusted_boot:
boot_disk_type = 'trusted_boot'
elif iscsi_boot:
boot_disk_type = 'boot_iscsi'
else:
boot_disk_type = 'boot_partition'
if boot_mode == 'uefi' and not CONF.pxe.ipxe_enabled:
pattern = '^((set )?default)=.*$'
boot_line = '\\1=%s' % boot_disk_type
else:
pxe_cmd = 'goto' if CONF.pxe.ipxe_enabled else 'default'
pattern = '^%s .*$' % pxe_cmd
boot_line = '%s %s' % (pxe_cmd, boot_disk_type)
_replace_lines_in_file(path, pattern, boot_line)
def _replace_disk_identifier(path, disk_identifier):
pattern = r'(\(\(|\{\{) DISK_IDENTIFIER (\)\)|\}\})'
_replace_lines_in_file(path, pattern, disk_identifier)
def switch_pxe_config(path, root_uuid_or_disk_id, boot_mode,
is_whole_disk_image, trusted_boot=False,
iscsi_boot=False):
"""Switch a pxe config from deployment mode to service mode.
:param path: path to the pxe config file in tftpboot.
:param root_uuid_or_disk_id: root uuid in case of partition image or
disk_id in case of whole disk image.
:param boot_mode: if boot mode is uefi or bios.
:param is_whole_disk_image: if the image is a whole disk image or not.
:param trusted_boot: if boot with trusted_boot or not. The usage of
is_whole_disk_image and trusted_boot are mutually exclusive. You can
have one or neither, but not both.
:param iscsi_boot: if boot is from an iSCSI volume or not.
"""
if not is_whole_disk_image:
_replace_root_uuid(path, root_uuid_or_disk_id)
else:
_replace_disk_identifier(path, root_uuid_or_disk_id)
_replace_boot_line(path, boot_mode, is_whole_disk_image, trusted_boot,
iscsi_boot)
def get_dev(address, port, iqn, lun):
"""Returns a device path for given parameters."""
dev = ("/dev/disk/by-path/ip-%s:%s-iscsi-%s-lun-%s"
% (address, port, iqn, lun))
return dev
def deploy_partition_image(
address, port, iqn, lun, image_path,
root_mb, swap_mb, ephemeral_mb, ephemeral_format, node_uuid,
preserve_ephemeral=False, configdrive=None,
boot_option=None, boot_mode="bios", disk_label=None):
"""All-in-one function to deploy a partition image to a node.
:param address: The iSCSI IP address.
:param port: The iSCSI port number.
:param iqn: The iSCSI qualified name.
:param lun: The iSCSI logical unit number.
:param image_path: Path for the instance's disk image.
:param root_mb: Size of the root partition in megabytes.
:param swap_mb: Size of the swap partition in megabytes.
:param ephemeral_mb: Size of the ephemeral partition in megabytes. If 0,
no ephemeral partition will be created.
:param ephemeral_format: The type of file system to format the ephemeral
partition.
:param node_uuid: node's uuid. Used for logging.
:param preserve_ephemeral: If True, no filesystem is written to the
ephemeral block device, preserving whatever content it had (if the
partition table has not changed).
:param configdrive: Optional. Base64 encoded Gzipped configdrive content
or configdrive HTTP URL.
:param boot_option: Can be "local" or "netboot". "netboot" by default.
:param boot_mode: Can be "bios" or "uefi". "bios" by default.
:param disk_label: The disk label to be used when creating the
partition table. Valid values are: "msdos", "gpt" or None; If None
Ironic will figure it out according to the boot_mode parameter.
:raises: InstanceDeployFailure if image virtual size is bigger than root
partition size.
:returns: a dictionary containing the following keys:
'root uuid': UUID of root partition
'efi system partition uuid': UUID of the uefi system partition
(if boot mode is uefi).
NOTE: If key exists but value is None, it means partition doesn't
exist.
"""
boot_option = boot_option or get_default_boot_option()
image_mb = disk_utils.get_image_mb(image_path)
if image_mb > root_mb:
msg = (_('Root partition is too small for requested image. Image '
'virtual size: %(image_mb)d MB, Root size: %(root_mb)d MB')
% {'image_mb': image_mb, 'root_mb': root_mb})
raise exception.InstanceDeployFailure(msg)
with _iscsi_setup_and_handle_errors(address, port, iqn, lun) as dev:
uuid_dict_returned = disk_utils.work_on_disk(
dev, root_mb, swap_mb, ephemeral_mb, ephemeral_format, image_path,
            node_uuid,
import hashlib
import os
import subprocess
import sys
import tempfile
import warnings
from copy import deepcopy
from datetime import datetime
from logging import getLogger
from time import time, sleep
from typing import Optional, Mapping, Sequence, Any, Callable, Union
from pathlib2 import Path
from ..backend_api import Session
from ..backend_interface.util import get_or_create_project, exact_match_regex
from ..storage.util import hash_dict
from ..task import Task
from ..backend_api.services import tasks as tasks_service
logger = getLogger('clearml.automation.job')
class BaseJob(object):
_job_hash_description = 'job_hash={}'
_job_hash_property = 'pipeline_job_hash'
_hashing_callback = None
def __init__(self):
# type: () -> ()
"""
        Base Job is an abstract ClearML Job
"""
self._is_cached_task = False
self._worker = None
self.task_parameter_override = None
self.task = None
self.task_started = False
def get_metric(self, title, series):
# type: (str, str) -> (float, float, float)
"""
Retrieve a specific scalar metric from the running Task.
:param str title: Graph title (metric)
:param str series: Series on the specific graph (variant)
:return: A tuple of min value, max value, last value
"""
metrics, title, series, values = self.get_metric_req_params(title, series)
res = self.task.send(
tasks_service.GetAllRequest(
id=[self.task.id],
page=0,
page_size=1,
only_fields=['id', ] + metrics
)
)
response = res.wait()
return tuple(response.response_data['tasks'][0]['last_metrics'][title][series][v] for v in values)
@staticmethod
def get_metric_req_params(title, series):
title = hashlib.md5(str(title).encode('utf-8')).hexdigest()
series = hashlib.md5(str(series).encode('utf-8')).hexdigest()
metric = 'last_metrics.{}.{}.'.format(title, series)
values = ['min_value', 'max_value', 'value']
metrics = [metric + v for v in values]
return metrics, title, series, values
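    # Note: the backend stores scalar metrics under md5-hashed field paths, so for example a
    # title "loss" and series "total" translate to fields of the form
    # 'last_metrics.<md5("loss")>.<md5("total")>.value' (plus .min_value / .max_value),
    # which get_metric() then unpacks from the returned task document.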
def launch(self, queue_name=None):
# type: (str) -> bool
"""
Send Job for execution on the requested execution queue
:param str queue_name:
:return False if Task is not in "created" status (i.e. cannot be enqueued) or cannot be enqueued
"""
if self._is_cached_task:
return False
try:
Task.enqueue(task=self.task, queue_name=queue_name)
return True
except Exception as ex:
logger.warning('Error enqueuing Task {} to {}: {}'.format(self.task, queue_name, ex))
return False
def abort(self):
# type: () -> ()
"""
Abort currently running job (can be called multiple times)
"""
if not self.task or self._is_cached_task:
return
try:
self.task.stopped()
except Exception as ex:
logger.warning(ex)
def elapsed(self):
# type: () -> float
"""
Return the time in seconds since job started. Return -1 if job is still pending
:return: Seconds from start.
"""
if not self.task_started and str(self.task.status) != Task.TaskStatusEnum.in_progress:
return -1
self.task_started = True
if not self.task.data.started:
self.task.reload()
if not self.task.data.started:
return -1
return (datetime.now(tz=self.task.data.started.tzinfo) - self.task.data.started).total_seconds()
def iterations(self):
# type: () -> int
"""
Return the last iteration value of the current job. -1 if job has not started yet
:return: Task last iteration.
"""
if not self.task_started and self.task.status != Task.TaskStatusEnum.in_progress:
return -1
self.task_started = True
return self.task.get_last_iteration()
def task_id(self):
# type: () -> str
"""
Return the Task id.
:return: The Task ID.
"""
return self.task.id
def status(self):
# type: () -> str
"""
Return the Job Task current status, see Task.TaskStatusEnum
:return: Task status Task.TaskStatusEnum in string.
"""
return self.task.status
def wait(self, timeout=None, pool_period=30.):
# type: (Optional[float], float) -> bool
"""
Wait until the task is fully executed (i.e., aborted/completed/failed)
:param timeout: maximum time (minutes) to wait for Task to finish
:param pool_period: check task status every pool_period seconds
:return: True, if Task finished.
"""
tic = time()
while timeout is None or time() - tic < timeout * 60.:
if self.is_stopped():
return True
sleep(pool_period)
return self.is_stopped()
def get_console_output(self, number_of_reports=1):
# type: (int) -> Sequence[str]
"""
Return a list of console outputs reported by the Task.
Returned console outputs are retrieved from the most updated console outputs.
:param int number_of_reports: number of reports to return, default 1, the last (most updated) console output
:return: List of strings each entry corresponds to one report.
"""
return self.task.get_reported_console_output(number_of_reports=number_of_reports)
def worker(self):
# type: () -> str
"""
Return the current worker id executing this Job. If job is pending, returns None
:return: ID of the worker executing / executed the job, or None if job is still pending.
"""
if self.is_pending():
return self._worker
if self._worker is None:
# the last console outputs will update the worker
self.get_console_output(number_of_reports=1)
# if we still do not have it, store empty string
if not self._worker:
self._worker = ''
return self._worker
def is_running(self):
# type: () -> bool
"""
Return True, if job is currently running (pending is considered False)
:return: True, if the task is currently in progress.
"""
return self.task.status == Task.TaskStatusEnum.in_progress
def is_stopped(self):
# type: () -> bool
"""
Return True, if job finished executing (for any reason)
        :return: True if the task is currently in one of these states: stopped / completed / failed / published.
"""
return self.task.status in (
Task.TaskStatusEnum.stopped, Task.TaskStatusEnum.completed,
Task.TaskStatusEnum.failed, Task.TaskStatusEnum.published)
def is_failed(self):
# type: () -> bool
"""
        Return True, if the job has executed and failed
        :return: True if the task is currently in the failed state
"""
return self.task.status in (Task.TaskStatusEnum.failed, )
def is_completed(self):
# type: () -> bool
"""
        Return True, if the job has executed and completed successfully
        :return: True if the task is currently in the completed or published state
"""
return self.task.status in (Task.TaskStatusEnum.completed, Task.TaskStatusEnum.published)
def is_aborted(self):
# type: () -> bool
"""
        Return True, if the job has executed and was aborted
        :return: True if the task is currently in the aborted (stopped) state
"""
return self.task.status in (Task.TaskStatusEnum.stopped, )
def is_pending(self):
# type: () -> bool
"""
Return True, if job is waiting for execution
        :return: True if the task is currently queued or in the created state.
"""
return self.task.status in (Task.TaskStatusEnum.queued, Task.TaskStatusEnum.created)
def started(self):
# type: () -> bool
"""
Return True, if job already started, or ended. False, if created/pending.
:return: False, if the task is currently in draft mode or pending.
"""
if not self.task_started and self.task.status in (
Task.TaskStatusEnum.in_progress, Task.TaskStatusEnum.created):
return False
self.task_started = True
return True
def delete(self):
# type: () -> bool
"""
Delete the current temporary job (before launching)
        Return False if the Job/Task could not be deleted
"""
if not self.task or self._is_cached_task:
return False
if self.task.delete():
self.task = None
return True
return False
def is_cached_task(self):
# type: () -> bool
"""
:return: True if the internal Task is a cached one, False otherwise.
"""
return self._is_cached_task
@classmethod
def register_hashing_callback(cls, a_function):
# type: (Callable[[dict], dict]) -> None
"""
Allow to customize the dict used for hashing the Task.
Provided function will be called with a dict representing a Task,
allowing to return a modified version of the representation dict.
:param a_function: Function manipulating the representation dict of a function
"""
assert callable(a_function)
cls._hashing_callback = a_function
@classmethod
def _create_task_hash(cls, task, section_overrides=None, params_override=None):
# type: (Task, Optional[dict], Optional[dict]) -> Optional[str]
"""
Create Hash (str) representing the state of the Task
:param task: A Task to hash
:param section_overrides: optional dict (keys are Task's section names) with task overrides.
:param params_override: Alternative to the entire Task's hyper parameters section
(notice this should not be a nested dict but a flat key/value)
:return: str hash of the Task configuration
"""
if not task:
return None
if section_overrides and section_overrides.get('script'):
script = section_overrides['script']
if not isinstance(script, dict):
script = script.to_dict()
else:
script = task.data.script.to_dict() if task.data.script else {}
# if we have a repository, we must make sure we have a specific version_num to ensure consistency
if script.get('repository') and not script.get('version_num') and not script.get('tag'):
return None
        # we need to ignore the `requirements` section because it might change from run to run
script.pop("requirements", None)
hyper_params = task.get_parameters() if params_override is None else params_override
configs = task.get_configuration_objects()
# currently we do not add the docker image to the hash (only args and setup script),
# because default docker image will cause the step to change
docker = None
if hasattr(task.data, 'container'):
docker = dict(**(task.data.container or dict()))
docker.pop('image', None)
hash_func = 'md5' if Session.check_min_api_version('2.13') else 'crc32'
# make sure that if we only have docker args/bash,
        # we encode it, otherwise we revert to the original encoding (excluding docker altogether)
repr_dict = dict(script=script, hyper_params=hyper_params, configs=configs, docker=docker) \
if docker else dict(script=script, hyper_params=hyper_params, configs=configs)
# callback for modifying the representation dict
if cls._hashing_callback:
repr_dict = cls._hashing_callback(deepcopy(repr_dict))
return hash_dict(repr_dict, hash_func=hash_func)
@classmethod
def _get_cached_task(cls, task_hash):
# type: (str) -> Optional[Task]
| |
"""
Mostly copied from wandb client code
Modified "next_sample" code to do the following:
-accepts a 'failure_cost' argument
-if failure cost 'c' is nonzero, modifies expected improvement of each
sample according to:
e' = p e / (p (1-c) + c)
where 'p' is probability of success and 'e' is unmodified expected improvement
-returns expected improvements for whole sample
Bayesian Search
Check out https://arxiv.org/pdf/1206.2944.pdf
for explanation of bayesian optimization
We do bayesian optimization and handle the cases where some X values are integers
as well as the case where X is very large.
"""
import numpy as np
#from sklearn.gaussian_process import GaussianProcessRegressor
#from sklearn.gaussian_process.kernels import Matern
#import scipy.stats as stats
import math
#from wandb.util import get_module
#from wandb.sweeps.base import Search
#from wandb.sweeps.params import HyperParameter, HyperParameterSet
#sklearn.gaussian = get_module('sklearn.gaussian_process')
#sklearn.linear = get_module('sklearn.linear_model')
#sklearn.svm = get_module('sklearn.svm')
#sklearn.discriminant = get_module('sklearn.discriminant_analysis')
#scipy.stats = get_module('scipy.stats')
import sklearn.gaussian_process as gaussian
import sklearn.linear_model as linear_model
import sklearn.svm as svm
import sklearn.discriminant_analysis as discriminant
import scipy.stats
def fit_normalized_gaussian_process(X, y, nu=1.5):
"""
We fit a gaussian process but first subtract the mean and divide by stddev.
    To undo at prediction time, call y_pred = gp.predict(X) * y_stddev + y_mean
"""
gp = gaussian.GaussianProcessRegressor(
kernel=gaussian.kernels.Matern(nu=nu), n_restarts_optimizer=2, alpha=0.0000001, random_state=2
)
if len(y) == 1:
y = np.array(y)
y_mean = y[0]
y_stddev = 1
else:
y_mean = np.mean(y)
y_stddev = np.std(y) + 0.0001
y_norm = (y - y_mean) / y_stddev
gp.fit(X, y_norm)
return gp, y_mean, y_stddev
def train_logistic_regression(X, y):
    lr = linear_model.LogisticRegression()
lr.fit(X, y.astype(int))
return lambda X : lr.predict_proba(X)[...,1], 0, 1
def train_rbf_svm(X, y):
svc = svm.SVC(probability=True)
svc.fit(X, y.astype(int))
return lambda X : svc.predict_proba(X)[...,1], 0, 1
def train_qda(X,y):
qda = discriminant.QuadraticDiscriminantAnalysis()
qda.fit(X, y.astype(int))
return lambda X : qda.predict_proba(X)[...,1], 0, 1
def sigmoid(x):
return np.exp(-np.logaddexp(0, -x))
def random_sample(X_bounds, num_test_samples):
num_hyperparameters = len(X_bounds)
test_X = np.empty((num_test_samples, num_hyperparameters))
for ii in range(num_test_samples):
for jj in range(num_hyperparameters):
if type(X_bounds[jj][0]) == int:
assert (type(X_bounds[jj][1]) == int)
test_X[ii, jj] = np.random.randint(
X_bounds[jj][0], X_bounds[jj][1])
else:
                test_X[ii, jj] = np.random.uniform() * (X_bounds[jj][1] - X_bounds[jj][0]) + X_bounds[jj][0]
return test_X
def predict(X, y, test_X, nu=1.5):
gp, norm_mean, norm_stddev = fit_normalized_gaussian_process(X, y, nu=nu)
y_pred, y_std = gp.predict([test_X], return_std=True)
y_std_norm = y_std * norm_stddev
y_pred_norm = (y_pred * norm_stddev) + norm_mean
return y_pred_norm[0], y_std_norm[0]
def train_runtime_model(sample_X, runtimes, X_bounds, nu=1.5, model='gaussian'):
if sample_X.shape[0] != runtimes.shape[0]:
raise ValueError("Sample X and runtimes must be the same length")
if model=='gaussian':
return train_gaussian_process(sample_X, runtimes, X_bounds, nu=nu)
elif model=='logistic' and runtimes.any() and not runtimes.all():
return train_logistic_regression(sample_X, runtimes)
elif model=='rbf_svm' and runtimes.any() and not runtimes.all():
return train_rbf_svm(sample_X, runtimes)
elif model=='qda' and runtimes.sum() > 1 and runtimes.sum() < len(runtimes) - 1:
return train_qda(sample_X, runtimes)
else:
return None, 0, 1
#def train_failure_model(sample_X, failures, X_bounds):
# if sample_X.shape[0] != failures.shape[0]:
# raise ValueError("Sample X and runtimes must be the same length")
#
# return train_gaussian_process(sample_X, runtimes, X_bounds)
def train_gaussian_process(
sample_X, sample_y, X_bounds, current_X=None, nu=1.5, max_samples=100
):
"""
Trains a Gaussian Process function from sample_X, sample_y data
Handles the case where there are other training runs in flight (current_X)
Arguments:
sample_X - vector of already evaluated sets of hyperparameters
sample_y - vector of already evaluated loss function values
X_bounds - minimum and maximum values for every dimension of X
current_X - hyperparameters currently being explored
        nu - input to the Matern function; higher numbers make it smoother. 0.5, 1.5, 2.5 are good values;
see http://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.kernels.Matern.html
Returns:
gp - the gaussian process function
y_mean - mean
y_stddev - stddev
To make a prediction with gp on real world data X, need to call:
(gp.predict(X) * y_stddev) + y_mean
"""
if current_X is not None:
current_X = np.array(current_X)
if len(current_X.shape) != 2:
raise ValueError("Current X must be a 2 dimensional array")
# we can't let the current samples be bigger than max samples
# because we need to use some real samples to build the curve
if current_X.shape[0] > max_samples - 5:
print(
"current_X is bigger than max samples - 5 so dropping some currently running parameters"
)
current_X = current_X[:(max_samples - 5), :]
if len(sample_y.shape) != 1:
raise ValueError("Sample y must be a 1 dimensional array")
if sample_X.shape[0] != sample_y.shape[0]:
raise ValueError(
"Sample X and sample y must be the same size {} {}".format(
sample_X.shape[0], sample_y.shape[0]
)
)
if X_bounds is not None and sample_X.shape[1] != len(X_bounds):
raise ValueError(
"Bounds must be the same length as Sample X's second dimension"
)
# gaussian process takes a long time to train, so if there's more than max_samples
# we need to sample from it
if sample_X.shape[0] > max_samples:
sample_indices = np.random.randint(sample_X.shape[0], size=max_samples)
X = sample_X[sample_indices]
y = sample_y[sample_indices]
else:
X = sample_X
y = sample_y
gp, y_mean, y_stddev = fit_normalized_gaussian_process(X, y, nu=nu)
if current_X is not None:
# if we have some hyperparameters running, we pretend that they return
# the prediction of the function we've fit
X = np.append(X, current_X, axis=0)
current_y_fantasy = (gp.predict(current_X) * y_stddev) + y_mean
y = np.append(y, current_y_fantasy)
gp, y_mean, y_stddev = fit_normalized_gaussian_process(X, y, nu=nu)
return gp.predict, y_mean, y_stddev
def filter_weird_values(sample_X, sample_y):
is_row_finite = ~(np.isnan(sample_X).any(axis=1) | np.isnan(sample_y))
sample_X = sample_X[is_row_finite, :]
sample_y = sample_y[is_row_finite]
return sample_X, sample_y
def next_sample(
sample_X,
sample_y,
X_bounds=None,
runtimes=None,
failures=None,
current_X=None,
nu=1.5,
max_samples_for_gp=100,
improvement=0.01,
num_points_to_try=1000,
opt_func="expected_improvement",
failure_cost=0,
test_X=None,
):
"""
Calculates the best next sample to look at via bayesian optimization.
Check out https://arxiv.org/pdf/1206.2944.pdf
for explanation of bayesian optimization
Arguments:
sample_X - 2d array of already evaluated sets of hyperparameters
sample_y - 1d array of already evaluated loss function values
X_bounds - 2d array minimum and maximum values for every dimension of X
runtimes - vector of length sample_y - should be the time taken to train each model in sample X
failures - vector of length sample_y - should be True for models where training failed and False where
                   training succeeded. This model will throw out NaNs and Infs so if you want it to avoid
failure values for X, use this failure vector.
current_X - hyperparameters currently being explored
        nu - input to the Matern function; higher numbers make it smoother. 0.5, 1.5, 2.5 are good values;
see http://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.kernels.Matern.html
max_samples_for_gp - maximum samples to consider (since algo is O(n^3)) for performance, but also adds some randomness
improvement - amount of improvement to optimize for -- higher means take more exploratory risks
num_points_to_try - number of X values to try when looking for value with highest
expected probability of improvement
opt_func - one of {"expected_improvement", "prob_of_improvement"} - whether to optimize expected
improvement of probability of improvement. Expected improvement is generally better - may want
to remove probability of improvement at some point. (But I think prboability of improvement
is a little easier to calculate)
test_X - X values to test when looking for the best values to try
Returns:
suggested_X - X vector to try running next
suggested_X_prob_of_improvement - probability of the X vector beating the current best
suggested_X_predicted_y - predicted output of the X vector
test_X - 2d array of length num_points_to_try by num features: tested X values
y_pred - 1d array of length num_points_to_try: predicted values for test_X
y_pred_std - 1d array of length num_points_to_try: predicted std deviation for test_X
e_i - expected improvement
        prob_of_improve 1d array of length num_points_to_try: predicted probability of improvement
        prob_of_failure 1d array of predicted probabilities of failure
suggested_X_prob_of_failure
expected_runtime 1d array of expected runtimes
"""
# Sanity check the data
sample_X = np.array(sample_X)
sample_y = np.array(sample_y)
failures = np.array(failures)
if test_X is not None:
test_X = np.array(test_X)
if len(sample_X.shape) != 2:
raise ValueError("Sample X must be a 2 dimensional array")
if len(sample_y.shape) != 1:
raise ValueError("Sample y must be a 1 dimensional array")
if sample_X.shape[0] != sample_y.shape[0]:
raise ValueError("Sample X and y must be same length")
if test_X is not None:
# if test_X is set, usually this is for simulation/testing
if X_bounds is not None:
raise ValueError("Can't set test_X and X_bounds")
else:
# normal case where we randomly sample our test_X
if X_bounds is None:
raise ValueError("Must pass in test_X or X_bounds")
filtered_X, filtered_y = filter_weird_values(sample_X, sample_y)
# We train our runtime prediction model on *filtered_X* throwing out the sample points with
    # NaN values
else:
return ['*Simple* %s' % item[1]]
if allTypeDict.has_key(item[1]):
if item[1] not in alreadyListed:
alreadyListed.append(item[1])
strList.extend(self.recurseTypeList(allTypeDict,
allTypeDict[item[1]], level+1, alreadyListed))
return strList
class PartWriter:
"""Generates a string representation of a typecode representing
a <message><part>
"""
def __init__(self):
self.typecode = None
self.tns = None
self.name = None
self.docCode = []
def __recurse_tdc(self, tp):
"""tp -- schema.TypeDescriptionComponent instance
"""
tp.type
if isinstance(tp.type, TypeDescriptionComponent):
tp.type
tp = self.__recurse_tdc(tp.type)
else:
return tp.type
def fromPart(self, part):
"""part -- wsdl.Part instance
"""
bti = BaseTypeInterpreter()
if part.getType():
tp = part.getType()
elif part.getElement():
tp = part.getElement()
self.name = tp.getName()
return
else:
raise WsdlGeneratorError, 'whoa! part typing problem!'
self.typecode = []
self.tns = tp.getTargetNamespace()
if not isinstance(tp, ZSI.wsdlInterface.ZSITypeAdapter):
raise TypeError, 'not a type adapter'
elif tp.isSimpleType():
if tp.getQName():
tpc = bti.get_typeclass(tp.getQName(),
tp.getTargetNamespace())
self.docCode.append('%s' % part.getName())
self.docCode.append(bti.get_pythontype(tp.getQName(),
tp.getTargetNamespace()))
if not tpc:
# fail over
t = tp.getName()
else:
t = tpc
self.typecode.append('%s(pname="%s",aname="_%s",optional=1)' \
%(t, part.getName(),
part.getName()))
elif tp.getName():
self.docCode.append(part.getName())
self.docCode.append(tp.getName())
self.typecode.append('%s(pname="%s",aname="_%s",optional=1)' \
%(tp.getName(), part.getName(),
part.getName()))
else:
raise WsdlGeneratorError, 'shouldnt happen'
elif tp.isComplexType():
self.docCode.append(part.getName())
self.docCode.append(tp.getName())
self.typecode.append('%s( name="%s", ns=ns )'\
%(tp.getName(), part.getName()))
else:
raise WsdlGeneratorError, 'shouldnt happen'
return
class SchemaDescription:
"""Generates classes for all global definitions and declarations in
a schema instance.
"""
def __init__(self):
self.nsh = NamespaceHash()
self.typeDict = {}
return
def fromWsdl(self, schema, alternateWriter):
"""schema -- schema.Schema instance
"""
if not isinstance(schema, ZSISchemaAdapter):
raise TypeError, 'type %s not a Schema' %(schema.__class__)
self.header = '%s \n# %s \n#\n# %s \n%s\n' \
%('#'*30, 'targetNamespace',
schema.getTargetNamespace(), '#'*30)
self.header += '\n\n# imported as: %s' % \
self.nsh.getAlias(schema.getTargetNamespace())
self.header += '\nclass %s:' % \
self.nsh.getModuleName(schema.getTargetNamespace())
self.header += "\n%stargetNamespace = '%s'" % \
(ID1, schema.getTargetNamespace())
self.body = ''
self.last = ''
self.class_dict = {}
self.class_list = []
self.generate(schema.getTypesDict(), alternateWriter)
self.generate(schema.getElementsDict(), alternateWriter)
self.getClassDefs(self.class_list, self.class_dict)
self.body += '\n\n# define class alias for subsequent ns classes'
self.body += '\n%s = %s' \
% ( self.nsh.getAlias(schema.getTargetNamespace()),
self.nsh.getModuleName(schema.getTargetNamespace()))
if self.last:
self.body += self.last
def generate(self, sdict, alternateWriter):
if alternateWriter:
exec( 'import %s' % alternateWriter[0] )
alternateWriter = '%s.%s()' % (alternateWriter[0],
alternateWriter[1] )
for name, tp in sdict.items():
defaultWriter = 'self.__class__.TypeWriter()'
if alternateWriter:
exec( 'tw = %s' % alternateWriter )
else:
exec( 'tw = %s' % defaultWriter )
ref = weakref.ref(self)
tw.fromType(tp, ref)
if tw.precede:
if self.class_dict.has_key(tw.precede):
self.class_dict[tw.precede].append(tw)
else:
self.class_dict[tw.precede] = [tw]
else:
self.class_list.append(tw.name)
#self.extractCode(tw)
self.body += tw.extractCode()
self.typeDict.update(tw.typeDict)
def getClassDefs(self, class_list, class_dict):
check_list = []
for indx in range(len(class_list)):
if class_dict.has_key(class_list[indx]):
for tw in class_dict[class_list[indx]]:
#self.extractCode(tw)
self.body += tw.extractCode()
check_list.append(tw.name)
else:
del class_dict[class_list[indx]]
if check_list:
self.getClassDefs(check_list, class_dict)
else:
for l in class_dict.values():
for tw in l:
#self.extractCode(tw)
self.body += tw.extractCode()
def extractCode(self, tw):
self.body += tw.prepend.getvalue()
self.body += tw.classdef.getvalue()
self.body += tw.classvar.getvalue()
self.body += tw.initdef.getvalue()
self.body += tw.initcode.getvalue()
self.body += tw.basector.getvalue()
self.body += tw.postpend.getvalue()
def write(self, fd=sys.stdout):
fd.write(self.header)
fd.write(self.body)
fd.write('\n'*4)
class TypeWriter:
"""Generates a string representation of a typecode representing
a schema declaration or definition.
"""
def __init__(self):
self.bti = BaseTypeInterpreter()
self.nsh = NamespaceHash()
self.name = None
self.precede = None
self.prepend = StringWriter()
self.classdef = StringWriter()
self.classvar = StringWriter()
self.initdef = StringWriter()
self.initcode = StringWriter()
self.basector = StringWriter()
self.postpend = StringWriter()
self.allOptional = False
self.hasRepeatable = False
self.typeList = []
self.typeDict = {}
self.localDefs = []
return
def extractCode(self):
formattedType = ''
formattedType += self.prepend.getvalue()
formattedType += self.classdef.getvalue()
formattedType += self.classvar.getvalue()
formattedType += self.initdef.getvalue()
formattedType += self.initcode.getvalue()
formattedType += self.basector.getvalue()
formattedType += self.postpend.getvalue()
formattedType += self.extractSubtypes()
return formattedType
def extractSubtypes(self):
subTypes = ''
for t in self.localDefs:
subTypes += t.extractCode()
formatted = []
for l in string.split(subTypes, '\n'):
if l:
formatted.append('%s%s' % (ID1, l))
else:
formatted.append(l)
return '\n'.join(formatted)
def fromType(self, myType, parentRef):
"""myType -- Type representation
"""
tp = myType
if tp.isSimpleType():
self.name = tp.getName() + '_Def'
self._fromSimpleType(tp)
elif tp.isWildCard():
self._fromWildCard(tp)
elif tp.isElement():
self.name = tp.getName() + '_Dec'
self._fromElement(tp)
elif tp.isComplexType():
self.name = tp.getName() + '_Def'
self._fromComplexType(tp)
elif tp.isAttribute():
self._fromAttribute(tp)
else:
raise WsdlGeneratorError, 'WARNING: NOT HANDLED %s' \
% (tp.__class__)
alias = self.nsh.getAlias(tp.getTargetNamespace())
key = "%s.%s_Def" % (alias, tp.getName())
if self.typeList:
self.typeList.sort()
# add entry to type dictionary for later use in
# docstring generation
self.typeDict[key] = self.typeList
return
def _fromSimpleType(self, tp):
tp = tp.getDefinition()
if tp.getName():
tpc = tp.getTypeclass()
self.initdef.set('\n%sdef __init__(self, name=None, ns=None, **kw):' % (ID2))
objName = '_' + tp.getName()
if tpc:
typeName = self.bti.get_pythontype(None, None, tpc)
if not typeName:
typeName = 'Any'
self.precede = '%s' % (tpc)
self.classdef.set('\n\n%sclass %s(%s):' \
% (ID1, tp.getName() + '_Def', tpc))
self.initcode.set('\n%saname = None' % ID3 )
self.initcode.write('\n%sif name:' % ID3)
self.initcode.write('\n%skw["pname"] = name' \
% ID4)
self.initcode.write('\n%skw["aname"] = "_%%s" %% name' \
% ID4)
self.basector.set('\n%s%s.__init__(self, **kw)'\
% (ID3,tpc))
else:
typeName = 'Any'
# XXX: currently, unions will get shuffled thru here.
self.classdef.set('\n\n%sclass %s(ZSI.TC.Any):' \
% (ID1, tp.getName() + '_Def'))
self.initcode.write('\n%s# probably a union - dont trust it' % ID3)
self.basector.set('\n%sZSI.TC.Any.__init__(self,pname=name,aname="_%%s" %% name , optional=1,repeatable=1, **kw)' % ID3)
self.typeDoc('optional=1', objName, typeName)
else:
raise WsdlGeneratorError, 'shouldnt happen'
def _fromWildCard(self, tp):
# XXX: not particularly trustworthy either. pending further work.
tp = tp.getDeclaration()
self.classdef.set('\n\n%sclass %s(ZSI.TC.XML):' \
% (ID1, tp.getName()))
self.initdef.set('\n%s__init__(self,pname):' % (ID2))
self.basector.set('\n%sZSI.TC.XML.__init__(self,pname,**kw)' % ID3)
def _fromAttribute(self, tp):
self.classdef.set('\n\n%sclass %s:' % (ID1, tp.getName()))
self.classvar.set('\n%s# not yet implemented' % ID2)
self.classvar.write('\n%s# attribute declaration' % ID2)
self.classvar.write('\n%spass\n' % ID2)
def _fromElement(self, tp):
etp = tp.getType()
if etp and etp.isDefinition():
if etp.isSimpleType():
self._elementSimpleType(tp, etp)
elif etp.isComplexType():
self._elementComplexType(tp, etp)
elif not etp:
self.classdef.set('\n\n%sclass %s_Dec(Struct):' \
% (ID1, tp.getName()))
self.initdef.set('\n%sdef __init__(self, name=None, ns=None, **kw):' % (ID2))
self.basector.set('\n%sStruct.__init__(self, self.__class__, [], pname="%s", aname="_%s", inline=1)' % (ID3,tp.getName(),tp.getName()))
else:
raise WsdlGeneratorError, 'Unknown type(%s) not handled ' \
% (etp.__class__)
def _elementSimpleType(self, tp, etp):
tpc = etp.getTypeclass()
self.precede = '%s' % (tpc)
self.classdef.set('\n\n%sclass %s(%s):' \
% (ID1, tp.getName() + '_Dec', tpc))
self.classvar.set('\n%sliteral = "%s"' % (ID2, tp.getName()))
self.classvar.write('\n%sschema = "%s"' % \
(ID2,tp.getTargetNamespace()))
self.initdef.set('\n\n%sdef __init__(self, name=None, ns=None, **kw):' \
% ID2)
self.initcode.set('\n%sname = name or self.__class__.literal' \
% ID3)
self.initcode.write('\n%sns = ns or self.__class__.schema' % ID3)
self.basector.set('\n\n%s%s.__init__(self,pname=name, aname="_%%s" %% name, **kw)' % (ID3,tpc))
typeName = self.bti.get_pythontype(None, None, tpc)
self.typeDoc('', '__param', typeName)
def _elementComplexType(self, tp, etp):
if etp.getName():
self.precede = '%s' %(etp.getName() + '_Def' )
if etp.getTargetNamespace() != tp.getTargetNamespace():
nsp = etp.getTargetNamespace()
self.classdef.set('\n\n%sclass %s(%s.%s):' \
% (ID1, tp.getName() + '_Dec',
self.nsh.getAlias(nsp),
etp.getName() + '_Def'))
else:
self.classdef.set('\n\n%sclass %s(%s):' \
% (ID1, tp.getName() + '_Dec',
etp.getName() + '_Def'))
self.classvar.set('\n%sliteral = "%s"' % ( ID2, tp.getName()))
self.classvar.write('\n%sschema = "%s"' \
% ( ID2,tp.getTargetNamespace()))
self.initdef.set('\n\n%sdef __init__(self, name=None, ns=None, **kw):' %(ID2))
self.initcode.set('\n%sname = name or self.__class__.literal'\
% ID3 )
self.initcode.write('\n%sns = ns or self.__class__.schema'\
% ID3 )
nsp = etp.getTargetNamespace()
typeName = '%s.%s_Def' % (self.nsh.getAlias(nsp), etp.getName())
self.basector.set('\n\n%s%s.__init__(self, name=name, ns=ns, **kw)' % (ID3, typeName))
self.postpend.set('\n%sself.typecode = %s(name=name, ns=ns, **kw)' % (ID3, typeName))
self.typeDoc('', '_' + tp.getName(), typeName)
else:
# at this point what we have is an global element declaration
# containing a local complex type definition.
# so, this is a little odd voodoo so that we can
# use the code for processing complex types.
self._fromComplexType(etp.expressLocalAsGlobal(tp))
            # now we are discarding the _Dec(LOCAL_Def) subclassing
            # and expressing the declaration w/the local def as
            # a def. re-tweak the classdef and off we go...
self.classdef.set('\n\n%sclass %s(ZSI.TCcompound.Struct):' \
%(ID1, tp.getName() + '_Dec'))
self.classvar.set( re.sub('type', 'literal',
self.classvar.getvalue()))
self.initcode.set('\n%sname = name or self.__class__.literal\n%sns = ns or self.__class__.schema\n%s' % ( ID3, ID3, self.initcode.getvalue()))
return
def _fromComplexType(self, tp):
if isinstance(tp, ZSI.wsdlInterface.ZSISchemaTypeAdapter ):
# the "usual"
tp = tp.getDefinition()
else:
# this is when an element has
# a local complex type def
pass
typecodelist = '['
if tp.isComplexContent():
self._complexTypeComplexContent(tp)
return
if tp.isSimpleContent():
self._complexTypeSimpleContent(tp)
return
# ok, it's not derived content and therefore has a model group
# write out the class def and class variables
self.classdef.set('\n\n%sclass %s:' \
%(ID1, tp.getName() + '_Def'))
if self._complexTypeHandleAttributes(tp):
# not yet implemented
pass
self.classvar.set("\n%sschema = '%s'" % (ID2,
tp.getTargetNamespace()))
self.classvar.write("\n%stype = '%s'\n" % (ID2, tp.getName()))
self.initdef.set('\n%sdef __init__(self, name=None, ns=None, **kw):' % (ID2))
typecodelist = '['
mg = tp.getModelGroup()
if mg.isAll() or mg.isSequence():
            typecodelist += self._complexTypeAllOrSequence(tp,
1
)
# 3 texts for the choice
self.assertEqual(
len(item._my_map['question']['choices'][region_name][0]['texts']),
3
)
self.assertEqual(
item._my_map['question']['choices'][region_name][0]['id'],
choice_identifier
)
self.assertIn(self._str_txt(self._english()), item._my_map['question']['choices'][region_name][0]['texts'])
self.assertIn(self._str_txt(self._hindi()), item._my_map['question']['choices'][region_name][0]['texts'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['question']['choices'][region_name][0]['texts'])
self.assertEqual(
item.get_question().get_choices(),
{
region_name: [{
'id': choice_identifier,
'text': self._english_text,
'name': ''
}]
}
)
form = self._bank.get_question_form_for_update(item.ident)
form.remove_choice_language(self._english().language_type, choice_identifier, region_name)
self._bank.update_question(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['question']['choices'][region_name][0]['texts']),
2
)
self.assertIn(self._str_txt(self._hindi()), item._my_map['question']['choices'][region_name][0]['texts'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['question']['choices'][region_name][0]['texts'])
self.assertEqual(
item.get_question().get_choices(),
{
region_name: [{
'id': choice_identifier,
'text': self._hindi_text,
'name': ''
}]
}
)
def test_can_replace_a_choice_text(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_question_form_for_create(item.ident, [QTI_QUESTION_RECORD,
MULTI_LANGUAGE_INLINE_CHOICE_QUESTION_RECORD])
region_name = 'REGION_1'
form.add_inline_region(region_name)
choice_identifier = 'foobar'
form.add_choice(self._english(), region_name, identifier=choice_identifier)
form.add_choice(self._hindi(), region_name, identifier=choice_identifier)
form.add_choice(self._telugu(), region_name, identifier=choice_identifier)
question = self._bank.create_question(form)
item = self._bank.get_item(item.ident)
# 1 region
self.assertEqual(
len(list(item._my_map['question']['choices'].keys())),
1
)
# 1 choice in this region
self.assertEqual(
len(item._my_map['question']['choices'][region_name]),
1
)
# 3 texts for the choice
self.assertEqual(
len(item._my_map['question']['choices'][region_name][0]['texts']),
3
)
self.assertEqual(
item._my_map['question']['choices'][region_name][0]['id'],
choice_identifier
)
self.assertIn(self._str_txt(self._english()), item._my_map['question']['choices'][region_name][0]['texts'])
self.assertIn(self._str_txt(self._hindi()), item._my_map['question']['choices'][region_name][0]['texts'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['question']['choices'][region_name][0]['texts'])
self.assertEqual(
item.get_question().get_choices(),
{
region_name: [{
'id': choice_identifier,
'text': self._english_text,
'name': ''
}]
}
)
new_english_feedback = DisplayText(display_text_map={
'text': 'foo',
'languageTypeId': '639-2%3AENG%40ISO',
'scriptTypeId': '15924%3ALATN%40ISO',
'formatTypeId': 'TextFormats%3APLAIN%40okapia.net'
})
form = self._bank.get_question_form_for_update(item.ident)
form.edit_choice(new_english_feedback, choice_identifier, region_name)
self._bank.update_question(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['question']['choices'][region_name][0]['texts']),
3
)
self.assertIn(self._str_txt(self._hindi()), item._my_map['question']['choices'][region_name][0]['texts'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['question']['choices'][region_name][0]['texts'])
self.assertIn(self._str_txt(new_english_feedback), item._my_map['question']['choices'][region_name][0]['texts'])
self.assertEqual(
item.get_question().get_choices(),
{
region_name: [{
'id': choice_identifier,
'text': 'foo',
'name': ''
}]
}
)
def test_setting_proxy_locale_gets_choice_text_in_specified_language(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_question_form_for_create(item.ident, [QTI_QUESTION_RECORD,
MULTI_LANGUAGE_INLINE_CHOICE_QUESTION_RECORD])
region_name = 'REGION_1'
form.add_inline_region(region_name)
choice_identifier = 'foobar'
form.add_choice(self._english(), region_name, identifier=choice_identifier)
form.add_choice(self._hindi(), region_name, identifier=choice_identifier)
form.add_choice(self._telugu(), region_name, identifier=choice_identifier)
self._bank.create_question(form)
hi_bank = self.get_bank_with_proxy_set_to_locale('HIN')
hi_item = hi_bank.get_item(item.ident)
self.assertEqual(
hi_item.get_question().get_choices(),
{
region_name: [{
'id': choice_identifier,
'text': self._hindi_text,
'name': ''
}]
}
)
en_bank = self.get_bank_with_proxy_set_to_locale('ENG')
en_item = en_bank.get_item(item.ident)
self.assertEqual(
en_item.get_question().get_choices(),
{
region_name: [{
'id': choice_identifier,
'text': self._english_text,
'name': ''
}]
}
)
te_bank = self.get_bank_with_proxy_set_to_locale('TEL')
te_item = te_bank.get_item(item.ident)
self.assertEqual(
te_item.get_question().get_choices(),
{
region_name: [{
'id': choice_identifier,
'text': self._telugu_text,
'name': ''
}]
}
)
def test_english_default_choice_text_if_locale_code_not_available(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_question_form_for_create(item.ident, [QTI_QUESTION_RECORD,
MULTI_LANGUAGE_INLINE_CHOICE_QUESTION_RECORD])
region_name = 'REGION_1'
form.add_inline_region(region_name)
choice_identifier = 'foobar'
form.add_choice(self._english(), region_name, identifier=choice_identifier)
form.add_choice(self._telugu(), region_name, identifier=choice_identifier)
self._bank.create_question(form)
hi_bank = self.get_bank_with_proxy_set_to_locale('HIN')
hi_item = hi_bank.get_item(item.ident)
self.assertEqual(
hi_item.get_question().get_choices(),
{
region_name: [{
'id': choice_identifier,
'text': self._english_text,
'name': ''
}]
}
)
en_bank = self.get_bank_with_proxy_set_to_locale('ENG')
en_item = en_bank.get_item(item.ident)
self.assertEqual(
en_item.get_question().get_choices(),
{
region_name: [{
'id': choice_identifier,
'text': self._english_text,
'name': ''
}]
}
)
te_bank = self.get_bank_with_proxy_set_to_locale('TEL')
te_item = te_bank.get_item(item.ident)
self.assertEqual(
te_item.get_question().get_choices(),
{
region_name: [{
'id': choice_identifier,
'text': self._telugu_text,
'name': ''
}]
}
)
def test_first_available_choice_text_if_locale_code_and_english_not_available(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_question_form_for_create(item.ident, [QTI_QUESTION_RECORD,
MULTI_LANGUAGE_INLINE_CHOICE_QUESTION_RECORD])
region_name = 'REGION_1'
form.add_inline_region(region_name)
choice_identifier = 'foobar'
form.add_choice(self._telugu(), region_name, identifier=choice_identifier)
self._bank.create_question(form)
hi_bank = self.get_bank_with_proxy_set_to_locale('HIN')
hi_item = hi_bank.get_item(item.ident)
self.assertEqual(
hi_item.get_question().get_choices(),
{
region_name: [{
'id': choice_identifier,
'text': self._telugu_text,
'name': ''
}]
}
)
en_bank = self.get_bank_with_proxy_set_to_locale('ENG')
en_item = en_bank.get_item(item.ident)
self.assertEqual(
en_item.get_question().get_choices(),
{
region_name: [{
'id': choice_identifier,
'text': self._telugu_text,
'name': ''
}]
}
)
te_bank = self.get_bank_with_proxy_set_to_locale('TEL')
te_item = te_bank.get_item(item.ident)
self.assertEqual(
te_item.get_question().get_choices(),
{
region_name: [{
'id': choice_identifier,
'text': self._telugu_text,
'name': ''
}]
}
)
def test_multi_language_plays_well_with_randomized_order(self):
form = self._bank.get_item_form_for_create([RANDOMIZED_INLINE_CHOICE_ITEM_RECORD])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_question_form_for_create(item.ident, [QTI_QUESTION_RECORD,
MULTI_LANGUAGE_INLINE_CHOICE_QUESTION_RECORD])
region_name = 'REGION_1'
form.add_inline_region(region_name)
form.add_choice(self._english(), region_name, identifier='1')
form.add_choice(self._hindi(), region_name, identifier='1')
form.add_choice(self._telugu(), region_name, identifier='1')
form.add_choice(self._english(), region_name, identifier='2')
form.add_choice(self._hindi(), region_name, identifier='2')
form.add_choice(self._telugu(), region_name, identifier='2')
form.add_choice(self._english(), region_name, identifier='3')
form.add_choice(self._hindi(), region_name, identifier='3')
form.add_choice(self._telugu(), region_name, identifier='3')
self._bank.create_question(form)
different_hindi_order = 0
hi_bank = self.get_bank_with_proxy_set_to_locale('HIN')
hi_item = hi_bank.get_item(item.ident)
for i in range(0, 10):
choices = hi_item.get_question().get_choices()
choice_order = [c['id'] for c in choices[region_name]]
choice_texts = [c['text'] for c in choices[region_name]]
if choice_order != ['1', '2', '3']:
different_hindi_order += 1
self.assertEqual(
choice_texts,
[self._hindi_text, self._hindi_text, self._hindi_text]
)
self.assertTrue(different_hindi_order > 0)
different_english_order = 0
en_bank = self.get_bank_with_proxy_set_to_locale('ENG')
en_item = en_bank.get_item(item.ident)
for i in range(0, 10):
choices = en_item.get_question().get_choices()
choice_order = [c['id'] for c in choices[region_name]]
choice_texts = [c['text'] for c in choices[region_name]]
if choice_order != ['1', '2', '3']:
different_english_order += 1
self.assertEqual(
choice_texts,
[self._english_text, self._english_text, self._english_text]
)
self.assertTrue(different_english_order > 0)
different_telugu_order = 0
te_bank = self.get_bank_with_proxy_set_to_locale('TEL')
te_item = te_bank.get_item(item.ident)
for i in range(0, 10):
choices = te_item.get_question().get_choices()
choice_order = [c['id'] for c in choices[region_name]]
choice_texts = [c['text'] for c in choices[region_name]]
if choice_order != ['1', '2', '3']:
different_telugu_order += 1
self.assertEqual(
choice_texts,
[self._telugu_text, self._telugu_text, self._telugu_text]
)
self.assertTrue(different_telugu_order > 0)
class MultiLanguageAnswerFeedbackTests(MultiLanguageBaseTestCase):
def setUp(self):
super(MultiLanguageAnswerFeedbackTests, self).setUp()
def tearDown(self):
super(MultiLanguageAnswerFeedbackTests, self).tearDown()
def test_can_set_multiple_answer_feedbacks(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_answer_form_for_create(item.ident, [MULTI_LANGUAGE_FEEDBACK_ANSWER_RECORD])
form.add_feedback(self._english())
form.add_feedback(self._hindi())
form.add_feedback(self._telugu())
self._bank.create_answer(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['answers'][0]['feedbacks']),
3
)
self.assertIn(self._str_txt(self._english()), item._my_map['answers'][0]['feedbacks'])
self.assertIn(self._str_txt(self._hindi()), item._my_map['answers'][0]['feedbacks'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['answers'][0]['feedbacks'])
self.assertEqual(
item.get_answers().next().feedback.text,
self._english_text
)
def test_can_clear_answer_feedbacks(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_answer_form_for_create(item.ident, [MULTI_LANGUAGE_FEEDBACK_ANSWER_RECORD])
form.add_feedback(self._english())
form.add_feedback(self._hindi())
form.add_feedback(self._telugu())
answer = self._bank.create_answer(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['answers'][0]['feedbacks']),
3
)
self.assertIn(self._str_txt(self._english()), item._my_map['answers'][0]['feedbacks'])
self.assertIn(self._str_txt(self._hindi()), item._my_map['answers'][0]['feedbacks'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['answers'][0]['feedbacks'])
self.assertEqual(
item.get_answers().next().feedback.text,
self._english_text
)
form = self._bank.get_answer_form_for_update(answer.ident)
form.clear_feedbacks()
self._bank.update_answer(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['answers'][0]['feedbacks']),
0
)
self.assertEqual(
item.get_answers().next().feedback.text,
''
)
def test_can_remove_an_answer_feedback_by_language(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_answer_form_for_create(item.ident, [MULTI_LANGUAGE_FEEDBACK_ANSWER_RECORD])
form.add_feedback(self._english())
form.add_feedback(self._hindi())
form.add_feedback(self._telugu())
answer = self._bank.create_answer(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['answers'][0]['feedbacks']),
3
)
self.assertIn(self._str_txt(self._english()), item._my_map['answers'][0]['feedbacks'])
self.assertIn(self._str_txt(self._hindi()), item._my_map['answers'][0]['feedbacks'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['answers'][0]['feedbacks'])
self.assertEqual(
item.get_answers().next().feedback.text,
self._english_text
)
form = self._bank.get_answer_form_for_update(answer.ident)
form.remove_feedback_language(self._english().language_type)
self._bank.update_answer(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['answers'][0]['feedbacks']),
2
)
self.assertIn(self._str_txt(self._hindi()), item._my_map['answers'][0]['feedbacks'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['answers'][0]['feedbacks'])
self.assertEqual(
item.get_answers().next().feedback.text,
self._hindi_text
)
def test_can_replace_an_answer_feedback(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_answer_form_for_create(item.ident, [MULTI_LANGUAGE_FEEDBACK_ANSWER_RECORD])
form.add_feedback(self._english())
form.add_feedback(self._hindi())
form.add_feedback(self._telugu())
answer = self._bank.create_answer(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['answers'][0]['feedbacks']),
3
)
self.assertIn(self._str_txt(self._english()), item._my_map['answers'][0]['feedbacks'])
self.assertIn(self._str_txt(self._hindi()), item._my_map['answers'][0]['feedbacks'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['answers'][0]['feedbacks'])
self.assertEqual(
item.get_answers().next().feedback.text,
self._english_text
)
new_english_feedback = DisplayText(display_text_map={
'text': 'foo',
'languageTypeId': '639-2%3AENG%40ISO',
'scriptTypeId': '15924%3ALATN%40ISO',
'formatTypeId': 'TextFormats%3APLAIN%40okapia.net'
})
form = self._bank.get_answer_form_for_update(answer.ident)
form.edit_feedback(new_english_feedback)
self._bank.update_answer(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['answers'][0]['feedbacks']),
3
)
self.assertIn(self._str_txt(self._hindi()), item._my_map['answers'][0]['feedbacks'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['answers'][0]['feedbacks'])
self.assertIn(self._str_txt(new_english_feedback), item._my_map['answers'][0]['feedbacks'])
self.assertEqual(
item.get_answers().next().feedback.text,
'foo'
)
def test_setting_proxy_locale_gets_answer_feedback_in_specified_language(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_answer_form_for_create(item.ident, [MULTI_LANGUAGE_FEEDBACK_ANSWER_RECORD])
form.add_feedback(self._english())
form.add_feedback(self._hindi())
form.add_feedback(self._telugu())
self._bank.create_answer(form)
item = self._bank.get_item(item.ident)
hi_bank = self.get_bank_with_proxy_set_to_locale('HIN')
hi_item = hi_bank.get_item(item.ident)
self.assertEqual(
hi_item.get_answers().next().feedback.text,
self._hindi_text
)
en_bank = self.get_bank_with_proxy_set_to_locale('ENG')
en_item = en_bank.get_item(item.ident)
self.assertEqual(
en_item.get_answers().next().feedback.text,
self._english_text
)
te_bank = self.get_bank_with_proxy_set_to_locale('TEL')
te_item = te_bank.get_item(item.ident)
self.assertEqual(
te_item.get_answers().next().feedback.text,
self._telugu_text
)
def test_english_default_answer_feedback_if_locale_code_not_available(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_answer_form_for_create(item.ident, [MULTI_LANGUAGE_FEEDBACK_ANSWER_RECORD])
form.add_feedback(self._english())
form.add_feedback(self._telugu())
self._bank.create_answer(form)
item = self._bank.get_item(item.ident)
hi_bank = self.get_bank_with_proxy_set_to_locale('HIN')
hi_item = hi_bank.get_item(item.ident)
self.assertEqual(
hi_item.get_answers().next().feedback.text,
self._english_text
)
en_bank = self.get_bank_with_proxy_set_to_locale('ENG')
en_item = en_bank.get_item(item.ident)
self.assertEqual(
en_item.get_answers().next().feedback.text,
self._english_text
)
te_bank = self.get_bank_with_proxy_set_to_locale('TEL')
te_item = te_bank.get_item(item.ident)
self.assertEqual(
te_item.get_answers().next().feedback.text,
self._telugu_text
)
def test_first_available_answer_feedback_if_locale_code_and_english_not_available(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_answer_form_for_create(item.ident, [MULTI_LANGUAGE_FEEDBACK_ANSWER_RECORD])
form.add_feedback(self._telugu())
self._bank.create_answer(form)
item = self._bank.get_item(item.ident)
hi_bank = self.get_bank_with_proxy_set_to_locale('HIN')
hi_item = hi_bank.get_item(item.ident)
self.assertEqual(
hi_item.get_answers().next().feedback.text,
self._telugu_text
)
en_bank = self.get_bank_with_proxy_set_to_locale('ENG')
en_item = en_bank.get_item(item.ident)
self.assertEqual(
en_item.get_answers().next().feedback.text,
self._telugu_text
)
te_bank = self.get_bank_with_proxy_set_to_locale('TEL')
te_item = te_bank.get_item(item.ident)
self.assertEqual(
te_item.get_answers().next().feedback.text,
self._telugu_text
)
class MultiLanguageTextInteractionTests(MultiLanguageBaseTestCase):
def setUp(self):
super(MultiLanguageTextInteractionTests, self).setUp()
def tearDown(self):
super(MultiLanguageTextInteractionTests, self).tearDown()
def test_can_set_multiple_question_texts(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_question_form_for_create(item.ident, [MULTI_LANGUAGE_TEXT_INTERACTION_RECORD])
form.add_text(self._english())
form.add_text(self._hindi())
form.add_text(self._telugu())
self._bank.create_question(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['question']['texts']),
3
)
self.assertIn(self._str_txt(self._english()), item._my_map['question']['texts'])
self.assertIn(self._str_txt(self._hindi()), item._my_map['question']['texts'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['question']['texts'])
self.assertEqual(
item.get_question().get_text().text,
self._english_text
)
def test_can_clear_question_texts(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_question_form_for_create(item.ident, [MULTI_LANGUAGE_QUESTION_RECORD])
form.add_text(self._english())
form.add_text(self._hindi())
form.add_text(self._telugu())
question = self._bank.create_question(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['question']['texts']),
3
)
self.assertIn(self._str_txt(self._english()), item._my_map['question']['texts'])
self.assertIn(self._str_txt(self._hindi()), item._my_map['question']['texts'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['question']['texts'])
self.assertEqual(
item.get_question().get_text().text,
self._english_text
)
form = self._bank.get_question_form_for_update(question.ident)
form.clear_texts()
self._bank.update_question(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['question']['texts']),
0
)
self.assertEqual(
item.get_question().get_text().text,
''
)
def test_can_remove_a_question_text_by_language(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_question_form_for_create(item.ident, [MULTI_LANGUAGE_QUESTION_RECORD])
form.add_text(self._english())
form.add_text(self._hindi())
form.add_text(self._telugu())
question = self._bank.create_question(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['question']['texts']),
3
)
self.assertIn(self._str_txt(self._english()), item._my_map['question']['texts'])
self.assertIn(self._str_txt(self._hindi()), item._my_map['question']['texts'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['question']['texts'])
form = self._bank.get_question_form_for_update(question.ident)
form.remove_text_language(self._english().language_type)
self._bank.update_question(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['question']['texts']),
2
)
self.assertIn(self._str_txt(self._hindi()), item._my_map['question']['texts'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['question']['texts'])
self.assertEqual(
item.get_question().get_text().text,
self._hindi_text
)
def test_can_replace_a_question_text(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_question_form_for_create(item.ident, [MULTI_LANGUAGE_QUESTION_RECORD])
form.add_text(self._english())
form.add_text(self._hindi())
form.add_text(self._telugu())
question = self._bank.create_question(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['question']['texts']),
3
)
        self.assertIn(self._str_txt(self._english()),
postCellId="../DA9/0/"/>
</projection>
<projection id="NC_PDA_DA9_Serotonin" postsynapticPopulation="DA9" presynapticPopulation="PDA" synapse="">
<connection id="0" preCellId="../PDA/0/" postCellId="../DA9/0/"/>
</projection>
<projection id="NC_PDA_DD6_Serotonin" postsynapticPopulation="DD6" presynapticPopulation="PDA" synapse="">
<connection id="0" preCellId="../PDA/0/" postCellId="../DD6/0/"/>
</projection>
<projection id="NC_PDA_PVNR_Serotonin" postsynapticPopulation="PVNR" presynapticPopulation="PDA" synapse="">
<connection id="0" preCellId="../PDA/0/" postCellId="../PVNR/0/"/>
</projection>
<projection id="NC_PDA_VD13_Serotonin" postsynapticPopulation="VD13" presynapticPopulation="PDA" synapse="">
<connection id="0" preCellId="../PDA/0/" postCellId="../VD13/0/"/>
</projection>
<projection id="NC_PDB_AS11_Generic_GJ" postsynapticPopulation="AS11" presynapticPopulation="PDB" synapse="">
<connection id="0" preCellId="../PDB/0/" postCellId="../AS11/0/"/>
</projection>
<projection id="NC_PDB_RID_Generic_GJ" postsynapticPopulation="RID" presynapticPopulation="PDB" synapse="">
<connection id="0" preCellId="../PDB/0/" postCellId="../RID/0/"/>
</projection>
<projection id="NC_PDB_VD13_FMRFamide" postsynapticPopulation="VD13" presynapticPopulation="PDB" synapse="">
<connection id="0" preCellId="../PDB/0/" postCellId="../VD13/0/"/>
</projection>
<projection id="NC_PDEL_AVKL_Dopamine" postsynapticPopulation="AVKL" presynapticPopulation="PDEL" synapse="">
<connection id="0" preCellId="../PDEL/0/" postCellId="../AVKL/0/"/>
</projection>
<projection id="NC_PDEL_DVA_Dopamine" postsynapticPopulation="DVA" presynapticPopulation="PDEL" synapse="">
<connection id="0" preCellId="../PDEL/0/" postCellId="../DVA/0/"/>
</projection>
<projection id="NC_PDEL_PDER_Dopamine" postsynapticPopulation="PDER" presynapticPopulation="PDEL" synapse="">
<connection id="0" preCellId="../PDEL/0/" postCellId="../PDER/0/"/>
</projection>
<projection id="NC_PDEL_PDER_Generic_GJ" postsynapticPopulation="PDER" presynapticPopulation="PDEL" synapse="">
<connection id="0" preCellId="../PDEL/0/" postCellId="../PDER/0/"/>
</projection>
<projection id="NC_PDEL_PVCR_Generic_GJ" postsynapticPopulation="PVCR" presynapticPopulation="PDEL" synapse="">
<connection id="0" preCellId="../PDEL/0/" postCellId="../PVCR/0/"/>
</projection>
<projection id="NC_PDEL_PVM_Generic_GJ" postsynapticPopulation="PVM" presynapticPopulation="PDEL" synapse="">
<connection id="0" preCellId="../PDEL/0/" postCellId="../PVM/0/"/>
</projection>
<projection id="NC_PDEL_PVM_Dopamine" postsynapticPopulation="PVM" presynapticPopulation="PDEL" synapse="">
<connection id="0" preCellId="../PDEL/0/" postCellId="../PVM/0/"/>
</projection>
<projection id="NC_PDEL_PVR_Dopamine" postsynapticPopulation="PVR" presynapticPopulation="PDEL" synapse="">
<connection id="0" preCellId="../PDEL/0/" postCellId="../PVR/0/"/>
</projection>
<projection id="NC_PDEL_VA9_Dopamine" postsynapticPopulation="VA9" presynapticPopulation="PDEL" synapse="">
<connection id="0" preCellId="../PDEL/0/" postCellId="../VA9/0/"/>
</projection>
<projection id="NC_PDEL_VD11_Dopamine" postsynapticPopulation="VD11" presynapticPopulation="PDEL" synapse="">
<connection id="0" preCellId="../PDEL/0/" postCellId="../VD11/0/"/>
</projection>
<projection id="NC_PDER_AVKL_Dopamine" postsynapticPopulation="AVKL" presynapticPopulation="PDER" synapse="">
<connection id="0" preCellId="../PDER/0/" postCellId="../AVKL/0/"/>
</projection>
<projection id="NC_PDER_DVA_Dopamine" postsynapticPopulation="DVA" presynapticPopulation="PDER" synapse="">
<connection id="0" preCellId="../PDER/0/" postCellId="../DVA/0/"/>
</projection>
<projection id="NC_PDER_PDEL_Generic_GJ" postsynapticPopulation="PDEL" presynapticPopulation="PDER" synapse="">
<connection id="0" preCellId="../PDER/0/" postCellId="../PDEL/0/"/>
</projection>
<projection id="NC_PDER_PVCL_Dopamine" postsynapticPopulation="PVCL" presynapticPopulation="PDER" synapse="">
<connection id="0" preCellId="../PDER/0/" postCellId="../PVCL/0/"/>
</projection>
<projection id="NC_PDER_PVCR_Dopamine" postsynapticPopulation="PVCR" presynapticPopulation="PDER" synapse="">
<connection id="0" preCellId="../PDER/0/" postCellId="../PVCR/0/"/>
</projection>
<projection id="NC_PDER_PVM_Generic_GJ" postsynapticPopulation="PVM" presynapticPopulation="PDER" synapse="">
<connection id="0" preCellId="../PDER/0/" postCellId="../PVM/0/"/>
</projection>
<projection id="NC_PDER_VA8_Generic_GJ" postsynapticPopulation="VA8" presynapticPopulation="PDER" synapse="">
<connection id="0" preCellId="../PDER/0/" postCellId="../VA8/0/"/>
</projection>
<projection id="NC_PDER_VD9_Generic_GJ" postsynapticPopulation="VD9" presynapticPopulation="PDER" synapse="">
<connection id="0" preCellId="../PDER/0/" postCellId="../VD9/0/"/>
</projection>
<projection id="NC_PHAL_AVDR_Glutamate" postsynapticPopulation="AVDR" presynapticPopulation="PHAL" synapse="">
<connection id="0" preCellId="../PHAL/0/" postCellId="../AVDR/0/"/>
</projection>
<projection id="NC_PHAL_AVFL_Glutamate" postsynapticPopulation="AVFL" presynapticPopulation="PHAL" synapse="">
<connection id="0" preCellId="../PHAL/0/" postCellId="../AVFL/0/"/>
</projection>
<projection id="NC_PHAL_AVG_Glutamate" postsynapticPopulation="AVG" presynapticPopulation="PHAL" synapse="">
<connection id="0" preCellId="../PHAL/0/" postCellId="../AVG/0/"/>
</projection>
<projection id="NC_PHAL_AVHL_Glutamate" postsynapticPopulation="AVHL" presynapticPopulation="PHAL" synapse="">
<connection id="0" preCellId="../PHAL/0/" postCellId="../AVHL/0/"/>
</projection>
<projection id="NC_PHAL_AVHR_Glutamate" postsynapticPopulation="AVHR" presynapticPopulation="PHAL" synapse="">
<connection id="0" preCellId="../PHAL/0/" postCellId="../AVHR/0/"/>
</projection>
<projection id="NC_PHAL_DVA_Glutamate" postsynapticPopulation="DVA" presynapticPopulation="PHAL" synapse="">
<connection id="0" preCellId="../PHAL/0/" postCellId="../DVA/0/"/>
</projection>
<projection id="NC_PHAL_PHAR_Generic_GJ" postsynapticPopulation="PHAR" presynapticPopulation="PHAL" synapse="">
<connection id="0" preCellId="../PHAL/0/" postCellId="../PHAR/0/"/>
</projection>
<projection id="NC_PHAL_PHAR_Glutamate" postsynapticPopulation="PHAR" presynapticPopulation="PHAL" synapse="">
<connection id="0" preCellId="../PHAL/0/" postCellId="../PHAR/0/"/>
</projection>
<projection id="NC_PHAL_PHBL_Glutamate" postsynapticPopulation="PHBL" presynapticPopulation="PHAL" synapse="">
<connection id="0" preCellId="../PHAL/0/" postCellId="../PHBL/0/"/>
</projection>
<projection id="NC_PHAL_PHBR_Glutamate" postsynapticPopulation="PHBR" presynapticPopulation="PHAL" synapse="">
<connection id="0" preCellId="../PHAL/0/" postCellId="../PHBR/0/"/>
</projection>
<projection id="NC_PHAL_PVQL_Glutamate" postsynapticPopulation="PVQL" presynapticPopulation="PHAL" synapse="">
<connection id="0" preCellId="../PHAL/0/" postCellId="../PVQL/0/"/>
</projection>
<projection id="NC_PHAR_AVG_Glutamate" postsynapticPopulation="AVG" presynapticPopulation="PHAR" synapse="">
<connection id="0" preCellId="../PHAR/0/" postCellId="../AVG/0/"/>
</projection>
<projection id="NC_PHAR_AVHR_Glutamate" postsynapticPopulation="AVHR" presynapticPopulation="PHAR" synapse="">
<connection id="0" preCellId="../PHAR/0/" postCellId="../AVHR/0/"/>
</projection>
<projection id="NC_PHAR_DA8_Glutamate" postsynapticPopulation="DA8" presynapticPopulation="PHAR" synapse="">
<connection id="0" preCellId="../PHAR/0/" postCellId="../DA8/0/"/>
</projection>
<projection id="NC_PHAR_DVA_Glutamate" postsynapticPopulation="DVA" presynapticPopulation="PHAR" synapse="">
<connection id="0" preCellId="../PHAR/0/" postCellId="../DVA/0/"/>
</projection>
<projection id="NC_PHAR_PHAL_Generic_GJ" postsynapticPopulation="PHAL" presynapticPopulation="PHAR" synapse="">
<connection id="0" preCellId="../PHAR/0/" postCellId="../PHAL/0/"/>
</projection>
<projection id="NC_PHAR_PHAL_Glutamate" postsynapticPopulation="PHAL" presynapticPopulation="PHAR" synapse="">
<connection id="0" preCellId="../PHAR/0/" postCellId="../PHAL/0/"/>
</projection>
<projection id="NC_PHAR_PHBL_Glutamate" postsynapticPopulation="PHBL" presynapticPopulation="PHAR" synapse="">
<connection id="0" preCellId="../PHAR/0/" postCellId="../PHBL/0/"/>
</projection>
<projection id="NC_PHAR_PHBR_Glutamate" postsynapticPopulation="PHBR" presynapticPopulation="PHAR" synapse="">
<connection id="0" preCellId="../PHAR/0/" postCellId="../PHBR/0/"/>
</projection>
<projection id="NC_PHAR_PVPL_Generic_GJ" postsynapticPopulation="PVPL" presynapticPopulation="PHAR" synapse="">
<connection id="0" preCellId="../PHAR/0/" postCellId="../PVPL/0/"/>
</projection>
<projection id="NC_PHAR_PVQL_Glutamate" postsynapticPopulation="PVQL" presynapticPopulation="PHAR" synapse="">
<connection id="0" preCellId="../PHAR/0/" postCellId="../PVQL/0/"/>
</projection>
<projection id="NC_PHBL_AVAL_Serotonin" postsynapticPopulation="AVAL" presynapticPopulation="PHBL" synapse="">
<connection id="0" preCellId="../PHBL/0/" postCellId="../AVAL/0/"/>
</projection>
<projection id="NC_PHBL_AVAR_Serotonin" postsynapticPopulation="AVAR" presynapticPopulation="PHBL" synapse="">
<connection id="0" preCellId="../PHBL/0/" postCellId="../AVAR/0/"/>
</projection>
<projection id="NC_PHBL_AVDL_Serotonin" postsynapticPopulation="AVDL" presynapticPopulation="PHBL" synapse="">
<connection id="0" preCellId="../PHBL/0/" postCellId="../AVDL/0/"/>
</projection>
<projection id="NC_PHBL_PHBR_Serotonin" postsynapticPopulation="PHBR" presynapticPopulation="PHBL" synapse="">
<connection id="0" preCellId="../PHBL/0/" postCellId="../PHBR/0/"/>
</projection>
<projection id="NC_PHBL_PHBR_Generic_GJ" postsynapticPopulation="PHBR" presynapticPopulation="PHBL" synapse="">
<connection id="0" preCellId="../PHBL/0/" postCellId="../PHBR/0/"/>
</projection>
<projection id="NC_PHBL_PVCL_Serotonin" postsynapticPopulation="PVCL" presynapticPopulation="PHBL" synapse="">
<connection id="0" preCellId="../PHBL/0/" postCellId="../PVCL/0/"/>
</projection>
<projection id="NC_PHBL_VA12_Serotonin" postsynapticPopulation="VA12" presynapticPopulation="PHBL" synapse="">
<connection id="0" preCellId="../PHBL/0/" postCellId="../VA12/0/"/>
</projection>
<projection id="NC_PHBR_AVAL_Serotonin" postsynapticPopulation="AVAL" presynapticPopulation="PHBR" synapse="">
<connection id="0" preCellId="../PHBR/0/" postCellId="../AVAL/0/"/>
</projection>
<projection id="NC_PHBR_AVAR_Serotonin" postsynapticPopulation="AVAR" presynapticPopulation="PHBR" synapse="">
<connection id="0" preCellId="../PHBR/0/" postCellId="../AVAR/0/"/>
</projection>
<projection id="NC_PHBR_AVDL_Serotonin" postsynapticPopulation="AVDL" presynapticPopulation="PHBR" synapse="">
<connection id="0" preCellId="../PHBR/0/" postCellId="../AVDL/0/"/>
</projection>
<projection id="NC_PHBR_AVDR_Serotonin" postsynapticPopulation="AVDR" presynapticPopulation="PHBR" synapse="">
<connection id="0" preCellId="../PHBR/0/" postCellId="../AVDR/0/"/>
</projection>
<projection id="NC_PHBR_AVFL_Serotonin" postsynapticPopulation="AVFL" presynapticPopulation="PHBR" synapse="">
<connection id="0" preCellId="../PHBR/0/" postCellId="../AVFL/0/"/>
</projection>
<projection id="NC_PHBR_AVHL_Generic_GJ" postsynapticPopulation="AVHL" presynapticPopulation="PHBR" synapse="">
<connection id="0" preCellId="../PHBR/0/" postCellId="../AVHL/0/"/>
</projection>
<projection id="NC_PHBR_DA8_Serotonin" postsynapticPopulation="DA8" presynapticPopulation="PHBR" synapse="">
<connection id="0" preCellId="../PHBR/0/" postCellId="../DA8/0/"/>
</projection>
<projection id="NC_PHBR_PHBL_Serotonin" postsynapticPopulation="PHBL" presynapticPopulation="PHBR" synapse="">
<connection id="0" preCellId="../PHBR/0/" postCellId="../PHBL/0/"/>
</projection>
<projection id="NC_PHBR_PHBL_Generic_GJ" postsynapticPopulation="PHBL" presynapticPopulation="PHBR" synapse="">
<connection id="0" preCellId="../PHBR/0/" postCellId="../PHBL/0/"/>
</projection>
<projection id="NC_PHBR_PVCL_Serotonin" postsynapticPopulation="PVCL" presynapticPopulation="PHBR" synapse="">
<connection id="0" preCellId="../PHBR/0/" postCellId="../PVCL/0/"/>
</projection>
<projection id="NC_PHBR_PVCR_Serotonin" postsynapticPopulation="PVCR" presynapticPopulation="PHBR" synapse="">
<connection id="0" preCellId="../PHBR/0/" postCellId="../PVCR/0/"/>
</projection>
<projection id="NC_PHBR_VA12_Serotonin" postsynapticPopulation="VA12" presynapticPopulation="PHBR" synapse="">
<connection id="0" preCellId="../PHBR/0/" postCellId="../VA12/0/"/>
</projection>
<projection id="NC_PHCL_AVAL_Glutamate" postsynapticPopulation="AVAL" presynapticPopulation="PHCL" synapse="">
<connection id="0" preCellId="../PHCL/0/" postCellId="../AVAL/0/"/>
</projection>
<projection id="NC_PHCL_DA9_Generic_GJ" postsynapticPopulation="DA9" presynapticPopulation="PHCL" synapse="">
<connection id="0" preCellId="../PHCL/0/" postCellId="../DA9/0/"/>
</projection>
<projection id="NC_PHCL_DA9_Glutamate" postsynapticPopulation="DA9" presynapticPopulation="PHCL" synapse="">
<connection id="0" preCellId="../PHCL/0/" postCellId="../DA9/0/"/>
</projection>
<projection id="NC_PHCL_DVA_Glutamate" postsynapticPopulation="DVA" presynapticPopulation="PHCL" synapse="">
<connection id="0" preCellId="../PHCL/0/" postCellId="../DVA/0/"/>
</projection>
<projection id="NC_PHCL_LUAL_Glutamate" postsynapticPopulation="LUAL" presynapticPopulation="PHCL" synapse="">
<connection id="0" preCellId="../PHCL/0/" postCellId="../LUAL/0/"/>
</projection>
<projection id="NC_PHCL_PHCR_Generic_GJ" postsynapticPopulation="PHCR" presynapticPopulation="PHCL" synapse="">
<connection id="0" preCellId="../PHCL/0/" postCellId="../PHCR/0/"/>
</projection>
<projection id="NC_PHCL_PLML_Generic_GJ" postsynapticPopulation="PLML" presynapticPopulation="PHCL" synapse="">
<connection id="0" preCellId="../PHCL/0/" postCellId="../PLML/0/"/>
</projection>
<projection id="NC_PHCL_PVCL_Glutamate" postsynapticPopulation="PVCL" presynapticPopulation="PHCL" synapse="">
<connection id="0" preCellId="../PHCL/0/" postCellId="../PVCL/0/"/>
</projection>
<projection id="NC_PHCL_VA12_Generic_GJ" postsynapticPopulation="VA12" presynapticPopulation="PHCL" synapse="">
<connection id="0" preCellId="../PHCL/0/" postCellId="../VA12/0/"/>
</projection>
<projection id="NC_PHCL_VA12_Glutamate" postsynapticPopulation="VA12" presynapticPopulation="PHCL" synapse="">
<connection id="0" preCellId="../PHCL/0/" postCellId="../VA12/0/"/>
</projection>
<projection id="NC_PHCR_AVHR_Glutamate" postsynapticPopulation="AVHR" presynapticPopulation="PHCR" synapse="">
<connection id="0" preCellId="../PHCR/0/" postCellId="../AVHR/0/"/>
</projection>
<projection id="NC_PHCR_DA9_Glutamate" postsynapticPopulation="DA9" presynapticPopulation="PHCR" synapse="">
<connection id="0" preCellId="../PHCR/0/" postCellId="../DA9/0/"/>
</projection>
<projection id="NC_PHCR_DVA_Glutamate" postsynapticPopulation="DVA" presynapticPopulation="PHCR" synapse="">
<connection id="0" preCellId="../PHCR/0/" postCellId="../DVA/0/"/>
</projection>
<projection id="NC_PHCR_LUAR_Glutamate" postsynapticPopulation="LUAR" presynapticPopulation="PHCR" synapse="">
<connection id="0" preCellId="../PHCR/0/" postCellId="../LUAR/0/"/>
</projection>
<projection id="NC_PHCR_PHCL_Generic_GJ" postsynapticPopulation="PHCL" presynapticPopulation="PHCR" synapse="">
<connection id="0" preCellId="../PHCR/0/" postCellId="../PHCL/0/"/>
</projection>
<projection id="NC_PHCR_PHCL_Glutamate" postsynapticPopulation="PHCL" presynapticPopulation="PHCR" synapse="">
<connection id="0" preCellId="../PHCR/0/" postCellId="../PHCL/0/"/>
</projection>
<projection id="NC_PHCR_PVCR_Generic_GJ" postsynapticPopulation="PVCR" presynapticPopulation="PHCR" synapse="">
<connection id="0" preCellId="../PHCR/0/" postCellId="../PVCR/0/"/>
</projection>
<projection id="NC_PHCR_PVCR_Glutamate" postsynapticPopulation="PVCR" presynapticPopulation="PHCR" synapse="">
<connection id="0" preCellId="../PHCR/0/" postCellId="../PVCR/0/"/>
</projection>
<projection id="NC_PHCR_VA12_Glutamate" postsynapticPopulation="VA12" presynapticPopulation="PHCR" synapse="">
<connection id="0" preCellId="../PHCR/0/" postCellId="../VA12/0/"/>
</projection>
<projection id="NC_PHCR_VA12_Generic_GJ" postsynapticPopulation="VA12" presynapticPopulation="PHCR" synapse="">
<connection id="0" preCellId="../PHCR/0/" postCellId="../VA12/0/"/>
</projection>
<projection id="NC_PLML_HSNL_Glutamate" postsynapticPopulation="HSNL" presynapticPopulation="PLML" synapse="">
<connection id="0" preCellId="../PLML/0/" postCellId="../HSNL/0/"/>
</projection>
<projection id="NC_PLML_LUAL_Generic_GJ" postsynapticPopulation="LUAL" presynapticPopulation="PLML" synapse="">
<connection id="0" preCellId="../PLML/0/" postCellId="../LUAL/0/"/>
</projection>
<projection id="NC_PLML_PHCL_Generic_GJ" postsynapticPopulation="PHCL" presynapticPopulation="PLML" synapse="">
<connection id="0" preCellId="../PLML/0/" postCellId="../PHCL/0/"/>
</projection>
<projection id="NC_PLML_PVCL_Generic_GJ" postsynapticPopulation="PVCL" presynapticPopulation="PLML" synapse="">
<connection id="0" preCellId="../PLML/0/" postCellId="../PVCL/0/"/>
</projection>
<projection id="NC_PLMR_AS6_Glutamate" postsynapticPopulation="AS6" presynapticPopulation="PLMR" synapse="">
<connection id="0" preCellId="../PLMR/0/" postCellId="../AS6/0/"/>
</projection>
<projection id="NC_PLMR_AVAL_Glutamate" postsynapticPopulation="AVAL" presynapticPopulation="PLMR" synapse="">
<connection id="0" preCellId="../PLMR/0/" postCellId="../AVAL/0/"/>
</projection>
<projection id="NC_PLMR_AVAR_Glutamate" postsynapticPopulation="AVAR" presynapticPopulation="PLMR" synapse="">
<connection id="0" preCellId="../PLMR/0/" postCellId="../AVAR/0/"/>
</projection>
<projection id="NC_PLMR_AVDL_Glutamate" postsynapticPopulation="AVDL" presynapticPopulation="PLMR" synapse="">
<connection id="0" preCellId="../PLMR/0/" postCellId="../AVDL/0/"/>
</projection>
<projection id="NC_PLMR_AVDR_Glutamate" postsynapticPopulation="AVDR" presynapticPopulation="PLMR" synapse="">
<connection id="0" preCellId="../PLMR/0/" postCellId="../AVDR/0/"/>
</projection>
<projection id="NC_PLMR_DVA_Glutamate" postsynapticPopulation="DVA" presynapticPopulation="PLMR" synapse="">
<connection id="0" preCellId="../PLMR/0/" postCellId="../DVA/0/"/>
</projection>
<projection id="NC_PLMR_HSNR_Glutamate" postsynapticPopulation="HSNR" presynapticPopulation="PLMR" synapse="">
<connection id="0" preCellId="../PLMR/0/" postCellId="../HSNR/0/"/>
</projection>
<projection id="NC_PLMR_LUAR_Generic_GJ" postsynapticPopulation="LUAR" presynapticPopulation="PLMR" synapse="">
<connection id="0" preCellId="../PLMR/0/" postCellId="../LUAR/0/"/>
</projection>
<projection id="NC_PLMR_PDEL_Glutamate" postsynapticPopulation="PDEL" presynapticPopulation="PLMR" synapse="">
<connection id="0" preCellId="../PLMR/0/" postCellId="../PDEL/0/"/>
</projection>
<projection id="NC_PLMR_PDER_Glutamate" postsynapticPopulation="PDER" presynapticPopulation="PLMR" synapse="">
<connection id="0" preCellId="../PLMR/0/" postCellId="../PDER/0/"/>
</projection>
<projection id="NC_PLMR_PVCL_Glutamate" postsynapticPopulation="PVCL" presynapticPopulation="PLMR" synapse="">
<connection id="0" preCellId="../PLMR/0/" postCellId="../PVCL/0/"/>
</projection>
<projection id="NC_PLMR_PVCR_Generic_GJ" postsynapticPopulation="PVCR" presynapticPopulation="PLMR" synapse="">
<connection id="0" preCellId="../PLMR/0/" postCellId="../PVCR/0/"/>
</projection>
<projection id="NC_PLMR_PVR_Generic_GJ" postsynapticPopulation="PVR" presynapticPopulation="PLMR" synapse="">
<connection id="0" preCellId="../PLMR/0/" postCellId="../PVR/0/"/>
</projection>
<projection id="NC_PLNL_SAADL_Acetylcholine" postsynapticPopulation="SAADL" presynapticPopulation="PLNL" synapse="">
<connection id="0" preCellId="../PLNL/0/" postCellId="../SAADL/0/"/>
</projection>
<projection id="NC_PLNL_SMBVL_Acetylcholine" postsynapticPopulation="SMBVL" presynapticPopulation="PLNL" synapse="">
<connection id="0" preCellId="../PLNL/0/" postCellId="../SMBVL/0/"/>
</projection>
<projection id="NC_PLNR_SAADR_Acetylcholine" postsynapticPopulation="SAADR" presynapticPopulation="PLNR" synapse="">
<connection id="0" preCellId="../PLNR/0/" postCellId="../SAADR/0/"/>
</projection>
<projection id="NC_PLNR_SMBVR_Acetylcholine" postsynapticPopulation="SMBVR" presynapticPopulation="PLNR" synapse="">
<connection id="0" preCellId="../PLNR/0/" postCellId="../SMBVR/0/"/>
</projection>
<projection id="NC_PQR_AVAL_Glutamate" postsynapticPopulation="AVAL" presynapticPopulation="PQR" synapse="">
<connection id="0" preCellId="../PQR/0/" postCellId="../AVAL/0/"/>
</projection>
<projection id="NC_PQR_AVAR_Glutamate" postsynapticPopulation="AVAR" presynapticPopulation="PQR" synapse="">
<connection id="0" preCellId="../PQR/0/" postCellId="../AVAR/0/"/>
</projection>
<projection id="NC_PQR_AVDL_Glutamate" postsynapticPopulation="AVDL" presynapticPopulation="PQR" synapse="">
<connection id="0" preCellId="../PQR/0/" postCellId="../AVDL/0/"/>
</projection>
<projection id="NC_PQR_AVDR_Glutamate" postsynapticPopulation="AVDR" presynapticPopulation="PQR" synapse="">
<connection id="0" preCellId="../PQR/0/" postCellId="../AVDR/0/"/>
</projection>
<projection id="NC_PQR_AVG_Glutamate" postsynapticPopulation="AVG" presynapticPopulation="PQR" synapse="">
<connection id="0" preCellId="../PQR/0/" postCellId="../AVG/0/"/>
</projection>
<projection id="NC_PQR_LUAR_Glutamate" postsynapticPopulation="LUAR" presynapticPopulation="PQR" synapse="">
<connection id="0" preCellId="../PQR/0/" postCellId="../LUAR/0/"/>
</projection>
<projection id="NC_PQR_PVNL_Glutamate" postsynapticPopulation="PVNL" presynapticPopulation="PQR" synapse="">
<connection id="0" preCellId="../PQR/0/" postCellId="../PVNL/0/"/>
</projection>
<projection id="NC_PQR_PVPL_Generic_GJ" postsynapticPopulation="PVPL" presynapticPopulation="PQR" synapse="">
<connection id="0" preCellId="../PQR/0/" postCellId="../PVPL/0/"/>
</projection>
<projection id="NC_PVCL_AS1_Glutamate" postsynapticPopulation="AS1" presynapticPopulation="PVCL" synapse="">
<connection id="0" preCellId="../PVCL/0/" postCellId="../AS1/0/"/>
</projection>
<projection id="NC_PVCL_AVAL_Glutamate" postsynapticPopulation="AVAL" presynapticPopulation="PVCL" synapse="">
<connection id="0" preCellId="../PVCL/0/" postCellId="../AVAL/0/"/>
</projection>
<projection id="NC_PVCL_AVAL_Generic_GJ" postsynapticPopulation="AVAL" presynapticPopulation="PVCL" synapse="">
<connection id="0" preCellId="../PVCL/0/" postCellId="../AVAL/0/"/>
</projection>
<projection id="NC_PVCL_AVAR_Glutamate" postsynapticPopulation="AVAR" presynapticPopulation="PVCL" synapse="">
<connection id="0" preCellId="../PVCL/0/" postCellId="../AVAR/0/"/>
</projection>
<projection id="NC_PVCL_AVBL_Glutamate" postsynapticPopulation="AVBL" presynapticPopulation="PVCL" synapse="">
<connection id="0" preCellId="../PVCL/0/" postCellId="../AVBL/0/"/>
</projection>
<projection id="NC_PVCL_AVBR_Glutamate" postsynapticPopulation="AVBR" presynapticPopulation="PVCL" synapse="">
<connection id="0" preCellId="../PVCL/0/" postCellId="../AVBR/0/"/>
</projection>
<projection id="NC_PVCL_AVDL_Glutamate" postsynapticPopulation="AVDL" presynapticPopulation="PVCL" synapse="">
<connection id="0" preCellId="../PVCL/0/" postCellId="../AVDL/0/"/>
</projection>
<projection id="NC_PVCL_AVDR_Glutamate" postsynapticPopulation="AVDR" presynapticPopulation="PVCL" synapse="">
<connection id="0" preCellId="../PVCL/0/" postCellId="../AVDR/0/"/>
</projection>
<projection id="NC_PVCL_AVEL_Glutamate" postsynapticPopulation="AVEL" presynapticPopulation="PVCL" synapse="">
<connection id="0" preCellId="../PVCL/0/" postCellId="../AVEL/0/"/>
</projection>
<projection id="NC_PVCL_AVER_Glutamate" postsynapticPopulation="AVER" presynapticPopulation="PVCL" synapse="">
<connection id="0" preCellId="../PVCL/0/" postCellId="../AVER/0/"/>
</projection>
<projection id="NC_PVCL_AVJL_Generic_GJ" postsynapticPopulation="AVJL" presynapticPopulation="PVCL" synapse="">
<connection id="0" preCellId="../PVCL/0/" postCellId="../AVJL/0/"/>
</projection>
<projection id="NC_PVCL_AVJL_Glutamate" postsynapticPopulation="AVJL" presynapticPopulation="PVCL" synapse="">
<connection id="0" preCellId="../PVCL/0/" postCellId="../AVJL/0/"/>
</projection>
<projection id="NC_PVCL_AVJR_Generic_GJ" postsynapticPopulation="AVJR" presynapticPopulation="PVCL" synapse="">
<connection id="0" preCellId="../PVCL/0/" postCellId="../AVJR/0/"/>
</projection>
<projection id="NC_PVCL_AVJR_Glutamate" postsynapticPopulation="AVJR" presynapticPopulation="PVCL" synapse="">
<connection id="0" preCellId="../PVCL/0/" | |
elif token == 'DISASM_INTEL':
self.disasm_intel = rest
elif token == 'DISASM_ATTSV': # AT&T System V
self.disasm_att = rest
elif token == 'UNAME':
self.uname = rest
if viclass():
msgb("UNAME", rest)
elif token == 'IFORM':
if filling_extra:
if len(self.extra_iforms_input) == 0:
die("Need to have a PATTERN line before " +
"the IFORM line for " + self.iclass)
self.extra_iforms_input[-1] = rest
else:
self.iform_input = rest
else:
setattr(self,token,rest.strip())
# die("Unhandled token in line: " + line)
else:
print("NEXT FEW LINES: ")
for x in lines[0:20]:
print("INPUT LINE: %s" % (x.strip()))
die("Missing colon in line: " + line)
if reached_closing_bracket:
if found_operands == False:
die("Did not find operands for " + self.iclass)
for k in structured_input_dict.keys():
if structured_input_dict[k] == False:
if structured_input_tags[k]:
die("Required token missing: "+ k)
if debug:
msge("\tReturning...")
return True
return False
def add_scalable_attribute(self, scalable_widths, agi):
"""Look for operations that have width codes that are scalable
width codes (z,v,a,p,p2,s,spw8,spw,spw3,spw2,
etc. (auto-derived) , and add an attribute SCALABLE"""
scalable = False
for op in self.operands:
if op.oc2:
s= op.oc2.upper()
#msge("RRR Checking for %s in %s" % (s, str(scalable_widths)))
if s in scalable_widths:
scalable=True
break
if op.lookupfn_name:
#msge("OPNAME: " + op.lookupfn_name)
scalable = look_for_scalable_nt(agi, op.lookupfn_name)
if scalable:
break
if scalable:
s = "SCALABLE"
self.add_attribute(s)
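    # Rough example (values are assumptions, not from the original sources):
    # if scalable_widths contains 'v' and some operand of this instruction
    # carries the oc2 width code 'v', the loop above calls
    # self.add_attribute("SCALABLE"); the same happens when an operand's
    # lookup-function nonterminal turns out to be EOSZ-sensitive.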
def add_fixed_base_attribute(self):
"""Look for STACKPUSH/STACKPOP operands and then add an
attribute that says fixed_base0 or fixed_base1 depending on
which base reg has the SrSP operand."""
stack_memop_indx = -1
if vattr():
msgb("ATTRIBUTE-FOR-STACKOP: CHECKING", self.iclass)
for op in self.operands:
if op.is_ntluf():
if vattr():
msgb("ATTRIBUTE-NTLUF", "%s = %s" % (op.name,op.lookupfn_name))
if op.lookupfn_name == 'SrSP':
if op.name == 'BASE0':
stack_memop_indx = 0
elif op.name == 'BASE1':
stack_memop_indx = 1
else:
pass # skip other fields
if stack_memop_indx != -1:
if vattr():
msgb("ATTRIBUTE-FOR-STACKOP",
"%s memop index %s" % (self.iclass, stack_memop_indx))
s = "FIXED_BASE%d" % stack_memop_indx
self.add_attribute(s)
self.add_stack_attribute(stack_memop_indx)
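    # Sketch of the effect (the example operand is an assumption): an operand
    # list containing "BASE1=SrSP():rw:SUPP" makes stack_memop_indx 1, so the
    # instruction gains the attribute "FIXED_BASE1" plus the matching stack
    # attribute via add_stack_attribute(1).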
def __str__(self):
return self.dump_str()
def dump_str(self, pad='', brief=False):
s = []
s.append(pad)
s.append(self.iclass)
if self.uname:
s.append(" uname=%s" % str(self.uname))
s.append(" inum=%s " % str(self.inum))
if field_check(self,'iform') and self.iform:
s.append(" iform=%s " % str(self.iform))
if field_check(self,'iform_input') and self.iform_input:
s.append(" iform_input=%s " % str(self.iform_input))
if field_check(self,'isa_set') and self.isa_set:
s.append(" isa_set=%s " % str(self.isa_set))
s.append("pattern len=%d\n" % len(self.ipattern.bits))
s.append(" %s ipattern: %s\n" % (pad,self.ipattern.just_bits()) )
if brief:
return ''.join(s)
if self.prebindings:
s.append('prebindings: \n\t' +
'\n\t'.join( [str(x) for x in list(self.prebindings.values())]) + '\n')
for op in self.operands:
s.append(pad)
s.append(" ")
s.append(op.dump_str(pad))
s.append("\n")
return ''.join(s)
def look_for_scalable_nt(agi, nt_name):
"""Look for a nonterminal that is sensitive to EOSZ. It looks
recursively at NTs in the patterns, but that does not occur in x86."""
try:
gi = agi.generator_dict[nt_name]
except:
die("Generator not found for nt_name: %s" % (nt_name))
for rule in gi.parser_output.instructions:
for b in rule.ipattern.bits:
if b.token == 'EOSZ':
return True
elif b.is_nonterminal():
r_nt_name = b.nonterminal_name()
if look_for_scalable_nt(agi, r_nt_name): # RECUR
return True
return False
def mk_opnd(agi, s, default_vis='DEFAULT'):
op = opnds.parse_one_operand(s,
default_vis,
agi.xtypes,
agi.widths_dict)
return op
def add_flags_register_operand(agi,ii):
"""If the instruction has flags, then add a flag register operand."""
if field_check(ii,'flags_info') and \
ii.flags_info and ii.flags_info.x86_flags():
rw = ii.flags_info.rw_action()
(memidx_dummy,regidx) = find_max_memidx_and_regidx(ii.operands)
s = "REG%d=rFLAGS():%s:SUPP" % ( regidx, rw )
if vflag():
msgb("RFLAGS-APPEND", "%s <-- %s" % ( ii.iclass, s))
op = mk_opnd(agi,s)
if op:
ii.operands.append(op)
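# Illustrative sketch (concrete values are assumptions): for an instruction
# whose flags record reports rw_action() == "w" and whose highest explicit
# register operand is REG1, find_max_memidx_and_regidx() yields regidx == 2,
# so the string "REG2=rFLAGS():w:SUPP" is built, parsed by mk_opnd(), and
# appended as a suppressed flags-register operand.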
def add_flags_register_operand_all(agi,parser):
for ii in parser.instructions:
add_flags_register_operand(agi,ii)
def rewrite_stack_push(op,memidx,regidx):
s = []
#s.append("REG%d=SrSP():rw:SUPP" % (regidx))
s.append("MEM%d:w:%s:SUPP" % (memidx, op.oc2))
s.append("BASE%d=SrSP():rw:SUPP" % (memidx))
if memidx == 0:
s.append("SEG0=FINAL_SSEG0():r:SUPP") # note FINAL_SSEG0()
else:
s.append("SEG1=FINAL_SSEG1():r:SUPP") # note FINAL_SSEG1() ***
return s
def rewrite_stack_pop(op,memidx,regidx):
s = []
#s.append("REG%d=SrSP():rw:SUPP" % (regidx))
s.append("MEM%d:r:%s:SUPP" % (memidx, op.oc2))
s.append("BASE%d=SrSP():rw:SUPP" % (memidx))
if memidx == 0:
s.append("SEG0=FINAL_SSEG0():r:SUPP") # note FINAL_SSEG()
else:
s.append("SEG1=FINAL_SSEG1():r:SUPP") # note FINAL_SSEG1() ***
return s
def expand_stack_operand(op, memidx, regidx):
"""Replace the STACKPUSH and STACKPOP operands by a sequence of operands
@type op: opnds.operand_info_t
@param op: input operand that is a stack push or pop
@type memidx: integer
@param memidx: index of the memop we should use, either 0 or 1.
@type regidx: integer
@param regidx: index of the first register we should use for
the rSP() operand
@rtype: [ strings ]
@return: additional text of operands (to be processed) for the
stack pointer access, memop, base, & seg.
"""
if vstack():
msgb("EXPANDING STACK OP", "%s memidx %d regidx %d"
% (op.bits, memidx, regidx))
if op.bits == 'XED_REG_STACKPUSH':
out = rewrite_stack_push(op,memidx,regidx)
elif op.bits == 'XED_REG_STACKPOP':
out = rewrite_stack_pop(op,memidx,regidx)
else:
out = None
if vstack():
msgb("STACKOPS", str(out))
return out
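# Worked example (hypothetical operand; width code 'v' is assumed): expanding
# a XED_REG_STACKPUSH operand with memidx=0 and oc2='v' goes through
# rewrite_stack_push() and returns the operand strings
#   ["MEM0:w:v:SUPP", "BASE0=SrSP():rw:SUPP", "SEG0=FINAL_SSEG0():r:SUPP"]
# which the caller then converts back into operand objects with mk_opnd().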
def find_max_memidx_and_regidx(operands):
"find the maximum memidx and regidx"
memidx = 0
regidx = 0
verbose = False
for op in operands:
if verbose:
msgb("OPNAME", op.name)
if op.name == 'MEM0':
memidx = 1
elif op.name == 'MEM1':
memidx = 2 # this should cause an error if it is ever used
rnm = reg_operand_name_pattern.match(op.name)
if rnm:
current_regidx = int(rnm.group('regno'))
if verbose:
msgb("COMPARE REGS", "current %d max %d" %
( current_regidx, regidx))
if current_regidx >= regidx:
if verbose:
msgb("BUMPING regidx")
regidx = current_regidx+1
return (memidx, regidx)
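# Small example (operand names assumed): for operands named MEM0, REG0 and
# REG1 this returns (1, 2) -- the next free memory index and register index --
# so a stack expansion would use MEM1/BASE1 and an added register operand
# (such as the rFLAGS operand above) would use REG2.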
def parse_operand_spec(agi,operand_spec):
"""build a list classes holding operand info"""
#print str(operand_spec)
operands = []
reset_any = False
for w in operand_spec:
op = mk_opnd(agi,w)
if op:
if op.type == 'xed_reset':
reset_any = True
else:
operands.append(op)
##############################################################
# expand stack operands
#
found_stackop = None
for op in operands:
# msgb("BITS", str(op.bits))
if op.bits == 'XED_REG_STACKPUSH' or op.bits == 'XED_REG_STACKPOP':
found_stackop = op
break
if found_stackop:
(memidx, regidx) = find_max_memidx_and_regidx(operands)
new_strings = expand_stack_operand(found_stackop, memidx, regidx)
# make new operands based on these strings.
if new_strings:
for s in new_strings:
new_op = mk_opnd(agi,s)
if new_op:
operands.append(new_op)
#
##############################################################
return (operands, reset_any)
##################################################################
# Structured input / output of instructions
##################################################################
def is_nonterminal_line(s):
g = nonterminal_start_pattern.search(s)
if g:
# remove everything from the parens to the end of the line
# including two colons
t = parens_to_end_of_line.sub('', s)
wrds = t.split()
short_nt_name = wrds[-1]
if len(wrds) == 1:
type = None
msge("NONTERMINAL: " + short_nt_name + " notype")
else:
type = wrds[0]
msge("NONTERMINAL: " + short_nt_name + " type= " + type)
return (short_nt_name, type)
return (None,None)
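# Hedged example (the exact datafile syntax is an assumption): a header line
# such as "xed_reg_enum_t SOMETHING()::" would return
# ('SOMETHING', 'xed_reg_enum_t'), a bare "SOMETHING()::" would return
# ('SOMETHING', None), and any non-header line returns (None, None).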
def remove_instructions(agi):
for g in agi.generator_list:
ii = g.parser_output.instructions[0]
if field_check(ii,'iclass'):
g.parser_output = remove_overridden_versions(g.parser_output)
def remove_overridden_versions(parser):
"""Remove instructions that have newer versions using a dictionary
of lists."""
d = {}
for ii in parser.instructions:
if ii.iclass in parser.deleted_instructions:
continue # drop this record
if ii.uname in parser.deleted_unames:
msge("DROPPING UNAME %s" % (ii.uname))
continue # drop this record
if ii.iclass in d:
if ii.version == d[ii.iclass][0].version:
d[ii.iclass].append(ii)
elif ii.version > d[ii.iclass][0].version:
# we have an updated version. drop the old stuff and start over
del d[ii.iclass]
d[ii.iclass] = [ii]
else:
pass # drop this record
else:
# add first element of this iclass
d[ii.iclass] = [ii]
iis = []
for ilist in list(d.values()):
iis.extend(ilist)
parser.instructions = iis
return parser
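# Example of the override semantics above (hypothetical records): if the
# datafiles define iclass ADD with version 0 and a later layer redefines ADD
# with version 1, only the version-1 records survive; records with equal
# versions are kept side by side since they are different forms of the same
# iclass, and anything listed in deleted_instructions/deleted_unames is dropped.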
def read_input(agi, lines):
"""Read the input from a flat token-per-line file or a structured
ISA input file"""
msge("read_input " + str(global_inum))
# first line must be a nonterminal
(nt_name, nt_type) = is_nonterminal_line(lines[0])
if not nt_name:
die("Did not find a nonterminal: " + lines[0])
parser = None
# see if we have encountered this nonterminal before
try:
gi = agi.generator_dict[nt_name]
# we have a re-occurrence of an earlier nonterminal. We extend it now.
msge("FOUND OLD PARSER FOR " + nt_name)
parser = gi.parser_output
except:
# need to make a new generator & parser
parser = parser_t()
parser.nonterminal_line = lines[0].strip()
parser.nonterminal_name = nt_name
parser.nonterminal_type = nt_type
gi = agi.make_generator(nt_name)
gi.parser_output = parser
agi.nonterminal_dict.record_nonterminal(nt_name, nt_type)
msge("Nonterminal " + parser.nonterminal_line)
msge("Nonterminal name " + parser.nonterminal_name)
lines.pop(0)
# The {...} defined "instruction" patterns must have the substring
# "INSTRUCTIONS" in their name.
if instructions_pattern.search(parser.nonterminal_name):
nlines = read_structured_input(agi,
agi.common.options,
parser,
lines,
agi.common.state_bits)
else:
nlines = read_flat_input(agi,
agi.common.options,
parser,
lines,
agi.common.state_bits)
return nlines
def read_structured_input(agi, options, parser, lines, state_dict):
msge("read_structured_input")
while len(lines) != 0:
if verb4():
msge("NEXTLINE " + lines[0])
first_line = no_comments(lines[0])
if first_line == '':
lines.pop(0)
continue
first_line = slash_expand.expand_all_slashes(first_line)
if udelete_pattern.search(first_line):
m = udelete_full_pattern.search(first_line)
uname = m.group('uname')
msge("REGISTERING UDELETE | |
# CodeGenX64/legalize.py
import collections
import dataclasses
import sys
from typing import List, Dict, Optional, Tuple
from Base import canonicalize
from Base import ir
from Base import liveness
from Base import lowering
from Base import opcode_tab as o
from Base import optimize
from Base import reg_stats
from Base import sanity
from Base import serialize
from CodeGenX64 import isel_tab
from CodeGenX64 import regs
def DumpBbl(bbl: ir.Bbl):
print("\n".join(serialize.BblRenderToAsm(bbl)))
def DumpFun(reason: str, fun: ir.Fun):
print("#" * 60)
print(f"# {reason}", fun.name)
print("#" * 60)
print("\n".join(serialize.FunRenderToAsm(fun)))
_SUPPORTED_IN_ALL_WIDTHS = {
o.ADD, o.SUB, o.XOR, o.AND, o.OR,
}
def IsOutOfBoundImmediate(opcode, op, pos) -> bool:
if not isinstance(op, ir.Const):
return False
if opcode in {o.DIV, o.REM, o.MUL}:
return True
if pos == 2 and opcode in {o.ST, o.ST_STK, o.ST_MEM}:
return True
if op.kind in {o.DK.S8, o.DK.S16, o.DK.S32, o.DK.S64, o.DK.A64, o.DK.C64}:
return op.value < -(1 << 31) or (1 << 31) <= op.value
elif op.kind in {o.DK.U8, o.DK.U16, o.DK.U32, o.DK.U64}:
return (1 << 31) <= op.value
    elif op.kind in {o.DK.F64, o.DK.F32}:
return True
else:
assert False, f"unknown op: {op}"
def _InsRewriteOutOfBoundsImmediates(
ins: ir.Ins, fun: ir.Fun, unit: ir.Unit) -> Optional[List[ir.Ins]]:
inss = []
if ins.opcode.kind in {o.OPC_KIND.ALU, o.OPC_KIND.COND_BRA, o.OPC_KIND.ALU1, o.OPC_KIND.ST}:
for pos, op in enumerate(ins.operands):
if IsOutOfBoundImmediate(ins.opcode, op, pos):
if op.kind in {o.DK.F32, o.DK.F64}:
inss += lowering.InsEliminateImmediateViaMem(ins, pos, fun, unit,
o.DK.A64, o.DK.U32)
else:
inss.append(lowering.InsEliminateImmediateViaMov(ins, pos, fun))
inss.append(ins)
return inss
def _FunRewriteOutOfBoundsImmediates(fun: ir.Fun, unit: ir.Unit) -> int:
return ir.FunGenericRewrite(fun, _InsRewriteOutOfBoundsImmediates, unit=unit)
def _InsRewriteDivRem(
ins: ir.Ins, fun: ir.Fun) -> Optional[List[ir.Ins]]:
inss = []
opc = ins.opcode
ops = ins.operands
if opc is o.DIV and ops[0].kind.flavor != o.DK_FLAVOR_F:
rax = fun.FindOrAddCpuReg(regs.CPU_REGS_MAP["rax"], ops[0].kind)
rdx = fun.FindOrAddCpuReg(regs.CPU_REGS_MAP["rdx"], ops[0].kind)
return [ir.Ins(o.MOV, [rax, ops[1]]),
ir.Ins(o.DIV, [rdx, rax, ops[2]]), # note the notion of src/dst regs is murky here
ir.Ins(o.MOV, [ops[0], rax])]
elif opc is o.REM and ops[0].kind.flavor != o.DK_FLAVOR_F:
rax = fun.FindOrAddCpuReg(regs.CPU_REGS_MAP["rax"], ops[0].kind)
rdx = fun.FindOrAddCpuReg(regs.CPU_REGS_MAP["rdx"], ops[0].kind)
return [ir.Ins(o.MOV, [rax, ops[1]]),
ir.Ins(o.DIV, [rdx, rax, ops[2]]), # note the notion of src/dst regs is murky here
ir.Ins(o.MOV, [ops[0], rdx])]
else:
return None
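# Background for the rewrite above: the x86-64 integer divide implicitly uses
# the fixed registers rax (quotient) and rdx (remainder), so DIV/REM are
# lowered to a mov into rax, a divide naming both fixed registers, and a mov of
# the wanted half (rax for DIV, rdx for REM) back into the original destination.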
def _FunRewriteDivRem(fun: ir.Fun) -> int:
return ir.FunGenericRewrite(fun, _InsRewriteDivRem)
def NeedsAABFromRewrite(ins: ir.Ins):
opc = ins.opcode
ops = ins.operands
if opc.kind not in {o.OPC_KIND.ALU, o.OPC_KIND.LEA}:
return False
if opc in {o.DIV, o.REM} and ops[0].kind.flavor != o.DK_FLAVOR_F:
return False
if opc in {o.LEA_MEM, o.LEA_STK}:
return False
return True
def _InsRewriteIntoAABForm(
ins: ir.Ins, fun: ir.Fun) -> Optional[List[ir.Ins]]:
ops = ins.operands
if not NeedsAABFromRewrite(ins):
return None
if ops[0] == ops[1]:
ops[0].flags |= ir.REG_FLAG.TWO_ADDRESS
return None
if ops[0] == ops[2] and o.OA.COMMUTATIVE in ins.opcode.attributes:
ir.InsSwapOps(ins, 1, 2)
ops[0].flags |= ir.REG_FLAG.TWO_ADDRESS
return [ins]
else:
reg = fun.GetScratchReg(ins.operands[0].kind, "aab", False)
reg.flags |= ir.REG_FLAG.TWO_ADDRESS
return [ir.Ins(o.MOV, [reg, ops[1]]),
ir.Ins(ins.opcode, [reg, reg, ops[2]]),
ir.Ins(o.MOV, [ops[0], reg])]
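# Hedged sketch (not called anywhere) of the shape of the AAB rewrite above,
# using the same ir.Ins / o.MOV objects as the real code; `dst`, `src1`, `src2`
# and `tmp` stand for whatever registers the caller would supply:
def _aab_rewrite_shape_example(opcode, dst, src1, src2, tmp):
    # op dst = src1, src2   ==>   mov tmp = src1 ; op tmp = tmp, src2 ; mov dst = tmp
    return [ir.Ins(o.MOV, [tmp, src1]),
            ir.Ins(opcode, [tmp, tmp, src2]),
            ir.Ins(o.MOV, [dst, tmp])]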
def _FunRewriteIntoAABForm(fun: ir.Fun, unit: ir.Unit) -> int:
"""Bring instructions into A A B form (dst == src1). See README.md"""
return ir.FunGenericRewrite(fun, _InsRewriteIntoAABForm)
def _InsMoveEliminationCpu(ins: ir.Ins, _fun: ir.Fun) -> Optional[List[ir.Ins]]:
# TODO: handle conv
if ins.opcode not in {o.MOV}:
return None
dst, src = ins.operands[0], ins.operands[1]
if not isinstance(src, ir.Reg):
return None
assert dst.cpu_reg and src.cpu_reg
if src.cpu_reg != dst.cpu_reg:
return None
return []
def _FunMoveEliminationCpu(fun: ir.Fun) -> int:
return ir.FunGenericRewrite(fun, _InsMoveEliminationCpu)
def PhaseOptimize(fun: ir.Fun, unit: ir.Unit, opt_stats: Dict[str, int], fout):
optimize.FunCfgInit(fun, unit)
optimize.FunOptBasic(fun, opt_stats, allow_conv_conversion=True)
def PhaseLegalization(fun: ir.Fun, unit: ir.Unit, _opt_stats: Dict[str, int], fout):
"""
    Does a lot of the heavy lifting so that the instruction selector can remain
simple and table driven.
* lift almost all regs to 32bit width
* rewrite Ins that cannot be expanded
* rewrite immediates that cannot be expanded except stack offsets which are dealt with in
another pass
TODO: missing is a function to change calling signature so that
"""
fun.cpu_live_in = regs.PushPopInterface.GetCpuRegsForInSignature(fun.input_types)
fun.cpu_live_out = regs.PushPopInterface.GetCpuRegsForOutSignature(fun.output_types)
if fun.kind is not o.FUN_KIND.NORMAL:
return
    # Getting rid of the pusharg/poparg now relieves us from having to pay attention to the
# invariant that pushargs/popargs must be adjacent.
lowering.FunPushargConversion(fun, regs.PushPopInterface)
lowering.FunPopargConversion(fun, regs.PushPopInterface)
# We did not bother with this addressing mode
lowering.FunEliminateStkLoadStoreWithRegOffset(fun, base_kind=o.DK.A64,
offset_kind=o.DK.S32)
# TODO: switch this to a WithRegOffset flavor
lowering.FunEliminateMemLoadStore(fun, base_kind=o.DK.A64,
offset_kind=o.DK.S32)
canonicalize.FunCanonicalize(fun)
# TODO: add a cfg linearization pass to improve control flow
    optimize.FunCfgExit(fun, unit)  # note: this may affect immediates as it flips branches
# Handle most overflowing immediates.
# This excludes immediates related to stack offsets which have not been determined yet
_FunRewriteOutOfBoundsImmediates(fun, unit)
# Recompute Everything (TODO: make this more selective to reduce work)
reg_stats.FunComputeRegStatsExceptLAC(fun)
reg_stats.FunDropUnreferencedRegs(fun)
liveness.FunComputeLivenessInfo(fun)
reg_stats.FunComputeRegStatsLAC(fun)
reg_stats.FunSeparateLocalRegUsage(fun)
# DumpRegStats(fun, local_reg_stats)
# mul/div/rem need special treatment
_FunRewriteDivRem(fun)
_FunRewriteIntoAABForm(fun, unit)
# if fun.name == "fibonacci": DumpFun("end of legal", fun)
# if fun.name == "write_s": exit(1)
sanity.FunCheck(fun, None)
# optimize.FunOptBasic(fun, opt_stats, allow_conv_conversion=False)
KIND_AND_LAC = Tuple[o.DK, bool]
def _FunGlobalRegStats(fun: ir.Fun, reg_kind_map: Dict[o.DK, o.DK]) -> Dict[KIND_AND_LAC, List[ir.Reg]]:
out: Dict[KIND_AND_LAC, List[ir.Reg]] = collections.defaultdict(list)
for reg in fun.regs:
if not reg.HasCpuReg() and ir.REG_FLAG.GLOBAL in reg.flags:
out[(reg_kind_map[reg.kind], ir.REG_FLAG.LAC in reg.flags)].append(reg)
for v in out.values():
v.sort()
return out
def DumpRegStats(fun: ir.Fun, stats: Dict[reg_stats.REG_KIND_LAC, int], fout):
local_lac = 0
local_not_lac = 0
for (kind, lac), count in stats.items():
if lac:
local_lac += count
else:
local_not_lac += count
allocated_lac = []
allocated_not_lac = []
global_lac = []
global_not_lac = []
for reg in fun.regs:
if ir.REG_FLAG.GLOBAL not in reg.flags: continue
if reg.HasCpuReg():
if ir.REG_FLAG.LAC in reg.flags:
allocated_lac.append(reg)
else:
allocated_not_lac.append(reg)
else:
if ir.REG_FLAG.LAC in reg.flags:
global_lac.append(reg)
else:
global_not_lac.append(reg)
if fout:
print(f"# REGSTATS {fun.name:20s} "
f"all: {len(allocated_lac):2} {len(allocated_not_lac):2} "
f"glo: {len(global_lac):2} {len(global_not_lac):2} "
f"loc: {local_lac:2} {local_not_lac:2}", file=fout)
@dataclasses.dataclass()
class RegsNeeded:
"""estimate for how many regs are needed"""
global_lac: int = 0
global_not_lac: int = 0
local_lac: int = 0
local_not_lac: int = 0
    def __repr__(self):
        return f"RegsNeeded: {self.global_lac} {self.global_not_lac} {self.local_lac} {self.local_not_lac}"
def _spilling_needed(needed: RegsNeeded, num_global_lac: int,
num_local_not_lac: int) -> bool:
""" Note: this assumes the early condition of the pools with only two lists populated"""
return (needed.global_lac + needed.local_lac > num_global_lac or
needed.global_lac + needed.local_lac + needed.global_not_lac + needed.local_not_lac >
num_global_lac + num_local_not_lac)
def _maybe_move_excess(src, dst, n):
if n < len(src):
if dst is not None:
dst += src[n:]
del src[n:]
def _popcount(x):
return bin(x).count('1')
def _FindMaskCoveringTheLowOrderSetBits(bits: int, count: int) -> int:
if count == 0: return 0
mask = 1
n = 0
while n < count:
if (mask & bits) != 0:
n += 1
mask <<= 1
return mask - 1
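# Worked example for the helper above: with bits == 0b1011010 and count == 2
# the loop advances until it has covered the two lowest set bits (bits 1 and
# 3) and returns 0b1111, i.e. a mask of every position up to and including the
# second-lowest set bit.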
def _GetRegPoolsForGlobals(needed: RegsNeeded, regs_lac: int,
regs_not_lac: int, regs_preallocated: int) -> Tuple[int, int]:
"""
Allocate global registers
    Initially all allocatable regs are in either pools.global_lac or pools.local_not_lac.
    We want to assign a subset of these to global_lac and global_not_lac.
    We want the low-numbered regs to stay in pools.local_not_lac as much as possible,
    to avoid moves, as this is where the parameters arrive and results get returned.
    We also want to use as few callee-saved registers as possible.
If we need to spill, earmark one more local_not_lac reg to handle the spilling
TODO: this needs some more thinking - the worst case could require more regs
"""
num_regs_lac = _popcount(regs_lac)
num_regs_not_lac = _popcount(regs_not_lac)
spilling_needed = _spilling_needed(needed, num_regs_lac, num_regs_not_lac)
global_lac = regs_lac
local_lac = 0
# excess lac globals can be used for lac locals
if num_regs_lac > needed.global_lac:
mask = _FindMaskCoveringTheLowOrderSetBits(global_lac, needed.global_lac)
local_lac = global_lac & ~mask
global_lac = global_lac & mask
    # we can use local_not_lac as global_not_lac but only if they are not pre-allocated
# because the global allocator does not check for live range conflicts
global_not_lac = 0
if num_regs_not_lac > needed.local_not_lac + spilling_needed:
mask = _FindMaskCoveringTheLowOrderSetBits(
regs_not_lac, needed.local_not_lac + spilling_needed)
global_not_lac = regs_not_lac & ~(mask | regs_preallocated)
if _popcount(local_lac) > needed.local_lac:
mask = _FindMaskCoveringTheLowOrderSetBits(local_lac, needed.local_lac)
global_not_lac |= local_lac & ~mask
return global_lac, global_not_lac
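# Hand-traced example (under the bitmask conventions above, numbers are
# illustrative): with needed = RegsNeeded(global_lac=1, global_not_lac=1,
# local_lac=0, local_not_lac=1), regs_lac = 0b1100, regs_not_lac = 0b0011 and
# no preallocated regs, no spilling is required and the function returns
# (0b0100, 0b1010): the lower lac reg serves the lac global, while the spare
# lac reg plus the higher caller-saved reg form the not-lac global pool.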
def PhaseGlobalRegAlloc(fun: ir.Fun, _opt_stats: Dict[str, int], fout):
"""
    This phase introduces CpuRegs for globals and for situations where we have no choice
which register to use, e.g. function parameters and results ("pre-allocated" regs).
After this function has been run all globals will have a valid cpu_reg and
we have to be careful to not introduce new globals subsequently.
If not enough cpu_regs are available for all globals, some of them will be spilled.
    We err on the side of spilling more; the biggest danger is to over-allocate and then
lack registers for intra-bbl register allocation.
    The whole global allocator is terrible and so ...
    """
"""
Copyright (c) 2017 Matterport, Inc.
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license
(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import os
import random
import itertools
import numpy as np
from skimage.measure import find_contours
import cv2
from models.model import detection_layer, unmold_detections
from models.modules import *
from utils import *
def tileImages(image_list, padding_x=5, padding_y=5, background_color=0):
"""Tile images"""
height = image_list[0][0].shape[0]
width = image_list[0][0].shape[1]
result_image = np.full((height * len(image_list) + padding_y * (len(image_list) + 1), width * len(image_list[0]) + padding_x * (len(image_list[0]) + 1), 3), fill_value=background_color, dtype=np.uint8)
for index_y, images in enumerate(image_list):
for index_x, image in enumerate(images):
offset_x = index_x * width + (index_x + 1) * padding_x
offset_y = index_y * height + (index_y + 1) * padding_y
if image.ndim == 2:
                image = np.tile(np.expand_dims(image, axis=-1), (1, 1, 3))
pass
result_image[offset_y:offset_y + height, offset_x:offset_x + width] = image
continue
continue
return result_image
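# Minimal usage sketch (hypothetical images): tileImages([[img_a, img_b],
# [img_c, img_d]]) lays four equally sized HxWx3 (or HxW) uint8 images out on a
# 2x2 grid with 5-pixel padding and returns a single uint8 canvas.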
############################################################
# Batch visualization
############################################################
def visualizeBatchDeMoN(options, input_dict, results, indexOffset=0, prefix='', concise=False):
cornerColorMap = {'gt': np.array([255, 0, 0]), 'pred': np.array([0, 0, 255]), 'inp': np.array([0, 255, 0])}
topdownSize = 256
for batchIndex in range(len(input_dict['image_1'])):
pose = input_dict['pose'][batchIndex]
for resultIndex, result in enumerate(results):
if concise and resultIndex < len(results) - 1:
continue
depth_pred = invertDepth(result['depth'][batchIndex]).detach().cpu().numpy().squeeze()
depth_gt = input_dict['depth'][batchIndex].squeeze()
if depth_pred.shape[0] != depth_gt.shape[0]:
depth_pred = cv2.resize(depth_pred, (depth_gt.shape[1], depth_gt.shape[0]))
pass
if options.scaleMode != 'variant':
valid_mask = np.logical_and(depth_gt > 1e-4, depth_pred > 1e-4)
depth_gt_values = depth_gt[valid_mask]
depth_pred_values = depth_pred[valid_mask]
scale = np.exp(np.mean(np.log(depth_gt_values) - np.log(depth_pred_values)))
depth_pred *= scale
pass
cv2.imwrite(options.test_dir + '/' + str(indexOffset + batchIndex) + '_depth_pred_' + str(len(results) - 1 - resultIndex) + '.png', drawDepthImage(depth_pred))
if 'flow' in result:
flow_pred = result['flow'][batchIndex, :2].detach().cpu().numpy().transpose((1, 2, 0))
cv2.imwrite(options.test_dir + '/' + str(indexOffset + batchIndex) + '_flow_pred_' + str(len(results) - 1 - resultIndex) + '.png', cv2.resize(drawFlowImage(flow_pred), (256, 192)))
pass
if 'rotation' in result and resultIndex >= len(results) - 2:
pass
continue
if not concise:
cv2.imwrite(options.test_dir + '/' + str(indexOffset + batchIndex) + '_depth_gt.png', drawDepthImage(input_dict['depth'][batchIndex]))
cv2.imwrite(options.test_dir + '/' + str(indexOffset + batchIndex) + '_image_0.png', (input_dict['image_1'][batchIndex].transpose((1, 2, 0)) + 0.5) * 255)
cv2.imwrite(options.test_dir + '/' + str(indexOffset + batchIndex) + '_image_1.png', (input_dict['image_2'][batchIndex].transpose((1, 2, 0)) + 0.5) * 255)
flow_gt = input_dict['flow'][batchIndex, :2].transpose((1, 2, 0))
cv2.imwrite(options.test_dir + '/' + str(indexOffset + batchIndex) + '_flow_gt.png', cv2.resize(drawFlowImage(flow_gt), (256, 192)))
pass
continue
return
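# The scale factor used in visualizeBatchDeMoN above is the geometric-mean
# ratio between ground-truth and predicted depth over valid pixels, which
# aligns a scale-ambiguous prediction to the ground truth before drawing. A
# standalone sketch of the same alignment (plain numpy arrays assumed):
def alignDepthScale(depth_pred, depth_gt):
    """Scale a scale-ambiguous depth prediction onto the ground-truth depth."""
    valid_mask = np.logical_and(depth_gt > 1e-4, depth_pred > 1e-4)
    scale = np.exp(np.mean(np.log(depth_gt[valid_mask]) - np.log(depth_pred[valid_mask])))
    return depth_pred * scale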
def visualizeBatchPair(options, config, inp_pair, detection_pair, indexOffset=0, prefix='', suffix='', write_ply=False, write_new_view=False):
detection_images = []
for pair_index, (input_dict, detection_dict) in enumerate(zip(inp_pair, detection_pair)):
image_dict = visualizeBatchDetection(options, config, input_dict, detection_dict, indexOffset=indexOffset, prefix=prefix, suffix='_' + str(pair_index), prediction_suffix=suffix, write_ply=write_ply, write_new_view=write_new_view)
detection_images.append(image_dict['detection'])
continue
detection_image = tileImages([detection_images])
return
def visualizeBatchRefinement(options, config, input_dict, results, indexOffset=0, prefix='', suffix='', concise=False):
if not concise:
image = (input_dict['image'].detach().cpu().numpy().transpose((0, 2, 3, 1))[0] + 0.5) * 255
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_image_0.png', image)
image_2 = (input_dict['image_2'].detach().cpu().numpy().transpose((0, 2, 3, 1))[0] + 0.5) * 255
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_image_1.png', image_2)
depth_gt = input_dict['depth'].detach().cpu().numpy().squeeze()
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_depth_gt.png', drawDepthImage(depth_gt))
flow_gt = input_dict['flow'][0, :2].detach().cpu().numpy().transpose((1, 2, 0))
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_flow_gt.png', cv2.resize(drawFlowImage(flow_gt), (256, 192)))
pass
numbers = []
for resultIndex, result in enumerate(results):
if 'mask' in result and (options.losses == '' or '0' in options.losses):
masks = result['mask'].detach().cpu().numpy()
masks = np.concatenate([np.maximum(1 - masks.sum(0, keepdims=True), 0), masks], axis=0).transpose((1, 2, 0))
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_segmentation_' + str(len(results) - 1 - resultIndex) + '.png', drawSegmentationImage(masks, blackIndex=0) * (masks.max(-1, keepdims=True) > 0.5).astype(np.uint8))
pass
if concise:
continue
if 'depth' in result and (options.losses == '' or '3' in options.losses):
depth_pred = invertDepth(result['depth']).detach().cpu().numpy().squeeze()
if depth_pred.shape[0] != depth_gt.shape[0]:
depth_pred = cv2.resize(depth_pred, (depth_gt.shape[1], depth_gt.shape[0]))
pass
if options.scaleMode != 'variant':
valid_mask = np.logical_and(depth_gt > 1e-4, depth_pred > 1e-4)
depth_gt_values = depth_gt[valid_mask]
depth_pred_values = depth_pred[valid_mask]
scale = np.exp(np.mean(np.log(depth_gt_values) - np.log(depth_pred_values)))
depth_pred *= scale
pass
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_depth_pred_' + str(len(results) - 1 - resultIndex) + '.png', drawDepthImage(depth_pred))
pass
if 'plane_depth' in result and (options.losses == '' or '3' in options.losses):
depth_pred = invertDepth(result['plane_depth']).detach().cpu().numpy().squeeze()
if depth_pred.shape[0] != depth_gt.shape[0]:
depth_pred = cv2.resize(depth_pred, (depth_gt.shape[1], depth_gt.shape[0]))
pass
if options.scaleMode != 'variant':
valid_mask = np.logical_and(depth_gt > 1e-4, depth_pred > 1e-4)
depth_gt_values = depth_gt[valid_mask]
depth_pred_values = depth_pred[valid_mask]
scale = np.exp(np.mean(np.log(depth_gt_values) - np.log(depth_pred_values)))
depth_pred *= scale
pass
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_depth_pred_plane_' + str(len(results) - 1 - resultIndex) + '.png', drawDepthImage(depth_pred))
pass
if 'flow' in result and (options.losses == '' or '1' in options.losses):
flow_pred = result['flow'][0, :2].detach().cpu().numpy().transpose((1, 2, 0))
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_flow_pred_' + str(len(results) - 1 - resultIndex) + '.png', cv2.resize(drawFlowImage(flow_pred), (256, 192)))
pass
if 'rotation' in result and resultIndex >= len(results) - 2:
pass
if 'plane' in result and resultIndex > 0:
numbers.append(np.linalg.norm(result['plane'].detach().cpu().numpy() - results[0]['plane'].detach().cpu().numpy()))
pass
if 'warped_image' in result and resultIndex >= len(results) - 2:
warped_image = ((result['warped_image'].detach().cpu().numpy().transpose((0, 2, 3, 1))[0] + 0.5) * 255).astype(np.uint8)
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_image_warped_' + str(len(results) - 1 - resultIndex) + '.png', warped_image)
pass
if 'plane_depth_one_hot' in result:
depth_pred = invertDepth(result['plane_depth_one_hot']).detach().cpu().numpy().squeeze()
if depth_pred.shape[0] != depth_gt.shape[0]:
depth_pred = cv2.resize(depth_pred, (depth_gt.shape[1], depth_gt.shape[0]))
pass
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_depth_pred_plane_onehot_' + str(len(results) - 1 - resultIndex) + '.png', drawDepthImage(depth_pred))
pass
continue
if 'parameter' in options.suffix:
print('plane diff', numbers)
pass
return
def visualizeBatchDetection(options, config, input_dict, detection_dict, indexOffset=0, prefix='', suffix='', prediction_suffix='', write_ply=False, write_new_view=False):
image_dict = {}
images = input_dict['image'].detach().cpu().numpy().transpose((0, 2, 3, 1))
images = unmold_image(images, config)
image = images[0]
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_image' + suffix + '.png', image[80:560])
if 'warped_image' in input_dict:
warped_images = input_dict['warped_image'].detach().cpu().numpy().transpose((0, 2, 3, 1))
warped_images = unmold_image(warped_images, config)
warped_image = warped_images[0]
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_image' + suffix + '_warped.png', warped_image[80:560])
pass
if 'warped_depth' in input_dict:
warped_depth = input_dict['warped_depth'].detach().cpu().numpy()
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_depth' + suffix + '_warped.png', drawDepthImage(warped_depth[80:560]))
pass
if 'warped_mask' in input_dict:
warped_mask = input_dict['warped_mask'].detach().cpu().numpy()[0]
pass
if 'depth' in input_dict:
depths = input_dict['depth'].detach().cpu().numpy()
depth_gt = depths[0]
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_depth' + suffix + '.png', drawDepthImage(depth_gt[80:560]))
pass
    windows = (0, 0, images.shape[1], images.shape[2])
class_colors = ColorPalette(config.NUM_CLASSES).getColorMap().tolist()
if 'mask' in input_dict:
box_image = image.copy()
boxes = input_dict['bbox'][0].detach().cpu().numpy()
masks = input_dict['mask'][0].detach().cpu().numpy()
if config.NUM_PARAMETER_CHANNELS > 0:
depths = masks[:, :, :, 1]
masks = masks[:, :, :, 0]
pass
segmentation_image = image * 0.0
for box, mask in zip(boxes, masks):
box = np.round(box).astype(np.int32)
mask = cv2.resize(mask, (box[3] - box[1], box[2] - box[0]))
segmentation_image[box[0]:box[2], box[1]:box[3]] = np.minimum(segmentation_image[box[0]:box[2], box[1]:box[3]] + np.expand_dims(mask, axis=-1) * np.random.randint(255, size=(3, ), dtype=np.int32), 255)
continue
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_segmentation' + suffix + '.png', segmentation_image.astype(np.uint8)[80:560])
if config.NUM_PARAMETER_CHANNELS > 0 and not config.OCCLUSION:
depth_image = np.zeros((image.shape[0], image.shape[1]))
for box, patch_depth in zip(boxes, depths):
box = np.round(box).astype(np.int32)
                patch_depth = cv2.resize(patch_depth, (box[3] - box[1], box[2] - box[0]), interpolation=cv2.INTER_NEAREST)
depth_image[box[0]:box[2], box[1]:box[3]] = patch_depth
continue
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_depth_patch' + suffix + '.png', drawDepthImage(depth_image[80:560]))
pass
pass
if 'boundary' in detection_dict:
boundary_pred = detection_dict['boundary'].detach().cpu().numpy()[0]
boundary_gt = input_dict['boundary'].detach().cpu().numpy()[0]
for name, boundary in [('gt', boundary_gt), ('pred', boundary_pred)]:
boundary_image = image.copy()
boundary_image[boundary[0] > 0.5] = np.array([255, 0, 0])
boundary_image[boundary[1] > 0.5] = np.array([0, 0, 255])
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_boundary' + suffix + '_' + name + '.png', boundary_image)
continue
pass
if 'depth' in detection_dict:
depth_pred = detection_dict['depth'][0].detach().cpu().numpy()
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_depth' + suffix + prediction_suffix + '.png', drawDepthImage(depth_pred[80:560]))
if options.debug:
valid_mask = (depth_gt > 1e-4) * (input_dict['segmentation'].detach().cpu().numpy()[0] >= 0) * (detection_dict['mask'].detach().cpu().numpy().squeeze() > 0.5)
pass
pass
if 'depth_np' in detection_dict:
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_depth' + suffix + prediction_suffix + '_np.png', drawDepthImage(detection_dict['depth_np'].squeeze().detach().cpu().numpy()[80:560]))
pass
if 'depth_ori' in detection_dict:
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_depth' + suffix + prediction_suffix + '_ori.png', drawDepthImage(detection_dict['depth_ori'].squeeze().detach().cpu().numpy()[80:560]))
pass
if 'detection' in detection_dict and len(detection_dict['detection']) > 0:
detections = detection_dict['detection'].detach().cpu().numpy()
detection_masks = detection_dict['masks'].detach().cpu().numpy().transpose((1, 2, 0))
if 'flag' in detection_dict:
detection_flags = detection_dict['flag']
else:
detection_flags = {}
pass
instance_image, normal_image, depth_image = draw_instances(config, image, depth_gt, detections[:, :4], detection_masks > 0.5, detections[:, 4].astype(np.int32), detections[:, 6:], detections[:, 5], draw_mask=True, transform_planes=False, detection_flags=detection_flags)
image_dict['detection'] = instance_image
cv2.imwrite(options.test_dir + '/' + str(indexOffset) + '_segmentation' + suffix + prediction_suffix + '.png', instance_image[80:560])
else:
image_dict['detection'] = np.zeros(image.shape, dtype=image.dtype)
import os
from copy import deepcopy
from pathlib import Path
import numpy as np
import pytest
from qualang_tools.bakery.bakery import baking
from qualang_tools.bakery.randomized_benchmark_c1 import c1_table, c1_ops
def gauss(amplitude, mu, sigma, length):
t = np.linspace(-length / 2, length / 2, length)
gauss_wave = amplitude * np.exp(-((t - mu) ** 2) / (2 * sigma ** 2))
return [float(x) for x in gauss_wave]
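# For reference: gauss(0.2, 0, 15, 80), used for "gauss_wf" in the fixture
# below, returns 80 samples of a Gaussian with amplitude 0.2 and sigma 15
# centred in the 80 ns window.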
def abs_path_to(rel_path: str) -> str:
source_path = Path(__file__).resolve()
source_dir = source_path.parent
return os.path.join(source_dir, rel_path)
@pytest.fixture
def config():
def IQ_imbalance(g, phi):
c = np.cos(phi)
s = np.sin(phi)
N = 1 / ((1 - g ** 2) * (2 * c ** 2 - 1))
return [
float(N * x) for x in [(1 - g) * c, (1 + g) * s, (1 - g) * s, (1 + g) * c]
]
return {
"version": 1,
"controllers": {
"con1": {
"type": "opx1",
"analog_outputs": {
1: {"offset": +0.0},
2: {"offset": +0.0},
3: {"offset": +0.0},
},
"digital_outputs": {1: {}, 2: {}},
}
},
"elements": {
"qe1": {
"singleInput": {"port": ("con1", 1)},
"intermediate_frequency": 0,
"operations": {
"playOp": "constPulse",
"a_pulse": "arb_pulse1",
"playOp2": "constPulse2",
},
"digitalInputs": {
"digital_input1": {
"port": ("con1", 1),
"delay": 0,
"buffer": 0,
}
},
},
"qe2": {
"mixInputs": {
"I": ("con1", 2),
"Q": ("con1", 3),
"lo_frequency": 0,
"mixer": "mixer_qubit",
},
"intermediate_frequency": 0,
"operations": {"constOp": "constPulse_mix", "gaussOp": "gauss_pulse"},
},
},
"pulses": {
"constPulse": {
"operation": "control",
"length": 1000, # in ns
"waveforms": {"single": "const_wf"},
},
"constPulse2": {
"operation": "control",
"length": 1000, # in ns
"waveforms": {"single": "const_wf"},
"digital_marker": "ON",
},
"arb_pulse1": {
"operation": "control",
"length": 100, # in ns
"waveforms": {"single": "arb_wf"},
},
"constPulse_mix": {
"operation": "control",
"length": 80,
"waveforms": {"I": "const_wf", "Q": "zero_wf"},
},
"gauss_pulse": {
"operation": "control",
"length": 80,
"waveforms": {"I": "gauss_wf", "Q": "zero_wf"},
},
},
"waveforms": {
"zero_wf": {"type": "constant", "sample": 0.0},
"const_wf": {"type": "constant", "sample": 0.2},
"arb_wf": {"type": "arbitrary", "samples": [i / 200 for i in range(100)]},
"gauss_wf": {"type": "arbitrary", "samples": gauss(0.2, 0, 15, 80)},
},
"digital_waveforms": {
"ON": {"samples": [(1, 0)]},
},
"mixers": {
"mixer_qubit": [
{
"intermediate_frequency": 0,
"lo_frequency": 0,
"correction": IQ_imbalance(0.0, 0.0),
}
],
},
}
def test_c1_data():
c1_correct_table = np.load(abs_path_to("c1_table.npy"))
c1_correct_ops = np.load(abs_path_to("c1_ops.npy"), allow_pickle=True)
assert (c1_correct_table == c1_table).all()
assert (c1_correct_ops == np.array(c1_ops, dtype=object)).all()
def test_override_waveform(config):
cfg = deepcopy(config)
with baking(cfg, padding_method="right", override=True) as b_ref:
b_ref.play("gaussOp", "qe2")
ref_length = b_ref.get_op_length("qe2")
with baking(
cfg,
padding_method="right",
override=False,
baking_index=b_ref.get_baking_index(),
) as b_new:
samples = [[0.2] * 30, [0.0] * 30]
b_new.add_op("customOp", "qe2", samples)
b_new.play("customOp", "qe2")
assert b_new.get_op_length("qe2") == ref_length
def test_out_boolean(config):
cfg = deepcopy(config)
with baking(cfg) as b:
assert not b.is_out()
b.play("playOp", "qe1")
assert b.get_current_length("qe1") == 1000
assert b.is_out()
with baking(cfg) as b:
assert b.get_current_length("qe1") == 0
assert not b.is_out()
def test_delete_Op(config):
cfg = deepcopy(config)
with baking(cfg) as b:
b.play("playOp", "qe1")
b.play("gaussOp", "qe2")
assert "baked_Op_0" in cfg["elements"]["qe1"]["operations"]
assert "qe1_baked_pulse_0" in cfg["pulses"]
assert "qe2_baked_pulse_0" in cfg["pulses"]
b.delete_baked_op("qe1")
assert "baked_Op_0" not in cfg["elements"]["qe1"]["operations"]
assert "qe1_baked_pulse_0" not in cfg["pulses"]
with b:
b.play("playOp", "qe1")
assert "baked_Op_0" in cfg["elements"]["qe1"]["operations"]
b.delete_baked_op()
assert "baked_Op_0" not in cfg["elements"]["qe1"]["operations"]
assert "baked_Op_0" not in cfg["elements"]["qe2"]["operations"]
with baking(cfg) as b:
b.add_digital_waveform("dig_wf", [(1, 0)])
b.add_op("new_Op", "qe1", [0.3] * 100, "dig_wf")
b.play("new_Op", "qe1")
assert "qe1_baked_digital_wf_0" in cfg["digital_waveforms"]
b.delete_baked_op()
assert "qe1_baked_digital_wf_0" not in cfg["digital_waveforms"]
def test_indices_behavior(config):
cfg = deepcopy(config)
with baking(cfg) as b1:
b1.play("gaussOp", "qe2")
assert all(
[
cfg["waveforms"]["qe2_baked_wf_I_0"]["samples"][i]
== gauss(0.2, 0, 15, 80)[i]
for i in range(80)
]
)
print(b1.get_op_name("qe2"), cfg["waveforms"]["qe2_baked_wf_I_0"]["samples"])
with b1:
b1.play("gaussOp", "qe2", amp=2)
print(b1.get_op_name("qe2"), cfg["waveforms"]["qe2_baked_wf_I_0"]["samples"])
assert all(
[
cfg["waveforms"]["qe2_baked_wf_I_0"]["samples"][i]
== gauss(0.4, 0, 15, 80)[i]
for i in range(80)
]
)
print(config["waveforms"].keys())
def test_play_at_negative_t(config):
cfg = deepcopy(config)
with baking(config=cfg, padding_method="symmetric_r") as b:
const_Op = [0.3, 0.3, 0.3, 0.3, 0.3]
const_Op2 = [0.2, 0.2, 0.2, 0.3, 0.3]
b.add_op("Op1", "qe2", [const_Op, const_Op2]) # qe1 is a mixInputs element
Op3 = [0.1, 0.1, 0.1, 0.1]
Op4 = [0.1, 0.1, 0.1, 0.1]
b.add_op("Op2", "qe2", [Op3, Op4])
b.play("Op1", "qe2")
# The baked waveform is at this point I: [0.3, 0.3, 0.3, 0.3, 0.3]
# Q: [0.2, 0.2, 0.2, 0.3, 0.3]
b.play_at(
"Op2", "qe2", t=-2
) # t indicates the time index where these new samples should be added
# The baked waveform is now I: [0.3, 0.3, 0.3, 0.4, 0.4, 0.1, 0.1]
# Q: [0.2, 0.2, 0.2, 0.4, 0.4, 0.1, 0.1]
print(b.get_waveforms_dict())
assert np.array_equal(
np.round(np.array(b.get_waveforms_dict()["waveforms"]["qe2_baked_wf_I_0"]), 4),
np.array([0, 0, 0, 0, 0.3, 0.3, 0.3, 0.4, 0.4, 0.1, 0.1, 0, 0, 0, 0, 0]),
)
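# Arithmetic behind the assertion above: play_at(..., t=-2) starts the 4-sample
# Op2 two samples before the end of the 5-sample Op1, so the two overlapping
# samples add up to 0.3 + 0.1 = 0.4 and the remaining two Op2 samples extend
# the waveform to 7 samples; the symmetric_r padding then zero-pads the result
# out to the 16-sample waveform length checked above.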
def test_negative_wait(config):
cfg = deepcopy(config)
with baking(config=cfg, padding_method="symmetric_r") as b:
const_Op = [0.3, 0.3, 0.3, 0.3, 0.3]
const_Op2 = [0.2, 0.2, 0.2, 0.3, 0.3]
b.add_op("Op1", "qe2", [const_Op, const_Op2]) # qe1 is a mixInputs element
Op3 = [0.1, 0.1, 0.1, 0.1]
Op4 = [0.1, 0.1, 0.1, 0.1]
b.add_op("Op2", "qe2", [Op3, Op4])
b.play("Op1", "qe2")
# The baked waveform is at this point I: [0.3, 0.3, 0.3, 0.3, 0.3]
# Q: [0.2, 0.2, 0.2, 0.3, 0.3]
b.wait(-3, "qe2")
b.play(
"Op2", "qe2"
) # t indicates the time index where these new samples should be added
        # The baked waveform is now I: [0.3, 0.3, 0.4, 0.4, 0.4, 0.1]
        #                           Q: [0.2, 0.2, 0.3, 0.4, 0.4, 0.1]
print(b.get_waveforms_dict())
assert np.array_equal(
np.round(np.array(b.get_waveforms_dict()["waveforms"]["qe2_baked_wf_I_0"]), 4),
np.array([0, 0, 0, 0, 0, 0.3, 0.3, 0.4, 0.4, 0.4, 0.1, 0, 0, 0, 0, 0]),
)
def test_play_at_negative_t_too_large(config):
cfg = deepcopy(config)
with baking(config=cfg, padding_method="symmetric_r") as b:
const_Op = [0.3, 0.3, 0.3, 0.3, 0.3]
const_Op2 = [0.2, 0.2, 0.2, 0.3, 0.3]
b.add_op("Op1", "qe2", [const_Op, const_Op2]) # qe1 is a mixInputs element
Op3 = [0.1, 0.1, 0.1, 0.1]
Op4 = [0.1, 0.1, 0.1, 0.1]
b.add_op("Op2", "qe2", [Op3, Op4])
b.play("Op1", "qe2")
# The baked waveform is at this point I: [0.3, 0.3, 0.3, 0.3, 0.3]
# Q: [0.2, 0.2, 0.2, 0.3, 0.3]
with pytest.raises(
Exception,
match="too large for current baked samples length",
):
b.play_at(
"Op2", "qe2", t=-6
) # t indicates the time index where these new samples should be added
            # No waveform is produced: the t=-6 offset exceeds the current baked
            # samples length, so the play_at call above raises.
def test_negative_wait_too_large(config):
cfg = deepcopy(config)
with baking(config=cfg, padding_method="symmetric_r") as b:
const_Op = [0.3, 0.3, 0.3, 0.3, 0.3]
const_Op2 = [0.2, 0.2, 0.2, 0.3, 0.3]
b.add_op("Op1", "qe2", [const_Op, const_Op2]) # qe1 is a mixInputs element
Op3 = [0.1, 0.1, 0.1, 0.1]
Op4 = [0.1, 0.1, 0.1, 0.1]
b.add_op("Op2", "qe2", [Op3, Op4])
b.play("Op1", "qe2")
# The baked waveform is at this point I: [0.3, 0.3, 0.3, 0.3, 0.3]
# Q: [0.2, 0.2, 0.2, 0.3, 0.3]
with pytest.raises(
Exception,
match="too large for current baked samples length",
):
b.wait(-6, "qe2")
b.play(
"Op2", "qe2"
) # t indicates the time index where these new samples should be added
            # No waveform is produced: wait(-6) exceeds the current baked samples
            # length, so it raises before the play call executes.
def test_align_command(config):
cfg = deepcopy(config)
with baking(cfg) as b:
b.play("playOp", "qe1")
b.play("gaussOp", "qe2")
b.align()
assert b.get_op_length("qe2") == b.get_op_length("qe1")
with b:
b.play("playOp", "qe1")
b.play("gaussOp", "qe2")
b.align("qe1", "qe2")
assert b.get_op_length("qe2") == b.get_op_length("qe1")
def test_add_digital_wf(config):
cfg = deepcopy(config)
with baking(cfg) as b:
b.add_digital_waveform("dig_wf", [(1, 0)])
b.add_digital_waveform("dig_wf2", [(0, 25), (1, 13), (0, 12)])
b.add_op("Op2", "qe1", [0.2] * 80, digital_marker="dig_wf2")
b.add_op("Op", "qe1", [0.1, 0.1, 0.1], digital_marker="dig_wf")
b.play("Op", "qe1")
b.play("Op2", "qe1")
print(cfg["pulses"]["qe1_baked_pulse_0"])
print(cfg["waveforms"]["qe1_baked_wf_0"])
print(cfg["digital_waveforms"])
assert cfg["digital_waveforms"]["qe1_baked_digital_wf_0"]["samples"] == [
(1, 0),
(0, 25),
(1, 13),
(0, 12),
]
def test_constraint_length(config):
cfg = deepcopy(config)
with baking(cfg) as b:
b.add_op("Op", "qe1", [0.2] * 1000)
b.add_op("Op2", "qe2", [[0.2] * 700, [0.3] * 700])
b.play("Op", "qe1")
b.play("Op2", "qe2")
assert b.get_op_length() == 1000
with baking(cfg, baking_index=b.get_baking_index()) as b2:
b2.add_op("Op", "qe1", [0.2] * 300)
b2.add_op("Op2", "qe2", [[0.2] * 700, [0.3] * 700])
b2.play("Op", "qe1")
b2.play("Op2", "qe2")
assert b2.get_op_length() == 1000
assert b2.get_op_length("qe1") == 1000 == b2.get_op_length("qe2")
def test_low_sampling_rate(config):
cfg = deepcopy(config)
for i, rate in enumerate([0.1e9, 0.2e9, 0.34e9, 0.4234e9, 0.5e9, 0.788e9]):
with baking(config, sampling_rate=rate) as b:
b.add_op("Op2", "qe2", [[0.2] * 700, [0.3] * 700])
b.play("Op2", "qe2")
assert config["waveforms"][f"qe2_baked_wf_I_{i}"]["sampling_rate"] == int(rate)
def test_high_sampling_rate(config):
cfg = deepcopy(config)
for i, rate in enumerate([3e9, 2.546453e9, 8.7654e9, 1.234e9, 2e9, 4e9]):
with baking(config, sampling_rate=rate, padding_method="symmetric_r") as b:
b.play("gaussOp", "qe2")
print(b.get_current_length("qe2"))
        # TODO: add an assertion on the resulting baked length
def test_delete_samples_within_baking(config):
cfg = deepcopy(config)
with baking(cfg) as b:
b.add_op("Op2", "qe2", [[0.2] * | |
# catubc/MOTION
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import cv2, os, sys, glob
import scipy
import sklearn
import imageio
import matplotlib.cm as cm
import matplotlib
import time
from sklearn import decomposition, metrics, manifold, svm
from tsne import bh_sne
from matplotlib.path import Path
from numpy import linalg as LA
from scipy.signal import butter, filtfilt, cheby1
from scipy.spatial import distance
#**************************************************************************************************************************
#*************************************************CODE START***************************************************************
#**************************************************************************************************************************
class MOTION(object):
''' Class to detect motion in behaviour video;
self.crop() to select only part of video (speeds up analysis)
self.dimreduce() to reduce dimensionality of video and increase SNR
self.detect_motion() compute euclidean distance between frames and plots timecourse
'''
def __init__(self,filename):
print "...current session: ", filename
self.filename = filename
def crop(self):
''' Function crops the FOV for image registration (stable region) and area of interest
Also converts .avi -> .npy format for both stacks + entire original movie.
Currently only rGb channel saved; possibly might improve to average all
'''
#**************** SELECT CROPPED AREA TO TRACK MOTION (smaller is faster) **********************
#Load and save to disk # frames, frame rate and sample frame for cropping
if os.path.exists(os.path.split(self.filename)[0]+"/nframes.txt")==False:
camera = cv2.VideoCapture(self.filename)
self.frame_rate = camera.get(5)
ctr=0
print "reading frames"
while True:
print (ctr)
(grabbed, frame) = camera.read()
if not grabbed: break
ctr+=1
if ctr==100:
image_original = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
np.save(os.path.split(self.filename)[0]+"/original_image.npy", image_original)
self.n_frames=ctr
np.savetxt(os.path.split(self.filename)[0]+"/nframes.txt",[self.n_frames])
np.savetxt(os.path.split(self.filename)[0]+"/frame_rate.txt",[self.frame_rate])
cv2.destroyAllWindows()
camera.release()
else:
image_original = np.load(os.path.split(self.filename)[0]+"/original_image.npy")
self.n_frames = np.loadtxt(os.path.split(self.filename)[0]+"/nframes.txt",dtype='int32')
self.frame_rate = np.loadtxt(os.path.split(self.filename)[0]+"/frame_rate.txt",dtype='float32')
self.frame_xwidth = len(image_original); self.frame_ywidth = len(image_original[0])
#Run cropping functions on sample frame
self.crop_frame_box(image_original, motion_correct_flag=True) #DEFINE BOX AREAS FOR CROPPING; first define area for register
self.crop_frame_box(image_original, motion_correct_flag=False) #DEFINE BOX AREAS FOR CROPPING; first define area for register
#Convert original file and cropped to .npy
crop_registry = np.load(self.filename[:-4]+'_registry_cropped.npz')
self.x1_registry = crop_registry['x1']; self.x2_registry = crop_registry['x2']
self.y1_registry = crop_registry['y1']; self.y2_registry = crop_registry['y2']
crop_area = np.load(self.filename[:-4]+'_'+self.area+'_cropped.npz')
self.x1 = crop_area['x1']; self.x2 = crop_area['x2']
self.y1 = crop_area['y1']; self.y2 = crop_area['y2']
if os.path.exists(self.filename[:-4]+'_'+self.area+'_cropped.npy')==False:
print "... converting .avi -> .npy files (only Green channel) ..."
if os.path.exists(self.filename[:-4]+'.npy')==False:
original_frames = np.zeros((self.n_frames, self.frame_xwidth,self.frame_ywidth),dtype=np.uint8)
cropped_frames = np.zeros((self.n_frames, self.x2-self.x1,self.y2-self.y1),dtype=np.uint8)
registry_frames = np.zeros((self.n_frames, self.x2_registry-self.x1_registry,self.y2_registry-self.y1_registry),dtype=np.uint8)
camera = cv2.VideoCapture(self.filename)
ctr = 0
while True:
if ctr%1000==0: print " loading frame: ", ctr
if 'luis' in self.filename:
if ctr>15000:
print "...************ too many frames, exiting on 15000..."
break
(grabbed, frame) = camera.read()
if not grabbed: break
#Save copy of frame for .npy file
if os.path.exists(self.filename[:-4]+'.npy')==False:
original_frames[ctr]=frame[:,:,1] #Save green ch only
#original_frames.append(np.uint8(np.mean(frame, axis=2))) #Save average of RGB chans
#Crop FOV for analysis
cropped_frames[ctr]=frame[:,:,1][self.x1:self.x2,self.y1:self.y2]
#cropped_frames.append(np.uint8(np.mean(frame[self.x1:self.x2,self.y1:self.y2],axis=2)))
#Crop FOV for registry
registry_frames[ctr]=frame[:,:,1][self.x1_registry:self.x2_registry,self.y1_registry:self.y2_registry]
#registry_frames.append(np.uint8(np.mean(frame[self.x1_registry:self.x2_registry,self.y1_registry:self.y2_registry],axis=2)))
ctr+=1
#Save original movie in .npy format
if os.path.exists(self.filename[:-4]+'.npy')==False:
np.save(self.filename[:-4]+'.npy', original_frames) #This is the entire movie converted to .npy
#Save cropped movie area and registry area
np.save(self.filename[:-4]+'_'+self.area+'_cropped', cropped_frames) #just cropped movie
np.save(self.filename[:-4]+'_registry_cropped', registry_frames) #just cropped movie
def binarize_frames(self):
''' Reduce the size/dimensionality of the sample/frame by calling various functions
This also binarizes the frames (i.e. all vals are 0/1
TODO: Check this step, investigate if preserving more information in the would be hefpful
'''
        #area_filename = self.filename[:-4]+"_"+self.area+"_"+self.mode+".npy"
area_filename = self.filename[:-4]+"_"+self.area+"_cropped_registered_"+self.mode+".npy"
self.movie_filename = self.filename[:-4]+'.npy'
if os.path.exists(area_filename)==False:
frames = np.load(self.filename[:-4]+"_"+self.area+"_cropped_registered.npy")
rLow=100; rHigh=255
reduced_frames = []
contour_frames = []
edge_frames = []
frame_count = 0
for frame in frames:
if (frame_count%1000)==0: print " reducing frame: ", frame_count
#Crop frame before processing
#frame = frame[self.x1:self.x2,self.y1:self.y2]
if self.mode=='all':
reduced_frames.append(self.decimate(frame, frame_count, rLow, rHigh))
elif self.mode == 'contours':
contour_frames.append(self.find_contours(frame, frame_count, rLow, rHigh))
elif self.mode=='edges':
edge_frames.append(self.find_edges(frame, frame_count, rLow, rHigh))
frame_count += 1
cv2.waitKey(1)
cv2.destroyAllWindows()
cv2.waitKey(1)
if self.mode=='all':
np.save(area_filename, np.nan_to_num(reduced_frames))
self.decimated_frames = np.nan_to_num(reduced_frames)
elif self.mode=='contours':
np.save(area_filename, np.nan_to_num(contour_frames))
self.decimated_frames = np.nan_to_num(contour_frames)
elif self.mode=='edges':
np.save(area_filename, np.nan_to_num(edge_frames))
self.decimated_frames = np.nan_to_num(edge_frames)
else:
self.decimated_frames = np.load(area_filename,mmap_mode='c')
def detect_movement(self):
''' Detect movement as euclidean distance between frames
'''
print "... detecting movement ..."
if os.path.exists(self.filename[:-4]+"_diff_array.npy")==False:
self.compute_diff()
#Plot data
t = np.arange(len(self.diff_array))/(self.frame_rate)
plt.plot(t, self.diff_array)
#Plotting parameters
plt.xlim(0,t[-1])
plt.yticks([])
font_size = 20
plt.xlabel("Time (sec)", fontsize = font_size)
plt.ylabel("Movement index (a.u.)", fontsize = font_size)
plt.tick_params(axis='both', which='both', labelsize=font_size)
plt.title(self.filename, fontsize = font_size)
plt.show(block=True)
else:
self.diff_array = np.load(self.filename[:-4]+"_diff_array.npy")
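    # Note on detect_movement(): self.diff_array is expected to hold one value
    # per frame (a Euclidean distance between consecutive dimensionality-reduced
    # frames, produced by compute_diff(), which is not shown in this excerpt),
    # so the plot above is a per-frame movement index over time.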
def read_metadata(self, output):
decimated_filename = self.filename[:-4]+"_"+self.area+"_cropped_registered_"+self.mode+".npy"
n_frames = len(np.load(decimated_filename,mmap_mode='c'))
names = np.loadtxt(self.filename[:-4]+"_"+self.area+"_"+str(self.methods[self.method])+"_clusternames.txt",dtype='str')
print names
indexes = np.load(self.filename[:-4]+"_"+self.area+"_"+str(self.methods[self.method])+"_clusterindexes.npy")
#Licking
idx = np.where(names=='lick')[0]
if len(idx)!=0: output.lick_matrix[self.rt_ctr*output.scale:(self.rt_ctr+1)*output.scale,self.ses_ctr*output.scale:(self.ses_ctr+1)*output.scale]=len(indexes[idx][0])/float(n_frames)
else: output.lick_matrix[self.rt_ctr*output.scale:(self.rt_ctr+1)*output.scale,self.ses_ctr*output.scale:(self.ses_ctr+1)*output.scale]=0
#Pawing
idx = np.where(names=='paw')[0]
if len(idx)!=0: output.paw_matrix[self.rt_ctr*output.scale:(self.rt_ctr+1)*output.scale,self.ses_ctr*output.scale:(self.ses_ctr+1)*output.scale]=len(indexes[idx][0])/float(n_frames)
else: output.paw_matrix[self.rt_ctr*output.scale:(self.rt_ctr+1)*output.scale,self.ses_ctr*output.scale:(self.ses_ctr+1)*output.scale]=0
#Add scratching to pawing
idx = np.where(names=='scratch')[0]
if len(idx)!=0: output.scratch_matrix[self.rt_ctr*output.scale:(self.rt_ctr+1)*output.scale,self.ses_ctr*output.scale:(self.ses_ctr+1)*output.scale]=len(indexes[idx][0])/float(n_frames)
else: output.scratch_matrix[self.rt_ctr*output.scale:(self.rt_ctr+1)*output.scale,self.ses_ctr*output.scale:(self.ses_ctr+1)*output.scale]=0
data = np.load(glob.glob(os.path.split(self.filename)[0]+'/*_metadata.npz')[0])
if data['drift']=='y': self.drift=1
elif data['drift']=='n': self.drift=0
else: print "...exception..."; quit()
if data['spout_moved']=='y': self.spout_moved=1
elif data['spout_moved']=='n': self.spout_moved=0
else: print "...exception..."; quit()
if data['hand_inview']=='y': self.hand_inview=1
elif data['hand_inview']=='n': self.hand_inview=0
else: print "...exception..."; quit()
if data['camera_moved']=='y': self.camera_moved=1
elif data['camera_moved']=='n': self.camera_moved=0
else: print "...exception..."; quit()
output.drift_matrix[self.rt_ctr*output.scale:(self.rt_ctr+1)*output.scale,self.ses_ctr*output.scale:(self.ses_ctr+1)*output.scale]=self.drift
output.spout_matrix[self.rt_ctr*output.scale:(self.rt_ctr+1)*output.scale,self.ses_ctr*output.scale:(self.ses_ctr+1)*output.scale]=self.spout_moved
self.other_exclusion=data['other_exclusion']
return output
def load_frames(self, cluster_name):
names = np.loadtxt(self.filename[:-4]+"_"+self.area+"_"+str(self.methods[self.method])+"_clusternames.txt",dtype='str')
print names
indexes = np.load(self.filename[:-4]+"_"+self.area+"_"+str(self.methods[self.method])+"_clusterindexes.npy")
cluster_index = np.where(names ==cluster_name)[0]
if len(cluster_index)==0:
return None
cluster_indexes = indexes[cluster_index][0] #List of indexes for selected behaviour
#Load movie
self.movie_filename = self.filename[:-4]+'.npy'
enlarge = 100 #Make movie FOV larger than original cropping rectangle by 50pixels or so; otherwies difficult to see what's going on;
movie_array = np.load(self.movie_filename, mmap_mode='c')[:, max(0,self.x1-enlarge):self.x2+enlarge, max(0,self.y1-enlarge):self.y2+enlarge]
print movie_array.shape
if len(cluster_index)==0:
return movie_array[0]*0
#Randomly return one of these images;
return movie_array[np.random.choice(cluster_indexes)]
def save_metadata(self):
print self.filename[:-4]
metadata = []
drift = raw_input("Did camera drift ? (y/n) " )
spout_moved = raw_input("Was spout readjusted ? (y/n) " )
hand_inview = raw_input("Did hand enter the screen ? (y/n) " )
camera_moved = raw_input("Did camera move or otherwise jump ? (y/n) " )
other_exclusion = raw_input("Any thing else to note (y/n or type out) ")
np.savez(self.filename[:-4]+"_metadata.npz", drift=drift, spout_moved=spout_moved, hand_inview=hand_inview, camera_moved=camera_moved, other_exclusion=other_exclusion)
def annotate_frames(self):
''' Function to annotate frames in partially supervised fashion
Calls mupltiple functions
'''
#Subsample frames to further reduce dimensionality and speed up processing
if True: self.subsample_frames()
else: self.data_subsampled = self.decimated_frames
#Scale the frame information by some coefficient of movement
if False: self.scale_moving_frames()
#Run dim reduction
self.dimension_reduction()
#Filter transformed distributions to remove camera drift (usually)
if True:
self.filter_PCA(self.data_dim_reduction, filtering=True, plotting=True)
#Cluster data
self.cluster_methods = ['KMeans', 'MeanShift', 'DBSCAN', 'manual']
self.cluster_method = 3
self.cluster_data()
#Review clusters and re/cut them
#self.review_clusters()
self.export_clusters(recluster_flag=False)
def resplit_cluster(self, cluster):
''' Recluster previously split clusters...
'''
print "... resplitting cluster: ", cluster
#THIS NEEDS TO BE SIMPLIFIED
#Subsample frames to further reduce dimensionality and speed up processing
if True: self.subsample_frames()
else: self.data_subsampled = self.decimated_frames
#Scale the frame information by some coefficient of movement
if False: self.scale_moving_frames()
#Run dim reduction
self.dimension_reduction()
#Filter transformed distributions to remove camera drift (usually)
if True:
self.filter_PCA(self.data_dim_reduction, filtering=True, plotting=False)
self.load_clusters()
#Load clustered info
cluster_names = np.loadtxt(self.filename[:-4]+"_"+self.area+"_"+self.methods[self.method]+"_clusternames.txt", dtype='str')
cluster_indexes = np.load(self.filename[:-4]+"_"+self.area+"_"+self.methods[self.method]+"_clusterindexes.npy")
#Assign clusters to unique ids
cumulative_indexes=[]
unique_names = np.unique(self.cluster_names)
print self.cluster_names
print unique_names
unique_indexes = []
for ctr1, unique_name in enumerate(unique_names):
unique_indexes.append([])
for ctr, cluster_name in enumerate(self.cluster_names):
if unique_name==cluster_name:
unique_indexes[ctr1].extend(self.cluster_indexes[ctr])
cluster_id = np.where(unique_names==cluster)[0]
print "... cluster_id: ", cluster_id
self.unique_indexes = unique_indexes[cluster_id]
#Cluster data
self.cluster_methods = ['KMeans', 'MeanShift', 'DBSCAN', 'manual']
self.cluster_method = 3
self.cluster_data(indexes=unique_indexes[cluster_id]) #Send indexes for the selected cluster after collapsing over unique valus
def resave_clusters(self,indexes):
''' Load original cluster labels and re-adjust based on resplit cluster
'''
reclustered_id_indexes = np.int32(indexes)
print "... reclustered id indexes: ", len(reclustered_id_indexes)
#Load clustered info
original_cluster_names = np.loadtxt(self.filename[:-4]+"_"+self.area+"_"+self.methods[self.method]+"_clusternames.txt", dtype='str')
original_cluster_indexes = np.load(self.filename[:-4]+"_"+self.area+"_"+self.methods[self.method]+"_clusterindexes.npy")
#Delete the cluster that was just resplit
temp_index = np.where(original_cluster_names==self.recluster_id)[0]
#print "... reclustered id : ", temp_index
original_cluster_names = np.delete(original_cluster_names, temp_index,0)
original_cluster_indexes = np.delete(original_cluster_indexes, temp_index,0)
#Append new labels back in; first convert to lists, easier to work with due to variable length
cluster_names_array = []
for k in range(len(original_cluster_names)):
cluster_names_array.append(original_cluster_names[k])
#Add new labels back in from newly identified self.cluster_names
for k in range(len(self.cluster_names)):
cluster_names_array.append(self.cluster_names[k])
self.cluster_names = cluster_names_array
#Do the same with cluster indexes
cluster_indexes_array = []
for k in range(len(original_cluster_indexes)):
cluster_indexes_array.append(original_cluster_indexes[k])
#Add new labels back in ******************* NOTE: Indexes will be relative to the previously clustered indexes not 0
for k in range(len(self.cluster_indexes)):
print k, len(self.cluster_indexes[k]), len(reclustered_id_indexes)
print self.cluster_indexes[k]
cluster_indexes_array.append(reclustered_id_indexes[np.int32(self.cluster_indexes[k])])
self.cluster_indexes = cluster_indexes_array
print ".... check that all frames have been saved..."
print len(self.cluster_indexes)
#print np.unique(np.array(self.cluster_indexes))
#*****Re-assign clusters to unique ids after adding the new split cluster labels back in
cumulative_indexes=[]
unique_names = np.unique(self.cluster_names)
print "...reclustered data..."
print self.cluster_names
for k in range(len(self.cluster_indexes)):
print len(self.cluster_indexes[k])
print "\n\n... unique data..."
print unique_names
unique_indexes = []
for ctr1, unique_name in enumerate(unique_names):
unique_indexes.append([])
for ctr, cluster_name in enumerate(self.cluster_names):
if unique_name==cluster_name:
unique_indexes[ctr1].extend(self.cluster_indexes[ctr])
print len(unique_indexes[ctr1])
#cluster_id = np.where(unique_names==cluster)[0]
#print "... cluster_id: ", cluster_id
np.savetxt(self.filename[:-4]+"_"+self.area+"_"+self.methods[self.method]+"_clusternames_new.txt", unique_names,fmt='%s')
np.save(self.filename[:-4]+"_"+self.area+"_"+self.methods[self.method]+"_clusterindexes_new.npy", unique_indexes)
self.export_clusters(recluster_flag=True)
def manual_label(self):
filename_manuallabel = self.filename[:-4]+"_"+self.area+"_manuallabels.npz"
if os.path.exists(filename_manuallabel)==False:
plt.plot(self.diff_array)
mean = np.mean(self.diff_array)
top_cutoff = np.max(self.diff_array)*.55
bottom_cutoff = np.mean(self.diff_array)*.05
plt.plot([0,len(self.diff_array)],[top_cutoff,top_cutoff])
plt.plot([0,len(self.diff_array)],[bottom_cutoff,bottom_cutoff])
plt.show(block=True)
print "... limitting annotation to 50 events max..."
indexes = np.where(self.diff_array>top_cutoff)[0]
indexes = indexes[np.random.randint(len(indexes),size=50)]
print "... # frames: ", len(indexes)
indexes2 = np.where(self.diff_array<bottom_cutoff)[0]
indexes2 = indexes2[np.random.randint(len(indexes2),size=50)]
print "... # frames: ", len(indexes2)
indexes = np.hstack((indexes,indexes2))
print "... # total frames to annotate: ", len(indexes)
enlarge=100
movie_array = np.load(self.movie_filename, mmap_mode='c')[:, max(0,self.x1-enlarge):self.x2+enlarge, max(0,self.y1-enlarge):self.y2+enlarge]
#Select most active frames
data_ = movie_array[indexes]
border = 30
fontsize=15
classifier = np.array([0,0,0,0])
classifier_list = []
self.complete=False
for k,frame in enumerate(data_):
if self.complete==True: break #Exit by clicking outside the annotation box
#Make nice box around each frame to use for annotation
temp = np.zeros((frame.shape[0]+border,frame.shape[1]+border))
temp[:, :border/2]=100
temp[:, frame.shape[1]+border/2:]=150
temp[:border/2]=50
temp[frame.shape[0]+border/2:]=200
temp[border/2:frame.shape[0]+border/2,border/2:frame.shape[1]+border/2]=frame[:,:,1]
#Make plots
fig, ax = plt.subplots()
ax.imshow(temp)
self.cid = fig.canvas.mpl_connect('button_press_event', self.on_click_classify)
plt.suptitle("frame: "+str(k)+" / "+str(len(data_)),fontsize=fontsize)
plt.title("Lick: "+str(classifier[0]),fontsize=fontsize)
plt.xlabel("Stationary: "+str(classifier[1]),fontsize=fontsize)
plt.ylabel("Paw :"+str(classifier[2]),fontsize=fontsize)
plt.show(block=True)
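        # The statements below appear to be VSOP87-style series terms: each line
        # adds one periodic component of the form A * cos(phi + omega * t) to the
        # X2 coordinate, with self.t the time argument of the containing
        # ephemeris class (the beginning of the series is truncated in this excerpt).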
        X2 += 0.00000000021 * math.cos(2.63713620085 + 13656.10430865991 * self.t)
X2 += 0.00000000020 * math.cos(4.83742385337 + 33326.8225506577 * self.t)
X2 += 0.00000000018 * math.cos(1.65998530501 + 52156.38033973211 * self.t)
X2 += 0.00000000023 * math.cos(4.13597342584 + 41962.7645544209 * self.t)
X2 += 0.00000000018 * math.cos(6.07003180743 + 52389.3491960699 * self.t)
X2 += 0.00000000017 * math.cos(3.47243675916 + 58946.76070187749 * self.t)
X2 += 0.00000000016 * math.cos(1.64800690653 + 23754.95056618569 * self.t)
X2 += 0.00000000019 * math.cos(2.36847359761 + 39609.8984006491 * self.t)
X2 += 0.00000000017 * math.cos(0.25513432917 + 51536.15281431789 * self.t)
X2 += 0.00000000018 * math.cos(0.34702974356 + 18849.4713674577 * self.t)
X2 += 0.00000000015 * math.cos(0.95866304490 + 76674.88034692229 * self.t)
X2 += 0.00000000015 * math.cos(1.75514605179 + 48733.47515566649 * self.t)
X2 += 0.00000000015 * math.cos(4.34995902728 + 853.4401992355 * self.t)
X2 += 0.00000000014 * math.cos(3.61473044000 + 82815.90673926489 * self.t)
X2 += 0.00000000014 * math.cos(5.02500731242 + 52179.9314359899 * self.t)
X2 += 0.00000000014 * math.cos(1.69891784853 + 15874.8614128467 * self.t)
X2 += 0.00000000014 * math.cos(2.36729572184 + 66941.2891439017 * self.t)
X2 += 0.00000000015 * math.cos(5.49473150242 + 16984.2399649401 * self.t)
X2 += 0.00000000015 * math.cos(5.05704278229 + 38654.2986590405 * self.t)
X2 += 0.00000000013 * math.cos(0.64687702919 + 53765.1229959157 * self.t)
X2 += 0.00000000013 * math.cos(1.62167132693 + 208702.98131511007 * self.t)
X2 += 0.00000000013 * math.cos(3.82178316362 + 50057.2862402535 * self.t)
X2 += 0.00000000016 * math.cos(2.05228361219 + 51646.3591355373 * self.t)
X2 += 0.00000000012 * math.cos(3.97912977913 + 28421.3433519297 * self.t)
X2 += 0.00000000013 * math.cos(4.04493324307 + 155418.28411483529 * self.t)
X2 += 0.00000000016 * math.cos(0.65996554595 + 55618.6250455973 * self.t)
X2 += 0.00000000012 * math.cos(3.62569930460 + 5661.5758666357 * self.t)
X2 += 0.00000000015 * math.cos(0.75515577986 + 52195.71986153169 * self.t)
X2 += 0.00000000014 * math.cos(2.81046707561 + 26514.7451499337 * self.t)
X2 += 0.00000000014 * math.cos(3.30095837282 + 110013.18843293248 * self.t)
X2 += 0.00000000013 * math.cos(0.56442607871 + 213.0552779545 * self.t)
X2 += 0.00000000014 * math.cos(2.10561918260 + 26728.0442453717 * self.t)
X2 += 0.00000000013 * math.cos(3.66951257967 + 52172.1687652739 * self.t)
X2 += 0.00000000011 * math.cos(5.46295418561 + 63498.71419893629 * self.t)
X2 += 0.00000000011 * math.cos(0.61230507574 + 79219.55298381469 * self.t)
X2 += 0.00000000010 * math.cos(4.66712610746 + 103292.47445359109 * self.t)
X2 += 0.00000000011 * math.cos(5.20556952105 + 31748.99137324289 * self.t)
X2 += 0.00000000014 * math.cos(3.33453776277 + 6283.3196674749 * self.t)
X2 += 0.00000000010 * math.cos(0.83705988710 + 52290.48938931711 * self.t)
X2 += 0.00000000012 * math.cos(2.60449391926 + 13521.50762410789 * self.t)
X2 += 0.00000000009 * math.cos(3.03188275218 + 35192.0539531753 * self.t)
X2 += 0.00000000009 * math.cos(1.24305000805 + 76045.1961380193 * self.t)
X2 += 0.00000000011 * math.cos(5.89042281055 + 62389.33564684289 * self.t)
X2 += 0.00000000010 * math.cos(4.16418363493 + 51749.45190975589 * self.t)
X2 += 0.00000000009 * math.cos(4.66321261687 + 124778.42747620228 * self.t)
X2 += 0.00000000009 * math.cos(0.22808750229 + 339142.98465794802 * self.t)
X2 += 0.00000000009 * math.cos(4.75513146544 + 78244.28348130629 * self.t)
X2 += 0.00000000009 * math.cos(2.64262688750 + 20426.32727493849 * self.t)
X2 += 0.00000000011 * math.cos(2.92721365751 + 38520.1896094555 * self.t)
X2 += 0.00000000010 * math.cos(3.30244477369 + 16983.75232997309 * self.t)
X2 += 0.00000000009 * math.cos(1.16009282528 + 1109.6223695769 * self.t)
X2 += 0.00000000009 * math.cos(4.65609022876 + 52061.61081194667 * self.t)
X2 += 0.00000000009 * math.cos(2.15470374859 + 104881.54734887488 * self.t)
X2 += 0.00000000008 * math.cos(1.58129311338 + 68050.66769599509 * self.t)
X2 += 0.00000000008 * math.cos(4.01456932062 + 2333.44021035551 * self.t)
X2 += 0.00000000008 * math.cos(0.50473636519 + 12431.79883291429 * self.t)
X2 += 0.00000000008 * math.cos(4.71148146957 + 28206.9108194361 * self.t)
X2 += 0.00000000008 * math.cos(0.22428318331 + 136101.09157450669 * self.t)
X2 += 0.00000000008 * math.cos(5.13855904986 + 213.5429129215 * self.t)
X2 += 0.00000000008 * math.cos(1.53446312549 + 24499.0740637739 * self.t)
X2 += 0.00000000008 * math.cos(0.19632874417 + 103396.25664217169 * self.t)
X2 += 0.00000000007 * math.cos(4.83084149979 + 52168.93655363109 * self.t)
X2 += 0.00000000009 * math.cos(0.68229010552 + 21535.70582703189 * self.t)
X2 += 0.00000000007 * math.cos(5.67305147777 + 53284.94101775829 * self.t)
X2 += 0.00000000007 * math.cos(4.91493686906 + 105461.23493587368 * self.t)
X2 += 0.00000000007 * math.cos(2.13257749573 + 69160.04624808849 * self.t)
X2 += 0.00000000009 * math.cos(0.17767669866 + 7994.77225950771 * self.t)
X2 += 0.00000000009 * math.cos(3.44284310428 + 73711.99974514729 * self.t)
X2 += 0.00000000008 * math.cos(1.40334354171 + 59414.7256922319 * self.t)
X2 += 0.00000000008 * math.cos(3.92015227538 + 52183.1636476327 * self.t)
X2 += 0.00000000007 * math.cos(1.95913527567 + 64742.2018006147 * self.t)
X2 += 0.00000000007 * math.cos(1.83770637593 + 78267.83457756409 * self.t)
X2 += 0.00000000007 * math.cos(5.50637575808 + 26107.81671995749 * self.t)
X2 += 0.00000000007 * math.cos(3.29290677599 + 26068.47719815791 * self.t)
X2 += 0.00000000006 * math.cos(2.48089512320 + 25028.7650288685 * self.t)
X2 += 0.00000000007 * math.cos(6.10010912620 + 51962.7510051939 * self.t)
X2 += 0.00000000007 * math.cos(5.20852269117 + 18208.05780571871 * self.t)
X2 += 0.00000000008 * math.cos(3.20036201627 + 44937.3745090319 * self.t)
X2 += 0.00000000008 * math.cos(1.81720177562 + 51066.18391357149 * self.t)
X2 += 0.00000000006 * math.cos(5.72654755255 + 105411.23831396949 * self.t)
X2 += 0.00000000007 * math.cos(4.86085983191 + 65697.80154222329 * self.t)
X2 += 0.00000000006 * math.cos(2.84309765149 + 88477.23878841709 * self.t)
X2 += 0.00000000006 * math.cos(2.90025697766 + 78477.25233764409 * self.t)
X2 += 0.00000000006 * math.cos(6.10721529063 + 52602.6482915079 * self.t)
X2 += 0.00000000007 * math.cos(3.76564310534 + 78283.62300310588 * self.t)
X2 += 0.00000000006 * math.cos(2.54939515992 + 19805.0711090663 * self.t)
X2 += 0.00000000005 * math.cos(0.40790961084 + 108903.80988083909 * self.t)
X2 += 0.00000000006 * math.cos(2.08187740275 + 129380.37759516528 * self.t)
X2 += 0.00000000005 * math.cos(4.74517813371 + 74821.37829724069 * self.t)
X2 += 0.00000000007 * math.cos(5.47163902652 + 45892.9742506405 * self.t)
X2 += 0.00000000005 * math.cos(3.96385649366 + 1059.1381127057 * self.t)
X2 += 0.00000000005 * math.cos(2.11024903764 + 26084.2656236997 * self.t)
X2 += 0.00000000005 * math.cos(0.40769424239 + 26092.0282944157 * self.t)
X2 += 0.00000000007 * math.cos(4.95181465375 + 71493.2426409605 * self.t)
X2 += 0.00000000005 * math.cos(3.72498845601 + 81706.5281871715 * self.t)
X2 += 0.00000000005 * math.cos(1.28192740378 + 51322.85371887989 * self.t)
X2 += 0.00000000005 * math.cos(0.38902168011 + 33968.23611239669 * self.t)
X2 += 0.00000000005 * math.cos(1.36259031496 + 78256.83969520529 * self.t)
X2 += 0.00000000005 * math.cos(2.94315074391 + 26617.35028918529 * self.t)
X2 += 0.00000000006 * math.cos(0.40666986327 + 78260.07190684808 * self.t)
X2 += 0.00000000004 * math.cos(3.59234736829 + 25661.5487681817 * self.t)
X2 += 0.00000000005 * math.cos(5.20687247513 + 94138.57083756929 * self.t)
X2 += 0.00000000004 * math.cos(5.73436372945 + 22645.08437912529 * self.t)
X2 += 0.00000000005 * math.cos(5.43911125012 + 93029.19228547589 * self.t)
X2 += 0.00000000004 * math.cos(1.51966869369 + 22760.01130277751 * self.t)
X2 += 0.00000000004 * math.cos(4.11367481672 + 29416.28261533789 * self.t)
X2 += 0.00000000004 * math.cos(0.33419603419 + 76145.18938182769 * self.t)
X2 += 0.00000000004 * math.cos(0.81789657863 + 181506.18725640948 * self.t)
X2 += 0.00000000004 * math.cos(5.87548494754 + 13521.9952590749 * self.t)
X2 += 0.00000000004 * math.cos(0.98530676575 + 99799.90288672148 * self.t)
X2 += 0.00000000004 * math.cos(3.64991710996 + 79853.02613748988 * self.t)
X2 += 0.00000000005 * math.cos(5.51168134551 + 44181.52165860769 * self.t)
X2 += 0.00000000004 * math.cos(4.17969593700 + 4551.7096795753 * self.t)
X2 += 0.00000000004 * math.cos(1.07013306418 + 32371.2228090491 * self.t)
X2 += 0.00000000004 * math.cos(4.56317466224 + 32858.36992533629 * self.t)
X2 += 0.00000000004 * math.cos(0.76064444657 + 54509.2464935039 * self.t)
X2 += 0.00000000004 * math.cos(5.34376558738 + 155468.28073673949 * self.t)
X2 += 0.00000000003 * math.cos(3.33102397097 + 162188.99471608088 * self.t)
X2 += 0.00000000003 * math.cos(1.90784099869 + 24498.58642880689 * self.t)
X2 += 0.00000000005 * math.cos(0.70628118340 + 78271.06678920689 * self.t)
X2 += 0.00000000004 * math.cos(4.75154050928 + 234790.88445668427 * self.t)
X2 += 0.00000000003 * math.cos(5.20128667447 + 77734.26227711148 * self.t)
X2 += 0.00000000004 * math.cos(4.16379409199 + 7879.84533585549 * self.t)
X2 += 0.00000000004 * math.cos(5.89383677298 + 64608.09275102969 * self.t)
X2 += 0.00000000004 * math.cos(4.51512332677 + 51116.18053547569 * self.t)
X2 += 0.00000000003 * math.cos(1.82486201671 + 28306.41642827749 * self.t)
X2 += 0.00000000004 * math.cos(5.71282679191 + 39744.0074502341 * self.t)
X2 += 0.00000000003 * math.cos(1.53805667122 + 104332.1866228805 * self.t)
X2 += 0.00000000004 * math.cos(3.89140851351 + 80482.71034639288 * self.t)
X2 += 0.00000000003 * math.cos(1.85783050998 + 37410.32342239509 * self.t)
X2 += 0.00000000004 * math.cos(1.55416627058 + 24864.32911827909 * self.t)
X2 += 0.00000000003 * math.cos(2.39011669525 + 103822.16541868569 * self.t)
X2 += 0.00000000004 * math.cos(4.09022608915 + 27311.96479983631 * self.t)
X2 += 0.00000000003 * math.cos(2.12318372488 + 120226.47397914348 * self.t)
X2 += 0.00000000003 * math.cos(4.30318589016 + 102133.09927959349 * self.t)
X2 += 0.00000000003 * math.cos(1.44971105205 + 150866.33061777649 * self.t)
X2 += 0.00000000003 * math.cos(5.04167948013 + 90830.10494218889 * self.t)
X2 += 0.00000000003 * math.cos(3.27662875658 + 77624.05595589208 * self.t)
X2 += 0.00000000003 * math.cos(3.21324303917 + 129484.15978374588 * self.t)
X2 += 0.00000000003 * math.cos(4.58517957507 + 125887.80602829569 * self.t)
X2 += 0.00000000003 * math.cos(5.16830743449 + 130969.45049044909 * self.t)
X2 += 0.00000000003 * math.cos(4.95343110449 + 58459.1259506233 * self.t)
import os
from pathlib import Path
from io import BytesIO
import time
import re
import hashlib
import threading
from functools import wraps
import logging
import mimetypes
import urllib.request as request
import http.cookiejar as http_cookiejar
from http import HTTPStatus
# import ssl
# ssl._create_default_https_context = ssl._create_unverified_context # Disable context for gismap.by
from PIL import Image
from twms import projections
from twms import config
DEFAULT_HEADERS = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:84.0) Gecko/20100101 Firefox/84.0",
"Connection": "Keep-Alive"}
def prepare_opener(tries=4, delay=3, backoff=2, headers=dict()):
"""Build HTTP opener with custom headers (User-Agent) and cookie support.
Retry HTTP request using an exponential backoff:
* Retry only on network issues
    * Raise HTTPError immediately, so the caller can handle it explicitly
https://wiki.python.org/moin/PythonDecoratorLibrary#Retry
http://www.katasonov.com/ru/2014/10/python-urllib2-decorators-and-exceptions-fun/
:param int tries: number of times to try (not retry) before giving up
:param int delay: initial delay between retries in seconds
:param int backoff: backoff multiplier e.g. value of 2 will double the
delay each retry
:param dict headers: Update opener headers (add new and spoof existing).
    :returns: a callable wrapping OpenerDirector.open; calling it returns http.client.HTTPResponse
"""
cj = http_cookiejar.CookieJar()
# if use_proxy:
# proxy_info = {
# 'user': 'login',
# 'pass': '<PASSWORD>',
# 'host': "proxyaddress",
# 'port': 8080}
# proxy_support = urllib.request.ProxyHandler({
# "http": "http://%(user)s:%(pass)s@%(host)s:%(port)d" % proxy_info})
# opener = urllib.request.build_opener(
# urllib.request.HTTPCookieProcessor(cj),
# # urllib2.HTTPHandler(debuglevel=1), # Debug output
# proxy_support)
opener = request.build_opener(request.HTTPCookieProcessor(cj))
hdrs = {**DEFAULT_HEADERS, **headers}
opener.addheaders = list(hdrs.items())
@wraps(opener.open)
def retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return opener.open(*args, **kwargs)
except request.HTTPError as err:
# Prevent catching HTTPError as subclass of URLError
# logging.error(err)
raise
except request.URLError as err:
logging.debug(f"{err}, retrying '{args[0]}' in {mdelay} seconds...")
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
return opener.open(*args, **kwargs)
return retry
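# Illustrative usage (a sketch; the URL and header values are placeholders):
#   opener = prepare_opener(tries=3, delay=1, headers={"Referer": "https://example.com/"})
#   resp = opener("https://example.com/tiles/1/0/0.png", timeout=30)
#   data = resp.read()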
class TileFetcher(object):
def __init__(self, layer):
self.layer = layer
self.opener = prepare_opener(headers=self.layer.get('headers', dict()))
self.fetching_now = {}
        self.thread_responses = {}  # Single dict operations are atomic under CPython's GIL
self.zhash_lock = {}
def fetch(self, z, x, y):
"""Return None if no image can be served."""
zhash = repr((z, x, y, self.layer))
try:
self.zhash_lock[zhash] += 1
except KeyError:
self.zhash_lock[zhash] = 1
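        # Deduplicate concurrent requests: only the first caller for a given
        # (z, x, y, layer) spawns a worker thread, later callers join the same
        # thread; zhash_lock reference-counts the waiters so the shared state
        # is cleaned up by the last one to leave.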
if zhash not in self.fetching_now:
thread = threading.Thread(
None, self.threadworker, None, (z, x, y, zhash))
thread.start()
self.fetching_now[zhash] = thread
if self.fetching_now[zhash].is_alive():
self.fetching_now[zhash].join()
resp = self.thread_responses[zhash]
self.zhash_lock[zhash] -= 1
if not self.zhash_lock[zhash]:
del self.thread_responses[zhash]
del self.fetching_now[zhash]
del self.zhash_lock[zhash]
return resp
def threadworker(self, z, x, y, zhash):
f_names = ('tms', 'wms', 'tms_google_sat')
if self.layer['fetch'] not in f_names:
raise ValueError("fetch must be " + ', '.join(f_names))
# Call fetcher by it's name
self.thread_responses[zhash] = getattr(self, self.layer['fetch'])(z, x, y)
def wms(self, z, x, y):
"""Use tms instead.
Possible features to implement:
* TNE based on histogram
* Big tile request (e.g. 512x512)
Leave possibility to request arbitrary (other than cache 'proj')
projection from WMS by 'wms_proj' parameter, as server may be broken.
"""
tile_id = f"{self.layer['prefix']} z{z}/x{x}/y{y}"
if 'max_zoom' in self.layer and z > self.layer['max_zoom']:
logging.debug(f"{tile_id}: zoom limit")
return None
req_proj = self.layer.get("wms_proj", self.layer["proj"])
width = 256 # Using larger source size to rescale better in python
height = 256
tile_bbox = "{},{},{},{}".format(*projections.from4326(
projections.bbox_by_tile(z, x, y, req_proj), req_proj))
remote = self.layer['remote_url'].replace('{bbox}', tile_bbox)
remote = remote.replace('{width}', str(width))
remote = remote.replace('{height}', str(height))
remote = remote.replace('{proj}', req_proj)
# MOBAC cache path style
tile_path = config.tiles_cache + self.layer["prefix"] + "/{:.0f}/{:.0f}/{:.0f}{}".format(z, x, y, self.layer['ext'])
partial_path, ext = os.path.splitext(tile_path) # '.ext' with leading dot
tne_path = partial_path + '.tne'
os.makedirs(os.path.dirname(tile_path), exist_ok=True)
if 'cache_ttl' in self.layer:
for ex in (ext, '.dsc' + ext, '.ups' + ext, '.tne'):
fp = partial_path + ex
if os.path.exists(fp):
if os.stat(fp).st_mtime < (time.time() - self.layer["cache_ttl"]):
os.remove(fp)
logging.info(f"wms: fetching z{z}/x{x}/y{y} {self.layer['name']} {remote}")
im_bytes = self.opener(remote).read()
if im_bytes:
im = Image.open(BytesIO(im_bytes))
else:
return None
        if width != 256 or height != 256:  # downscale if the requested source size differs from 256x256
im = im.resize((256, 256), Image.ANTIALIAS)
im = im.convert("RGBA")
ic = Image.new("RGBA", (256, 256), self.layer.get("empty_color", config.default_background))
if im.histogram() == ic.histogram():
logging.debug(f"{tile_id}: TNE - empty histogram '{tne_path}'")
            Path(tne_path).touch()
return None
im.save(tile_path)
return im
def tms(self, z, x, y):
"""Fetch tile by coordinates, r/w cache.
Function fetches image, checks it validity and detects actual
image format (ignores server Content-Type). All tiles with
Content-Type not matching default for this layer will be
converted before saving to cache.
TNE - tile not exist (got HTTP 404 or default tile for empty zones aka "dead tile")
Cache is structured according to tile coordinates.
Actual tile image projection specified in config file.
MBTiles
https://wiki.openstreetmap.org/wiki/MBTiles
https://github.com/mapbox/mbtiles-spec
https://docs.mapbox.com/help/glossary/mbtiles/
:rtype: :py:class:`~PIL.Image.Image`. Otherwise None, if
no image can be served from cache or from remote.
"""
need_fetch = False
tile_parsed = False
tile_dead = False
tile_id = f"{self.layer['prefix']} z{z}/x{x}/y{y}"
target_mimetype = mimetypes.types_map[self.layer['ext']]
        remote = ''
        remote_bytes = b''  # keep defined for the dead-tile check even if the fetch fails
if 'max_zoom' in self.layer and z > self.layer['max_zoom']:
logging.debug(f"{tile_id}: zoom limit")
return None
# MOBAC cache path style
tile_path = config.tiles_cache + self.layer['prefix'] + "/{:.0f}/{:.0f}/{:.0f}{}".format(z, x, y, self.layer['ext'])
partial_path, ext = os.path.splitext(tile_path) # '.ext' with leading dot
tne_path = partial_path + '.tne'
os.makedirs(os.path.dirname(tile_path), exist_ok=True)
# Do not delete, only replace if tile exists!
if os.path.exists(tne_path):
tne_lifespan = time.time() - os.stat(tne_path).st_mtime
if tne_lifespan > config.cache_tne_ttl:
logging.info(f"{tile_id}: TTL tne reached {tne_path}")
need_fetch = True
else:
logging.info(f"{tile_id}: tile cached as TNE {tne_path}")
if 'cache_ttl' in self.layer:
# for ex in (ext, '.dsc.' + ext, '.ups.' + ext, '.tne'):
if os.path.exists(tile_path):
tile_lifespan = time.time() - os.stat(tile_path).st_mtime
# tile_lifespan_h = tile_lifespan / 60 / 60
# logging.debug(f"{tile_id}: lifespan {tile_lifespan_h:.0f} h {fp}")
if tile_lifespan > self.layer["cache_ttl"]:
logging.info(f"{tile_id}: TTL tile reached for {tile_path}")
need_fetch = True
if not os.path.exists(tile_path) and not os.path.exists(tne_path):
need_fetch = True
# Fetching image
if need_fetch and 'remote_url' in self.layer:
if 'transform_tile_number' in self.layer:
trans_z, trans_x, trans_y = self.layer['transform_tile_number'](z, x, y)
else:
trans_z, trans_x, trans_y = z, x, y
# Placeholder substitution
# TMS
remote = self.layer['remote_url'].replace('{z}', str(trans_z))
remote = remote.replace('{x}', str(trans_x))
remote = remote.replace('{y}', str(trans_y))
remote = remote.replace('{-y}', str(tile_slippy_to_tms(trans_z, trans_x, trans_y)[2]))
remote = remote.replace('{q}', tile_to_quadkey(trans_z, trans_x, trans_y)) # Bing
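            # The {q} placeholder above is for Bing-style quadkeys: the bits of x and y
            # are interleaved into one base-4 digit per zoom level, e.g. z=3, x=3, y=5
            # gives the quadkey "213" (tile_to_quadkey is assumed to implement this).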
# WMS, no real difference with TMS except missing *.tne feature
width = 256
height = 256
tile_bbox = "{},{},{},{}".format(*projections.from4326(
projections.bbox_by_tile(z, x, y, self.layer['proj']), self.layer['proj']))
remote = remote.replace('{bbox}', tile_bbox)
remote = remote.replace('{width}', str(width))
remote = remote.replace('{height}', str(height))
remote = remote.replace('{proj}', self.layer["proj"])
try:
# Got response, need to verify content
logging.info(f"{tile_id}: FETCHING {remote}")
remote_resp = self.opener(remote)
remote_bytes = remote_resp.read()
if remote_bytes:
try:
im = Image.open(BytesIO(remote_bytes))
im.load() # Validate image
tile_parsed = True
except (OSError, AttributeError):
# Catching invalid pictures
logging.error(f"{tile_id}: failed to parse response as image {tne_path}")
logging.debug(f"{tile_id}: invalid image {remote_resp.status}: {remote_resp.msg} - {remote_resp.reason} {remote_resp.url}\n{remote_resp.headers}")
# try:
# logging.debug(remote_bytes.decode('utf-8'))
# except UnicodeDecodeError:
# logging.debug(remote_bytes)
# if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
# with open('err.htm', mode='wb') as f:
# f.write(remote_bytes)
else:
logging.warning(f"{tile_id}: empty response")
except request.HTTPError as err:
# Heuristic: TNE or server is defending tiles
# HTTP 403 must be inspected manually
logging.error('\n'.join([str(k) for k in (err, err.headers, err.read().decode('utf-8'))]))
if err.status == HTTPStatus.NOT_FOUND:
logging.warning(f"{tile_id}: TNE - {err} '{tne_path}'")
                    Path(tne_path).touch()
except request.URLError as err:
# Nothing we can do: no connection, cannot guess TNE or not
logging.error(f"{tile_id} URLError '{err}'")
# Save something in cache
# Sometimes server returns file instead of empty HTTP response
if 'dead_tile' in self.layer:
# Compare bytestring with dead tile hash
if len(remote_bytes) == self.layer['dead_tile']['size']:
hasher = hashlib.md5()
hasher.update(remote_bytes)
if hasher.hexdigest() == self.layer['dead_tile']['md5']:
# Tile is recognized as empty
# An example http://ecn.t0.tiles.virtualearth.net/tiles/a120210103101222.jpeg?g=0
# SASPlanet writes empty files with '.tne' ext
logging.warning(f"{tile_id}: TNE - dead tile '{tne_path}'")
tile_dead = True
                        Path(tne_path).touch()
logging.debug(f"tile parsed {tile_parsed}, dead {tile_dead}")
if tile_parsed and not tile_dead:
# All well, save tile to cache
logging.info(f"{tile_id}: saving {tile_path}")
# Preserving original image if possible, as encoding is lossy
# Storing all images into one format, just like SAS.Planet does
if im.get_format_mimetype() != target_mimetype:
logging.warning(f"{tile_id} unexpected image Content-Type {im.get_format_mimetype()}, converting to '{target_mimetype}'")
image_bytes = im_convert(im, target_mimetype)
else:
image_bytes = remote_bytes
with open(tile_path, 'wb') as f:
f.write(image_bytes)
if os.path.exists(tne_path):
os.remove(tne_path)
return im
# If TTL is ok or fetching failed
if os.path.exists(tile_path):
try:
im = Image.open(tile_path)
im.load()
logging.info(f"{tile_id}: cache tms {tile_path}")
return im
except OSError:
logging.warning(f"{tile_id}: failed to parse image from cache '{tile_path}'")
# os.remove(tile_path) # Cached tile is broken - remove it
logging.warning(f"{tile_id}: unreachable tile {remote}")
def tms_google_sat(self, z, x, y):
# The MIT License (MIT)
# Copyright (c) 2018 - Universidad del Cauca, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
import paperUtils
import paperSave
import globalVar
import os
import matplotlib.pyplot as plt
import numpy as np
import graphUtils
import sys
import re
import time
from PIL import Image
class ScientoPyClass:
def __init__(self, from_gui=False):
# Parameters variables
self.criterion = 'authorKeywords'
self.graphType = 'bar_trends'
self.length = 10
self.skipFirst = 0
self.topics = ''
self.startYear = globalVar.DEFAULT_START_YEAR
self.endYear = globalVar.DEFAULT_END_YEAR
self.savePlot = ''
self.noPlot = False
self.agrForGraph = False
self.wordCloudMask = ''
self.windowWidth = 2
self.previousResults = False
self.onlyFirst = False
self.graphTitle = ''
self.pYear = False
self.plotWidth = globalVar.DEFAULT_PLOT_WIDTH
self.plotHeight = globalVar.DEFAULT_PLOT_HEIGHT
self.trend = False
self.yLog = False
self.filter = ""
self.fromGui = from_gui
# Working variables
self.papersDict = []
self.resultsFileName = ''
self.extResultsFileName = ''
self.lastPreviousResults = ''
self.preprocessBriefFileName = os.path.join(globalVar.DATA_OUT_FOLDER, globalVar.PREPROCESS_LOG_FILE)
self.preprocessDatasetFile = os.path.join(globalVar.DATA_OUT_FOLDER, globalVar.OUTPUT_FILE_NAME)
self.topicResults = []
self.yearArray = []
self.startYearIndex = 0
self.endYearIndex = 0
def closePlot(self):
plt.close()
def scientoPy(self, args=''):
globalVar.cancelProcess = False
globalVar.progressText = "Reading dataset"
globalVar.progressPer = 0
# To let progress bar open
if self.fromGui:
time.sleep(0.01)
if args == '':
args = self
print("\n\nScientoPy: %s" % (globalVar.SCIENTOPY_VERSION))
print("================\n")
# Check python version
if sys.version_info[0] < 3:
print("ERROR, you are using Python 2, Python 3.X.X required")
print("")
exit()
# Validate window Width
if args.windowWidth < 1:
print("ERROR: minimum windowWidth 1")
exit()
# Validate start and end years
if args.startYear > args.endYear:
print("ERROR: startYear > endYear")
exit()
# Create output folders if not exist
if not os.path.exists(os.path.join(globalVar.GRAPHS_OUT_FOLDER)):
os.makedirs(os.path.join(globalVar.GRAPHS_OUT_FOLDER))
if not os.path.exists(os.path.join(globalVar.RESULTS_FOLDER)):
os.makedirs(os.path.join(globalVar.RESULTS_FOLDER))
# Select the input file
if args.previousResults:
INPUT_FILE = os.path.join(globalVar.RESULTS_FOLDER, globalVar.OUTPUT_FILE_NAME)
else:
INPUT_FILE = os.path.join(globalVar.DATA_OUT_FOLDER, globalVar.OUTPUT_FILE_NAME)
# Start the output list empty
papersDictOut = []
topicList = []
loadDataSet = False
if len(self.papersDict) == 0 or args.previousResults:
loadDataSet = True
if args.previousResults == False and self.lastPreviousResults == True:
loadDataSet = True
# Open the dataset only if not loaded in papersDict
if loadDataSet:
self.papersDict = []
self.lastPreviousResults = args.previousResults
            # Open the storage database and add it to self.papersDict
if not os.path.isfile(INPUT_FILE):
print("ERROR: %s file not found" % INPUT_FILE)
print("Make sure that you have run the preprocess step before run scientoPy")
exit()
ifile = open(INPUT_FILE, "r", encoding='utf-8')
print("Reading file: %s" % (INPUT_FILE))
globalVar.progressPer = 10
paperUtils.openFileToDict(ifile, self.papersDict)
ifile.close()
if globalVar.cancelProcess:
return
# If reading previous results, remove possible duplicated from multiple topics
if args.previousResults:
self.papersDict= paperUtils.removeDuplicates(self.papersDict)
print("Scopus papers: %s" % globalVar.papersScopus)
print("WoS papers: %s" % globalVar.papersWoS)
print("Omitted papers: %s" % globalVar.omitedPapers)
print("Total papers: %s" % len(self.papersDict))
# Create a self.yearArray
self.yearArray = range(args.startYear, args.endYear + 1)
yearPapers = {}
for i in range(args.startYear, args.endYear + 1):
yearPapers[i] = 0
# Filter papers with invalid year
self.papersDict = list(filter(lambda x: x["year"].isdigit(), self.papersDict))
# Filter the papers outside the year range
papersDictInside = self.papersDict.copy()
papersDictInside = list(filter(lambda x: int(x["year"]) >= args.startYear, papersDictInside))
papersDictInside = list(filter(lambda x: int(x["year"]) <= args.endYear, papersDictInside))
print("Total papers in range (%s - %s): %s" %
(args.startYear, args.endYear, len(papersDictInside)))
# If no papers in the range exit
if (len(papersDictInside) == 0):
print("ERROR: no papers found in the range.")
del papersDictInside
return
# Find the number of total papers per year
for paper in papersDictInside:
if int(paper["year"]) in yearPapers.keys():
yearPapers[int(paper["year"])] += 1
# Get the filter options
filterSubTopic = ""
if args.filter:
filterSubTopic = args.filter.strip()
print("Filter Sub Topic: %s" % filterSubTopic)
# Parse custom topics
if args.topics:
print("Custom topics entered:")
# Divide the topics by ;
topicsFirst = args.topics.split(";")
for x in topicsFirst:
topicList.append(x.split(","))
# Remove beginning and ending space from topics, and empty topics
for topic in topicList:
for idx, item in enumerate(topic):
topic[idx] = item.strip()
if not topic[idx]:
topic.remove(topic[idx])
if not topic:
topicList.remove(topic)
            # Strip start and end spaces from each sub topic (in place)
            for item1 in topicList:
                for idx2, item2 in enumerate(item1):
                    item1[idx2] = item2.strip()
for topic in topicList:
print(topic)
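            # e.g. args.topics = "wsn, wireless sensor networks; iot" is parsed into
            # [['wsn', 'wireless sensor networks'], ['iot']]; terms inside one inner
            # list are counted together as a single topic.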
# Find the top topics
else:
print("Finding the top topics...")
globalVar.progressPer = 30
globalVar.progressText = "Finding the top topics"
topicDic = {}
# For each paper, get the full topicDic
for paper in papersDictInside:
if globalVar.cancelProcess:
return
# For each item in paper criteria
for item in paper[args.criterion].split(";"):
# Strip paper item and upper case
item = item.strip()
item = item.upper()
# If paper item empty continue
if item == "":
continue
# If filter sub topic, omit items outside that do not match with the subtopic
if filterSubTopic != "" and len(item.split(",")) >= 2:
if (item.split(",")[1].strip().upper() != filterSubTopic.upper()):
continue
# If topic already in topicDic
if item in topicDic:
topicDic[item] += 1
# If topic is not in topicDic, create this in topicDic
else:
topicDic[item] = 1
                    # If onlyFirst, only count the first item of each paper
if args.onlyFirst:
break
# If trending analysis, the top topic list to analyse is bigger
if args.trend:
topicListLength = globalVar.TOP_TREND_SIZE
startList = 0
else:
topicListLength = args.length
startList = args.skipFirst
            # Get the top topics by the topicDic count
            topTopics = sorted(topicDic.items(),
                               key=lambda x: -x[1])[startList:(startList + topicListLength)]
            # Put the topTopics in topic List
            for topic in topTopics:
                topicList.append([topic[0]])
if len(topicList) == 0:
print("\nFINISHED : There is not results with your inputs criteria or filter")
del papersDictInside
return
# print("Topic list:")
# print(topicList)
        # Create a dictionary in the self.topicResults list per element in topicList
self.topicResults = []
for topics in topicList:
topicItem = {}
topicItem["upperName"] = topics[0].upper()
# If the topic name was given as an argument, use the first one given, else keep empty to use the first one found
if args.topics:
topicItem["name"] = topics[0]
else:
topicItem["name"] = ""
topicItem["allTopics"] = topics
topicItem["year"] = self.yearArray
topicItem["PapersCount"] = [0] * len(self.yearArray)
topicItem["PapersCountAccum"] = [0] * len(self.yearArray)
topicItem["PapersCountRate"] = [0] * len(self.yearArray)
topicItem["PapersTotal"] = 0
topicItem["AverageDocPerYear"] = 0 # ADY
topicItem["PapersInLastYears"] = 0
topicItem["PerInLastYears"] = 0 # PDLY
topicItem["CitedByCount"] = [0] * len(self.yearArray)
topicItem["CitedByCountAccum"] = [0] * len(self.yearArray)
topicItem["CitedByTotal"] = 0
topicItem["papers"] = []
topicItem["topicsFound"] = []
topicItem["hIndex"] = 0
topicItem["agr"] = 0 # Average growth rate
self.topicResults.append(topicItem)
# Find papers within the arguments, and fill the self.topicResults fields per year.
print("Calculating papers statistics...")
globalVar.progressText = "Calculating papers statistics"
papersLen = len(papersDictInside)
papersCounter = 0
# For each paper
for paper in papersDictInside:
papersCounter += 1
progressPer = int(float(papersCounter) / float(papersLen) * 100)
globalVar.progressPer = progressPer
if globalVar.cancelProcess:
return
# For each item in paper criteria
for item in paper[args.criterion].split(";"):
# Strip paper item and upper
item = item.strip()
itemUp = item.upper()
# For each topic in topic results
for topicItem in self.topicResults:
# for each sub topic
for subTopic in topicItem["allTopics"]:
# Check if the sub topic match with the paper item
if args.topics and "*" in subTopic.upper():
subTopicRegex = subTopic.upper().replace("*", ".*")
p = re.compile(subTopicRegex)
match = p.match(itemUp)
else:
match = subTopic.upper() == itemUp
# If match, sum it to the topicItem
if match:
yearIndex = topicItem["year"].index(int(paper["year"]))
topicItem["PapersCount"][yearIndex] += 1
topicItem["PapersTotal"] += 1
topicItem["CitedByCount"][yearIndex] += int(paper["citedBy"])
topicItem["CitedByTotal"] += int(paper["citedBy"])
# If no name in the topicItem, put the first one that was found
if topicItem["name"] | |
#!/usr/bin/python3
'''
Summary
-------
This application derives the parameter mirror_reflection_random_angle \
(mirror roughness, also called rnda here) \
for a given set of measured D80 of individual mirrors. The mean value of the measured D80 \
in cm is required and its sigma can be given optionally but will only be used for plotting. \
The individual mirror focal length can be taken into account if a mirror list which contains \
this information is used from the :ref:`Model Parameters DB` or if a new mirror list is given \
through the argument mirror_list. Random focal lengths can be used by turning on the argument \
use_random_flen, and a new value for it can be given through the argument random_flen.
The algorithm works as follows: a starting value of rnda is first defined as the one taken \
from the :ref:`Model Parameters DB` \
(or alternatively one may want to set it using the argument rnda).\
Secondly, ray tracing simulations are performed for single mirror configurations for each \
mirror given in the mirror_list. The mean simulated D80 for all the mirrors is compared with \
the mean measured D80. A new value of rnda is then defined based on the sign of \
the difference between measured and simulated D80 and a new set of simulations \
is performed. This process repeats until the sign of the difference changes, \
meaning that the two final values of rnda bracket the optimal one. These \
two values are used to find the optimal one by a linear \
interpolation. Finally, simulations are performed by using the interpolated value \
of rnda, which is defined as the desired optimal.
The option no_tunning can be used if one only wants to simulate a single value of rnda and compare \
the results with the measured ones.
The results of the tuning are plotted. See examples of the D80 vs rnda plot, on the left, \
and the D80 distributions, on the right.
.. _deriva_rnda_plot:
.. image:: images/derive_mirror_rnda_North-MST-FlashCam-D.png
:width: 49 %
.. image:: images/derive_mirror_rnda_North-MST-FlashCam-D_D80-distributions.png
:width: 49 %
Command line arguments
----------------------
telescope (str, required)
Telescope name (e.g. North-LST-1, South-SST-D, ...)
model_version (str, optional)
Model version (default=prod4)
mean_d80 (float, required)
Mean of measured D80 [cm]
sig_d80 (float, optional)
Std dev of measured D80 [cm]
rnda (float, optional)
Starting value of mirror_reflection_random_angle. If not given, the value from the \
default model will be used.
d80_list (file, optional)
File with single column list of measured D80 [cm]. It is used only for plotting the D80 \
distributions. If given, the measured distribution will be plotted on the top of the \
simulated one.
mirror_list (file, optional)
Mirror list file (in sim_telarray format) to replace the default one. It should be used \
if measured mirror focal lengths need to be taken into account.
use_random_flen (activation mode, optional)
Use random focal lengths, instead of the measured ones. The argument random_flen can be \
used to replace the default random_focal_length from the model.
random_flen (float, optional)
Value to replace the default random_focal_length. Only used if use_random_flen \
is activated.
test (activation mode, optional)
If activated, application will be faster by simulating only few mirrors.
verbosity (str, optional)
Log level to print (default=INFO).
Example
-------
MST - Prod5 (07.2020)
Runtime about 3 min.
.. code-block:: console
python applications/derive_mirror_rnda.py --site North --telescope MST-FlashCam-D --mean_d80 1.4 --sig_d80 0.16 --mirror_list mirror_MST_focal_lengths.dat --d80_list mirror_MST_D80.dat --rnda 0.0075
Expected output:
.. code-block:: console
Measured D80:
Mean = 1.400 cm, StdDev = 0.160 cm
Simulated D80:
Mean = 1.401 cm, StdDev = 0.200 cm
    mirror_reflection_random_angle
Previous value = 0.007500
New value = 0.006378
.. todo::
* Change default model to default (after this feature is implemented in db_handler)
* Fix the setStyle. For some reason, sphinx cannot built docs with it on.
'''
import logging
import matplotlib.pyplot as plt
import argparse
import numpy as np
import astropy.units as u
import simtools.config as cfg
import simtools.util.general as gen
import simtools.io_handler as io
from simtools.ray_tracing import RayTracing
from simtools.model.telescope_model import TelescopeModel
# from simtools.visualize import setStyle
# setStyle()
def plotMeasuredDistribution(file, **kwargs):
data = np.loadtxt(file)
ax = plt.gca()
ax.hist(data, **kwargs)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'-s',
'--site',
help='North or South',
type=str,
required=True
)
parser.add_argument(
'-t',
'--telescope',
help='Telescope model name (e.g. LST-1, SST-D, ...)',
type=str,
required=True
)
parser.add_argument(
'-m',
'--model_version',
help='Model version (default=prod4)',
type=str,
default='prod4'
)
parser.add_argument(
'--mean_d80',
help='Mean of measured D80 [cm]',
type=float,
required=True
)
parser.add_argument(
'--sig_d80',
help='Std dev of measured D80 [cm]',
type=float,
required=False
)
parser.add_argument(
'--d80_list',
help=(
'File with single column list of measured D80 [cm]. If given, the measured '
'distribution will be plotted on the top of the simulated one.'
),
type=str,
required=False
)
parser.add_argument(
'--rnda',
help='Starting value of mirror_reflection_random_angle',
type=float,
default=0.
)
parser.add_argument(
'--no_tunning',
help='Turn off the tunning - A single case will be simulated and plotted.',
action='store_true'
)
parser.add_argument(
'--mirror_list',
help=(
'Mirror list file to replace the default one. It should be used if measured mirror'
            ' focal lengths need to be taken into account'
),
type=str,
required=False
)
parser.add_argument(
'--use_random_flen',
help=(
'Use random focal lengths. The argument random_flen can be used to replace the default'
' random_focal_length parameter.'
),
action='store_true'
)
parser.add_argument(
'--random_flen',
help='Value to replace the default random_focal_length.',
type=float,
required=False
)
parser.add_argument(
'--test',
help='Test option will be faster by simulating only 10 mirrors.',
action='store_true'
)
parser.add_argument(
'-v',
'--verbosity',
dest='logLevel',
action='store',
default='info',
help='Log level to print (default is INFO).'
)
args = parser.parse_args()
label = 'derive_mirror_rnda'
logger = logging.getLogger()
logger.setLevel(gen.getLogLevelFromUser(args.logLevel))
# Output directory to save files related directly to this app
outputDir = io.getApplicationOutputDirectory(cfg.get('outputLocation'), label)
tel = TelescopeModel(
site=args.site,
telescopeModelName=args.telescope,
modelVersion=args.model_version,
label=label
)
if args.mirror_list is not None:
mirrorListFile = cfg.findFile(name=args.mirror_list)
tel.changeParameter('mirror_list', args.mirror_list)
tel.addParameterFile('mirror_list', mirrorListFile)
if args.random_flen is not None:
tel.changeParameter('random_focal_length', str(args.random_flen))
def run(rnda, plot=False):
''' Runs the simulations for one given value of rnda '''
tel.changeParameter('mirror_reflection_random_angle', str(rnda))
ray = RayTracing.fromKwargs(
telescopeModel=tel,
singleMirrorMode=True,
mirrorNumbers=list(range(1, 10)) if args.test else 'all',
useRandomFocalLength=args.use_random_flen
)
ray.simulate(test=False, force=True) # force has to be True, always
ray.analyze(force=True)
# Plotting D80 histograms
if plot:
plt.figure(figsize=(8, 6), tight_layout=True)
ax = plt.gca()
ax.set_xlabel(r'D$_{80}$ [cm]')
bins = np.linspace(0.8, 3.5, 27)
ray.plotHistogram(
'd80_cm',
color='r',
linestyle='-',
alpha=0.5,
facecolor='r',
edgecolor='r',
bins=bins,
label='simulated'
)
# Only plot measured D80 if the data is given
if args.d80_list is not None:
d80ListFile = cfg.findFile(args.d80_list)
plotMeasuredDistribution(
d80ListFile,
color='b',
linestyle='-',
facecolor='None',
edgecolor='b',
bins=bins,
label='measured'
)
ax.legend(frameon=False)
plotFileName = label + '_' + tel.name + '_' + 'D80-distributions'
plotFile = outputDir.joinpath(plotFileName)
plt.savefig(str(plotFile) + '.pdf', format='pdf', bbox_inches='tight')
plt.savefig(str(plotFile) + '.png', format='png', bbox_inches='tight')
return ray.getMean('d80_cm').to(u.cm).value, ray.getStdDev('d80_cm').to(u.cm).value
# First - rnda from previous model
if args.rnda != 0:
rndaStart = args.rnda
else:
rndaStart = tel.getParameter('mirror_reflection_random_angle')
if isinstance(rndaStart, str):
rndaStart = rndaStart.split()
rndaStart = float(rndaStart[0])
if not args.no_tunning:
resultsRnda = list()
resultsMean = list()
resultsSig = list()
def collectResults(rnda, mean, sig):
resultsRnda.append(rnda)
resultsMean.append(mean)
resultsSig.append(sig)
stop = False
meanD80, sigD80 = run(rndaStart)
rnda = rndaStart
signDelta = np.sign(meanD80 - args.mean_d80)
collectResults(rnda, meanD80, sigD80)
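    # Walk rnda in steps of 10% of the starting value, in the direction that brings
    # the simulated D80 towards the measured one, until the sign of
    # (simulated - measured) flips; the last two rnda values then bracket the optimum.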
while not stop:
newRnda = rnda - (0.1 * rndaStart * signDelta)
meanD80, sigD80 = run(newRnda)
newSignDelta = np.sign(meanD80 - args.mean_d80)
stop = (newSignDelta != signDelta)
signDelta = newSignDelta
rnda = newRnda
collectResults(rnda, meanD80, sigD80)
# Linear interpolation using two last rnda values
resultsRnda, resultsMean, resultsSig = gen.sortArrays(resultsRnda, resultsMean, resultsSig)
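    # np.interp maps the measured mean D80 onto the (simulated mean D80, rnda) pairs.
    # With illustrative numbers: np.interp(1.40, xp=[1.32, 1.45], fp=[0.0068, 0.0075])
    # returns ~0.00723. It requires xp to be increasing, which holds as long as the
    # simulated D80 grows with rnda.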
rndaOpt = np.interp(x=args.mean_d80, xp=resultsMean, fp=resultsRnda)
else:
rndaOpt = rndaStart
# Running the final simulation for rndaOpt
meanD80, sigD80 = run(rndaOpt, plot=True)
# Printing results to stdout
print('\nMeasured D80:')
if args.sig_d80 is not None:
print('Mean = {:.3f} cm, StdDev = {:.3f} cm'.format(args.mean_d80, args.sig_d80))
else:
print('Mean = {:.3f} cm'.format(args.mean_d80))
print('\nSimulated D80:')
print('Mean = {:.3f} cm, StdDev = {:.3f} cm'.format(meanD80, sigD80))
    print('\nmirror_reflection_random_angle')
print('Previous value = {:.6f}'.format(rndaStart))
print('New value = {:.6f}\n'.format(rndaOpt))
# Plotting D80 vs rnda
plt.figure(figsize=(8, 6), tight_layout=True)
ax = plt.gca()
    ax.set_xlabel(r'mirror$\_$reflection$\_$random$\_$angle')
ax.set_ylabel(r'$D_{80}$ [cm]')
if not args.no_tunning:
ax.errorbar(
resultsRnda,
resultsMean,
yerr=resultsSig,
color='k',
marker='o',
linestyle='none'
)
ax.errorbar(
[rndaOpt],
[meanD80],
yerr=[sigD80],
color='r',
marker='o',
linestyle='none',
label='rnda = {:.6f} (D80 = {:.3f} +/- {:.3f} cm)'.format(rndaOpt, meanD80, sigD80)
)
xlim = ax.get_xlim()
    ax.plot(xlim, [args.mean_d80, args.mean_d80])  # horizontal line at the measured mean D80
import os
from pathlib import Path
import json as stable_json # use in case then orjson failed
import orjson as json # faster
import numpy as np
import pandas as pd
from tqdm import tqdm_notebook as tqdm
import yaml
from yaml import Loader as Loader
import re
from sklearn.metrics import (
accuracy_score,
auc,
roc_auc_score,
precision_recall_curve,
average_precision_score,
f1_score,
precision_score,
recall_score
)
from ue4nlp.ue_scores import *
import logging
log = logging.getLogger()
default_methods = {
"bald": bald,
"sampled_max_prob": sampled_max_prob,
"variance": probability_variance,
}
def unpad_preds(probs, sampled_probs, preds, labels):
true_sampled_probs = [
[p.tolist() for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(sampled_probs.transpose(1, 2, 3, 0), labels[:, :])
]
true_probs = [
[p.tolist() for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(probs, labels[:, :])
]
true_predictions = [
[p for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(preds, labels[:, :])
]
true_labels = [
[l for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(preds, labels[:, :])
]
return true_sampled_probs, true_probs, true_predictions, true_labels
def get_score_ratio_seq(sorted_indexes, answers, true_answers, ratio):
last_index = int(len(sorted_indexes) * ratio)
sel_indexes = sorted_indexes[:last_index]
unsel_indexes = sorted_indexes[last_index:]
sel_answers = []
for ind in sel_indexes:
sel_answers.append(true_answers[ind])
for ind in unsel_indexes:
sel_answers.append(answers[ind])
sel_true_answers = []
for ind in sel_indexes:
sel_true_answers.append(true_answers[ind])
for ind in unsel_indexes:
sel_true_answers.append(true_answers[ind])
score = sum([1.0 * (l == p) for l, p in zip(sel_answers, sel_true_answers)]) / len(
sel_answers
)
return score
def get_score_ratio(sorted_indexes, answers, true_answers, ratio, metric=accuracy_score, drop=False):
last_index = int(len(sorted_indexes) * ratio)
sel_indexes = sorted_indexes[:last_index]
unsel_indexes = sorted_indexes[last_index:]
if drop:
sel_answers = answers[unsel_indexes].tolist()
sel_true_answers = true_answers[unsel_indexes].tolist()
else:
sel_answers = (
true_answers[sel_indexes].tolist() + answers[unsel_indexes].tolist()
)
sel_true_answers = (
true_answers[sel_indexes].tolist() + true_answers[unsel_indexes].tolist()
)
score = metric(sel_true_answers, sel_answers)
return score
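# In the rejection curves built from this helper, the `ratio` most uncertain
# predictions (the head of `sorted_indexes`) are replaced by ground-truth labels,
# mimicking deferral to an oracle, and the metric is recomputed on the mix
# (with drop=True they are discarded instead).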
def is_ue_score(name):
return ("mahalanobis" in name or "nuq" in name or "mixup" in name or "ddu" in name or "disc" in name)
def calc_rejection_curve_aucs(
probabilities, labels, sampled_probabilities, model_answers, methods
):
ratio_list = [0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
predictions = np.argmax(probabilities, axis=-1)
errors = (labels != predictions).astype("uint8")
model_ues = 1 - np.max(probabilities, axis=1)
sorted_indexes_model = np.argsort(-model_ues)
results = {}
model_scores = [
get_score_ratio(sorted_indexes_model, model_answers, labels, ratio)
for ratio in ratio_list
]
results["max_prob"] = auc(ratio_list, model_scores)
for name, method_function in methods.items():
ue_scores = method_function(sampled_probabilities)
ensemble_answers = np.asarray(sampled_probabilities).mean(1).argmax(-1)
sorted_indexes_ensemble = np.argsort(-ue_scores)
if is_ue_score(name):
# because for this case we have ue scores in sampled_probabilities
ensemble_answers = predictions
ens_scores = [
get_score_ratio(sorted_indexes_ensemble, ensemble_answers, labels, ratio)
for ratio in ratio_list
]
results[name] = auc(ratio_list, ens_scores)
return results
def calc_rejection_curve_auc_seq(probs, labels, sampled_probs, model_answers, methods, avg_type='sum'):
sampled_probs, probs, predictions, labels = unpad_preds(
probs, sampled_probs, np.argmax(probs, axis=-1), labels
)
if methods is None:
methods = default_methods
ratio_list = [0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
errors = [1.0 * (l != p) for l, p in zip(labels, predictions)]
n_examples = len(errors)
ue_scores_max = np.zeros(n_examples)
for i in range(n_examples):
sent = probs[i]
true_probs_max = np.asarray([np.max(proba) for proba in sent])
ue_scores_max[i] = np.mean(1 - true_probs_max)
sorted_indexes_model = np.argsort(-ue_scores_max)
results = {}
model_scores = [
get_score_ratio_seq(sorted_indexes_model, predictions, labels, ratio)
for ratio in ratio_list
]
results["max_prob"] = auc(ratio_list, model_scores)
for name, method_function in methods.items():
ensemble_answers = [
np.asarray(p).mean(-1).argmax(-1).tolist() for p in sampled_probs
]
if is_ue_score(name):
# because for this case we have ue scores in sampled_probabilities
avg_type = 'max'
ensemble_answers = predictions
ue_scores = seq_ue(sampled_probs, method_function, avg_type=avg_type)
sorted_indexes_ensemble = np.argsort(-ue_scores)
ens_scores = [
get_score_ratio_seq(
sorted_indexes_ensemble, ensemble_answers, labels, ratio
)
for ratio in ratio_list
]
results[name] = auc(ratio_list, ens_scores)
return results
def calc_roc_aucs_seq(labels, probs, sampled_probs, methods=None, avg_type='sum'):
sampled_probs, probs, predictions, labels = unpad_preds(
probs, sampled_probs, np.argmax(probs, axis=-1), labels
)
if methods is None:
methods = default_methods
errors = [1.0 * (l != p) for l, p in zip(labels, predictions)]
results = {}
for name, method_function in methods.items():
if is_ue_score(name):
avg_type = 'max'
ue_scores = seq_ue(sampled_probs, method_function, avg_type=avg_type)
results[name] = roc_auc_score(errors, ue_scores)
n_examples = len(errors)
ue_scores_max = np.zeros(n_examples)
for i in range(n_examples):
sent = probs[i]
true_probs_max = np.asarray([np.max(proba) for proba in sent])
ue_scores_max[i] = np.mean(1 - true_probs_max)
results["max_prob"] = roc_auc_score(errors, ue_scores_max)
return results
def calc_roc_aucs(
probabilities, labels, sampled_probabilities, methods, oos=False, top3=False
):
predictions = np.argmax(probabilities, axis=-1)
if oos:
if len(np.unique(labels)) > 40:
#CLINC use class №42 as OOD
errors = (labels == 42).astype("uint8")
else:
#SNIPS and ROSTD case
errors = (labels == np.max(labels)).astype("uint8")
elif top3:
top3 = np.argsort(probabilities, axis=-1)[:, -3:]
errors = np.array(
[(l not in top3[i]) * 1 for i, l in enumerate(labels)]
).astype("uint8")
else:
#misclassification case
errors = (labels != predictions).astype("uint8")
results = {}
for name, method_function in methods.items():
ue_scores = method_function(sampled_probabilities)
results[name] = roc_auc_score(errors, ue_scores)
max_prob = 1.0 - np.max(probabilities, axis=-1)
results["max_prob"] = roc_auc_score(errors, max_prob)
return results
def rcc_auc(conf, risk, return_points=False):
# risk-coverage curve's area under curve
n = len(conf)
cr_pair = list(zip(conf, risk))
cr_pair.sort(key=lambda x: x[0], reverse=True)
cumulative_risk = [cr_pair[0][1]]
for i in range(1, n):
cumulative_risk.append(cr_pair[i][1] + cumulative_risk[-1])
points_x = []
points_y = []
auc = 0
for k in range(n):
auc += cumulative_risk[k] / (1 + k)
points_x.append((1 + k) / n) # coverage
points_y.append(cumulative_risk[k] / (1 + k)) # current avg. risk
if return_points:
return auc, points_x, points_y
else:
return auc
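# Worked example (illustrative numbers): conf=[0.9, 0.2, 0.6], risk=[0, 1, 0]
# -> sorted by confidence the risks are (0, 0, 1), cumulative risk (0, 0, 1),
# -> auc = 0/1 + 0/2 + 1/3 = 0.333; lower is better (errors pushed to low confidence).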
def calc_rcc_aucs(probabilities, labels, sampled_probabilities, methods):
predictions = np.argmax(probabilities, axis=-1)
risk_binary = (predictions != labels).astype(int)
conf = np.max(probabilities, axis=1)
results = {}
for name, method_function in methods.items():
ue_scores = method_function(sampled_probabilities)
results[name] = rcc_auc(-ue_scores, risk_binary)
results["max_prob"] = rcc_auc(conf, risk_binary)
return results
def rpp(conf, risk):
# reverse pair proportion
# for now only works when risk is binary
n = len(conf)
cr_pair = list(zip(conf, risk))
cr_pair.sort(key=lambda x: x[0], reverse=False)
pos_count, rp_count = 0, 0
for i in range(n):
if cr_pair[i][1] == 0: # risk==0
pos_count += 1
else:
rp_count += pos_count
return rp_count / (n ** 2)
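# Worked example (illustrative numbers): conf=[0.9, 0.2, 0.6], risk=[1, 0, 0]
# -> the single error has the highest confidence, so both correct predictions form
# reversed pairs with it: rpp = 2 / 3**2 = 0.222; lower is better.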
def calc_rpp(probabilities, labels, sampled_probabilities, methods):
predictions = np.argmax(probabilities, axis=-1)
risk_binary = (predictions != labels).astype(int)
conf = np.max(probabilities, axis=1)
results = {}
for name, method_function in methods.items():
ue_scores = method_function(sampled_probabilities)
results[name] = rpp(-ue_scores, risk_binary)
results["max_prob"] = rpp(conf, risk_binary)
return results
def calc_pr_aucs(
answers, probabilities, eval_labels, sampled_probabilities, methods, oos
):
if not oos:
labels = (eval_labels != answers).astype("uint8")
elif len(np.unique(eval_labels)) > 40:
labels = (eval_labels == 42).astype("uint8")
else:
labels = (eval_labels == np.max(eval_labels)).astype("uint8")
results = {}
for name, method_function in methods.items():
ue_scores = method_function(sampled_probabilities)
results[name] = average_precision_score(labels, ue_scores)
max_prob = 1.0 - np.max(probabilities, axis=-1)
results["max_prob"] = average_precision_score(labels, max_prob)
return results
def calc_precision(
answers, probabilities, eval_labels, sampled_probabilities, methods, oos
):
if not oos:
labels = (eval_labels != answers).astype("uint8")
elif len(np.unique(eval_labels)) > 40:
labels = (eval_labels == 42).astype("uint8")
else:
labels = (eval_labels == np.max(eval_labels)).astype("uint8")
results = {}
for name, method_function in methods.items():
ue_scores = method_function(sampled_probabilities)
precision, recall, thresholds = precision_recall_curve(labels, ue_scores)
f1_score = 2*precision*recall/(precision+recall+1e-7)
results[name] = precision[np.argmax(f1_score)]
max_prob = 1.0 - np.max(probabilities, axis=-1)
    precision, recall, thresholds = precision_recall_curve(labels, max_prob)
f1_score = 2*precision*recall/(precision+recall+1e-7)
results["max_prob"] = precision[np.argmax(f1_score)]
return results
def calc_recall(
answers, probabilities, eval_labels, sampled_probabilities, methods, oos
):
if not oos:
labels = (eval_labels != answers).astype("uint8")
elif len(np.unique(eval_labels)) > 40:
labels = (eval_labels == 42).astype("uint8")
else:
labels = (eval_labels == np.max(eval_labels)).astype("uint8")
results = {}
for name, method_function in methods.items():
ue_scores = method_function(sampled_probabilities)
precision, recall, thresholds = precision_recall_curve(labels, ue_scores)
f1_score = 2*precision*recall/(precision+recall+1e-7)
results[name] = recall[np.argmax(f1_score)]
max_prob = 1.0 - np.max(probabilities, axis=-1)
    precision, recall, thresholds = precision_recall_curve(labels, max_prob)
f1_score = 2*precision*recall/(precision+recall+1e-7)
results["max_prob"] = recall[np.argmax(f1_score)]
return results
def calc_f1_score(
answers, probabilities, eval_labels, sampled_probabilities, methods, oos
):
if not oos:
labels = (eval_labels != answers).astype("uint8")
elif len(np.unique(eval_labels)) > 40:
labels = (eval_labels == 42).astype("uint8")
else:
labels = (eval_labels == np.max(eval_labels)).astype("uint8")
results = {}
for name, method_function in methods.items():
ue_scores = method_function(sampled_probabilities)
precision, recall, thresholds = precision_recall_curve(labels, ue_scores)
precision, recall = np.array(precision), np.array(recall)
f1_score = 2*precision*recall/(precision+recall+1e-7)
results[name] = np.max(f1_score)
max_prob = 1.0 - np.max(probabilities, axis=-1)
precision, recall, thresholds = precision_recall_curve(labels, max_prob)
precision, recall = np.array(precision), np.array(recall)
f1_score = 2*precision*recall/(precision+recall+1e-7)
results["max_prob"] = np.max(f1_score)
return results
def calc_rcc_aucs_seq(
probabilities, labels, sampled_probabilities, predictions, methods, avg_type='sum'
):
risk_binary = [1.0 * (l != p) for l, p in zip(labels, predictions)]
results = {}
    # All these methods are experimental; for now, look only at results['rcc_auc']
for name, method_function in methods.items():
if is_ue_score(name):
avg_type = 'max'
        ue_scores = seq_ue(sampled_probabilities, method_function, avg_type=avg_type)
import logging, json
from threading import Thread
from django.conf import settings
from docker import Client
from docker.errors import APIError
from backend.models import Image
from backend.utils import (fetch_digest_from_response, get_optimal_docker_host,
remove_file_from_disk)
from backend.schedule import DockerSchedulerFactory
logger = logging.getLogger('hummer')
class ImageBuilder(object):
"""
ImageBuilder is to build image. One way is to use image file directly, the
other way is to use Dockerfile to build image.
is_image: 0|1|2, 0 represents build file, 1 represents image file,
2 represents container snapshot.
"""
build_file = None
is_image = 0
dockerfile = None
image = None
user = None
def __init__(self, build_file, is_image, dockerfile, image_id,
old_image_name, old_image_version):
self.build_file = build_file
self.is_image = is_image
self.dockerfile = dockerfile
self.image = Image.objects.get(id=image_id)
self.user = self.image.user
self.old_image_name = old_image_name
self.old_image_version = old_image_version
def create_image(self):
"""
Create image by two ways.
"""
target = None
if self.is_image != 0:
target = self._create_image_by_imagefile
else:
target = self._create_image_by_dockerfile
creating_thread = Thread(target=target)
creating_thread.start()
def _create_image_by_imagefile(self):
"""
Create image by imagefile.
"""
logger.debug("creating an image by imagefile.")
docker_host = get_optimal_docker_host()
if not docker_host:
logger.error("there is no available active docker host.")
self._update_image_status(status="failed")
return None
# TODO: create image on docker host
base_url = self._get_docker_host_base_url(docker_host)
image_name = self._get_image_name()
if self.is_image == 1:
token = self._load_image_on_docker_host(base_url, self.build_file,
image_name, self.image.version)
elif self.is_image == 2:
token = self._import_snapshot_on_docker_host(base_url,
self.build_file, image_name, self.image.version)
if not token:
logger.error("Import image on docker host failed")
self._update_image_status(status="failed")
return None
logger.info('Image %s:%s has been imported, with token %s', image_name,
self.image.version, token)
digest = self._push_image_to_registry(base_url, image_name,
self.image.version, token)
if not digest:
logger.error("Push image from docker host to registry failed")
self._update_image_status(status="failed")
return None
logger.info('Image %s:%s has been pushed to registry, with digest %s',
image_name, self.image.version, digest)
self._update_image_status(status="active", digest=digest, token=token)
remove_file_from_disk(self.build_file)
def _create_image_by_dockerfile(self):
"""
Create image by dockerfile, this maybe take a long time.
"""
logger.debug("creating an image by dockerfile.")
docker_host = get_optimal_docker_host()
if not docker_host:
logger.error("there is no available active docker host.")
self._update_image_status(status="failed")
return None
base_url = self._get_docker_host_base_url(docker_host)
image_name = self._get_image_name()
logger.debug('%s %s' % (base_url, image_name))
token = self._build_image_on_docker_host(
base_url=base_url,
build_file=self.build_file,
dockerfile=self.dockerfile,
image_name=image_name,
image_version=self.image.version)
if not token:
logger.error("Build image on docker host failed")
self._update_image_status(status="failed")
return None
logger.info('Image %s:%s has been built, with token %s', image_name,
self.image.version, token)
digest = self._push_image_to_registry(base_url, image_name,
self.image.version, token)
if not digest:
logger.error("Push image from docker host to registry failed")
self._update_image_status(status="failed")
return None
logger.info('Image %s:%s has been pushed to registry, with digest %s',
image_name, self.image.version, digest)
self._update_image_status(status="active", digest=digest, token=token)
remove_file_from_disk(self.build_file)
def _update_image_status(self, status, digest=None, token=None):
"""
Update image metadata after building the image.
"""
self.image.status = status
if digest:
self.image.digest = digest
if token:
self.image.token = token
self.image.save()
def _get_build_docker_host(self):
"""
Returns the optimal docker host to build image.
"""
scheduler = DockerSchedulerFactory.get_scheduler()
docker_host = scheduler.get_optimal_docker_host()
logger.debug("select the optimal docher host %s" % docker_host)
return docker_host
def _get_docker_host_base_url(self, host):
"""
Returns the base url of docker host.
"""
return 'tcp://%s:%s' % (host, str(settings.DOCKER_PORT))
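# Illustrative result (hypothetical host and port): _get_docker_host_base_url('192.168.1.10')
# returns 'tcp://192.168.1.10:2375' when settings.DOCKER_PORT == 2375.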
def _get_image_name(self):
"""
Returns the complete name of the build image.
"""
return '{}/{}/{}-{}'.format(settings.IMAGE_REGISTRY, self.user.username,
self.image.project.name, self.image.name)
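# Illustrative result (hypothetical settings and names): with IMAGE_REGISTRY
# 'registry.example.com', user 'alice', project 'shop' and image 'web', this
# returns 'registry.example.com/alice/shop-web'.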
def _load_image_on_docker_host(self, base_url, build_file, image_name,
image_version='latest'):
"""
Load an image file on the selected docker host.
'base_url': the url of docker host.
'build_file': the name of the build file in absolute path.
'image_name': the name of the image, containing registry address, user
name and image name.
'image_version': the version of the image.
Returns:
'token': the image token
"""
self._delete_image_on_docker_host(base_url, self.old_image_name,
self.old_image_version)
self._delete_image_on_docker_host(base_url, image_name, image_version)
client = Client(base_url=base_url)
try:
with open(build_file, 'rb') as fileobj:
client.load_image(fileobj)
except Exception:
logger.error('load image file on docker host %s failed.' % base_url)
return None
return self._tag_image_with_new_name(base_url, self.old_image_name,
self.old_image_version, image_name, image_version)
def _import_snapshot_on_docker_host(self, base_url, build_file, image_name,
image_version='latest'):
"""
Import container snapshot on the selected docker host.
'base_url': the url of docker host.
'build_file': the name of the build file in absolute path.
'image_name': the name of the image, containing registry address, user
name and image name.
'image_version': the version of the image.
Returns:
'token': the image token
"""
self._delete_image_on_docker_host(base_url, image_name, image_version)
client = Client(base_url=base_url)
try:
res_json = client.import_image_from_file(build_file, image_name,
image_version)
res = json.loads(res_json)
except Exception:
logger.error('import snapshot on docker host %s failed.' % base_url)
return None
return res.get('status', None)
def _delete_image_on_docker_host(self, base_url, image_name, image_version):
"""
Delete image from docker host if exists image called
image_name:image_version.
"""
image_complete_name = '%s:%s' %(image_name, image_version)
client = Client(base_url=base_url)
try:
client.remove_image(image=image_complete_name, force=True)
except Exception:
logger.info('There is no image called %s on docker host %s' %
(image_complete_name, base_url))
return None
logger.info('Image %s on docker host %s has been deleted.' %
(image_complete_name, base_url))
def _push_image_to_registry(self, base_url, image_name, image_version,
image_token):
"""
Push image from docker host to private registry.
Returns the sha256 digest of the image.
"""
image_complete_name = '%s:%s' %(image_name, image_version)
if not self._is_image_on_docker_host(base_url, image_token):
logger.error('There is no image called %s on docker host %s' %
(image_complete_name, base_url))
return None
client = Client(base_url=base_url)
try:
response = [res for res in client.push(image_complete_name,
stream=True)]
except Exception:
logger.error('Communicate with %s failed.' % base_url)
return None
try:
digest = fetch_digest_from_response(response[-1])
except Exception:
logger.error('Parse the digest response error.')
return None
return digest
def _is_image_on_docker_host(self, base_url, image_token):
"""
Check whether the image exists on the docker host.
"""
client = Client(base_url=base_url)
try:
response = client.images(quiet=True)
except Exception:
logger.error("Connected %s failed." % base_url)
return False
if image_token not in response:
return False
return True
def _tag_image_with_new_name(self, base_url, old_image_name,
old_image_version, image_name, image_version):
"""
Docker tag old_image_name:old_image_version image_name:image_version.
"""
client = Client(base_url=base_url)
old_image = "{}:{}".format(old_image_name, old_image_version)
try:
response = client.tag(image=old_image, repository=image_name,
tag=image_version)
except Exception as e:
logger.debug(e)
response = False
if not response:
logger.info("Tag image {} to {}:{} failed.".format(old_image,
image_name, image_version))
return None
image_token = self._get_image_token_on_docker_host(base_url,
image_name, image_version)
self._delete_image_on_docker_host(base_url, old_image_name,
old_image_version)
return image_token
def _get_image_token_on_docker_host(self, base_url, image_name,
image_version):
"""
Given the image name and version, return the token of the image on the
docker host.
"""
image_complete_name = '%s:%s' %(image_name, image_version)
logger.debug(image_complete_name)
client = Client(base_url=base_url)
try:
images = client.images()
except Exception as e:
logger.debug(e)
logger.debug("Communicate with docker host {} failed.".format(
base_url))
return None
tokens = [image['Id'] for image in images
if image_complete_name in image['RepoTags']]
if not tokens:
logger.info("The docker host {} has no image {}:{}".format(base_url,
image_name, image_version))
return None
return tokens[0]
def _build_image_on_docker_host(self, base_url, build_file, dockerfile,
image_name, image_version):
"""
Build image on the selected docker host by Dockerfile.
'base_url': the url of docker host.
'build_file': the name of the build file in absolute path.
'dockerfile': Dockerfile path in build_file.
'image_name': the name of the image, containing registry address, user
name and image name.
'image_version': the version of the image.
Returns:
'token': the image token
"""
self._delete_image_on_docker_host(base_url, image_name, image_version)
client = Client(base_url=base_url)
fileobj = open(build_file, 'rb')
image_complete_name = '%s:%s' % (image_name, image_version)
try:
response = [line for line in client.build(
fileobj=fileobj,
custom_context=True,
dockerfile=dockerfile,
rm=True,
tag=image_complete_name)]
except APIError as error:
logger.debug(error)
logger.error('Cannot locate specified Dockerfile: %s.' %
(self.dockerfile))
fileobj.close()
return None
except Exception as error:
logger.debug(error)
logger.error('Build image %s failed.' % image_complete_name)
fileobj.close()
return None
fileobj.close()
return self._get_image_token(base_url, image_complete_name)
def _get_image_token(self, base_url, image_complete_name):
"""
"""
client = Client(base_url=base_url)
try:
token = client.inspect_image(image_complete_name).get('Id', None)
except Exception:
logger.error('Can\'t get the token of image %s on docker host %s' %
(image_complete_name, base_url))
return None
return token
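# Usage sketch (illustrative; the concrete argument values are hypothetical,
# only the constructor signature above is assumed):
#     builder = ImageBuilder(build_file='/tmp/app.tar', is_image=1,
#                            dockerfile=None, image_id=42,
#                            old_image_name='alice/app', old_image_version='v1')
#     builder.create_image()  # imports, tags and pushes in a background thread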
class ImageDestroyer(object):
"""
ImageDestroyer destroys an image instance in a separate thread.
"""
image = None
def __init__(self, image):
self.image = image
def destroy_image_instance(self):
deleting_thread = Thread(target=self._destroy_image_instance)
deleting_thread.start()
def _destroy_image_instance(self):
self._update_image_status(status='deleting')
# TODO: delete the image instance
self._delete_image_instance_on_all_hosts()
self._delete_image_metadata()
def _update_image_status(self, status):
self.image.status = status
self.image.save()
def _delete_image_metadata(self):
self.image.delete()
def _delete_image_instance_on_all_hosts(self):
"""
Delete image instance on all hosts.
"""
image_name = self._get_image_name()
image_version = self.image.version
scheduler = DockerSchedulerFactory.get_scheduler()
hosts = scheduler.get_docker_hosts()
for host in hosts:
base_url = self._get_docker_host_base_url(host)
self._delete_image_on_docker_host(base_url, image_name,
image_version)
def _get_image_name(self):
"""
Returns the complete name of the image.
"""
return '{}/{}/{}-{}'.format(settings.IMAGE_REGISTRY,
self.image.user.username,
self.image.project.name, self.image.name)
def _delete_image_on_docker_host(self, base_url, image_name, image_version):
"""
Delete image from docker host if exists image called
image_name:image_version.
"""
image_complete_name = '%s:%s' %(image_name, image_version)
client = Client(base_url=base_url)
try:
client.remove_image(image=image_complete_name, force=True)
except Exception:
logger.info('There is no image called %s on docker host %s' %
(image_complete_name, base_url))
return None
logger.info('Image %s on docker host %s has been deleted.' %
(image_complete_name, base_url))
def _get_docker_host_base_url(self, host):
"""
Returns the base url of docker host.
"""
return 'tcp://%s:%s' % (host, str(settings.DOCKER_PORT))
# Set a default noise amplitude of 1/10 of the QRS amplitude
noise_amp = qrs_amp / 10
# Get R-R intervals of consecutive beats, if any.
rr_intervals = np.diff(qrs_inds)
rr_intervals = rr_intervals[rr_intervals < self.rr_max]
if rr_intervals.any():
rr_recent = np.mean(rr_intervals)
else:
rr_recent = self.rr_init
# If an early QRS was detected, set last_qrs_ind so that it can be
# picked up.
last_qrs_ind = min(0, qrs_inds[0] - self.rr_min - 1)
self._set_init_params(qrs_amp_recent=qrs_amp,
noise_amp_recent=noise_amp,
rr_recent=rr_recent,
last_qrs_ind=last_qrs_ind)
self.learned_init_params = True
# Failed to find enough calibration beats. Use default values.
else:
if self.verbose:
print('Failed to find %d beats during learning.'
% n_calib_beats)
self._set_default_init_params()
def _set_init_params(self, qrs_amp_recent, noise_amp_recent, rr_recent,
last_qrs_ind):
"""
Set initial online parameters.
Parameters
----------
qrs_amp_recent : int, float
The mean of the signal QRS amplitudes.
noise_amp_recent : int, float
The mean of the signal noise amplitudes.
rr_recent : int
The mean of the signal R-R interval values.
last_qrs_ind : int
The index of the last detected QRS complex (may be negative at
initialization so that early beats can be picked up).
Returns
-------
N/A
"""
self.qrs_amp_recent = qrs_amp_recent
self.noise_amp_recent = noise_amp_recent
# What happens if qrs_thr is calculated to be less than the explicit
# min threshold? Should print warning?
self.qrs_thr = max(0.25*self.qrs_amp_recent
+ 0.75*self.noise_amp_recent,
self.qrs_thr_min * self.transform_gain)
self.rr_recent = rr_recent
self.last_qrs_ind = last_qrs_ind
# No QRS detected initially
self.last_qrs_peak_num = None
def _set_default_init_params(self):
"""
Set initial running parameters using default values.
The steady state equation is:
`qrs_thr = 0.25*qrs_amp + 0.75*noise_amp`
Estimate that QRS amp is 10x noise amp, giving:
`qrs_thr = 0.325 * qrs_amp or 13/40 * qrs_amp`
Parameters
----------
N/A
Returns
-------
N/A
"""
if self.verbose:
print('Initializing using default parameters')
# Multiply the specified ECG thresholds by the filter and MWI gain
# factors
qrs_thr_init = self.qrs_thr_init * self.transform_gain
qrs_thr_min = self.qrs_thr_min * self.transform_gain
qrs_amp = 27/40 * qrs_thr_init
noise_amp = qrs_amp / 10
rr_recent = self.rr_init
last_qrs_ind = 0
self._set_init_params(qrs_amp_recent=qrs_amp,
noise_amp_recent=noise_amp,
rr_recent=rr_recent,
last_qrs_ind=last_qrs_ind)
self.learned_init_params = False
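# Worked example (illustrative values only; the actual defaults live in XQRS.Conf):
# with qrs_thr_init = 0.13, qrs_thr_min = 0 and transform_gain = 100, the scaled
# initial threshold is 13, so qrs_amp = 27/40 * 13 = 8.775, noise_amp = 0.8775, and
# _set_init_params yields qrs_thr = max(0.25*8.775 + 0.75*0.8775, 0) ~= 2.85.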
def _is_qrs(self, peak_num, backsearch=False):
"""
Check whether a peak is a QRS complex. It is classified as QRS
if it:
- Comes after the refractory period.
- Passes QRS threshold.
- Is not a T-wave (check it if the peak is close to the previous QRS).
Parameters
----------
peak_num : int
The peak number of the MWI signal to be inspected.
backsearch: bool, optional
Whether the peak is being inspected during backsearch.
Returns
-------
bool
Whether the peak is QRS (True) or not (False).
"""
i = self.peak_inds_i[peak_num]
if backsearch:
qrs_thr = self.qrs_thr / 2
else:
qrs_thr = self.qrs_thr
if (i-self.last_qrs_ind > self.ref_period
and self.sig_i[i] > qrs_thr):
if i-self.last_qrs_ind < self.t_inspect_period:
if self._is_twave(peak_num):
return False
return True
return False
def _update_qrs(self, peak_num, backsearch=False):
"""
Update live QRS parameters. Adjust the recent R-R intervals and
QRS amplitudes, and the QRS threshold.
Parameters
----------
peak_num : int
The peak number of the MWI signal where the QRS is detected.
backsearch: bool, optional
Whether the QRS was found via backsearch.
Returns
-------
N/A
"""
i = self.peak_inds_i[peak_num]
# Update recent R-R interval if the beat is consecutive (do this
# before updating self.last_qrs_ind)
rr_new = i - self.last_qrs_ind
if rr_new < self.rr_max:
self.rr_recent = 0.875*self.rr_recent + 0.125*rr_new
self.qrs_inds.append(i)
self.last_qrs_ind = i
# Peak number corresponding to last QRS
self.last_qrs_peak_num = self.peak_num
# QRS recent amplitude is adjusted twice as quickly if the peak
# was found via backsearch
if backsearch:
self.backsearch_qrs_inds.append(i)
self.qrs_amp_recent = (0.75*self.qrs_amp_recent
+ 0.25*self.sig_i[i])
else:
self.qrs_amp_recent = (0.875*self.qrs_amp_recent
+ 0.125*self.sig_i[i])
self.qrs_thr = max((0.25*self.qrs_amp_recent
+ 0.75*self.noise_amp_recent), self.qrs_thr_min)
return
def _is_twave(self, peak_num):
"""
Check whether a segment is a T-wave. Compare the maximum gradient of
the filtered signal segment with that of the previous QRS segment.
Parameters
----------
peak_num : int
The peak number of the MWI signal where the QRS is detected.
Returns
-------
bool
Whether a segment is a T-wave (True) or not (False).
"""
i = self.peak_inds_i[peak_num]
# Due to initialization parameters, last_qrs_ind may be negative.
# No way to check in this instance.
if self.last_qrs_ind - self.qrs_radius < 0:
return False
# Get half the QRS width of the signal to the left.
# Should this be squared?
sig_segment = normalize((self.sig_f[i - self.qrs_radius:i]
).reshape(-1, 1), axis=0)
last_qrs_segment = self.sig_f[self.last_qrs_ind - self.qrs_radius:
self.last_qrs_ind]
segment_slope = np.diff(sig_segment)
last_qrs_slope = np.diff(last_qrs_segment)
# Should we be using absolute values?
if max(segment_slope) < 0.5*max(abs(last_qrs_slope)):
return True
else:
return False
def _update_noise(self, peak_num):
"""
Update live noise parameters.
Parameters
----------
peak_num : int
The peak number.
Returns
-------
N/A
"""
i = self.peak_inds_i[peak_num]
self.noise_amp_recent = (0.875*self.noise_amp_recent
+ 0.125*self.sig_i[i])
return
def _require_backsearch(self):
"""
Determine whether a backsearch should be performed on prior peaks.
Parameters
----------
N/A
Returns
-------
bool
Whether to require backsearch (True) or not (False).
"""
if self.peak_num == self.n_peaks_i-1:
# If we just return false, we may miss a chance to backsearch.
# Update this?
return False
next_peak_ind = self.peak_inds_i[self.peak_num + 1]
if next_peak_ind-self.last_qrs_ind > self.rr_recent*1.66:
return True
else:
return False
def _backsearch(self):
"""
Inspect previous peaks from the last detected QRS peak (if any),
using a lower threshold.
Parameters
----------
N/A
Returns
-------
N/A
"""
if self.last_qrs_peak_num is not None:
for peak_num in range(self.last_qrs_peak_num + 1, self.peak_num + 1):
if self._is_qrs(peak_num=peak_num, backsearch=True):
self._update_qrs(peak_num=peak_num, backsearch=True)
# No need to update noise parameters if it was classified as
# noise. It would have already been updated.
def _run_detection(self):
"""
Run the QRS detection after all signals and parameters have been
configured and set.
Parameters
----------
N/A
Returns
-------
N/A
"""
if self.verbose:
print('Running QRS detection...')
# Detected QRS indices
self.qrs_inds = []
# QRS indices found via backsearch
self.backsearch_qrs_inds = []
# Iterate through MWI signal peak indices
for self.peak_num in range(self.n_peaks_i):
if self._is_qrs(self.peak_num):
self._update_qrs(self.peak_num)
else:
self._update_noise(self.peak_num)
# Before continuing to the next peak, do backsearch if
# necessary
if self._require_backsearch():
self._backsearch()
# Detected indices are relative to starting sample
if self.qrs_inds:
self.qrs_inds = np.array(self.qrs_inds) + self.sampfrom
else:
self.qrs_inds = np.array(self.qrs_inds)
if self.verbose:
print('QRS detection complete.')
def detect(self, sampfrom=0, sampto='end', learn=True, verbose=True):
"""
Detect QRS locations between two samples.
Parameters
----------
sampfrom : int, optional
The starting sample number to run the detection on.
sampto : int, optional
The final sample number to run the detection on. Set as
'end' to run on the entire signal.
learn : bool, optional
Whether to apply learning on the signal before running the
main detection. If learning fails or is not conducted, the
default configuration parameters will be used to initialize
these variables. See the `XQRS._learn_init_params` docstring
for details.
verbose : bool, optional
Whether to display the stages and outcomes of the detection
process.
Returns
-------
N/A
"""
if sampfrom < 0:
raise ValueError("'sampfrom' cannot be negative")
self.sampfrom = sampfrom
if sampto == 'end':
sampto = self.sig_len
elif sampto > self.sig_len:
raise ValueError("'sampto' cannot exceed the signal length")
self.sampto = sampto
self.verbose = verbose
# Don't attempt to run on a flat signal
if np.max(self.sig) == np.min(self.sig):
self.qrs_inds = np.empty(0)
if self.verbose:
print('Flat signal. Detection skipped.')
return
# Get/set signal configuration fields from Conf object
self._set_conf()
# Bandpass filter the signal
self._bandpass()
# Compute moving wave integration of filtered signal
self._mwi()
# Initialize the running parameters
if learn:
self._learn_init_params()
else:
self._set_default_init_params()
# Run the detection
self._run_detection()
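# Usage sketch (illustrative; assumes the XQRS constructor takes `sig` and `fs`
# just as `xqrs_detect` below does, and the record path is hypothetical):
#     import wfdb
#     record = wfdb.rdrecord('sample-data/100', channels=[0])
#     xqrs = XQRS(sig=record.p_signal[:, 0], fs=record.fs)
#     xqrs.detect()
#     print(xqrs.qrs_inds[:5])  # sample indices of the detected QRS complexes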
def xqrs_detect(sig, fs, sampfrom=0, sampto='end', conf=None,
learn=True, verbose=True):
"""
Run the 'xqrs' QRS detection algorithm on a signal. See the
docstring of the XQRS class for algorithm details.
Parameters
----------
sig : ndarray
The input ECG signal to apply the QRS detection on.
fs : int, float
The sampling frequency of the input signal.
sampfrom : int, optional
The starting sample number to run the detection on.
sampto : int or 'end', optional
The final sample number to run the detection on. Set as 'end' to
run on the entire signal.
conf : XQRS.Conf object, optional
The configuration object specifying signal configuration
parameters. See the docstring of the XQRS.Conf class.
learn : bool, optional
Whether to apply learning on the signal before running the main
| |
r"""
Spheres smoothly embedded in Euclidean Space
Let `E^{n+1}` be a Euclidean space of dimension `n+1` and `c \in E^{n+1}`. An
`n`-sphere with radius `r` and centered at `c`, usually denoted by
`\mathbb{S}^n_r(c)`, smoothly embedded in the Euclidean space `E^{n+1}` is an
`n`-dimensional smooth manifold together with a smooth embedding
.. MATH::
\iota \colon \mathbb{S}^n_r \to E^{n+1}
whose image consists of all points having the same Euclidean distance to the
fixed point `c`. If we choose Cartesian coordinates `(x_1, \ldots, x_{n+1})` on
`E^{n+1}` with `x(c)=0` then the above translates to
.. MATH::
\iota(\mathbb{S}^n_r(c)) = \left\{ p \in E^{n+1} : \lVert x(p) \rVert = r \right\}.
This corresponds to the standard `n`-sphere of radius `r` centered at `c`.
AUTHORS:
- <NAME> (2020): initial version
REFERENCES:
- \<NAME>: *Geometry I&II* [Ber1987]_, [Ber1987a]_
- \<NAME>: *Introduction to Smooth Manifolds* [Lee2013]_
EXAMPLES:
We start by defining a 2-sphere of unspecified radius `r`::
sage: r = var('r')
sage: S2_r = manifolds.Sphere(2, radius=r); S2_r
2-sphere S^2_r of radius r smoothly embedded in the Euclidean space E^3
The embedding `\iota` is constructed from scratch and can be returned by the
following command::
sage: i = S2_r.embedding(); i
Differentiable map iota from the 2-sphere S^2_r of radius r smoothly
embedded in the Euclidean space E^3 to the Euclidean space E^3
sage: i.display()
iota: S^2_r --> E^3
on A: (theta, phi) |--> (x, y, z) = (r*cos(phi)*sin(theta),
r*sin(phi)*sin(theta),
r*cos(theta))
As a submanifold of a Riemannian manifold, namely the Euclidean space,
the 2-sphere admits an induced metric::
sage: h = S2_r.induced_metric()
sage: h.display()
gamma = r^2 dtheta*dtheta + r^2*sin(theta)^2 dphi*dphi
The induced metric is also known as the *first fundamental form* (see
:meth:`~sage.manifolds.differentiable.pseudo_riemannian_submanifold.PseudoRiemannianSubmanifold.first_fundamental_form`)::
sage: h is S2_r.first_fundamental_form()
True
The *second fundamental form* encodes the extrinsic curvature of the
2-sphere as hypersurface of Euclidean space (see
:meth:`~sage.manifolds.differentiable.pseudo_riemannian_submanifold.PseudoRiemannianSubmanifold.second_fundamental_form`)::
sage: K = S2_r.second_fundamental_form(); K
Field of symmetric bilinear forms K on the 2-sphere S^2_r of radius r
smoothly embedded in the Euclidean space E^3
sage: K.display()
K = r dtheta*dtheta + r*sin(theta)^2 dphi*dphi
One quantity that can be derived from the second fundamental form is the
Gaussian curvature::
sage: K = S2_r.gauss_curvature()
sage: K.display()
S^2_r --> R
on A: (theta, phi) |--> r^(-2)
As we have seen, spherical coordinates are initialized by default. To
initialize stereographic coordinates retrospectively, we can use the following
command::
sage: S2_r.stereographic_coordinates()
Chart (S^2_r-{NP}, (y1, y2))
To get all charts corresponding to stereographic coordinates, we can use the
:meth:`~sage.manifolds.differentiable.examples.sphere.Sphere.coordinate_charts`::
sage: stereoN, stereoS = S2_r.coordinate_charts('stereographic')
sage: stereoN, stereoS
(Chart (S^2_r-{NP}, (y1, y2)), Chart (S^2_r-{SP}, (yp1, yp2)))
.. SEEALSO::
See :meth:`~sage.manifolds.differentiable.examples.sphere.Sphere.stereographic_coordinates`
and :meth:`~sage.manifolds.differentiable.examples.sphere.Sphere.spherical_coordinates`
for details.
.. NOTE::
Notice that the derived quantities such as the embedding as well as the
first and second fundamental forms must be computed from scratch again
when new coordinates have been initialized. That makes the usage of
previously declared objects obsolete.
Consider now a 1-sphere with barycenter `(1,0)` in Cartesian coordinates::
sage: E2 = EuclideanSpace(2)
sage: c = E2.point((1,0), name='c')
sage: S1c.<chi> = E2.sphere(center=c); S1c
1-sphere S^1(c) of radius 1 smoothly embedded in the Euclidean plane
E^2 centered at the Point c
sage: S1c.spherical_coordinates()
Chart (A, (chi,))
Get stereographic coordinates::
sage: stereoN, stereoS = S1c.coordinate_charts('stereographic')
sage: stereoN, stereoS
(Chart (S^1(c)-{NP}, (y1,)), Chart (S^1(c)-{SP}, (yp1,)))
The embedding takes now the following form in all coordinates::
sage: S1c.embedding().display()
iota: S^1(c) --> E^2
on A: chi |--> (x, y) = (cos(chi) + 1, sin(chi))
on S^1(c)-{NP}: y1 |--> (x, y) = (2*y1/(y1^2 + 1) + 1, (y1^2 - 1)/(y1^2 + 1))
on S^1(c)-{SP}: yp1 |--> (x, y) = (2*yp1/(yp1^2 + 1) + 1, -(yp1^2 - 1)/(yp1^2 + 1))
Since the sphere is a hypersurface, we can get a normal vector field by using
``normal``::
sage: n = S1c.normal(); n
Vector field n along the 1-sphere S^1(c) of radius 1 smoothly embedded in
the Euclidean plane E^2 centered at the Point c with values on the
Euclidean plane E^2
sage: n.display()
n = -cos(chi) e_x - sin(chi) e_y
Notice that this is just *one* normal field with arbitrary direction,
in this particular case `n` points inwards whereas `-n` points outwards.
However, the vector field `n` is indeed non-vanishing and hence the sphere
admits an orientation (as all spheres do)::
sage: orient = S1c.orientation(); orient
[Coordinate frame (S^1(c)-{SP}, (d/dyp1)), Vector frame (S^1(c)-{NP}, (f_1))]
sage: f = orient[1]
sage: f[1].display()
f_1 = -d/dy1
Notice that the orientation is chosen in such a way that `(\iota_*(f_1), -n)`
is oriented in the ambient Euclidean space, i.e. the last entry is the normal
vector field pointing outwards. Hence, the manifold admits
a volume form::
sage: h = S1c.induced_metric()
sage: h.display()
gamma = dchi*dchi
sage: eps = h.volume_form()
sage: eps.display()
eps_gamma = -dchi
"""
from sage.manifolds.differentiable.pseudo_riemannian_submanifold import \
PseudoRiemannianSubmanifold
from sage.categories.metric_spaces import MetricSpaces
from sage.categories.manifolds import Manifolds
from sage.categories.topological_spaces import TopologicalSpaces
from sage.rings.real_mpfr import RR
from sage.manifolds.differentiable.examples.euclidean import EuclideanSpace
class Sphere(PseudoRiemannianSubmanifold):
r"""
Sphere smoothly embedded in Euclidean Space.
An `n`-sphere of radius `r` smoothly embedded in a Euclidean space `E^{n+1}`
is a smooth `n`-dimensional manifold smoothly embedded into `E^{n+1}`,
such that the embedding constitutes a standard `n`-sphere of radius `r`
in that Euclidean space (possibly shifted by a point).
INPUT:
- ``n`` -- positive integer representing dimension of the sphere
- ``radius`` -- (default: ``1``) positive number that states the radius
of the sphere
- ``name`` -- (default: ``None``) string; name (symbol) given to the
sphere; if ``None``, the name will be set according to the input
(see convention above)
- ``ambient_space`` -- (default: ``None``) Euclidean space in which the
sphere should be embedded; if ``None``, a new instance of Euclidean
space is created
- ``center`` -- (default: ``None``) the barycenter of the sphere as point of
the ambient Euclidean space; if ``None`` the barycenter is set to the
origin of the ambient space's standard Cartesian coordinates
- ``latex_name`` -- (default: ``None``) string; LaTeX symbol to
denote the space; if ``None``, it will be set according to the input
(see convention above)
- ``coordinates`` -- (default: ``'spherical'``) string describing the
type of coordinates to be initialized at the sphere's creation; allowed
values are
- ``'spherical'`` spherical coordinates (see
:meth:`~sage.manifolds.differentiable.examples.sphere.Sphere.spherical_coordinates`)
- ``'stereographic'`` stereographic coordinates given by the
stereographic projection (see
:meth:`~sage.manifolds.differentiable.examples.sphere.Sphere.stereographic_coordinates`)
- ``names`` -- (default: ``None``) must be a tuple containing
the coordinate symbols (this guarantees the shortcut operator
``<,>`` to function); if ``None``, the usual conventions are used (see
examples below for details)
- ``unique_tag`` -- (default: ``None``) tag used to force the construction
of a new object when all the other arguments have been used previously
(without ``unique_tag``, the
:class:`~sage.structure.unique_representation.UniqueRepresentation`
behavior inherited from
:class:`~sage.manifolds.differentiable.pseudo_riemannian.PseudoRiemannianManifold`
would return the previously constructed object corresponding to these
arguments)
EXAMPLES:
A 2-sphere embedded in Euclidean space::
sage: S2 = manifolds.Sphere(2); S2
2-sphere S^2 of radius 1 smoothly embedded in the Euclidean space E^3
sage: latex(S2)
\mathbb{S}^{2}
The ambient Euclidean space is constructed incidentally::
sage: S2.ambient()
Euclidean space E^3
Another call creates another sphere and hence another Euclidean space::
sage: S2 is manifolds.Sphere(2)
False
sage: S2.ambient() is manifolds.Sphere(2).ambient()
False
By default, the barycenter is set to the coordinate origin of the
standard Cartesian coordinates in the ambient Euclidean space::
sage: c = S2.center(); c
Point on the Euclidean space E^3
sage: c.coord()
(0, 0, 0)
Each `n`-sphere is a compact manifold and a complete metric space::
sage: S2.category()
Join of Category of compact topological spaces and Category of smooth
manifolds over Real Field with 53 bits of precision and Category of
connected manifolds over Real Field with 53 bits of precision and
Category of complete metric spaces
If not stated otherwise, each `n`-sphere is automatically endowed with
spherical coordinates::
sage: S2.atlas()
[Chart (A, (theta, phi))]
sage: S2.default_chart()
Chart (A, (theta, phi))
sage: spher = S2.spherical_coordinates()
sage: spher is S2.default_chart()
True
Notice that the spherical coordinates do not cover the whole sphere. To
cover the entire sphere with charts, use stereographic coordinates instead::
sage: stereoN, stereoS = S2.coordinate_charts('stereographic')
sage: stereoN, stereoS
(Chart (S^2-{NP}, (y1, y2)), Chart (S^2-{SP}, (yp1, yp2)))
sage: list(S2.open_covers())
[Set {S^2} of open subsets of the 2-sphere S^2 of radius 1 smoothly embedded in the Euclidean space E^3,
Set {S^2-{NP}, S^2-{SP}} of open subsets of the 2-sphere S^2 of
# -*- coding: utf-8 -*-
"""Utility functions for manipulating data
"""
# Author: <NAME> <<EMAIL>>
# Author: <NAME> <<EMAIL>>
# License: BSD 2 clause
from __future__ import division
from __future__ import print_function
from warnings import warn
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.utils import check_X_y
from sklearn.utils import check_consistent_length
from sklearn.utils import check_random_state
from sklearn.utils import column_or_1d
from .utility import check_parameter
from .utility import precision_n_scores
MAX_INT = np.iinfo(np.int32).max
def _generate_data(n_inliers, n_outliers, n_features, coef, offset,
random_state):
"""Internal function to generate data samples.
Parameters
----------
n_inliers : int
The number of inliers.
n_outliers : int
The number of outliers.
n_features : int
The number of features (dimensions).
coef : float in range [0,1)+0.001
The coefficient of data generation.
offset : int
Adjust the value range of Gaussian and Uniform.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : numpy array of shape (n_train, n_features)
Data.
y : numpy array of shape (n_train,)
Ground truth.
"""
inliers = coef * random_state.randn(n_inliers, n_features) + offset
outliers = random_state.uniform(low=-1 * offset, high=offset,
size=(n_outliers, n_features))
X = np.r_[inliers, outliers]
y = np.r_[np.zeros((n_inliers,)), np.ones((n_outliers,))]
return X, y
def get_outliers_inliers(X, y):
"""Internal method to separate inliers from outliers.
Parameters
----------
X : numpy array of shape (n_samples, n_features)
The input samples
y : list or array of shape (n_samples,)
The ground truth of input samples.
Returns
-------
X_outliers : numpy array of shape (n_samples, n_features)
Outliers.
X_inliers : numpy array of shape (n_samples, n_features)
Inliers.
"""
X_outliers = X[np.where(y == 1)]
X_inliers = X[np.where(y == 0)]
return X_outliers, X_inliers
def generate_data(n_train=1000, n_test=500, n_features=2, contamination=0.1,
train_only=False, offset=10, behaviour='old',
random_state=None):
"""Utility function to generate synthesized data.
Normal data is generated by a multivariate Gaussian distribution and
outliers are generated by a uniform distribution.
Parameters
----------
n_train : int, (default=1000)
The number of training points to generate.
n_test : int, (default=500)
The number of test points to generate.
n_features : int, optional (default=2)
The number of features (dimensions).
contamination : float in (0., 0.5), optional (default=0.1)
The amount of contamination of the data set, i.e.
the proportion of outliers in the data set. Used when fitting to
define the threshold on the decision function.
train_only : bool, optional (default=False)
If true, generate train data only.
offset : int, optional (default=10)
Adjust the value range of Gaussian and Uniform.
behaviour : str, default='old'
Behaviour of the returned datasets which can be either 'old' or
'new'. Passing ``behaviour='new'`` returns
"X_train, X_test, y_train, y_test", while passing ``behaviour='old'``
returns "X_train, y_train, X_test, y_test".
.. versionadded:: 0.7.0
``behaviour`` is added in 0.7.0 for back-compatibility purpose.
.. deprecated:: 0.7.0
``behaviour='old'`` is deprecated in 0.7.0 and will not be possible
in 0.7.2.
.. deprecated:: 0.7.2.
``behaviour`` parameter will be deprecated in 0.7.2 and removed in
0.7.4.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X_train : numpy array of shape (n_train, n_features)
Training data.
y_train : numpy array of shape (n_train,)
Training ground truth.
X_test : numpy array of shape (n_test, n_features)
Test data.
y_test : numpy array of shape (n_test,)
Test ground truth.
"""
# initialize a random state and seeds for the instance
random_state = check_random_state(random_state)
offset_ = random_state.randint(low=offset)
coef_ = random_state.random_sample() + 0.001 # in case of underflow
n_outliers_train = int(n_train * contamination)
n_inliers_train = int(n_train - n_outliers_train)
X_train, y_train = _generate_data(n_inliers_train, n_outliers_train,
n_features, coef_, offset_, random_state)
if train_only:
return X_train, y_train
n_outliers_test = int(n_test * contamination)
n_inliers_test = int(n_test - n_outliers_test)
X_test, y_test = _generate_data(n_inliers_test, n_outliers_test,
n_features, coef_, offset_, random_state)
if behaviour == 'old':
warn('behaviour="old" is deprecated and will be removed '
'in version 0.7.2. Please use behaviour="new", which '
'makes the returned datasets in the order of '
'X_train, X_test, y_train, y_test.',
FutureWarning)
return X_train, y_train, X_test, y_test
else:
return X_train, X_test, y_train, y_test
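# Usage sketch (illustrative):
#     X_train, X_test, y_train, y_test = generate_data(
#         n_train=200, n_test=100, n_features=2, contamination=0.1,
#         behaviour='new', random_state=42)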
def generate_contextual_data(n_train=1000, n_test=500, n_features=2, n_contexts=2, contamination=0.1,
train_only=False, offset=10, random_state=None):
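"""Utility function to generate synthesized data split across several contexts.
Each context gets its own Gaussian coefficient and offset, and the context id
is prepended as the first column of the returned X arrays.
"""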
assert n_train % n_contexts == 0, f"Cannot create {n_train} points with {n_contexts} contexts."
assert n_test % n_contexts == 0, f"Cannot create {n_test} points with {n_contexts} contexts."
# initialize a random state and seeds for the instance
random_state = check_random_state(random_state)
coefs_ = random_state.random_sample(n_contexts) + 0.001 # in case of underflow
offsets_ = random_state.choice(np.arange(offset), n_contexts, replace=False)
n_outliers_train = int(n_train * contamination / n_contexts)
n_inliers_train = int((n_train / n_contexts) - n_outliers_train)
n_outliers_test = int(n_test * contamination / n_contexts)
n_inliers_test = int((n_test / n_contexts) - n_outliers_test)
X_train, y_train, c_train, X_test, y_test, c_test = [[], [], [], [], [], []]
for i in range(n_contexts):
Xtrain, ytrain = _generate_data(n_inliers_train, n_outliers_train, n_features,
coefs_[i], offsets_[i], random_state)
Xtest, ytest = _generate_data(n_inliers_test, n_outliers_test, n_features,
coefs_[i], offsets_[i], random_state)
X_train.append(np.c_[np.full(n_inliers_train + n_outliers_train, i), Xtrain])
y_train.append(ytrain)
X_test.append(np.c_[np.full(n_inliers_test + n_outliers_test, i), Xtest])
y_test.append(ytest)
X_train = np.concatenate(X_train)
y_train = np.concatenate(y_train)
X_test = np.concatenate(X_test)
y_test = np.concatenate(y_test)
if train_only:
return X_train, y_train
return X_train, X_test, y_train, y_test
def get_color_codes(y):
"""Internal function to generate color codes for inliers and outliers.
Inliers (0): blue; Outlier (1): red.
Parameters
----------
y : list or numpy array of shape (n_samples,)
The ground truth. Binary (0: inliers, 1: outliers).
Returns
-------
c : numpy array of shape (n_samples,)
Color codes.
"""
y = column_or_1d(y)
# inliers are assigned blue
c = np.full([len(y)], 'b', dtype=str)
outliers_ind = np.where(y == 1)
# outlier are assigned red
c[outliers_ind] = 'r'
return c
def check_consistent_shape(X_train, y_train, X_test, y_test, y_train_pred,
y_test_pred):
Internal function to check that input data shapes are consistent.
Parameters
----------
X_train : numpy array of shape (n_samples, n_features)
The training samples.
y_train : list or array of shape (n_samples,)
The ground truth of training samples.
X_test : numpy array of shape (n_samples, n_features)
The test samples.
y_test : list or array of shape (n_samples,)
The ground truth of test samples.
y_train_pred : numpy array of shape (n_samples,)
The predicted binary labels of the training samples.
y_test_pred : numpy array of shape (n_samples,)
The predicted binary labels of the test samples.
Returns
-------
X_train : numpy array of shape (n_samples, n_features)
The training samples.
y_train : list or array of shape (n_samples,)
The ground truth of training samples.
X_test : numpy array of shape (n_samples, n_features)
The test samples.
y_test : list or array of shape (n_samples,)
The ground truth of test samples.
y_train_pred : numpy array of shape (n_samples,)
The predicted binary labels of the training samples.
y_test_pred : numpy array of shape (n_samples,)
The predicted binary labels of the test samples.
"""
# check input data shapes are consistent
X_train, y_train = check_X_y(X_train, y_train)
X_test, y_test = check_X_y(X_test, y_test)
y_test_pred = column_or_1d(y_test_pred)
y_train_pred = column_or_1d(y_train_pred)
check_consistent_length(y_train, y_train_pred)
check_consistent_length(y_test, y_test_pred)
if X_train.shape[1] != X_test.shape[1]:
raise ValueError("X_train {0} and X_test {1} have different number "
"of features.".format(X_train.shape, X_test.shape))
return X_train, y_train, X_test, y_test, y_train_pred, y_test_pred
def evaluate_print(clf_name, y, y_pred):
"""Utility function for evaluating and printing the results for examples.
Default metrics include ROC and Precision @ n
Parameters
----------
clf_name : str
The name of the detector.
y : list or numpy array of shape (n_samples,)
The ground truth. Binary (0: inliers, 1: outliers).
y_pred : list or numpy array of shape (n_samples,)
The raw outlier scores as returned by a fitted model.
"""
y = column_or_1d(y)
y_pred = column_or_1d(y_pred)
check_consistent_length(y, y_pred)
print('{clf_name} ROC:{roc}, precision @ rank n:{prn}'.format(
clf_name=clf_name,
roc=np.round(roc_auc_score(y, y_pred), decimals=4),
prn=np.round(precision_n_scores(y, y_pred), decimals=4)))
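# Example call (illustrative): evaluate_print('KNN', y_test, y_test_scores)
# might print "KNN ROC:0.9, precision @ rank n:0.8" for a well-performing detector.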
def generate_data_clusters(n_train=1000, n_test=500, n_clusters=2,
n_features=2, contamination=0.1, size='same',
density='same', dist=0.25, random_state=None,
return_in_clusters=False):
"""Utility function to generate synthesized data in clusters.
Generated data can involve the low density pattern problem and global
outliers which are considered as difficult tasks for outliers detection
algorithms.
Parameters
----------
n_train : int, (default=1000)
| |
"""
Process results
This script processes the results for the final report of SCOOP.
"""
# ==============================================================================
# Imports
# ==============================================================================
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib import rc
rc("font", **{"family": "serif", "serif": ["Times"]})
rc("text", usetex=True)
from glob import glob
import re
# ==============================================================================
# Constants
# ==============================================================================
ddir_lst = ["data/eu4dpfmix_mpr0.csv", "data/eu4dpfmix.csv"]
# ==============================================================================
# Functions
# ==============================================================================
def column_generator(data_frame):
"""
Create supplementary columns
"""
str_splt = data_frame["Cycle"].split("-")
veh_id = int(str_splt[0])
mpr = float(str_splt[2].split("_")[0])
flow = float(str_splt[3].split("_")[0])
distance = int(str_splt[4].split(".dri")[0])
return pd.Series([veh_id, mpr, flow, distance])
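# Illustrative 'Cycle' value (hypothetical): '7-xx-0.1_a-0.5_b-1000.dri' would be
# parsed as veh_id=7, mpr=0.1, flow=0.5, distance=1000.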
def create_columns(data_frame, function):
"""
Apply function to dataframe
"""
fields = ["veh_id", "mpr", "flow", "distance"]
data_frame[fields] = data_frame.apply(function, axis=1)
return data_frame
def refer_to_mpr(data_frame, field, new_field):
"""
Compute the percentage change of `field` relative to the MPR 0 % baseline
and store it in `new_field`.
"""
# Create reference data_frame
reference = data_frame[data_frame["mpr"].eq(0)]
reference = pd.concat([reference] * 5).reset_index()
reference = reference.drop("index", axis=1)
# Compute difference
diff_df = reference[field] - data_frame[field]
# diff_df = diff_df.reset_index()
data_frame[new_field] = (diff_df.divide(reference[field])) * 100
# Round for results
data_frame = data_frame.round(3)
return data_frame
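# In other words: new_field = 100 * (reference - value) / reference, computed row-wise
# against the MPR 0 % rows. For example, a reference of 150 g/km and a value of
# 120 g/km gives a 20 % reduction.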
def plot_var(
data_frame,
x_var="flow",
y_var="CO_TP",
label_var="mpr",
pivot="distance",
x_label="Flow [veh/m]",
y_label="CO2 %",
t_label="Distance [m]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
x_size=5,
y_size=7.5,
transpose=False,
):
"""
Plot variables
"""
pivoter = data_frame[pivot].unique()
N = len(pivoter)
if transpose:
n, m = 1, N
else:
m, n = 1, N
fig, axes = plt.subplots(m, n, figsize=(x_size * N, y_size), sharey=True)
for pvt, ax in zip(pivoter, axes):
flt = data_frame[pivot].eq(pvt)
df = data_frame[flt]
df.pivot_table(
index=x_var, columns=label_var, values=y_var, aggfunc="mean"
).plot(kind="bar", ax=ax, grid=True)
ax.set_xlabel(x_label, fontdict=fnt_size)
ax.set_ylabel(y_label, fontdict=fnt_size)
ax.set_title(t_label + str(pvt), fontdict=fnt_size)
ax.legend(legends)
return fig, axes
def plot_co2perc(data_frame):
"""
Plot CO2 % change vs flow
"""
figco2, axco2 = plot_var(
data_frame=data_frame,
x_var="flow",
y_var="CO2 %",
label_var="mpr",
pivot="distance",
x_label="Flow [veh/m]",
y_label=r"Change in CO$_2$ [\%]",
t_label="Distance [m]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
)
return figco2, axco2
def plot_co2(data_frame):
"""
Plot CO2 consumption vs flow
"""
figco2, axco2 = plot_var(
data_frame=data_frame,
x_var="flow",
y_var="CO2_TP",
label_var="mpr",
pivot="distance",
x_label="Flow [veh/m]",
y_label="CO$_2$ [g/km]",
t_label="Distance [m]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
)
for ax in axco2:
ax.set(ylim=(120, 160))
return figco2, axco2
def plot_ttt(data_frame):
"""
Plot absolute Total Travel Time vs flow
"""
figtt, axtt = plot_var(
data_frame=data_frame,
x_var="flow",
y_var="totalTT",
label_var="mpr",
pivot="distance",
x_label="Flow [veh/m]",
y_label="Total Travel Time [s]",
t_label="Distance [m]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
)
return figtt, axtt
def plot_tttprc(data_frame):
"""
Plot Change Total Travel Time % vs flow
"""
figtt, axtt = plot_var(
data_frame=data_frame,
x_var="flow",
y_var="totTT %",
label_var="mpr",
pivot="distance",
x_label="Flow [veh/m]",
y_label=r"Change in Total TT [\%]",
t_label="Distance [m]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
)
return figtt, axtt
def plot_tttd(data_frame):
"""
Plot Absolute Total Travel Time vs distance
"""
figtt, axtt = plot_var(
data_frame=data_frame,
x_var="distance",
y_var="totalTT",
label_var="mpr",
pivot="flow",
x_label="Distance [m]",
y_label="Total Travel Time [s]",
t_label="Flow [veh/h]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
)
return figtt, axtt
def plot_tttdprc(data_frame):
"""
Plot Change Total Travel Time % vs distance
"""
figtt, axtt = plot_var(
data_frame=data_frame,
x_var="distance",
y_var="totTT %",
label_var="mpr",
pivot="flow",
x_label="Distance [m]",
y_label=r"Change in Total TT [\%]",
t_label="Flow [veh/h]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
)
return figtt, axtt
def plot_mtt(data_frame):
"""
Plot absolute Avg Travel Time vs flow
"""
figtt, axtt = plot_var(
data_frame=data_frame,
x_var="flow",
y_var="meanTT",
label_var="mpr",
pivot="distance",
x_label="Flow [veh/m]",
y_label="Avg. Travel Time [s]",
t_label="Distance [m]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
)
return figtt, axtt
def plot_mttperc(data_frame):
"""
Plot Change Avg Travel Time % vs flow
"""
figtt, axtt = plot_var(
data_frame=data_frame,
x_var="flow",
y_var="avgTT %",
label_var="mpr",
pivot="distance",
x_label="Flow [veh/m]",
y_label=r"Change in Avg. TT [\%]",
t_label="Distance [m]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
)
return figtt, axtt
def plot_mttd(data_frame):
"""
Plot Absolute Total Travel Time vs distance
"""
figtt, axtt = plot_var(
data_frame=data_frame,
x_var="distance",
y_var="meanTT",
label_var="mpr",
pivot="flow",
x_label="Distance [m]",
y_label="Average Travel Time [s]",
t_label="Flow [veh/h]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
)
return figtt, axtt
def plot_mttdprc(data_frame):
"""
Plot Change Total Travel Time % vs distance
"""
figtt, axtt = plot_var(
data_frame=data_frame,
x_var="distance",
y_var="avgTT %",
label_var="mpr",
pivot="flow",
x_label="Distance [m]",
y_label=r"Change in Avg. TT [\%]",
t_label="Flow [veh/h]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
)
return figtt, axtt
def plot_ttc(data_frame):
"""
Plot time to collision vs flow
"""
figtt, axtt = plot_var(
data_frame=data_frame,
x_var="flow",
y_var="timetC",
label_var="mpr",
pivot="distance",
x_label="Flow [veh/m]",
y_label="Time To Collision [s]",
t_label="Distance [m]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
)
return figtt, axtt
def plot_ttcprc(data_frame):
"""
Plot Change Total Travel Time % vs flow
"""
figtt, axtt = plot_var(
data_frame=data_frame,
x_var="flow",
y_var="timeTC %",
label_var="mpr",
pivot="distance",
x_label="Flow [veh/m]",
y_label=r"Change in Time to Collision [\%]",
t_label="Distance [m]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
)
return figtt, axtt
def plot_ttcd(data_frame):
"""
Plot Absolute Total Travel Time vs distance
"""
figtt, axtt = plot_var(
data_frame=data_frame,
x_var="distance",
y_var="timetC",
label_var="mpr",
pivot="flow",
x_label="Distance [m]",
y_label="Time To Collision [s]",
t_label="Flow [veh/h]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
)
return figtt, axtt
def plot_ttcdprc(data_frame):
"""
Plot Change Total Travel Time % vs distance
"""
figtt, axtt = plot_var(
data_frame=data_frame,
x_var="distance",
y_var="timeTC %",
label_var="mpr",
pivot="flow",
x_label="Distance [m]",
y_label=r"Change in Time to Collision [\%]",
t_label="Flow [veh/h]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
)
return figtt, axtt
def plot_hwy(data_frame):
"""
Plot spacing vs time
"""
fighwy, axhwy = plot_var(
data_frame=data_frame,
x_var="time",
y_var="hwy",
label_var="mpr",
pivot="flow",
x_label=" Time [hh:mm]",
y_label="Headway space [m]",
t_label="Flow [veh/h]",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
x_size=7.5,
transpose=True,
)
return fighwy, axhwy
# ==============================================================================
# Processing
# ==============================================================================
# CO2
# ==============================================================================
# Import csv files
dflst = [pd.read_csv(file) for file in ddir_lst]
fltstr = "PC_EU4_D_DPFMix_HBEFA41.gen"
dflst = [df[df["Input File"].eq(fltstr)] for df in dflst]
# Combine data + column selection
sel_cols = ["Input File", "Cycle", "CO2_TP"]
co2_df = pd.concat([x.filter(items=sel_cols) for x in dflst])
co2_df = co2_df.reset_index()
# Create supplementary columns
co2_df = create_columns(co2_df, column_generator)
co2_df = co2_df.filter(items=["CO2_TP", "veh_id", "mpr", "flow", "distance"])
# Replace values
co2_df["mpr"] = co2_df["mpr"] * 100 # Flow
co2_df["flow"] = co2_df["flow"] * 2880
# Refer data to MPR 0%
co2prc_df = refer_to_mpr(co2_df, "CO2_TP", "CO2 %")
# Plot CO 2 % vs Flow
figco2, axco2 = plot_co2(co2prc_df)
plt.savefig("data/img/summary/CO2vsFlow.png")
figco2prc, axco2prc = plot_co2perc(co2prc_df)
plt.savefig("data/img/summary/CO2%vsFlow.png")
# plt.show()
# Travel Time
# ==============================================================================
# Import csv files
tt_df = pd.read_csv(
"data/Indicators.csv",
names=["mpr", "flow", "distance", "meanTT", "stdTT", "totalTT", "timetC"],
)
# Replace values
tt_df = tt_df.drop_duplicates()
tt_df["flow"] = tt_df["flow"] * 3600
# Refer to data MPR 0%
avgtt_df = refer_to_mpr(tt_df, "meanTT", "avgTT %")
tottt_df = refer_to_mpr(tt_df, "totalTT", "totTT %")
timtc_df = refer_to_mpr(tt_df, "timetC", "timeTC %")
# Average Travel Time
# ==============================================================================
# Plot Avg TT vs Flow
figmtt, axmtt = plot_mtt(avgtt_df)
plt.savefig("data/img/summary/avgTTvsFlow.png")
# Plot Avg TT % Change vs Flow
figmttprc, axmttprc = plot_mttperc(avgtt_df)
plt.savefig("data/img/summary/avgTT%vsFlow.png")
# Plot Avg TT vs distance
figmttd, axmttd = plot_mttd(avgtt_df)
plt.savefig("data/img/summary/avgTTvsDistance.png")
# Plot Avg TT % Change vs distance
figmttdprc, axmttdprc = plot_mttdprc(avgtt_df)
plt.savefig("data/img/summary/avgTT%vsDistance.png")
# Total Travel Time
# ==============================================================================
# Plot total TT vs Flow
figttt, axttt = plot_ttt(tottt_df)
plt.savefig("data/img/summary/totalTTvsFlow.png")
# Plot total TT % Change vs Flow
figtttprc, axtttprc = plot_tttprc(tottt_df)
plt.savefig("data/img/summary/totalTT%vsFlow.png")
# Plot total TT vs distance
figtttd, axtttd = plot_tttd(tottt_df)
plt.savefig("data/img/summary/totalTTvsDistance.png")
# Plot total TT % Change vs distance
figtttdprc, axtttdprc = plot_tttdprc(tottt_df)
plt.savefig("data/img/summary/totalTT%vsDistance.png")
# Time to Collision
# ==============================================================================
# Plot total TT vs Flow
figttc, axttc = plot_ttc(timtc_df)
plt.savefig("data/img/summary/timeTCvsFlow.png")
# Plot total TT % Change vs Flow
figttcprc, axttcprc = plot_ttcprc(timtc_df)
plt.savefig("data/img/summary/timeTC%vsFlow.png")
# Plot total TT vs distance
figttcd, axttcd = plot_ttcd(timtc_df)
plt.savefig("data/img/summary/timeTCvsDistance.png")
# Plot total TT % Change vs distance
figttcdprc, axttcdprc = plot_ttcdprc(timtc_df)
plt.savefig("data/img/summary/timeTC%vsDistance.png")
# Headway space
# ==============================================================================
files = glob("data/csv/spacing_*.csv")
df_list = []
# Pattern to recover key-value tokens such as 'mpr-0.1' from the file name
pattern = re.compile(r"[a-z]*-\d*[.]?\d*")
for file in files:
tmp = pd.read_csv(file, names=["time", "hwy"])
lst_raw = pattern.findall(file.split(".csv")[0])
dct_prop = {
prop.split("-")[0]: float(prop.split("-")[1]) for prop in lst_raw
}
tmp["mpr"] = dct_prop["mpr"]
tmp["flow"] = dct_prop["q"]
tmp["distance"] = dct_prop["d"]
df_list.append(tmp)
# Combine everything
df_hwy = pd.concat(df_list)
df_hwy_d = []
plt_hwyd = []
# Collect headway vs time data for each distance (at flow == 1) for plotting
for dst in df_hwy.distance.unique():
df2plot = df_hwy[df_hwy.distance.eq(dst) & df_hwy.flow.eq(1)]
df_hwy_d.append(df2plot)
# Manual pivoting for simplicity
# Case 1
case_a = df_hwy_d[0]
df_a = case_a.pivot_table(index="time",columns="mpr",values="hwy",aggfunc="mean").reset_index()
fig, ax = plt.subplots(figsize = (10,7.5))
df_a.plot(ax=ax, grid = True)
ax.set_xlabel("Time [s]",fontdict={"fontsize": 16})
ax.set_ylabel("Avg. Headway Space [m]",fontdict={"fontsize": | |
# Repository: yamizi/taskaugment
### This file has been moved to eval.py; do not use it anymore.
import models.drn as drn
from models.DRNSeg import DRNSeg
from models.FCN32s import FCN32s
import data_transforms as transforms
import json
import math
import os
from os.path import exists, join, split
import threading
import time, datetime
import numpy as np
import shutil
import sys
from PIL import Image
import torch
from torch import nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torchvision import datasets
from torch.autograd import Variable
import logging
from learning.dataloader import SegList, SegListMS, get_info,get_loader
from learning.utils_learn import *
from learning.attack import PGD_masked_attack_mtask_city, PGD_drnseg_masked_attack_city
import data_transforms as transforms
FORMAT = "[%(asctime)-15s %(filename)s:%(lineno)d %(funcName)s] %(message)s"
logging.basicConfig(format=FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def test_seg(args):
batch_size = args.batch_size
num_workers = args.workers
phase = args.phase
for k, v in args.__dict__.items():
print(k, ':', v)
# model specific
model_arch = args.arch
task_set_present = hasattr(args, 'task_set')
# if (model_arch.startswith('drn')):
# if task_set_present:
# from models.DRNSegDepth import DRNSegDepth
# print("LENGTH OF TASK SET IN CONFIG>1 => LOADING DRNSEGDEPTH model for multitask, to load DRNSEG, remove the task_set from config args.")
# single_model = DRNSegDepth(args.arch,
# classes=19,
# pretrained_model=None,
# pretrained=False,
# tasks=args.task_set)
# else:
# single_model = DRNSeg(args.arch, args.classes, pretrained_model=None,
# pretrained=False)
# elif (model_arch.startswith('fcn32')):
# # define the architecture for FCN.
# single_model = FCN32s(args.classes)
# else:
single_model = DRNSeg(args.arch, args.classes, pretrained_model=None,
pretrained=False) # Replace with some other model
print("Architecture unidentifiable, please choose between : fcn32s, dnn_")
if args.pretrained:
print('args.pretrained', args.pretrained)
single_model.load_state_dict(torch.load(args.pretrained))
model = torch.nn.DataParallel(single_model)
if torch.cuda.is_available():
model.cuda()
data_dir = args.data_dir
# info = json.load(open(join(data_dir, 'info.json'), 'r'))
# normalize = transforms.Normalize(mean=info['mean'], std=info['std'])
# scales = [0.5, 0.75, 1.25, 1.5, 1.75]
# if args.ms:
# dataset = SegListMS(data_dir, phase, transforms.Compose([
# transforms.ToTensor(),
# normalize,
# ]), scales, list_dir=args.list_dir)
# else:
#
# dataset = SegList(data_dir, phase, transforms.Compose([
# transforms.ToTensor(),
# normalize,
# ]), list_dir=args.list_dir, out_name=True)
# test_loader = torch.utils.data.DataLoader(
# dataset,
# batch_size=batch_size, shuffle=False, num_workers=num_workers,
# pin_memory=False
# )
test_loader = get_loader(args, phase,out_name=True)
info = get_info(args.dataset)
cudnn.benchmark = True
# Backup files before resuming/starting training
backup_output_dir = args.backup_output_dir
os.makedirs(backup_output_dir, exist_ok=True)
if os.path.exists(backup_output_dir):
timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H:%M:%S')
experiment_backup_folder = "test_" + args.arch + "_" + args.dataset + "_" + timestamp
experiment_backup_folder = os.path.join(backup_output_dir, experiment_backup_folder)
os.makedirs(experiment_backup_folder)
print(experiment_backup_folder)
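# Illustrative folder name (values are hypothetical): with arch 'drn_d_22', dataset
# 'cityscapes' and backup_output_dir 'backup/', this becomes something like
# 'backup/test_drn_d_22_cityscapes_2021-01-01_12:00:00'.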
fh = logging.FileHandler(experiment_backup_folder + '/log.txt')
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
# optionally resume from a checkpoint
start_epoch = 0
if args.resume:
if os.path.isfile(args.resume):
logger.info("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
logger.info("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
logger.info("=> no checkpoint found at '{}'".format(args.resume))
# Make sure the name of the dataset and model are included in the output file.
out_dir = 'output/{}_{:03d}_{}'.format(args.arch, start_epoch, phase)
if len(args.test_suffix) > 0:
out_dir += '_' + args.test_suffix
if args.ms:
out_dir += '_ms'
if args.adv_test:
from learning.validate import validate_adv_test
mAP = validate_adv_test(test_loader, model, args.classes, save_vis=True,
has_gt=True, output_dir=out_dir, downsize_scale=args.downsize_scale,
args=args, info=info)
    elif args.ms:
        # multi-scale evaluation; these scales match the commented-out dataset set-up above
        scales = [0.5, 0.75, 1.25, 1.5, 1.75]
        mAP = test_ms(test_loader, model, args.classes, save_vis=True,
                      has_gt=phase != 'test' or args.with_gt,
                      output_dir=out_dir,
                      scales=scales)
    else:
        mAP = None
        if args.test_acc_output_dim:
            test_drnseg_masked_attack(test_loader, model, args.classes, save_vis=True,
                                      has_gt=phase != 'test' or args.with_gt, output_dir=out_dir,
                                      downsize_scale=args.downsize_scale,
                                      args=args)
            # test_masked_accuracy_outdim(test_loader, model, args.classes, save_vis=True,
            #                             has_gt=phase != 'test' or args.with_gt, output_dir=out_dir,
            #                             downsize_scale=args.downsize_scale,
            #                             args=args)
        else:
            mAP = test_grad_diffoutdim(test_loader, model, args.classes, save_vis=True,
                                       has_gt=phase != 'test' or args.with_gt, output_dir=out_dir,
                                       downsize_scale=args.downsize_scale,
                                       args=args)
    if mAP is not None:
        logger.info('mAP: %f', mAP)
def test_ms(eval_data_loader, model, num_classes, scales,
output_dir='pred', has_gt=True, save_vis=False):
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
end = time.time()
hist = np.zeros((num_classes, num_classes))
num_scales = len(scales)
for iter, input_data in enumerate(eval_data_loader):
data_time.update(time.time() - end)
if has_gt:
name = input_data[2]
label = input_data[1]
else:
name = input_data[1]
h, w = input_data[0].size()[2:4]
images = [input_data[0]]
images.extend(input_data[-num_scales:])
# pdb.set_trace()
outputs = []
for image in images:
image_var = Variable(image, requires_grad=False, volatile=True)
final = model(image_var)[0]
outputs.append(final.data)
final = sum([resize_4d_tensor(out, w, h) for out in outputs])
# _, pred = torch.max(torch.from_numpy(final), 1)
# pred = pred.cpu().numpy()
pred = final.argmax(axis=1)
batch_time.update(time.time() - end)
if save_vis:
save_output_images(pred, name, output_dir)
save_colorful_images(pred, name, output_dir + '_color',
CITYSCAPE_PALETTE)
if has_gt:
label = label.numpy()
hist += fast_hist(pred.flatten(), label.flatten(), num_classes)
logger.info('===> mAP {mAP:.3f}'.format(
mAP=round(np.nanmean(per_class_iu(hist)) * 100, 2)))
end = time.time()
logger.info('Eval: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
.format(iter, len(eval_data_loader), batch_time=batch_time,
data_time=data_time))
if has_gt: #val
ious = per_class_iu(hist) * 100
logger.info(' '.join('{:.03f}'.format(i) for i in ious))
return round(np.nanmean(ious), 2)
def test_grad_diffoutdim(eval_data_loader, model, num_classes,
output_dir='pred', has_gt=True, save_vis=False, downsize_scale=1, args=None):
"""
Evaluates the effect of increasing output dimension on the norm of the gradient.
Monte Carlo sampling will be used and the result would be averaged.
First choose the number of pixels to calculate the loss for (output dimension) --> select_num.
For each select_num, we do the following MC_times(as Monte Carlo sampling):
Calculate the loss for select_num pixels chosen, backpropagate and get the input gradient.
Average all these.
:param eval_data_loader:
:param model:
:param num_classes:
:param output_dir:
:param has_gt:
:param save_vis:
:param downsize_scale:
:param args:
:return:
"""
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
end = time.time()
hist = np.zeros((num_classes, num_classes))
# exit(0)
if torch.cuda.is_available():
GPU_flag = True
else:
GPU_flag = False
# Number of points to be selected for masking - analogous to number of output dimensions. Only these many pixels will be considered to calculate the loss.
select_num_list = [1] + [i * 4 for i in range(1, 100)] + [400 + i*200 for i in range(100)]
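    # i.e. select_num takes the values 1, 4, 8, ..., 396, 400, 600, ..., 20200 (200 values in total)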
result_list = []
for select_num in select_num_list:
print("********")
print("selecting {} of output".format(select_num))
grad_sample_avg_sum = 0
        if select_num < 400:
            MCtimes = 20
        else:
            MCtimes = 5
        # NOTE: the override below reduces the Monte Carlo averaging to a single sample,
        # making the if/else above ineffective; remove it to restore the full averaging.
        MCtimes = 1
# Monte Carlo Sampling - MCTimes is the number of times that we sample
for inner_i in range(MCtimes):
grad_sum = 0
cnt = 0
print("MC time {}".format(inner_i))
for iter, (image, label, name) in enumerate(eval_data_loader):
                # stop early: after 2 batches in debug mode, or after 200 batches otherwise
                if cnt > 1 and args.debug:
                    break
                elif cnt > 200:
                    break
data_time.update(time.time() - end)
if torch.cuda.is_available():
image_var = Variable(image.cuda(), requires_grad=True)
else:
image_var = Variable(image, requires_grad=True)
# print("__shape of image var__", image_var.shape) # [1,3,1024,2048]
final = model(image_var)[0]
# print("__shape of final__", final.shape) # [1, 19, 1024,2048]
_, pred = torch.max(final, 1)
# print("__shape of pred__", pred.shape) # [1,1024,2048]
# for this image, sample select_num number of pixels
                temp = list(range(image_var.size(2) * image_var.size(3)))
                selected = random.sample(temp, select_num)
                # Build a binary mask over the image pixels: 1 at the selected pixels, 0 elsewhere
                mask = np.zeros(image_var.size(2) * image_var.size(3), dtype=np.uint8)
                mask[selected] = 1
                mask = mask.reshape(1, 1, image_var.size(2), image_var.size(3))
                mask = torch.from_numpy(mask)
                mask = mask.float()
mask_target = mask.long()
# print('label', label)
label = label.long()
if GPU_flag:
# image.cuda()
# image_var.cuda() # BUG: too late
mask = mask.cuda()
mask_target = mask_target.cuda()
label = label.cuda()
target, mask = Variable(label), Variable(mask)
loss = cross_entropy2d(final * mask, target * mask_target, size_average=False)
loss.backward()
data_grad = image_var.grad
np_data_grad = data_grad.cpu().numpy()
# print(np_data_grad.shape)
                L2_grad_norm = np.linalg.norm(np_data_grad) / select_num  # ||dLoss/dx||_2 / M, i.e. the input-gradient norm per selected output pixel
grad_sum += L2_grad_norm
# increment the batch # counter
cnt += 1
pred = pred.cpu().data.numpy()
batch_time.update(time.time() - end)
end = time.time()
            grad_avg = grad_sum / cnt  # average gradient norm over the processed batches; cnt counts batches, not samples
            grad_sample_avg_sum += grad_avg  # accumulate per-MC-sample averages; divided by MCtimes below
grad_sample_avg_sum /= MCtimes
result_list.append(grad_sample_avg_sum)
print(select_num, 'middle result', result_list)
np.save('{}_{}_graph_more.npy'.format(args.dataset, args.arch), result_list)
print('Final', result_list)
np.save('{}_{}_graph_more.npy'.format(args.dataset, args.arch), result_list)
    # NOTE: hist is never filled in this function (there is no fast_hist call), so the IoU
    # reported below is not meaningful here; it is kept for interface compatibility.
    if has_gt:  # val
        ious = per_class_iu(hist) * 100
        logger.info(' '.join('{:.03f}'.format(i) for i in ious))
        return round(np.nanmean(ious), 2)
def test_drnseg_masked_attack(eval_data_loader, model, num_classes,
output_dir='pred', has_gt=True, save_vis=False, downsize_scale=1, args=None):
"""
Evaluates the effect of increasing output dimension on the norm of the gradient.
Monte Carlo sampling will be used and the result would be averaged.
First choose the number of pixels to calculate the loss for (output dimension) --> select_num.
For each select_num, we do the following MC_times(as Monte Carlo sampling):
Calculate the loss for select_num pixels chosen, backpropagate and get the input gradient.
Average all these.
:param eval_data_loader:
:param model:
:param num_classes:
:param output_dir:
:param has_gt:
:param save_vis:
:param downsize_scale:
:param args:
:return:
"""
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
end = time.time()
# hist = np.zeros((num_classes, num_classes))
# exit(0)
if torch.cuda.is_available():
GPU_flag = True
else:
GPU_flag = False
    # Number of points to be selected for masking - analogous to the number of output
    # dimensions; only these pixels contribute to the loss.
def AKISetSectionUserDefinedCost2(*args):
return _AAPI.AKISetSectionUserDefinedCost2(*args)
AKISetSectionUserDefinedCost2 = _AAPI.AKISetSectionUserDefinedCost2
def AKISetSectionUserDefinedCost3(*args):
return _AAPI.AKISetSectionUserDefinedCost3(*args)
AKISetSectionUserDefinedCost3 = _AAPI.AKISetSectionUserDefinedCost3
def AKIGetSectionCapacity(*args):
return _AAPI.AKIGetSectionCapacity(*args)
AKIGetSectionCapacity = _AAPI.AKIGetSectionCapacity
def AKIGetSectionUserDefinedCost(*args):
return _AAPI.AKIGetSectionUserDefinedCost(*args)
AKIGetSectionUserDefinedCost = _AAPI.AKIGetSectionUserDefinedCost
def AKIGetSectionUserDefinedCost2(*args):
return _AAPI.AKIGetSectionUserDefinedCost2(*args)
AKIGetSectionUserDefinedCost2 = _AAPI.AKIGetSectionUserDefinedCost2
def AKIGetSectionUserDefinedCost3(*args):
return _AAPI.AKIGetSectionUserDefinedCost3(*args)
AKIGetSectionUserDefinedCost3 = _AAPI.AKIGetSectionUserDefinedCost3
def AKIInfNetNbJunctions():
return _AAPI.AKIInfNetNbJunctions()
AKIInfNetNbJunctions = _AAPI.AKIInfNetNbJunctions
def AKIInfNetGetJunctionId(*args):
return _AAPI.AKIInfNetGetJunctionId(*args)
AKIInfNetGetJunctionId = _AAPI.AKIInfNetGetJunctionId
def AKIInfNetNbCentroids():
return _AAPI.AKIInfNetNbCentroids()
AKIInfNetNbCentroids = _AAPI.AKIInfNetNbCentroids
def AKIInfNetGetCentroidId(*args):
return _AAPI.AKIInfNetGetCentroidId(*args)
AKIInfNetGetCentroidId = _AAPI.AKIInfNetGetCentroidId
def AKIInfNetGetCentroidInf(*args):
return _AAPI.AKIInfNetGetCentroidInf(*args)
AKIInfNetGetCentroidInf = _AAPI.AKIInfNetGetCentroidInf
def AKIInfNetGetIdObjectofOriginCentroidConnector(*args):
return _AAPI.AKIInfNetGetIdObjectofOriginCentroidConnector(*args)
AKIInfNetGetIdObjectofOriginCentroidConnector = _AAPI.AKIInfNetGetIdObjectofOriginCentroidConnector
def AKIInfNetGetIdObjectofDestinationCentroidConnector(*args):
return _AAPI.AKIInfNetGetIdObjectofDestinationCentroidConnector(*args)
AKIInfNetGetIdObjectofDestinationCentroidConnector = _AAPI.AKIInfNetGetIdObjectofDestinationCentroidConnector
def AKIInfNetGetIdObjectANGofOriginCentroidConnector(*args):
return _AAPI.AKIInfNetGetIdObjectANGofOriginCentroidConnector(*args)
AKIInfNetGetIdObjectANGofOriginCentroidConnector = _AAPI.AKIInfNetGetIdObjectANGofOriginCentroidConnector
def AKIInfNetGetIdObjectANGofDestinationCentroidConnector(*args):
return _AAPI.AKIInfNetGetIdObjectANGofDestinationCentroidConnector(*args)
AKIInfNetGetIdObjectANGofDestinationCentroidConnector = _AAPI.AKIInfNetGetIdObjectANGofDestinationCentroidConnector
def AKIInfNetGetShortestPathNbSections(*args):
return _AAPI.AKIInfNetGetShortestPathNbSections(*args)
AKIInfNetGetShortestPathNbSections = _AAPI.AKIInfNetGetShortestPathNbSections
def AKIInfNetGetShortestPath(*args):
return _AAPI.AKIInfNetGetShortestPath(*args)
AKIInfNetGetShortestPath = _AAPI.AKIInfNetGetShortestPath
def AKIInfNetGetNetworkPathA():
return _AAPI.AKIInfNetGetNetworkPathA()
AKIInfNetGetNetworkPathA = _AAPI.AKIInfNetGetNetworkPathA
def AKIInfNetGetNetworkNameA():
return _AAPI.AKIInfNetGetNetworkNameA()
AKIInfNetGetNetworkNameA = _AAPI.AKIInfNetGetNetworkNameA
def AKIInfNetGetTrafficDemandNameA():
return _AAPI.AKIInfNetGetTrafficDemandNameA()
AKIInfNetGetTrafficDemandNameA = _AAPI.AKIInfNetGetTrafficDemandNameA
def AKIInfNetGetTrafficDemandType():
return _AAPI.AKIInfNetGetTrafficDemandType()
AKIInfNetGetTrafficDemandType = _AAPI.AKIInfNetGetTrafficDemandType
class StructAkiEstadSystem(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, StructAkiEstadSystem, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, StructAkiEstadSystem, name)
__repr__ = _swig_repr
__swig_setmethods__["report"] = _AAPI.StructAkiEstadSystem_report_set
__swig_getmethods__["report"] = _AAPI.StructAkiEstadSystem_report_get
if _newclass:report = _swig_property(_AAPI.StructAkiEstadSystem_report_get, _AAPI.StructAkiEstadSystem_report_set)
__swig_setmethods__["Flow"] = _AAPI.StructAkiEstadSystem_Flow_set
__swig_getmethods__["Flow"] = _AAPI.StructAkiEstadSystem_Flow_get
if _newclass:Flow = _swig_property(_AAPI.StructAkiEstadSystem_Flow_get, _AAPI.StructAkiEstadSystem_Flow_set)
__swig_setmethods__["TTa"] = _AAPI.StructAkiEstadSystem_TTa_set
__swig_getmethods__["TTa"] = _AAPI.StructAkiEstadSystem_TTa_get
if _newclass:TTa = _swig_property(_AAPI.StructAkiEstadSystem_TTa_get, _AAPI.StructAkiEstadSystem_TTa_set)
__swig_setmethods__["TTd"] = _AAPI.StructAkiEstadSystem_TTd_set
__swig_getmethods__["TTd"] = _AAPI.StructAkiEstadSystem_TTd_get
if _newclass:TTd = _swig_property(_AAPI.StructAkiEstadSystem_TTd_get, _AAPI.StructAkiEstadSystem_TTd_set)
__swig_setmethods__["DTa"] = _AAPI.StructAkiEstadSystem_DTa_set
__swig_getmethods__["DTa"] = _AAPI.StructAkiEstadSystem_DTa_get
if _newclass:DTa = _swig_property(_AAPI.StructAkiEstadSystem_DTa_get, _AAPI.StructAkiEstadSystem_DTa_set)
__swig_setmethods__["DTd"] = _AAPI.StructAkiEstadSystem_DTd_set
__swig_getmethods__["DTd"] = _AAPI.StructAkiEstadSystem_DTd_get
if _newclass:DTd = _swig_property(_AAPI.StructAkiEstadSystem_DTd_get, _AAPI.StructAkiEstadSystem_DTd_set)
__swig_setmethods__["Sa"] = _AAPI.StructAkiEstadSystem_Sa_set
__swig_getmethods__["Sa"] = _AAPI.StructAkiEstadSystem_Sa_get
if _newclass:Sa = _swig_property(_AAPI.StructAkiEstadSystem_Sa_get, _AAPI.StructAkiEstadSystem_Sa_set)
__swig_setmethods__["Sd"] = _AAPI.StructAkiEstadSystem_Sd_set
__swig_getmethods__["Sd"] = _AAPI.StructAkiEstadSystem_Sd_get
if _newclass:Sd = _swig_property(_AAPI.StructAkiEstadSystem_Sd_get, _AAPI.StructAkiEstadSystem_Sd_set)
__swig_setmethods__["SHa"] = _AAPI.StructAkiEstadSystem_SHa_set
__swig_getmethods__["SHa"] = _AAPI.StructAkiEstadSystem_SHa_get
if _newclass:SHa = _swig_property(_AAPI.StructAkiEstadSystem_SHa_get, _AAPI.StructAkiEstadSystem_SHa_set)
__swig_setmethods__["SHd"] = _AAPI.StructAkiEstadSystem_SHd_set
__swig_getmethods__["SHd"] = _AAPI.StructAkiEstadSystem_SHd_get
if _newclass:SHd = _swig_property(_AAPI.StructAkiEstadSystem_SHd_get, _AAPI.StructAkiEstadSystem_SHd_set)
__swig_setmethods__["Density"] = _AAPI.StructAkiEstadSystem_Density_set
__swig_getmethods__["Density"] = _AAPI.StructAkiEstadSystem_Density_get
if _newclass:Density = _swig_property(_AAPI.StructAkiEstadSystem_Density_get, _AAPI.StructAkiEstadSystem_Density_set)
__swig_setmethods__["STa"] = _AAPI.StructAkiEstadSystem_STa_set
__swig_getmethods__["STa"] = _AAPI.StructAkiEstadSystem_STa_get
if _newclass:STa = _swig_property(_AAPI.StructAkiEstadSystem_STa_get, _AAPI.StructAkiEstadSystem_STa_set)
__swig_setmethods__["STd"] = _AAPI.StructAkiEstadSystem_STd_set
__swig_getmethods__["STd"] = _AAPI.StructAkiEstadSystem_STd_get
if _newclass:STd = _swig_property(_AAPI.StructAkiEstadSystem_STd_get, _AAPI.StructAkiEstadSystem_STd_set)
__swig_setmethods__["NumStops"] = _AAPI.StructAkiEstadSystem_NumStops_set
__swig_getmethods__["NumStops"] = _AAPI.StructAkiEstadSystem_NumStops_get
if _newclass:NumStops = _swig_property(_AAPI.StructAkiEstadSystem_NumStops_get, _AAPI.StructAkiEstadSystem_NumStops_set)
__swig_setmethods__["LongQueueAvg"] = _AAPI.StructAkiEstadSystem_LongQueueAvg_set
__swig_getmethods__["LongQueueAvg"] = _AAPI.StructAkiEstadSystem_LongQueueAvg_get
if _newclass:LongQueueAvg = _swig_property(_AAPI.StructAkiEstadSystem_LongQueueAvg_get, _AAPI.StructAkiEstadSystem_LongQueueAvg_set)
__swig_setmethods__["LongQueueMax"] = _AAPI.StructAkiEstadSystem_LongQueueMax_set
__swig_getmethods__["LongQueueMax"] = _AAPI.StructAkiEstadSystem_LongQueueMax_get
if _newclass:LongQueueMax = _swig_property(_AAPI.StructAkiEstadSystem_LongQueueMax_get, _AAPI.StructAkiEstadSystem_LongQueueMax_set)
__swig_setmethods__["TotalTravel"] = _AAPI.StructAkiEstadSystem_TotalTravel_set
__swig_getmethods__["TotalTravel"] = _AAPI.StructAkiEstadSystem_TotalTravel_get
if _newclass:TotalTravel = _swig_property(_AAPI.StructAkiEstadSystem_TotalTravel_get, _AAPI.StructAkiEstadSystem_TotalTravel_set)
__swig_setmethods__["TotalTravelTime"] = _AAPI.StructAkiEstadSystem_TotalTravelTime_set
__swig_getmethods__["TotalTravelTime"] = _AAPI.StructAkiEstadSystem_TotalTravelTime_get
if _newclass:TotalTravelTime = _swig_property(_AAPI.StructAkiEstadSystem_TotalTravelTime_get, _AAPI.StructAkiEstadSystem_TotalTravelTime_set)
__swig_setmethods__["virtualQueueAvg"] = _AAPI.StructAkiEstadSystem_virtualQueueAvg_set
__swig_getmethods__["virtualQueueAvg"] = _AAPI.StructAkiEstadSystem_virtualQueueAvg_get
if _newclass:virtualQueueAvg = _swig_property(_AAPI.StructAkiEstadSystem_virtualQueueAvg_get, _AAPI.StructAkiEstadSystem_virtualQueueAvg_set)
__swig_setmethods__["virtualQueueMax"] = _AAPI.StructAkiEstadSystem_virtualQueueMax_set
__swig_getmethods__["virtualQueueMax"] = _AAPI.StructAkiEstadSystem_virtualQueueMax_get
if _newclass:virtualQueueMax = _swig_property(_AAPI.StructAkiEstadSystem_virtualQueueMax_get, _AAPI.StructAkiEstadSystem_virtualQueueMax_set)
__swig_setmethods__["count"] = _AAPI.StructAkiEstadSystem_count_set
__swig_getmethods__["count"] = _AAPI.StructAkiEstadSystem_count_get
if _newclass:count = _swig_property(_AAPI.StructAkiEstadSystem_count_get, _AAPI.StructAkiEstadSystem_count_set)
__swig_setmethods__["inputFlow"] = _AAPI.StructAkiEstadSystem_inputFlow_set
__swig_getmethods__["inputFlow"] = _AAPI.StructAkiEstadSystem_inputFlow_get
if _newclass:inputFlow = _swig_property(_AAPI.StructAkiEstadSystem_inputFlow_get, _AAPI.StructAkiEstadSystem_inputFlow_set)
__swig_setmethods__["inputCount"] = _AAPI.StructAkiEstadSystem_inputCount_set
__swig_getmethods__["inputCount"] = _AAPI.StructAkiEstadSystem_inputCount_get
if _newclass:inputCount = _swig_property(_AAPI.StructAkiEstadSystem_inputCount_get, _AAPI.StructAkiEstadSystem_inputCount_set)
__swig_setmethods__["vehsWaiting"] = _AAPI.StructAkiEstadSystem_vehsWaiting_set
__swig_getmethods__["vehsWaiting"] = _AAPI.StructAkiEstadSystem_vehsWaiting_get
if _newclass:vehsWaiting = _swig_property(_AAPI.StructAkiEstadSystem_vehsWaiting_get, _AAPI.StructAkiEstadSystem_vehsWaiting_set)
__swig_setmethods__["vehIn"] = _AAPI.StructAkiEstadSystem_vehIn_set
__swig_getmethods__["vehIn"] = _AAPI.StructAkiEstadSystem_vehIn_get
if _newclass:vehIn = _swig_property(_AAPI.StructAkiEstadSystem_vehIn_get, _AAPI.StructAkiEstadSystem_vehIn_set)
__swig_setmethods__["vehsLostIn"] = _AAPI.StructAkiEstadSystem_vehsLostIn_set
__swig_getmethods__["vehsLostIn"] = _AAPI.StructAkiEstadSystem_vehsLostIn_get
if _newclass:vehsLostIn = _swig_property(_AAPI.StructAkiEstadSystem_vehsLostIn_get, _AAPI.StructAkiEstadSystem_vehsLostIn_set)
__swig_setmethods__["vehsLostOut"] = _AAPI.StructAkiEstadSystem_vehsLostOut_set
__swig_getmethods__["vehsLostOut"] = _AAPI.StructAkiEstadSystem_vehsLostOut_get
if _newclass:vehsLostOut = _swig_property(_AAPI.StructAkiEstadSystem_vehsLostOut_get, _AAPI.StructAkiEstadSystem_vehsLostOut_set)
__swig_setmethods__["missedTurns"] = _AAPI.StructAkiEstadSystem_missedTurns_set
__swig_getmethods__["missedTurns"] = _AAPI.StructAkiEstadSystem_missedTurns_get
if _newclass:missedTurns = _swig_property(_AAPI.StructAkiEstadSystem_missedTurns_get, _AAPI.StructAkiEstadSystem_missedTurns_set)
__swig_setmethods__["laneChanges"] = _AAPI.StructAkiEstadSystem_laneChanges_set
__swig_getmethods__["laneChanges"] = _AAPI.StructAkiEstadSystem_laneChanges_get
if _newclass:laneChanges = _swig_property(_AAPI.StructAkiEstadSystem_laneChanges_get, _AAPI.StructAkiEstadSystem_laneChanges_set)
__swig_setmethods__["totalLaneChanges"] = _AAPI.StructAkiEstadSystem_totalLaneChanges_set
__swig_getmethods__["totalLaneChanges"] = _AAPI.StructAkiEstadSystem_totalLaneChanges_get
if _newclass:totalLaneChanges = _swig_property(_AAPI.StructAkiEstadSystem_totalLaneChanges_get, _AAPI.StructAkiEstadSystem_totalLaneChanges_set)
def __init__(self):
this = _AAPI.new_StructAkiEstadSystem()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _AAPI.delete_StructAkiEstadSystem
__del__ = lambda self : None;
StructAkiEstadSystem_swigregister = _AAPI.StructAkiEstadSystem_swigregister
StructAkiEstadSystem_swigregister(StructAkiEstadSystem)
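# Illustrative sketch (not part of the generated SWIG bindings): reading fields of a
# StructAkiEstadSystem instance. How the instance is obtained is an assumption -- typically
# one of the AKIEst* statistics getters, which are not shown here -- but the attribute names
# below match the properties registered above.
def _print_system_stats(stats):
    # 'stats' is assumed to be a StructAkiEstadSystem; 'report' is assumed to be the
    # validity/error code, with 0 meaning the statistics are available.
    if stats.report == 0:
        print("flow: %s  density: %s  mean travel time: %s  total travel: %s" % (
            stats.Flow, stats.Density, stats.TTa, stats.TotalTravel))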
class StructAkiEstadSection(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, StructAkiEstadSection, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, StructAkiEstadSection, name)
__repr__ = _swig_repr
__swig_setmethods__["report"] = _AAPI.StructAkiEstadSection_report_set
__swig_getmethods__["report"] = _AAPI.StructAkiEstadSection_report_get
if _newclass:report = _swig_property(_AAPI.StructAkiEstadSection_report_get, _AAPI.StructAkiEstadSection_report_set)
__swig_setmethods__["Id"] = _AAPI.StructAkiEstadSection_Id_set
__swig_getmethods__["Id"] = _AAPI.StructAkiEstadSection_Id_get
if _newclass:Id = _swig_property(_AAPI.StructAkiEstadSection_Id_get, _AAPI.StructAkiEstadSection_Id_set)
__swig_setmethods__["Flow"] = _AAPI.StructAkiEstadSection_Flow_set
__swig_getmethods__["Flow"] = _AAPI.StructAkiEstadSection_Flow_get
if _newclass:Flow = _swig_property(_AAPI.StructAkiEstadSection_Flow_get, _AAPI.StructAkiEstadSection_Flow_set)
__swig_setmethods__["TTa"] = _AAPI.StructAkiEstadSection_TTa_set
__swig_getmethods__["TTa"] = _AAPI.StructAkiEstadSection_TTa_get
if _newclass:TTa = _swig_property(_AAPI.StructAkiEstadSection_TTa_get, _AAPI.StructAkiEstadSection_TTa_set)
__swig_setmethods__["TTd"] = _AAPI.StructAkiEstadSection_TTd_set
__swig_getmethods__["TTd"] = _AAPI.StructAkiEstadSection_TTd_get
if _newclass:TTd = _swig_property(_AAPI.StructAkiEstadSection_TTd_get, _AAPI.StructAkiEstadSection_TTd_set)
__swig_setmethods__["DTa"] = _AAPI.StructAkiEstadSection_DTa_set
__swig_getmethods__["DTa"] = _AAPI.StructAkiEstadSection_DTa_get
if _newclass:DTa = _swig_property(_AAPI.StructAkiEstadSection_DTa_get, _AAPI.StructAkiEstadSection_DTa_set)
__swig_setmethods__["DTd"] = _AAPI.StructAkiEstadSection_DTd_set
__swig_getmethods__["DTd"] = _AAPI.StructAkiEstadSection_DTd_get
if _newclass:DTd = _swig_property(_AAPI.StructAkiEstadSection_DTd_get, _AAPI.StructAkiEstadSection_DTd_set)
__swig_setmethods__["Sa"] = _AAPI.StructAkiEstadSection_Sa_set
__swig_getmethods__["Sa"] = _AAPI.StructAkiEstadSection_Sa_get
if _newclass:Sa = _swig_property(_AAPI.StructAkiEstadSection_Sa_get, _AAPI.StructAkiEstadSection_Sa_set)
__swig_setmethods__["Sd"] = _AAPI.StructAkiEstadSection_Sd_set
__swig_getmethods__["Sd"] = _AAPI.StructAkiEstadSection_Sd_get
if _newclass:Sd = _swig_property(_AAPI.StructAkiEstadSection_Sd_get, _AAPI.StructAkiEstadSection_Sd_set)
__swig_setmethods__["SHa"] = _AAPI.StructAkiEstadSection_SHa_set
__swig_getmethods__["SHa"] = _AAPI.StructAkiEstadSection_SHa_get
if _newclass:SHa = _swig_property(_AAPI.StructAkiEstadSection_SHa_get, _AAPI.StructAkiEstadSection_SHa_set)
__swig_setmethods__["SHd"] = _AAPI.StructAkiEstadSection_SHd_set
__swig_getmethods__["SHd"] = _AAPI.StructAkiEstadSection_SHd_get
if _newclass:SHd = _swig_property(_AAPI.StructAkiEstadSection_SHd_get, _AAPI.StructAkiEstadSection_SHd_set)
__swig_setmethods__["Density"] = _AAPI.StructAkiEstadSection_Density_set
__swig_getmethods__["Density"] = _AAPI.StructAkiEstadSection_Density_get
if _newclass:Density = _swig_property(_AAPI.StructAkiEstadSection_Density_get, _AAPI.StructAkiEstadSection_Density_set)
__swig_setmethods__["STa"] = _AAPI.StructAkiEstadSection_STa_set
__swig_getmethods__["STa"] = _AAPI.StructAkiEstadSection_STa_get
if _newclass:STa = _swig_property(_AAPI.StructAkiEstadSection_STa_get, _AAPI.StructAkiEstadSection_STa_set)
__swig_setmethods__["STd"] = _AAPI.StructAkiEstadSection_STd_set
__swig_getmethods__["STd"] = _AAPI.StructAkiEstadSection_STd_get
if _newclass:STd = _swig_property(_AAPI.StructAkiEstadSection_STd_get, _AAPI.StructAkiEstadSection_STd_set)
__swig_setmethods__["NumStops"] = _AAPI.StructAkiEstadSection_NumStops_set
__swig_getmethods__["NumStops"] = _AAPI.StructAkiEstadSection_NumStops_get
if _newclass:NumStops = _swig_property(_AAPI.StructAkiEstadSection_NumStops_get, _AAPI.StructAkiEstadSection_NumStops_set)
__swig_setmethods__["LongQueueAvg"] = _AAPI.StructAkiEstadSection_LongQueueAvg_set
__swig_getmethods__["LongQueueAvg"] = _AAPI.StructAkiEstadSection_LongQueueAvg_get
if _newclass:LongQueueAvg = _swig_property(_AAPI.StructAkiEstadSection_LongQueueAvg_get, _AAPI.StructAkiEstadSection_LongQueueAvg_set)
__swig_setmethods__["LongQueueMax"] = _AAPI.StructAkiEstadSection_LongQueueMax_set
__swig_getmethods__["LongQueueMax"] = _AAPI.StructAkiEstadSection_LongQueueMax_get
if _newclass:LongQueueMax = _swig_property(_AAPI.StructAkiEstadSection_LongQueueMax_get, _AAPI.StructAkiEstadSection_LongQueueMax_set)
__swig_setmethods__["TotalTravel"] = _AAPI.StructAkiEstadSection_TotalTravel_set
__swig_getmethods__["TotalTravel"] = _AAPI.StructAkiEstadSection_TotalTravel_get
if _newclass:TotalTravel = _swig_property(_AAPI.StructAkiEstadSection_TotalTravel_get, _AAPI.StructAkiEstadSection_TotalTravel_set)
__swig_setmethods__["TotalTravelTime"] = _AAPI.StructAkiEstadSection_TotalTravelTime_set
__swig_getmethods__["TotalTravelTime"] = _AAPI.StructAkiEstadSection_TotalTravelTime_get
if _newclass:TotalTravelTime = _swig_property(_AAPI.StructAkiEstadSection_TotalTravelTime_get, _AAPI.StructAkiEstadSection_TotalTravelTime_set)
__swig_setmethods__["virtualQueueAvg"] = _AAPI.StructAkiEstadSection_virtualQueueAvg_set
__swig_getmethods__["virtualQueueAvg"] = _AAPI.StructAkiEstadSection_virtualQueueAvg_get
if _newclass:virtualQueueAvg = _swig_property(_AAPI.StructAkiEstadSection_virtualQueueAvg_get, _AAPI.StructAkiEstadSection_virtualQueueAvg_set)
__swig_setmethods__["virtualQueueMax"] = _AAPI.StructAkiEstadSection_virtualQueueMax_set
__swig_getmethods__["virtualQueueMax"] = _AAPI.StructAkiEstadSection_virtualQueueMax_get
if _newclass:virtualQueueMax = _swig_property(_AAPI.StructAkiEstadSection_virtualQueueMax_get, _AAPI.StructAkiEstadSection_virtualQueueMax_set)
__swig_setmethods__["count"] = _AAPI.StructAkiEstadSection_count_set
__swig_getmethods__["count"] = _AAPI.StructAkiEstadSection_count_get
if _newclass:count = _swig_property(_AAPI.StructAkiEstadSection_count_get, _AAPI.StructAkiEstadSection_count_set)
__swig_setmethods__["inputFlow"] = _AAPI.StructAkiEstadSection_inputFlow_set
__swig_getmethods__["inputFlow"] = _AAPI.StructAkiEstadSection_inputFlow_get
if _newclass:inputFlow = _swig_property(_AAPI.StructAkiEstadSection_inputFlow_get, _AAPI.StructAkiEstadSection_inputFlow_set)
__swig_setmethods__["inputCount"] = _AAPI.StructAkiEstadSection_inputCount_set
__swig_getmethods__["inputCount"] = _AAPI.StructAkiEstadSection_inputCount_get
if _newclass:inputCount = _swig_property(_AAPI.StructAkiEstadSection_inputCount_get, _AAPI.StructAkiEstadSection_inputCount_set)
__swig_setmethods__["flowCapacity"] = _AAPI.StructAkiEstadSection_flowCapacity_set
__swig_getmethods__["flowCapacity"] = _AAPI.StructAkiEstadSection_flowCapacity_get
if _newclass:flowCapacity = _swig_property(_AAPI.StructAkiEstadSection_flowCapacity_get, _AAPI.StructAkiEstadSection_flowCapacity_set)
__swig_setmethods__["laneChanges"] = _AAPI.StructAkiEstadSection_laneChanges_set
__swig_getmethods__["laneChanges"] = _AAPI.StructAkiEstadSection_laneChanges_get
if _newclass:laneChanges = _swig_property(_AAPI.StructAkiEstadSection_laneChanges_get, _AAPI.StructAkiEstadSection_laneChanges_set)
__swig_setmethods__["totalLaneChanges"] = _AAPI.StructAkiEstadSection_totalLaneChanges_set
__swig_getmethods__["totalLaneChanges"] = _AAPI.StructAkiEstadSection_totalLaneChanges_get
if _newclass:totalLaneChanges = _swig_property(_AAPI.StructAkiEstadSection_totalLaneChanges_get, _AAPI.StructAkiEstadSection_totalLaneChanges_set)
def __init__(self):
this = _AAPI.new_StructAkiEstadSection()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _AAPI.delete_StructAkiEstadSection
__del__ = lambda self : None;
StructAkiEstadSection_swigregister = _AAPI.StructAkiEstadSection_swigregister
StructAkiEstadSection_swigregister(StructAkiEstadSection)
class StructAkiEstadSectionLane(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, StructAkiEstadSectionLane, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, StructAkiEstadSectionLane, name)
__repr__ = _swig_repr
__swig_setmethods__["report"] = _AAPI.StructAkiEstadSectionLane_report_set
__swig_getmethods__["report"] = _AAPI.StructAkiEstadSectionLane_report_get
if _newclass:report = _swig_property(_AAPI.StructAkiEstadSectionLane_report_get, _AAPI.StructAkiEstadSectionLane_report_set)
__swig_setmethods__["IdSection"] = _AAPI.StructAkiEstadSectionLane_IdSection_set
__swig_getmethods__["IdSection"] = _AAPI.StructAkiEstadSectionLane_IdSection_get
if _newclass:IdSection = _swig_property(_AAPI.StructAkiEstadSectionLane_IdSection_get, _AAPI.StructAkiEstadSectionLane_IdSection_set)
__swig_setmethods__["Flow"] = _AAPI.StructAkiEstadSectionLane_Flow_set
__swig_getmethods__["Flow"] = _AAPI.StructAkiEstadSectionLane_Flow_get
if _newclass:Flow = _swig_property(_AAPI.StructAkiEstadSectionLane_Flow_get, _AAPI.StructAkiEstadSectionLane_Flow_set)
__swig_setmethods__["TTa"] = _AAPI.StructAkiEstadSectionLane_TTa_set
__swig_getmethods__["TTa"] = _AAPI.StructAkiEstadSectionLane_TTa_get
if _newclass:TTa = _swig_property(_AAPI.StructAkiEstadSectionLane_TTa_get, _AAPI.StructAkiEstadSectionLane_TTa_set)
__swig_setmethods__["TTd"] = _AAPI.StructAkiEstadSectionLane_TTd_set
__swig_getmethods__["TTd"] = _AAPI.StructAkiEstadSectionLane_TTd_get
if _newclass:TTd = _swig_property(_AAPI.StructAkiEstadSectionLane_TTd_get, _AAPI.StructAkiEstadSectionLane_TTd_set)
__swig_setmethods__["DTa"] = _AAPI.StructAkiEstadSectionLane_DTa_set
__swig_getmethods__["DTa"] = _AAPI.StructAkiEstadSectionLane_DTa_get
if _newclass:DTa = _swig_property(_AAPI.StructAkiEstadSectionLane_DTa_get, _AAPI.StructAkiEstadSectionLane_DTa_set)
__swig_setmethods__["DTd"] = _AAPI.StructAkiEstadSectionLane_DTd_set
__swig_getmethods__["DTd"] = _AAPI.StructAkiEstadSectionLane_DTd_get
if _newclass:DTd = _swig_property(_AAPI.StructAkiEstadSectionLane_DTd_get, _AAPI.StructAkiEstadSectionLane_DTd_set)
__swig_setmethods__["Sa"] = _AAPI.StructAkiEstadSectionLane_Sa_set
__swig_getmethods__["Sa"] = _AAPI.StructAkiEstadSectionLane_Sa_get
if _newclass:Sa = _swig_property(_AAPI.StructAkiEstadSectionLane_Sa_get, _AAPI.StructAkiEstadSectionLane_Sa_set)
__swig_setmethods__["Sd"] = _AAPI.StructAkiEstadSectionLane_Sd_set
__swig_getmethods__["Sd"] = _AAPI.StructAkiEstadSectionLane_Sd_get
if _newclass:Sd = _swig_property(_AAPI.StructAkiEstadSectionLane_Sd_get, _AAPI.StructAkiEstadSectionLane_Sd_set)
__swig_setmethods__["SHa"] = _AAPI.StructAkiEstadSectionLane_SHa_set
__swig_getmethods__["SHa"] = _AAPI.StructAkiEstadSectionLane_SHa_get
if _newclass:SHa = _swig_property(_AAPI.StructAkiEstadSectionLane_SHa_get, _AAPI.StructAkiEstadSectionLane_SHa_set)
__swig_setmethods__["SHd"] = _AAPI.StructAkiEstadSectionLane_SHd_set
__swig_getmethods__["SHd"] = _AAPI.StructAkiEstadSectionLane_SHd_get
if _newclass:SHd = _swig_property(_AAPI.StructAkiEstadSectionLane_SHd_get, _AAPI.StructAkiEstadSectionLane_SHd_set)
__swig_setmethods__["Density"] = _AAPI.StructAkiEstadSectionLane_Density_set
__swig_getmethods__["Density"] = _AAPI.StructAkiEstadSectionLane_Density_get
if _newclass:Density = _swig_property(_AAPI.StructAkiEstadSectionLane_Density_get, _AAPI.StructAkiEstadSectionLane_Density_set)
__swig_setmethods__["LongQueueAvg"] = _AAPI.StructAkiEstadSectionLane_LongQueueAvg_set
__swig_getmethods__["LongQueueAvg"] = _AAPI.StructAkiEstadSectionLane_LongQueueAvg_get
if _newclass:LongQueueAvg = _swig_property(_AAPI.StructAkiEstadSectionLane_LongQueueAvg_get, _AAPI.StructAkiEstadSectionLane_LongQueueAvg_set)
__swig_setmethods__["LongQueueMax"] = _AAPI.StructAkiEstadSectionLane_LongQueueMax_set
__swig_getmethods__["LongQueueMax"] = _AAPI.StructAkiEstadSectionLane_LongQueueMax_get
if _newclass:LongQueueMax = _swig_property(_AAPI.StructAkiEstadSectionLane_LongQueueMax_get, _AAPI.StructAkiEstadSectionLane_LongQueueMax_set)
def __init__(self):
this = _AAPI.new_StructAkiEstadSectionLane()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _AAPI.delete_StructAkiEstadSectionLane
__del__ = lambda self : None;
StructAkiEstadSectionLane_swigregister = _AAPI.StructAkiEstadSectionLane_swigregister
StructAkiEstadSectionLane_swigregister(StructAkiEstadSectionLane)
class StructAkiEstadTurning(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, StructAkiEstadTurning, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, StructAkiEstadTurning, name)
__repr__ = _swig_repr
__swig_setmethods__["report"] = _AAPI.StructAkiEstadTurning_report_set
__swig_getmethods__["report"] = _AAPI.StructAkiEstadTurning_report_get
if _newclass:report = _swig_property(_AAPI.StructAkiEstadTurning_report_get, _AAPI.StructAkiEstadTurning_report_set)
__swig_setmethods__["IdSectionFrom"] = _AAPI.StructAkiEstadTurning_IdSectionFrom_set
__swig_getmethods__["IdSectionFrom"] = _AAPI.StructAkiEstadTurning_IdSectionFrom_get
if _newclass:IdSectionFrom = _swig_property(_AAPI.StructAkiEstadTurning_IdSectionFrom_get, _AAPI.StructAkiEstadTurning_IdSectionFrom_set)
__swig_setmethods__["IdSectionTo"] = _AAPI.StructAkiEstadTurning_IdSectionTo_set
__swig_getmethods__["IdSectionTo"] = _AAPI.StructAkiEstadTurning_IdSectionTo_get
if _newclass:IdSectionTo = _swig_property(_AAPI.StructAkiEstadTurning_IdSectionTo_get, _AAPI.StructAkiEstadTurning_IdSectionTo_set)
__swig_setmethods__["Flow"] = _AAPI.StructAkiEstadTurning_Flow_set
__swig_getmethods__["Flow"] = _AAPI.StructAkiEstadTurning_Flow_get
if _newclass:Flow = _swig_property(_AAPI.StructAkiEstadTurning_Flow_get, _AAPI.StructAkiEstadTurning_Flow_set)
__swig_setmethods__["TTa"] = _AAPI.StructAkiEstadTurning_TTa_set
__swig_getmethods__["TTa"] = _AAPI.StructAkiEstadTurning_TTa_get
if _newclass:TTa = _swig_property(_AAPI.StructAkiEstadTurning_TTa_get, _AAPI.StructAkiEstadTurning_TTa_set)
__swig_setmethods__["TTd"] = _AAPI.StructAkiEstadTurning_TTd_set
__swig_getmethods__["TTd"] = _AAPI.StructAkiEstadTurning_TTd_get
if _newclass:TTd = _swig_property(_AAPI.StructAkiEstadTurning_TTd_get, _AAPI.StructAkiEstadTurning_TTd_set)
__swig_setmethods__["DTa"] = _AAPI.StructAkiEstadTurning_DTa_set
__swig_getmethods__["DTa"] = _AAPI.StructAkiEstadTurning_DTa_get
if _newclass:DTa = _swig_property(_AAPI.StructAkiEstadTurning_DTa_get, _AAPI.StructAkiEstadTurning_DTa_set)
__swig_setmethods__["DTd"] = _AAPI.StructAkiEstadTurning_DTd_set
__swig_getmethods__["DTd"] = _AAPI.StructAkiEstadTurning_DTd_get
if _newclass:DTd = _swig_property(_AAPI.StructAkiEstadTurning_DTd_get, _AAPI.StructAkiEstadTurning_DTd_set)
__swig_setmethods__["Sa"] = _AAPI.StructAkiEstadTurning_Sa_set
__swig_getmethods__["Sa"] = _AAPI.StructAkiEstadTurning_Sa_get
if _newclass:Sa = _swig_property(_AAPI.StructAkiEstadTurning_Sa_get, _AAPI.StructAkiEstadTurning_Sa_set)
__swig_setmethods__["Sd"] = _AAPI.StructAkiEstadTurning_Sd_set
__swig_getmethods__["Sd"] = _AAPI.StructAkiEstadTurning_Sd_get
if _newclass:Sd = _swig_property(_AAPI.StructAkiEstadTurning_Sd_get, _AAPI.StructAkiEstadTurning_Sd_set)
__swig_setmethods__["SHa"] = _AAPI.StructAkiEstadTurning_SHa_set
__swig_getmethods__["SHa"] = _AAPI.StructAkiEstadTurning_SHa_get
if _newclass:SHa = _swig_property(_AAPI.StructAkiEstadTurning_SHa_get, _AAPI.StructAkiEstadTurning_SHa_set)
__swig_setmethods__["SHd"] = _AAPI.StructAkiEstadTurning_SHd_set
__swig_getmethods__["SHd"] = _AAPI.StructAkiEstadTurning_SHd_get
if _newclass:SHd = _swig_property(_AAPI.StructAkiEstadTurning_SHd_get, _AAPI.StructAkiEstadTurning_SHd_set)
__swig_setmethods__["STa"] = _AAPI.StructAkiEstadTurning_STa_set
__swig_getmethods__["STa"] = _AAPI.StructAkiEstadTurning_STa_get
if _newclass:STa = _swig_property(_AAPI.StructAkiEstadTurning_STa_get, _AAPI.StructAkiEstadTurning_STa_set)
__swig_setmethods__["STd"] = _AAPI.StructAkiEstadTurning_STd_set
__swig_getmethods__["STd"] = _AAPI.StructAkiEstadTurning_STd_get
if _newclass:STd = _swig_property(_AAPI.StructAkiEstadTurning_STd_get, _AAPI.StructAkiEstadTurning_STd_set)
__swig_setmethods__["NumStops"] = _AAPI.StructAkiEstadTurning_NumStops_set
__swig_getmethods__["NumStops"] = _AAPI.StructAkiEstadTurning_NumStops_get
if _newclass:NumStops = _swig_property(_AAPI.StructAkiEstadTurning_NumStops_get, _AAPI.StructAkiEstadTurning_NumStops_set)
__swig_setmethods__["LongQueueAvg"] = _AAPI.StructAkiEstadTurning_LongQueueAvg_set
__swig_getmethods__["LongQueueAvg"] = _AAPI.StructAkiEstadTurning_LongQueueAvg_get
if _newclass:LongQueueAvg = _swig_property(_AAPI.StructAkiEstadTurning_LongQueueAvg_get, _AAPI.StructAkiEstadTurning_LongQueueAvg_set)
__swig_setmethods__["LongQueueMax"] = _AAPI.StructAkiEstadTurning_LongQueueMax_set
__swig_getmethods__["LongQueueMax"] = _AAPI.StructAkiEstadTurning_LongQueueMax_get
if _newclass:LongQueueMax = _swig_property(_AAPI.StructAkiEstadTurning_LongQueueMax_get, _AAPI.StructAkiEstadTurning_LongQueueMax_set)
__swig_setmethods__["TotalTravel"] = _AAPI.StructAkiEstadTurning_TotalTravel_set
__swig_getmethods__["TotalTravel"] = _AAPI.StructAkiEstadTurning_TotalTravel_get
if _newclass:TotalTravel = _swig_property(_AAPI.StructAkiEstadTurning_TotalTravel_get, _AAPI.StructAkiEstadTurning_TotalTravel_set)
__swig_setmethods__["TotalTravelTime"] = _AAPI.StructAkiEstadTurning_TotalTravelTime_set
__swig_getmethods__["TotalTravelTime"] = _AAPI.StructAkiEstadTurning_TotalTravelTime_get
if _newclass:TotalTravelTime = _swig_property(_AAPI.StructAkiEstadTurning_TotalTravelTime_get, _AAPI.StructAkiEstadTurning_TotalTravelTime_set)
__swig_setmethods__["laneChanges"] = _AAPI.StructAkiEstadTurning_laneChanges_set
__swig_getmethods__["laneChanges"] | |
"""
Comparison with a gtlike model
"""
import os, glob
import pickle
import numpy as np
import pylab as plt
import matplotlib.gridspec as gridspec
import matplotlib.ticker as ticker
import pandas as pd
from skymaps import SkyDir
#from uw.utilities import makepivot
from uw.like import Models
from . import (sourcecomparison, sourceinfo,fermi_catalog,)
from . analysis_base import html_table, FloatFormat
from .. import tools
def add_galactic(tt, add=True):
    """Add galactic coordinate columns 'glon' (wrapped to [-180, 180]) and 'glat' to a
    DataFrame with 'ra' and 'dec' columns in degrees. The 'add' argument is unused."""
    from skymaps import SkyDir
    sd = [SkyDir(ra, dec) for ra, dec in zip(tt.ra, tt.dec)]
    glon = np.array([x.l() for x in sd])
    glon[glon > 180] -= 360
    glat = np.array([x.b() for x in sd])
    tt['glat'] = glat
    tt['glon'] = glon
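# Minimal usage sketch (hypothetical values): add_galactic expects 'ra'/'dec' columns in
# degrees and adds 'glon' (wrapped to [-180, 180]) and 'glat' in place, e.g.
#   tt = pd.DataFrame(dict(ra=[266.40], dec=[-28.94]))   # roughly the Galactic centre
#   add_galactic(tt)
#   # tt.glon and tt.glat are then both close to 0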
class FL8YComparison(sourceinfo.SourceInfo):
"""Comparison with 4FGL
This analysis uses the %(cat)s catalog file %(fhl_file)s.
<p>This is using the %(skymodel)s model, with the same 8-year data set,
with Source class events. There are some differences:
<ul>
<li>The zenith cut is 100 degrees, for all energies, while %(cat)s varies from 90 to 110.
<li>It restricts theta<66.4 degrees, since the IRF is not reliable above this: about 3%% loss
<li>It uses Front/Back event types, with Front only for E<316 MeV. This loses some localization resolution, but avoids the PSF3 effective area problem.
<li>The diffuse models are not modified for each ROI. </li>
</ul>
"""
def setup(self, pattern=None, **kwargs):
super(FL8YComparison, self).setup(**kwargs)
self.cat='4FGL'
self.plotfolder='{}_comparison'.format(self.cat)
# make copy dataframe with compressed names
df=self.df
self.old_index = self.df.index
cindex = [n.replace(' ','') for n in self.df.index]
self.df.index = cindex
        # add info on E>10 GeV
        # r95: 95% localization error radius built from the fitted error-ellipse axes (a, b),
        # scaled by the configured systematic factor; the quadrature term is converted from
        # arcmin to degrees.
        systematic = self.config['localization_systematics']
        f95, quad = 2.45 * systematic[0], systematic[1] / 60.
        self.df['r95'] = (f95 ** 2 * (self.df.a * self.df.b) + quad ** 2) ** 0.5
# get the catalog "gll" entries as a DataFrame and set corresponding values
if pattern is None:
pattern=self.config['gllcat']
if not pattern.startswith('/'):
pattern = '$FERMI/catalog/'+pattern
filename = sorted(glob.glob(os.path.expandvars(pattern)))[-1]
fcat = fermi_catalog.GLL_PSC2(filename)
self.fhl_file = fcat.filename.split('/')[-1]
self.gdf = gdf= fcat.df
gdf['uw_ts'] = self.df.ts
gdf['uw_r95'] = self.df.r95
gdf['uw_pindex']= self.df.pindex
gdf['uw_eflux100']=self.df.eflux100
# add boolean for in FL8Y
self.df['fl8y'] = [n in gdf.index for n in cindex]
# for sources not already tagged via the pointlike name being the same as the gtlike nickname
# look for nearest 4FGL source: add name, its distance to DataFrame
ok = df.fl8y==True
added = np.logical_not(ok)
df.loc[df.index[ok],'otherid']= df[ok].name
df.loc[df.index[ok], 'distance']=0
# look for nearest 4FGL source in rejected list: add name, distance to DataFrame
        print ('Searching 4FGL for the nearest source to each of the {} sources not found in it...'.format(sum(added)))
close = tools.find_close(df[added], self.gdf)
df.loc[df.index[~ok],'otherid'] = close.otherid
df.loc[df.index[~ok], 'distance'] = close.distance
df['b4fgl'] = df.distance<0.015
df['otherts'] = [self.gdf.loc[s.otherid.replace(' ','')].ts for name,s in df.iterrows() ]
df['other_extended'] = [self.gdf.loc[s.otherid.replace(' ','')].extended for name,s in df.iterrows() ]
        print ('done.')
a = set(cindex)
b = set(self.gdf.index);
lost = np.array(list(set(b.difference(a))))
if len(lost)>10:
print ('{} {} sources not here:,{}...'.format(len(lost), self.cat, lost[:10]))
self.lost=lost # save for further analysis
def add_info(self):
df = self.df
ok = df.fl8y==True
added = np.logical_not(ok)
df['otherid']=''
df['distance']=np.nan
df.otherid[ok] = df[ok].name
df.distance[ok] = 0
from uw.like2 import tools
        # look for nearest 4FGL source in rejected list: add name, distance to DataFrame
        close = tools.find_close(df[added], self.gdf)
        df.otherid[added] = close.otherid
        df.distance[added] = close.distance
df['otherts'] = [self.gdf.loc[s.otherid.replace(' ','')].ts for name,s in df.iterrows() ]
df['other_extended'] = [self.gdf.loc[s.otherid.replace(' ','')].extended for name,s in df.iterrows() ]
df['b4fgl'] = df.distance<0.015
def seed_selection(self, patterns = '605 504'.split(), close_tol=0.20, nearest_ts_limit=2e3,
nocut=False):
"""Seed selection
Output from code that selects seeds by source name prefix, finds those not in 4FGL,
and removes those that should be eliminated by the 4FGL selection criteria.
<pre>%(logstream)s</pre>
<p>Table with seeds not too close, or nearest source that were rejected.
%(rejected_4fgl)s
"""
self.startlog()
df=self.df
gdf = self.gdf
print ('Selecting seeds by first characters in source name'\
'\n pattern seeds TS>25 kept' )
def get_seeds(df, pattern):
seeded = np.array([name.startswith(pattern) for name in df.index])
sdf = df[seeded].copy()
print (' {:8} {:6d} {:6d} {:6d} '.format(
pattern, sum(seeded), sum(sdf.ts>25), sum(sdf.fl8y)))
sdf['key'] = [name[3] for name in sdf.index]
return sdf
sdf = get_seeds(df, patterns[0])
for pattern in patterns[1:]:
sdf = sdf.append(get_seeds(df, pattern))
print ('Created DF with {} seeds, {} in 4FGL'.format(len(sdf), sum(sdf.b4fgl)))
self.seed_df = sdf
print ('\nSelect a subset of seeds for comparison with 4FGL by removing those that are: ')
too_close = np.array([close_tol<d<0.5 for d in sdf.distance],bool);
print ('\t{:4d} too close to a 4FGL source by {} deg'.format( sum(too_close), close_tol, ))
not_close = np.array([d>0.5 for d in sdf.distance],bool)
too_soft = sdf.pindex>3
print ('\t{:4d} too soft, index>3'.format(sum(too_soft)))
#strong_or_extended = (sdf.otherts>nearest_ts_limit) | sdf.other_extended
strong = (sdf.otherts>nearest_ts_limit)
print ('\t{:4d} nearest is strong (TS>{})'.format(sum(strong), nearest_ts_limit))
extended =sdf.other_extended
print ('\t{:4d} nearest is extended '.format(sum(extended)))
#print ('\t{:4d} nearest is strong (TS>{}) or extended'.format(sum(strong_or_extended), nearest_ts_limit,))
ignore = strong | extended | too_close | too_soft
print ('\t{:4d} Any of the above'.format(sum(ignore)))
if nocut:
print ('Not using these cuts, for now')
self.seed_subset = sdf
else:
self.seed_subset= sdf[~ignore];
self.logstream=self.stoplog()
# make a table of those that perhaps should have been in 4FGL
t=self.seed_subset.query('ts>100 & ~b4fgl')['ts distance otherid otherts '.split()].sort_values(
by='ts', ascending=False)
self.rejected_4fgl =html_table(t, name=self.plotfolder+'/seed_subset',
heading='<h4>{} not in 4FGL, but should be w/ TS>100 </h4>'.format(len(t)),
href=True, float_format=FloatFormat(2))
def seed_plots(self, tsmax=100, title='seed spectral parameters', cols=2,):
"""Seed properties, plot used in the 4FGL paper
Distributions of the photon index (at pivot energy) and
curvature for the seeded sources. The upper row shows the three power-law sources, and the
lower the two curved sets.
"""
sdf = self.seed_df
groups = sdf.groupby('key');
fig, axx = plt.subplots(2,cols, figsize=(3*(1+cols),8))
hatch_type=dict(H='//', F=r'\\', S='||', P='//', N=r'\\')
for name, group in groups:
hkw = dict(histtype='step', lw=2, hatch=hatch_type[name])
label = dict(H='Hard',F='Intermediate',S='Soft', P='Peaked', N='Pulsar-like')[name]
print (label, len(group))
curved = dict(H=0, F=0, S=0, P=1, N=1)[name]
pi = np.array(group.pindex, float)
ts = np.array(group.ts, float).clip(0,tsmax)
axi = axx[curved,0] # photon index
axi.hist(pi, np.linspace(1, 3.5, 16), label=label, **hkw)
if curved==0:
x = dict(H=1.7, F=2.2, S=2.7)[name]
axi.axvline(x, ls='--', lw=2, color=dict(H='orange', F='blue', S='green')[name])
axi.set(xlabel='Photon index')
axi.legend(prop=dict(size=10), loc='upper left');
axc = axx[curved, 1]
curvature = np.asarray(group.curvature,float)
axc.hist(curvature, np.linspace(0,1,21), label=label, **hkw)
axc.set(xlabel='curvature')
axc.legend(prop=dict(size=10));
fig.tight_layout()
fig.subplots_adjust(top=0.92)
if title is not None: fig.suptitle(title, fontsize=24)
return fig
def acceptance_plots(self, title='gtlike seed acceptance', query=None):
"""Seed acceptance, plots in 4FGL paper
Distributions of $TS$ and energy flux (0.1 to 100 GeV), as measured by $pointlike$, for sources added to the
$pointlike$ model, filtered by rejection criteria, and the subset of same that was accepted by gtlike.
%(seed_subset_label)s
"""
sdf = self.seed_df
subset = self.seed_subset
self.seed_subset_label=''
if query is not None:
print ('applying query("{}")'.format(query) )
subset = subset.query(query)
self.seed_subset_label='<h4> selection: {}'.format(query)
fig, axx = plt.subplots(1,2, figsize=(10,4))# gridspec_kw=dict(left=0.1, wspace=0.25))
ax=axx[0]
hkw= dict(bins= np.logspace(1,3, 21), histtype='step', lw=2, log=True)
ts= subset.ts.astype(float).clip(0,1e3)
ax.hist(ts, color='orange', label='seeds', **hkw);
ax.hist(ts[subset.b4fgl], label='in 4FGL', color='green', **hkw);
ax.set(ylim=(0.8,None), xlabel='TS', xscale='log',); #ax.grid(alpha=0.4);
ax.xaxis.set_major_formatter(ticker.FuncFormatter(
lambda val,pos: { 1.0:'1', 10.0:'10', 100.:'100', 1e3:'1000'}.get(val,'')))
ax.yaxis.set_major_formatter(ticker.FuncFormatter(
lambda val,pos: { 1.0:'1', 10.0:'10', 100.:'100'}.get(val,'')))
for x in (25,34): ax.axvline(x, ls='--', color='grey')
ax.legend()
ax=axx[1]
hkw= dict(bins= np.logspace(-0.4,2.0, 25), histtype='step', lw=2, log=True)
eflux = subset.eflux100.astype( float)*1e12
ax.hist(eflux, label='seeds', color='orange', **hkw)
#ax.hist(eflux[ts>25], label='TS>25', color='grey', **hkw)
ax.hist(eflux[subset.b4fgl],label='in 4FGL', color='green', **hkw)
ax.set(xscale='log', xlabel=r'$\mathsf{Energy\ Flux\ [10^{-12}\ erg/cm^2/s}]$',
ylim=(0.9,None), xlim=(None, 40.))
ax.legend()
ax.xaxis.set_major_formatter(ticker.FuncFormatter(
lambda val,pos: { 1.0:'1', 10.0:'10', 100.:'100'}.get(val,'')))
ax.yaxis.set_major_formatter(ticker.FuncFormatter(
lambda val,pos: { 1.0:'1', 10.0:'10', 100.:'100'}.get(val,'')))
fig.tight_layout()
if title != '':
fig.subplots_adjust(top=0.90)
fig.suptitle(title, fontsize=18);
return fig
    def source_info_plots(self, tt, tscut=100):
        sd = [SkyDir(ra, dec) for ra, dec in zip(tt.ra, tt.dec)]
        glon = np.array([x.l() for x in sd])
        glon[glon > 180] -= 360
        glat = np.array([x.b() for x in sd])
        singlat = np.sin(np.radians(glat))
hights = tt.ts>tscut
fig, axx = plt.subplots(1,2, figsize=(12,6))
plt.subplots_adjust(wspace=0.25)
ax = axx[0]
ts=np.array(tt.ts, float)
hkw=dict(bins=np.logspace(np.log10(20),3,41), log=True, lw=2)
ax.hist(ts[~pd.isnull(ts)].clip(10,1e3),histtype='step', **hkw);
ax.hist(ts[~pd.isnull(ts) & hights].clip(10,1e3), color='red',
label='TS>{}: {}\nmax:{:.0f}'.format(tscut, sum(hights), tt.ts.max()),histtype='stepfilled', **hkw);
ax.legend()
ax.set(xscale='log', xlabel='TS', ylim=(0.9, None));
ax.axvline(25, color='green', ls='--')
ax = axx[1]
self.basic_skyplot(ax, glon,singlat, 'blue', s=10, title='Locations')
self.basic_skyplot(ax, glon[hights],singlat[hights],'red', s=30, title='Locations')
return fig
def filter_for_4fgl(self, df=None, close_tol=0.15):
df = (self.df if df is None else df).copy()
        print ('Filtering {} sources with 4FGL acceptance criteria'.format(len(df)))
add_galactic(df)
# look for nearest 4FGL source in rejected list: add name, distance to DataFrame
close = tools.find_close(df, self.gdf)
df.loc[:,'otherid'] = close.otherid
df.loc[:,'distance'] = close.distance
df.loc[:,'otherts'] = [self.gdf.loc[s.otherid].ts for name,s in df.iterrows() ]
df.loc[:,'other_extended'] = [self.gdf.loc[s.otherid].extended for name,s in df.iterrows() ]
# create subset that are
# * not just a rename,
# * more than 0.5 deg away
        # * closest does not have
response = self.client.get(self.facet_url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
department_facets = response.data["fields"]["department"]
department_projects_after_filtering = 0
for value in department_facets:
if department == value["text"]:
department_projects_after_filtering = value["count"]
self.assertEqual(department_projects_after_filtering, 1)
def test_search_by_province(self):
province = "Eastern Cape"
data = {"q": province}
response = self.client.get(self.url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
num_of_projects = response.data["count"]
self.assertEqual(num_of_projects, 2)
def test_facet_search_by_province(self):
province = "Eastern Cape"
department = "Department 1"
response = self.client.get(self.facet_url)
department_facets = response.data["fields"]["department"]
department_projects_before_filtering = 0
for value in department_facets:
if department == value["text"]:
department_projects_before_filtering = value["count"]
self.assertEqual(department_projects_before_filtering, 2)
data = {"q": province}
response = self.client.get(self.facet_url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
department_facets = response.data["fields"]["department"]
department_projects_after_filtering = 0
for value in department_facets:
if department == value["text"]:
department_projects_after_filtering = value["count"]
self.assertEqual(department_projects_after_filtering, 1)
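
# Illustrative sketch (not used by the test classes in this module): the facet-count lookup
# that these tests repeat inline could be factored into a helper such as this one.
def get_facet_count(facets, text):
    """Return the 'count' of the facet entry whose 'text' equals `text`, or 0 if absent."""
    for value in facets:
        if value["text"] == text:
            return value["count"]
    return 0
# Example (hypothetical): province_count = get_facet_count(response.data["fields"]["province"], "Eastern Cape")
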
class InfraProjectAPIStatusTestCase(APITestCase):
def setUp(self):
InfraProjectIndex().clear()
self.file = open(EMPTY_FILE_PATH, "rb")
self.url = reverse("infrastructure-project-api-list")
self.facet_url = reverse("infrastructure-project-api-facets")
fin_year = FinancialYear.objects.create(slug="2030-31")
self.sphere = Sphere.objects.create(financial_year=fin_year, name="Provincial")
self.quarter = Quarter.objects.create(number=1)
self.date = date(year=2050, month=1, day=1)
self.irm_snapshot = IRMSnapshot.objects.create(
sphere=self.sphere,
quarter=self.quarter,
date_taken=self.date,
file=File(self.file),
)
self.project_1 = InfraProject.objects.create(IRM_project_id=1)
InfraProjectSnapshot.objects.create(
irm_snapshot=self.irm_snapshot,
project=self.project_1,
province="Eastern Cape",
status="Construction",
estimated_completion_date=self.date,
)
self.project_2 = InfraProject.objects.create(IRM_project_id=2)
InfraProjectSnapshot.objects.create(
irm_snapshot=self.irm_snapshot,
project=self.project_2,
province="Free State",
status="Construction",
estimated_completion_date=self.date,
)
self.project_3 = InfraProject.objects.create(IRM_project_id=3)
InfraProjectSnapshot.objects.create(
irm_snapshot=self.irm_snapshot,
project=self.project_3,
province="Eastern Cape",
status="Completed",
estimated_completion_date=self.date,
)
InfraProjectIndex().reindex()
def tearDown(self):
InfraProjectIndex().clear()
self.file.close()
def test_filter_by_status(self):
status_ = "Construction"
data = {"status": status_}
response = self.client.get(self.url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
number_of_projects = response.data["count"]
self.assertEqual(number_of_projects, 2)
def test_facet_filter_by_status(self):
status_ = "Construction"
province = "Eastern Cape"
response = self.client.get(self.facet_url)
province_facets = response.data["fields"]["province"]
province_projects_before_filtering = 0
for value in province_facets:
if province == value["text"]:
province_projects_before_filtering = value["count"]
self.assertEqual(province_projects_before_filtering, 2)
data = {"selected_facets": "status_exact:{0}".format(status_)}
response = self.client.get(self.facet_url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
province_facets = response.data["fields"]["province"]
province_projects_after_filtering = 0
for value in province_facets:
if province == value["text"]:
province_projects_after_filtering = value["count"]
self.assertEqual(province_projects_after_filtering, 1)
class InfraProjectAPIFundingSourceTestCase(APITestCase):
def setUp(self):
InfraProjectIndex().clear()
self.file = open(EMPTY_FILE_PATH, "rb")
self.url = reverse("infrastructure-project-api-list")
self.facet_url = reverse("infrastructure-project-api-facets")
fin_year = FinancialYear.objects.create(slug="2030-31")
self.sphere = Sphere.objects.create(financial_year=fin_year, name="Provincial")
self.quarter = Quarter.objects.create(number=1)
self.date = date(year=2050, month=1, day=1)
self.irm_snapshot = IRMSnapshot.objects.create(
sphere=self.sphere,
quarter=self.quarter,
date_taken=self.date,
file=File(self.file),
)
self.project_1 = InfraProject.objects.create(IRM_project_id=1)
InfraProjectSnapshot.objects.create(
irm_snapshot=self.irm_snapshot,
project=self.project_1,
province="Eastern Cape",
primary_funding_source="Community Library Service Grant",
estimated_completion_date=self.date,
)
self.project_2 = InfraProject.objects.create(IRM_project_id=2)
InfraProjectSnapshot.objects.create(
irm_snapshot=self.irm_snapshot,
project=self.project_2,
province="Free State",
primary_funding_source="Community Library Service Grant",
estimated_completion_date=self.date,
)
self.project_3 = InfraProject.objects.create(IRM_project_id=3)
InfraProjectSnapshot.objects.create(
irm_snapshot=self.irm_snapshot,
project=self.project_3,
province="Eastern Cape",
primary_funding_source="Equitable Share",
estimated_completion_date=self.date,
)
InfraProjectIndex().reindex()
def tearDown(self):
InfraProjectIndex().clear()
self.file.close()
def test_filter_by_funding_source(self):
source = "Community Library Service Grant"
data = {"primary_funding_source": source}
response = self.client.get(self.url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
number_of_projects = response.data["count"]
self.assertEqual(number_of_projects, 2)
def test_facet_filter_by_funding_source(self):
source = "Community Library Service Grant"
province = "Eastern Cape"
response = self.client.get(self.facet_url)
province_facets = response.data["fields"]["province"]
province_projects_before_filtering = 0
for value in province_facets:
if province == value["text"]:
province_projects_before_filtering = value["count"]
self.assertEqual(province_projects_before_filtering, 2)
data = {"selected_facets": "primary_funding_source_exact:{0}".format(source)}
response = self.client.get(self.facet_url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
province_facets = response.data["fields"]["province"]
province_projects_after_filtering = 0
for value in province_facets:
if province == value["text"]:
province_projects_after_filtering = value["count"]
self.assertEqual(province_projects_after_filtering, 1)
class InfraProjectAPIProjectNameTestCase(APITestCase):
def setUp(self):
InfraProjectIndex().clear()
self.file = open(EMPTY_FILE_PATH, "rb")
self.url = reverse("infrastructure-project-api-list")
self.facet_url = reverse("infrastructure-project-api-facets")
fin_year = FinancialYear.objects.create(slug="2030-31")
self.sphere = Sphere.objects.create(financial_year=fin_year, name="Provincial")
self.quarter = Quarter.objects.create(number=1)
self.date = date(year=2050, month=1, day=1)
self.irm_snapshot = IRMSnapshot.objects.create(
sphere=self.sphere,
quarter=self.quarter,
date_taken=self.date,
file=File(self.file),
)
self.project_1 = InfraProject.objects.create(IRM_project_id=1)
InfraProjectSnapshot.objects.create(
irm_snapshot=self.irm_snapshot,
project=self.project_1,
name="Project 1",
province="Eastern Cape",
estimated_completion_date=self.date,
)
self.project_2 = InfraProject.objects.create(IRM_project_id=2)
InfraProjectSnapshot.objects.create(
irm_snapshot=self.irm_snapshot,
project=self.project_2,
name="Project 2",
province="Eastern Cape",
estimated_completion_date=self.date,
)
InfraProjectIndex().reindex()
def tearDown(self):
InfraProjectIndex().clear()
self.file.close()
def test_search_by_project_name(self):
name = "Project 1"
data = {"q": name}
response = self.client.get(self.url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
results = response.data["results"]
self.assertEqual(len(results), 1)
self.assertEqual(results[0]["name"], name)
def test_facet_search_by_project_name(self):
name = "Project 1"
province = "Eastern Cape"
response = self.client.get(self.facet_url)
province_facets = response.data["fields"]["province"]
province_projects_before_filtering = 0
for value in province_facets:
if province == value["text"]:
province_projects_before_filtering = value["count"]
self.assertEqual(province_projects_before_filtering, 2)
data = {"q": name}
response = self.client.get(self.facet_url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
province_facets = response.data["fields"]["province"]
province_projects_after_filtering = 0
for value in province_facets:
if province == value["text"]:
province_projects_after_filtering = value["count"]
self.assertEqual(province_projects_after_filtering, 1)
class InfraProjectAPIMunicipalityTestCase(APITestCase):
def setUp(self):
InfraProjectIndex().clear()
self.file = open(EMPTY_FILE_PATH, "rb")
self.url = reverse("infrastructure-project-api-list")
self.facet_url = reverse("infrastructure-project-api-facets")
fin_year = FinancialYear.objects.create(slug="2030-31")
self.sphere = Sphere.objects.create(financial_year=fin_year, name="Provincial")
self.quarter = Quarter.objects.create(number=1)
self.date = date(year=2050, month=1, day=1)
self.irm_snapshot = IRMSnapshot.objects.create(
sphere=self.sphere,
quarter=self.quarter,
date_taken=self.date,
file=File(self.file),
)
self.project_1 = InfraProject.objects.create(IRM_project_id=1)
InfraProjectSnapshot.objects.create(
irm_snapshot=self.irm_snapshot,
project=self.project_1,
name="Project 1",
province="Eastern Cape",
local_municipality="Local 1",
estimated_completion_date=self.date,
)
self.project_2 = InfraProject.objects.create(IRM_project_id=2)
InfraProjectSnapshot.objects.create(
irm_snapshot=self.irm_snapshot,
project=self.project_2,
name="Project 2",
province="Eastern Cape",
local_municipality="Local 2",
estimated_completion_date=self.date,
)
InfraProjectIndex().reindex()
def tearDown(self):
InfraProjectIndex().clear()
self.file.close()
def test_search_by_municipality(self):
name = "Project 1"
municipality = "Local 1"
data = {"q": municipality}
response = self.client.get(self.url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
results = response.data["results"]
self.assertEqual(len(results), 1)
self.assertEqual(results[0]["name"], name)
def test_facet_search_by_municipality(self):
province = "Eastern Cape"
municipality = "Local 1"
response = self.client.get(self.facet_url)
province_facets = response.data["fields"]["province"]
province_projects_before_filtering = 0
for value in province_facets:
if province == value["text"]:
province_projects_before_filtering = value["count"]
self.assertEqual(province_projects_before_filtering, 2)
data = {"q": municipality}
response = self.client.get(self.facet_url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
province_facets = response.data["fields"]["province"]
province_projects_after_filtering = 0
for value in province_facets:
if province == value["text"]:
province_projects_after_filtering = value["count"]
self.assertEqual(province_projects_after_filtering, 1)
class InfraProjectAPIContractorTestCase(APITestCase):
def setUp(self):
InfraProjectIndex().clear()
self.file = open(EMPTY_FILE_PATH, "rb")
self.url = reverse("infrastructure-project-api-list")
self.facet_url = reverse("infrastructure-project-api-facets")
fin_year = FinancialYear.objects.create(slug="2030-31")
self.sphere = Sphere.objects.create(financial_year=fin_year, name="Provincial")
self.quarter = Quarter.objects.create(number=1)
self.date = date(year=2050, month=1, day=1)
self.irm_snapshot = IRMSnapshot.objects.create(
sphere=self.sphere,
quarter=self.quarter,
date_taken=self.date,
file=File(self.file),
)
self.project_1 = InfraProject.objects.create(IRM_project_id=1)
InfraProjectSnapshot.objects.create(
irm_snapshot=self.irm_snapshot,
project=self.project_1,
name="Project 1",
main_contractor="Contractor 1",
province="Eastern Cape",
estimated_completion_date=self.date,
)
self.project_2 = InfraProject.objects.create(IRM_project_id=2)
InfraProjectSnapshot.objects.create(
irm_snapshot=self.irm_snapshot,
project=self.project_2,
name="Project 2",
main_contractor="Contractor 2",
province="Eastern Cape",
estimated_completion_date=self.date,
)
InfraProjectIndex().reindex()
def tearDown(self):
InfraProjectIndex().clear()
self.file.close()
def test_search_by_contractor(self):
name = "Project 1"
contractor = "Contractor 1"
data = {"q": contractor}
response = self.client.get(self.url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
results = response.data["results"]
self.assertEqual(len(results), 1)
self.assertEqual(results[0]["name"], name)
def test_facet_search_by_contractor(self):
contractor = "Contractor 1"
province = "Eastern Cape"
response = self.client.get(self.facet_url)
province_facets = response.data["fields"]["province"]
province_projects_before_filtering = 0
for value in province_facets:
if province == value["text"]:
province_projects_before_filtering = value["count"]
self.assertEqual(province_projects_before_filtering, 2)
data = {"q": contractor}
response = self.client.get(self.facet_url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
province_facets = response.data["fields"]["province"]
province_projects_after_filtering = 0
for value in province_facets:
if province == value["text"]:
province_projects_after_filtering = value["count"]
self.assertEqual(province_projects_after_filtering, 1)
class InfraProjectAPISearchMultipleFieldsTestCase(APITestCase):
def setUp(self):
InfraProjectIndex().clear()
self.file = open(EMPTY_FILE_PATH, "rb")
self.url = reverse("infrastructure-project-api-list")
self.facet_url = reverse("infrastructure-project-api-facets")
fin_year = FinancialYear.objects.create(slug="2030-31")
self.sphere = Sphere.objects.create(financial_year=fin_year, name="Provincial")
self.quarter = Quarter.objects.create(number=1)
self.date = date(year=2050, month=1, day=1)
self.irm_snapshot = IRMSnapshot.objects.create(
sphere=self.sphere,
quarter=self.quarter,
date_taken=self.date,
file=File(self.file),
)
self.project_1 = InfraProject.objects.create(IRM_project_id=1)
InfraProjectSnapshot.objects.create(
irm_snapshot=self.irm_snapshot,
project=self.project_1,
name="Something School",
province="Eastern Cape",
estimated_completion_date=date(year=2020, month=6, day=1),
)
self.project_2 = InfraProject.objects.create(IRM_project_id=2)
InfraProjectSnapshot.objects.create(
irm_snapshot=self.irm_snapshot,
project=self.project_2,
name="Project 2",
province="Eastern Cape",
estimated_completion_date=date(year=2020, month=6, day=1),
)
InfraProjectIndex().reindex()
def tearDown(self):
InfraProjectIndex().clear()
self.file.close()
def test_search_multiple_fields(self):
data = {"q": "Eastern Cape School"}
response = self.client.get(self.url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
results = response.data["results"]
self.assertEqual(len(results), 1)
self.assertEqual(results[0]["province"], "Eastern Cape")
self.assertEqual(results[0]["name"], "Something School")
def test_facet_search_multiple_fields(self):
data = {"q": "Eastern Cape School"}
response = self.client.get(self.facet_url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
results = response.data["objects"]["results"]
self.assertEqual(len(results), 1)
self.assertEqual(results[0]["province"], "Eastern Cape")
self.assertEqual(results[0]["name"], "Something School")
class InfraProjectAPIURLPathTestCase(APITestCase):
def setUp(self):
InfraProjectIndex().clear()
self.file = open(EMPTY_FILE_PATH, "rb")
fin_year = FinancialYear.objects.create(slug="2030-31", published=True)
self.sphere = Sphere.objects.create(financial_year=fin_year, name="Provincial")
self.quarter = Quarter.objects.create(number=1)
self.date = date(year=2050, month=1, day=1)
self.irm_snapshot = IRMSnapshot.objects.create(
sphere=self.sphere,
quarter=self.quarter,
date_taken=self.date,
file=File(self.file),
)
self.url = reverse("infrastructure-project-api-list")
self.facet_url = reverse("infrastructure-project-api-facets")
self.project = InfraProject.objects.create(IRM_project_id=1)
InfraProjectSnapshot.objects.create(
irm_snapshot=self.irm_snapshot,
project=self.project,
name="Project 1",
estimated_completion_date=date(year=2020, month=1, day=1),
province="Fake prov",
department="Fake dept",
)
InfraProjectIndex().reindex()
def tearDown(self):
InfraProjectIndex().clear()
self.file.close()
def test_url_path(self):
name = "Project 1"
data = {"name": name}
response = self.client.get(self.url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
result = response.data["results"][0]
url_path = result["url_path"]
response = self.client.get(url_path)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertContains(response, name)
def test_facet_url_path(self):
name = "Project 1"
data = {"q": name}
response = self.client.get(self.facet_url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
result = response.data["objects"]["results"][0]
url_path = result["url_path"]
response = self.client.get(url_path)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertContains(response, name)
class InfraProjectSnapshotTestCase(APITestCase):
def setUp(self):
InfraProjectIndex().clear()
self.file_1 = open(EMPTY_FILE_PATH, "rb")
self.file_2 = open(EMPTY_FILE_PATH, "rb")
self.project = InfraProject.objects.create(IRM_project_id=1)
fin_year = FinancialYear.objects.create(slug="2030-31", published=True)
self.sphere = Sphere.objects.create(financial_year=fin_year, name="Provincial")
self.quarter_1 = Quarter.objects.create(number=1)
self.quarter_2 = Quarter.objects.create(number=2)
self.date_1 = date(year=2050, month=1, day=1)
self.irm_snapshot_1 = IRMSnapshot.objects.create(
sphere=self.sphere,
quarter=self.quarter_1,
date_taken=self.date_1,
file=File(self.file_1),
)
self.project_snapshot_1 = InfraProjectSnapshot.objects.create(
irm_snapshot=self.irm_snapshot_1,
project=self.project,
local_municipality="MUNI A",
estimated_completion_date=date(year=2020, month=1, day=1),
department="Fake Dept",
province="Fake Prov",
)
self.irm_snapshot_2 = IRMSnapshot.objects.create(
sphere=self.sphere,
quarter=self.quarter_2,
date_taken=self.date_1,
file=File(self.file_2),
)
self.project_snapshot_2 = InfraProjectSnapshot.objects.create(
irm_snapshot=self.irm_snapshot_2,
project=self.project,
local_municipality="MUNI B",
estimated_completion_date=date(year=2020, month=1, day=1),
department="Fake Dept",
province="Fake Prov",
)
def tearDown(self):
self.file_1.close()
self.file_2.close()
def test_latest_status_in_the_content(self):
response = self.client.get(self.project.get_absolute_url())
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertContains(response, "MUNI B")
self.assertNotContains(response, "MUNI A")
def test_latest_in_the_same_year(self):
latest = self.project.project_snapshots.latest()
self.assertEqual(self.project_snapshot_2, latest)
class InfraProjectSnapshotDifferentYearsTestCase(APITestCase):
def setUp(self):
InfraProjectIndex().clear()
self.file_1 = open(EMPTY_FILE_PATH, "rb")
self.file_2 = open(EMPTY_FILE_PATH, "rb")
self.project = InfraProject.objects.create(IRM_project_id=1)
fin_year_1 = FinancialYear.objects.create(slug="2030-31")
fin_year_2 = FinancialYear.objects.create(slug="2031-32")
self.sphere_1 = Sphere.objects.create(
financial_year=fin_year_1, name="Provincial"
)
self.sphere_2 = Sphere.objects.create(
financial_year=fin_year_2, name="Provincial"
)
self.quarter_1 = Quarter.objects.create(number=1)
self.date_1 = date(year=2050, month=1, day=1)
self.date_2 = | |
#!/usr/bin/env python
#
# Copyright 2018 VMware, Inc.
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from ansible.module_utils.policy_communicator import PolicyCommunicator
from ansible.module_utils.policy_communicator import DuplicateRequestError
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import sys
if sys.version_info[0] < 3:
raise Exception("Must be using Python 3")
from abc import ABC, abstractmethod
import time
import json
import inspect
# Add all the base resources that can be configured in the
# Policy API here. Required to infer base resource params.
BASE_RESOURCES = {"NSXTSegment", "NSXTTier0", "NSXTTier1",
"NSXTSecurityPolicy", "NSXTPolicyGroup",
"NSXTIpBlock", "NSXTIpPool", "NSXTBFDConfig",
"NSXTGatewayPolicy"}
class NSXTBaseRealizableResource(ABC):
INCORRECT_ARGUMENT_NAME_VALUE = "error_invalid_parameter"
def realize(self, supports_check_mode=True,
successful_resource_exec_logs=[],
baseline_arg_names=[], resource_params=None):
# must call this method to realize the creation, update, or deletion of
# resource
self.resource_class = self.__class__
if not hasattr(self, "_arg_spec"):
# Base resource
self._make_ansible_arg_spec(
supports_check_mode=supports_check_mode)
if not hasattr(self, 'module'):
self.module = AnsibleModule(
argument_spec=self._arg_spec,
supports_check_mode=supports_check_mode)
self.set_baseline_args(baseline_arg_names)
# Infer manager credentials
mgr_hostname = self.module.params['hostname']
mgr_username = self.module.params['username']
mgr_password = self.module.params['password']
nsx_cert_path = self.module.params['nsx_cert_path']
nsx_key_path = self.module.params['nsx_key_path']
request_headers = self.module.params['request_headers']
ca_path = self.module.params['ca_path']
validate_certs = self.module.params['validate_certs']
# Each manager has an associated PolicyCommunicator
self.policy_communicator = PolicyCommunicator.get_instance(
mgr_hostname, mgr_username, mgr_password, nsx_cert_path,
nsx_key_path, request_headers, ca_path, validate_certs)
if resource_params is None:
resource_params = self.module.params
self.resource_params = resource_params
self._state = self.get_attribute('state', resource_params)
if not (hasattr(self, 'id') and self.id):
if self.get_resource_name() in BASE_RESOURCES:
self.id = self._get_id_using_attr_name(
None, resource_params,
self.get_resource_base_url(self.baseline_args),
self.get_spec_identifier())
else:
self.id = self._get_id_using_attr_name(
None, resource_params,
self.get_resource_base_url(self._parent_info),
self.get_spec_identifier())
if self.id is None:
return
# Extract the resource params from module
self.nsx_resource_params = self._extract_nsx_resource_params(
resource_params)
# parent_info is passed to subresources of a resource automatically
if not hasattr(self, "_parent_info"):
self._parent_info = {}
self.update_parent_info(self._parent_info)
try:
# get existing resource schema
_, self.existing_resource = self._send_request_to_API(
"/" + self.id, ignore_error=False,
accepted_error_codes=set([404]))
# As Policy API's PATCH requires all attributes to be filled,
# we fill in the missing resource params (those not specified by the
# user) using the existing params
self._fill_missing_resource_params(
self.existing_resource, self.nsx_resource_params)
except Exception as err:
# the resource does not exist currently on the manager
self.existing_resource = None
self._achieve_state(resource_params, successful_resource_exec_logs)
@classmethod
def get_spec_identifier(cls):
# Can be overridden in the subclass to provide a different
# unique_arg_identifier. It is used to infer which args belong to which
# subresource.
# By default, the class name is used for subresources.
return cls.get_resource_name()
def get_state(self):
return self._state
def get_parent_info(self):
return self._parent_info
@staticmethod
@abstractmethod
def get_resource_base_url(parent_info):
# Must be overridden by the subclass
raise NotImplementedError
@staticmethod
@abstractmethod
def get_resource_spec():
# Must be overridden by the subclass
raise NotImplementedError
@classmethod
def get_resource_name(cls):
return cls.__name__
def create_or_update_subresource_first(self):
# return True if subresource should be created/updated before parent
# resource
return self.resource_params.get(
"create_or_update_subresource_first", False)
def delete_subresource_first(self):
# return True if subresource should be deleted before parent resource
return self.resource_params.get("delete_subresource_first", True)
def achieve_subresource_state_if_del_parent(self):
# return True if this resource is to be realized with its own specified
# state irrespective of the state of its parent resource.
return self.resource_params.get(
"achieve_subresource_state_if_del_parent", False)
def do_wait_till_create(self):
# By default, we do not wait for the parent resource to be created or
# updated before its subresource is to be realized.
return self.resource_params.get("do_wait_till_create", False)
@staticmethod
def get_resource_update_priority():
# this priority can be used to create/delete subresources
# at the same level in a particular order.
# by default, it returns 1 so the resources are created/updated/
# deleted in no particular order.
# should be overridden in the subclass to specify a priority.
# for creation or update, we iterate in descending order.
# for deletion, we iterate in ascending order.
return 1
def set_arg_spec(self, arg_spec):
self._arg_spec = arg_spec
def set_ansible_module(self, ansible_module):
self.module = ansible_module
def set_parent_info(self, parent_info):
self._parent_info = parent_info
def achieve_subresource_state(
self, resource_params, successful_resource_exec_logs):
"""
Achieve the state of each sub-resource.
"""
for sub_resource_class in self._get_sub_resources_class_of(
self.resource_class):
if sub_resource_class.allows_multiple_resource_spec():
children_resource_spec = (resource_params.get(
sub_resource_class.get_spec_identifier()) or [])
else:
children_resource_spec = ([resource_params.get(
sub_resource_class.get_spec_identifier())] or [])
# Update the parent pointer
my_parent = self._parent_info.get('_parent', '')
self._update_parent_info()
for resource_param_spec in children_resource_spec:
if resource_param_spec is not None:
sub_resource = sub_resource_class()
sub_resource.set_arg_spec(self._arg_spec)
sub_resource.set_ansible_module(self.module)
sub_resource.set_parent_info(self._parent_info)
sub_resource.realize(
successful_resource_exec_logs=(
successful_resource_exec_logs),
resource_params=resource_param_spec)
# Restore the parent pointer
self._parent_info['_parent'] = my_parent
def update_resource_params(self, nsx_resource_params):
# Can be used to update the params of the resource before making
# the API call.
# Should be overridden in the subclass if needed
pass
def check_for_update(self, existing_params, resource_params):
"""
resource_params: dict
existing_params: dict
Compares the existing_params with resource_params and returns
True if they are different. At a base level, it traverses the
params and matches one-to-one. If the value to be matched is a
- dict, it traverses that also.
- list, it compares the elements as sets when they are hashable
(ignoring order); otherwise it assumes the lists differ.
Can be overridden in the subclass for specific custom checking.
Returns True if the params differ
"""
if not existing_params:
return False
for k, v in resource_params.items():
if k not in existing_params:
return True
elif type(v).__name__ == 'dict':
if self.check_for_update(existing_params[k], v):
return True
elif v != existing_params[k]:
def compare_lists(list1, list2):
# Returns True if list1 and list2 differ
try:
# If the lists can be converted into sets, do so and
# compare lists as sets.
set1 = set(list1)
set2 = set(list2)
return set1 != set2
except Exception:
return True
if type(v).__name__ == 'list':
if compare_lists(v, existing_params[k]):
return True
continue
return True
return False
def update_parent_info(self, parent_info):
# Override this and fill in self._parent_info if that is to be passed
# to the sub-resource
# By default, parent's id is passed
parent_info[self.get_spec_identifier() + "_id"] = self.id
def get_attribute(self, attribute, resource_params):
"""
attribute: String
resource_params: Parameters of the resource
"""
if (attribute == "state" and
self.get_resource_name() not in BASE_RESOURCES):
# if the parent has absent state, its subresources should also have
# absent state. So, irrespective of what the user specifies, if the
# parent is to be deleted, the child resources will be deleted.
# override achieve_subresource_state_if_del_parent
# in resource class to change this behavior
if (self._parent_info["_parent"].get_state() == "absent" and
not self.achieve_subresource_state_if_del_parent()):
return "absent"
return resource_params.get(
attribute, self.INCORRECT_ARGUMENT_NAME_VALUE)
def set_baseline_args(self, baseline_arg_names):
# Can be overridden in the subclass
self.baseline_args = {}
for baseline_arg_name in baseline_arg_names:
self.baseline_args[baseline_arg_name] = self.module.params[
baseline_arg_name]
def do_resource_params_have_attr_with_id_or_display_name(self, attr):
if (attr + "_id" in self.nsx_resource_params or
attr + "_display_name" in self.nsx_resource_params):
return True
return False
def get_id_using_attr_name_else_fail(self, attr_name, params,
resource_base_url, resource_type,
ignore_not_found_error=True):
resource_id = self._get_id_using_attr_name(
attr_name, params, resource_base_url, resource_type,
ignore_not_found_error)
if resource_id is not None:
return resource_id
# Incorrect usage of Ansible Module
self.module.fail_json(msg="Please specify either {} id or display_name"
" for the resource {}".format(
attr_name, str(resource_type)))
def exit_with_failure(self, msg, **kwargs):
self.module.fail_json(msg=msg, **kwargs)
def skip_delete(self):
"""
Override in subclass if this resource is skipped to be deleted.
Note that the children of this resource will still be deleted unless
they override this method as well.
"""
return False
@classmethod
def is_required_in_spec(cls):
"""
Override in subclass if this resource is optional to be specified
in the ansible playbook.
"""
return False
@classmethod
def allows_multiple_resource_spec(cls):
"""
Override in the resource class definition with False if only one
resource can be associated with the parent. By default, we accept
multiple
"""
return True
def _get_id_using_attr_name(self, attr_name, params,
resource_base_url, resource_type,
ignore_not_found_error=True):
# Pass attr_name '' or None to infer base resource's ID
id_identifier = 'id'
display_name_identifier = 'display_name'
if attr_name:
id_identifier = attr_name + "_id"
display_name_identifier = attr_name + "_display_name"
if id_identifier in params and params[id_identifier]:
return params.pop(id_identifier)
if (display_name_identifier in params and
params[display_name_identifier]):
resource_display_name = params.pop(display_name_identifier)
# Use display_name as ID if ID is not specified.
return | |
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2020, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
"""Implements pgAgent Job Schedule Node"""
import json
from functools import wraps
from flask import render_template, request, jsonify
from flask_babelex import gettext
from pgadmin.browser.collection import CollectionNodeModule
from pgadmin.browser.utils import PGChildNodeView
from pgadmin.utils.ajax import make_json_response, gone, \
make_response as ajax_response, internal_server_error
from pgadmin.utils.driver import get_driver
from pgadmin.browser.server_groups.servers.pgagent.utils \
import format_schedule_data
from config import PG_DEFAULT_DRIVER
class JobScheduleModule(CollectionNodeModule):
"""
class JobScheduleModule(CollectionNodeModule)
A module class for JobSchedule node derived from CollectionNodeModule.
Methods:
-------
* get_nodes(gid, sid, jid)
- Method is used to generate the browser collection node.
* node_inode()
- Method is overridden from its base class to make the node a leaf node.
"""
NODE_TYPE = 'pga_schedule'
COLLECTION_LABEL = gettext("Schedules")
def get_nodes(self, gid, sid, jid):
"""
Method is used to generate the browser collection node
Args:
gid: Server Group ID
sid: Server ID
jid: Job ID
"""
yield self.generate_browser_collection_node(jid)
@property
def node_inode(self):
"""
Override this property to make the node a leaf node.
Returns: False as this is the leaf node
"""
return False
@property
def script_load(self):
"""
Load the module script for schedule, when any of the pga_job
nodes are initialized.
Returns: node type of the server module.
"""
return 'pga_job'
@property
def csssnippets(self):
"""
Returns a snippet of css to include in the page
"""
snippets = [
render_template(
"pga_schedule/css/pga_schedule.css",
node_type=self.node_type
)
]
for submodule in self.submodules:
snippets.extend(submodule.csssnippets)
return snippets
@property
def module_use_template_javascript(self):
"""
Returns whether Jinja2 template is used for generating the javascript
module.
"""
return False
blueprint = JobScheduleModule(__name__)
class JobScheduleView(PGChildNodeView):
"""
class JobScheduleView(PGChildNodeView)
A view class for JobSchedule node derived from PGChildNodeView.
This class is responsible for all the stuff related to view like
updating schedule node, showing properties, showing sql in sql pane.
Methods:
-------
* __init__(**kwargs)
- Method is used to initialize the JobScheduleView and it's base view.
* check_precondition()
- This function will behave as a decorator which checks the
database connection before running the view; it also attaches
manager, conn & template_path properties to self
* list()
- This function is used to list all the schedule nodes within that
collection.
* nodes()
- This function is used to create all the child nodes within that
collection. Here it will create all the schedule nodes.
* properties(gid, sid, jid, jscid)
- This function will show the properties of the selected schedule node
* update(gid, sid, jid, jscid)
- This function will update the data for the selected schedule node
* msql(gid, sid, jid, jscid)
- This function is used to return modified SQL for the
selected schedule node
* sql(gid, sid, jid, jscid)
- Dummy response for sql panel
* delete(gid, sid, jid, jscid)
- Drops job schedule
"""
node_type = blueprint.node_type
parent_ids = [
{'type': 'int', 'id': 'gid'},
{'type': 'int', 'id': 'sid'},
{'type': 'int', 'id': 'jid'}
]
ids = [
{'type': 'int', 'id': 'jscid'}
]
operations = dict({
'obj': [
{'get': 'properties', 'put': 'update', 'delete': 'delete'},
{'get': 'list', 'post': 'create', 'delete': 'delete'}
],
'nodes': [{'get': 'nodes'}, {'get': 'nodes'}],
'msql': [{'get': 'msql'}, {'get': 'msql'}],
'sql': [{'get': 'sql'}]
})
def __init__(self, **kwargs):
"""
Method is used to initialize the JobScheduleView and its base view.
Initialize all the variables created/used dynamically like conn,
template_path.
Args:
**kwargs:
"""
self.conn = None
self.template_path = None
self.manager = None
super(JobScheduleView, self).__init__(**kwargs)
def check_precondition(f):
"""
This function will behave as a decorator which will check the
database connection before running the view. It also attaches
manager, conn & template_path properties to self
"""
@wraps(f)
def wrap(*args, **kwargs):
# Here args[0] will hold self & kwargs will hold gid,sid,jid
self = args[0]
self.driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = self.driver.connection_manager(kwargs['sid'])
self.conn = self.manager.connection()
self.template_path = 'pga_schedule/sql/pre3.4'
return f(*args, **kwargs)
return wrap
@check_precondition
def list(self, gid, sid, jid):
"""
This function is used to list all the schedule nodes within
that collection.
Args:
gid: Server Group ID
sid: Server ID
jid: Job ID
"""
sql = render_template(
"/".join([self.template_path, 'properties.sql']),
jid=jid
)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
return ajax_response(
response=res['rows'],
status=200
)
@check_precondition
def nodes(self, gid, sid, jid, jscid=None):
"""
This function is used to create all the child nodes within
the collection. Here it will create all the schedule nodes.
Args:
gid: Server Group ID
sid: Server ID
jid: Job ID
"""
res = []
sql = render_template(
"/".join([self.template_path, 'nodes.sql']),
jscid=jscid,
jid=jid
)
status, result = self.conn.execute_2darray(sql)
if not status:
return internal_server_error(errormsg=result)
if jscid is not None:
if len(result['rows']) == 0:
return gone(
errormsg=gettext("Could not find the specified job step.")
)
row = result['rows'][0]
return make_json_response(
data=self.blueprint.generate_browser_node(
row['jscid'],
row['jscjobid'],
row['jscname'],
icon="icon-pga_schedule" if row['jscenabled'] else
"icon-pga_schedule-disabled",
enabled=row['jscenabled']
)
)
for row in result['rows']:
res.append(
self.blueprint.generate_browser_node(
row['jscid'],
row['jscjobid'],
row['jscname'],
icon="icon-pga_schedule" if row['jscenabled'] else
"icon-pga_schedule-disabled",
enabled=row['jscenabled']
)
)
return make_json_response(
data=res,
status=200
)
@check_precondition
def properties(self, gid, sid, jid, jscid):
"""
This function will show the properties of the selected schedule node.
Args:
gid: Server Group ID
sid: Server ID
jid: Job ID
jscid: JobSchedule ID
"""
sql = render_template(
"/".join([self.template_path, 'properties.sql']),
jscid=jscid, jid=jid
)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
errormsg=gettext("Could not find the specified job step.")
)
return ajax_response(
response=res['rows'][0],
status=200
)
@check_precondition
def create(self, gid, sid, jid):
"""
This function will create a job schedule for the given job.
Args:
gid: Server Group ID
sid: Server ID
jid: Job ID
"""
data = {}
if request.args:
for k, v in request.args.items():
try:
data[k] = json.loads(
v.decode('utf-8') if hasattr(v, 'decode') else v
)
except ValueError:
data[k] = v
else:
data = json.loads(request.data.decode())
# convert python list literal to postgres array literal.
format_schedule_data(data)
sql = render_template(
"/".join([self.template_path, 'create.sql']),
jid=jid,
data=data,
fetch_id=True
)
status, res = self.conn.execute_void('BEGIN')
if not status:
return internal_server_error(errormsg=res)
status, res = self.conn.execute_scalar(sql)
if not status:
if self.conn.connected():
self.conn.execute_void('END')
return internal_server_error(errormsg=res)
self.conn.execute_void('END')
sql = render_template(
"/".join([self.template_path, 'properties.sql']),
jscid=res,
jid=jid
)
status, res = self.conn.execute_2darray(sql)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
errormsg=gettext("Job schedule creation failed.")
)
row = res['rows'][0]
return jsonify(
node=self.blueprint.generate_browser_node(
row['jscid'],
row['jscjobid'],
row['jscname'],
icon="icon-pga_schedule" if row['jscenabled'] else
"icon-pga_schedule-disabled",
enabled=row['jscenabled']
)
)
@check_precondition
def update(self, gid, sid, jid, jscid):
"""
This function will update the data for the selected schedule node.
Args:
gid: Server Group ID
sid: Server ID
jid: Job ID
jscid: JobSchedule ID
"""
data = {}
if request.args:
for k, v in request.args.items():
try:
data[k] = json.loads(
v.decode('utf-8') if hasattr(v, 'decode') else v
)
except ValueError:
data[k] = v
else:
data = json.loads(request.data.decode())
# convert python list literal to postgres array literal.
format_schedule_data(data)
sql = render_template(
"/".join([self.template_path, 'update.sql']),
jid=jid,
jscid=jscid,
data=data
)
status, res = self.conn.execute_void(sql)
if not status:
return internal_server_error(errormsg=res)
sql = render_template(
"/".join([self.template_path, 'properties.sql']),
jscid=jscid,
jid=jid
)
status, res = self.conn.execute_2darray(sql)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
errormsg=gettext("Job schedule update failed.")
)
row = res['rows'][0]
return jsonify(
node=self.blueprint.generate_browser_node(
jscid,
jid,
row['jscname'],
icon="icon-pga_schedule" if row['jscenabled'] else
"icon-pga_schedule-disabled",
enabled=row['jscenabled']
)
)
@check_precondition
def delete(self, gid, sid, jid, jscid=None):
"""Delete the Job Schedule."""
if jscid is None:
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
else:
data = {'ids': [jscid]}
for jscid in data['ids']:
status, res = self.conn.execute_void(
render_template(
"/".join([self.template_path, 'delete.sql']),
jid=jid, jscid=jscid, conn=self.conn
)
)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(success=1)
@check_precondition
def msql(self, gid, sid, jid, jscid=None):
"""
This function is used to return modified SQL for the
selected Schedule node.
Args:
gid: Server Group ID
sid: Server ID
jid: Job ID
jscid: Job Schedule ID (optional)
"""
data = {}
sql = ''
for k, v in request.args.items():
try:
data[k] = json.loads(
v.decode('utf-8') if hasattr(v, 'decode') else v
)
except ValueError:
data[k] = v
if jscid is None:
sql = render_template(
"/".join([self.template_path, 'create.sql']),
jid=jid,
data=data,
fetch_id=False
)
else:
sql = render_template(
"/".join([self.template_path, 'update.sql']),
jid=jid,
jscid=jscid,
data=data
)
return make_json_response(
data=sql,
status=200
)
@check_precondition
def sql(self, gid, | |
the type of object this is. Should always . # noqa: E501
:param category_id: The category_id of this BlogPost. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and category_id is None: # noqa: E501
raise ValueError("Invalid value for `category_id`, must not be `None`") # noqa: E501
self._category_id = category_id
@property
def state(self):
"""Gets the state of this BlogPost. # noqa: E501
An ENUM describing the current state of this Blog Post. # noqa: E501
:return: The state of this BlogPost. # noqa: E501
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this BlogPost.
An ENUM describing the current state of this Blog Post. # noqa: E501
:param state: The state of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and state is None: # noqa: E501
raise ValueError("Invalid value for `state`, must not be `None`") # noqa: E501
if self.local_vars_configuration.client_side_validation and state is not None and len(state) > 25:
raise ValueError("Invalid value for `state`, length must be less than or equal to `25`") # noqa: E501
self._state = state
@property
def template_path(self):
"""Gets the template_path of this BlogPost. # noqa: E501
:return: The template_path of this BlogPost. # noqa: E501
:rtype: str
"""
return self._template_path
@template_path.setter
def template_path(self, template_path):
"""Sets the template_path of this BlogPost.
:param template_path: The template_path of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and template_path is None: # noqa: E501
raise ValueError("Invalid value for `template_path`, must not be `None`") # noqa: E501
self._template_path = template_path
@property
def name(self):
"""Gets the name of this BlogPost. # noqa: E501
The internal name of the blog post. # noqa: E501
:return: The name of this BlogPost. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this BlogPost.
The internal name of the blog post. # noqa: E501
:param name: The name of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def mab_experiment_id(self):
"""Gets the mab_experiment_id of this BlogPost. # noqa: E501
:return: The mab_experiment_id of this BlogPost. # noqa: E501
:rtype: str
"""
return self._mab_experiment_id
@mab_experiment_id.setter
def mab_experiment_id(self, mab_experiment_id):
"""Sets the mab_experiment_id of this BlogPost.
:param mab_experiment_id: The mab_experiment_id of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and mab_experiment_id is None: # noqa: E501
raise ValueError("Invalid value for `mab_experiment_id`, must not be `None`") # noqa: E501
self._mab_experiment_id = mab_experiment_id
@property
def archived(self):
"""Gets the archived of this BlogPost. # noqa: E501
If True, the post will not show up in your dashboard, although the post could still be live. # noqa: E501
:return: The archived of this BlogPost. # noqa: E501
:rtype: bool
"""
return self._archived
@archived.setter
def archived(self, archived):
"""Sets the archived of this BlogPost.
If True, the post will not show up in your dashboard, although the post could still be live. # noqa: E501
:param archived: The archived of this BlogPost. # noqa: E501
:type: bool
"""
if self.local_vars_configuration.client_side_validation and archived is None: # noqa: E501
raise ValueError("Invalid value for `archived`, must not be `None`") # noqa: E501
self._archived = archived
@property
def author_name(self):
"""Gets the author_name of this BlogPost. # noqa: E501
The name of the user that updated this blog post. # noqa: E501
:return: The author_name of this BlogPost. # noqa: E501
:rtype: str
"""
return self._author_name
@author_name.setter
def author_name(self, author_name):
"""Sets the author_name of this BlogPost.
The name of the user that updated this blog post. # noqa: E501
:param author_name: The author_name of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and author_name is None: # noqa: E501
raise ValueError("Invalid value for `author_name`, must not be `None`") # noqa: E501
self._author_name = author_name
@property
def ab_test_id(self):
"""Gets the ab_test_id of this BlogPost. # noqa: E501
:return: The ab_test_id of this BlogPost. # noqa: E501
:rtype: str
"""
return self._ab_test_id
@ab_test_id.setter
def ab_test_id(self, ab_test_id):
"""Sets the ab_test_id of this BlogPost.
:param ab_test_id: The ab_test_id of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and ab_test_id is None: # noqa: E501
raise ValueError("Invalid value for `ab_test_id`, must not be `None`") # noqa: E501
self._ab_test_id = ab_test_id
@property
def created_by_id(self):
"""Gets the created_by_id of this BlogPost. # noqa: E501
The ID of the user that created this blog post. # noqa: E501
:return: The created_by_id of this BlogPost. # noqa: E501
:rtype: str
"""
return self._created_by_id
@created_by_id.setter
def created_by_id(self, created_by_id):
"""Sets the created_by_id of this BlogPost.
The ID of the user that created this blog post. # noqa: E501
:param created_by_id: The created_by_id of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and created_by_id is None: # noqa: E501
raise ValueError("Invalid value for `created_by_id`, must not be `None`") # noqa: E501
self._created_by_id = created_by_id
@property
def updated_by_id(self):
"""Gets the updated_by_id of this BlogPost. # noqa: E501
The ID of the user that updated this blog post. # noqa: E501
:return: The updated_by_id of this BlogPost. # noqa: E501
:rtype: str
"""
return self._updated_by_id
@updated_by_id.setter
def updated_by_id(self, updated_by_id):
"""Sets the updated_by_id of this BlogPost.
The ID of the user that updated this blog post. # noqa: E501
:param updated_by_id: The updated_by_id of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and updated_by_id is None: # noqa: E501
raise ValueError("Invalid value for `updated_by_id`, must not be `None`") # noqa: E501
self._updated_by_id = updated_by_id
@property
def domain(self):
"""Gets the domain of this BlogPost. # noqa: E501
The domain this Blog Post will resolve to. If null, the Blog Post will default to the domain of the ParentBlog. # noqa: E501
:return: The domain of this BlogPost. # noqa: E501
:rtype: str
"""
return self._domain
@domain.setter
def domain(self, domain):
"""Sets the domain of this BlogPost.
The domain this Blog Post will resolve to. If null, the Blog Post will default to the domain of the ParentBlog. # noqa: E501
:param domain: The domain of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and domain is None: # noqa: E501
raise ValueError("Invalid value for `domain`, must not be `None`") # noqa: E501
self._domain = domain
@property
def subcategory(self):
"""Gets the subcategory of this BlogPost. # noqa: E501
:return: The subcategory of this BlogPost. # noqa: E501
:rtype: str
"""
return self._subcategory
@subcategory.setter
def subcategory(self, subcategory):
"""Sets the subcategory of this BlogPost.
:param subcategory: The subcategory of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and subcategory is None: # noqa: E501
raise ValueError("Invalid value for `subcategory`, must not be `None`") # noqa: E501
self._subcategory = subcategory
@property
def ab_status(self):
"""Gets the ab_status of this BlogPost. # noqa: E501
:return: The ab_status of this BlogPost. # noqa: E501
:rtype: str
"""
return self._ab_status
@ab_status.setter
def ab_status(self, ab_status):
"""Sets the ab_status of this BlogPost.
:param ab_status: The ab_status of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and ab_status is None: # noqa: E501
raise ValueError("Invalid value for `ab_status`, must not be `None`") # noqa: E501
allowed_values = ["master", "variant", "loser_variant", "mab_master", "mab_variant", "automated_master", "automated_variant", "automated_loser_variant"] # noqa: E501
if self.local_vars_configuration.client_side_validation and ab_status not in allowed_values: # noqa: E501
raise ValueError("Invalid value for `ab_status` ({0}), must be one of {1}".format(ab_status, allowed_values)) # noqa: E501
self._ab_status = ab_status
@property
def folder_id(self):
"""Gets the folder_id of this BlogPost. # noqa: E501
:return: The folder_id of this BlogPost. # noqa: E501
:rtype: str
"""
return self._folder_id
@folder_id.setter
def folder_id(self, folder_id):
"""Sets the folder_id of this BlogPost.
:param folder_id: The folder_id of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and folder_id is None: # noqa: E501
raise ValueError("Invalid value for `folder_id`, must not be `None`") # noqa: E501
self._folder_id = folder_id
@property
def | |
from dataclasses import dataclass
from typing import List
import numpy as np
import scipy.spatial
import matplotlib.pyplot as plt
from spherical_geometry.polygon import SingleSphericalPolygon
from .celeri_util import sph2cart
def angle_between_vectors(v1, v2, v3):
"""
Compute the angle between the vectors (v2, v3) and (v1, v2).
The angle is constrained to lie in [-np.pi, np.pi]
No turn will result in an angle of 0.
A left turn will produce a positive angle.
A right turn will produce a negative angle.
The function is designed for units in (longitude degrees, latitude degrees)
and will handle the meridian. If you have units in another coordinate system,
this function will cause problems.
"""
# *Meridian handling*
# The furthest longitudinal distance between any two points is 180 degrees,
# so if the distance is greater than that, then we subtract 360 degrees.
# Note that this solution should work well regardless of whether the longitude
# coordinate range is [0,360) or [-180,180)
A1x = v3[0] - v2[0]
if A1x > 180:
A1x -= 360
if A1x < -180:
A1x += 360
A2x = v2[0] - v1[0]
if A2x > 180:
A2x -= 360
if A2x < -180:
A2x += 360
A1 = np.arctan2(v3[1] - v2[1], A1x)
A2 = np.arctan2(v2[1] - v1[1], A2x)
angle = A1 - A2
if angle < -np.pi:
angle += 2 * np.pi
elif angle > np.pi:
angle -= 2 * np.pi
return angle
@dataclass()
class BoundingBox:
"""
A bounding box on a sphere can be defined by the minimum and maximum latitude
and longitude.
*Inverse longitude*:
In the case where the box crosses the meridian, we specify the inverse region
of longitude. As an example, suppose the box spans from 355 deg longitude to
5 degrees longitude, we instead store the [5,355] range of longitude and when
we do queries to identify if a point is inside the bounding box we exclude rather
than include values between min_lon and max_lon.
"""
min_lon: float
max_lon: float
inverse_lon: bool
min_lat: float
max_lat: float
@classmethod
def from_polygon(cls, vertices):
lon_interval, inverse = find_longitude_interval(vertices[:, 0])
return BoundingBox(
min_lon=lon_interval[0],
max_lon=lon_interval[1],
inverse_lon=inverse,
min_lat=np.min(vertices[:, 1]),
max_lat=np.max(vertices[:, 1]),
)
def contains(self, lon, lat):
in_lat = (self.min_lat <= lat) & (lat <= self.max_lat)
if not self.inverse_lon:
# If the polygon spans more than 180 degrees, don't trust the
# bounding box. The possible failure mode here is that a
# circumpolar block can exclude points that are south of its
# southernmost edge.
if self.max_lon - self.min_lon > 180:
return np.ones_like(lon, dtype=bool)
in_lon = (self.min_lon <= lon) & (lon <= self.max_lon)
else:
# Same as above, but for an inverse min/max lon range, having
# max-min longitude < 180 is equivalent to having the true extent of
# the block greater than 180 degrees.
if self.max_lon - self.min_lon < 180:
return np.ones_like(lon, dtype=bool)
in_lon = (self.min_lon >= lon) | (lon >= self.max_lon)
return in_lat & in_lon
def find_longitude_interval(lon):
"""
Given a list of polygon longitude values, we want to identify the maximum
and minimum longitude for that polygon. On its face, that seems like a
simple (min, max), but the meridian means that the problem is not quite
that simple. First, we need to split all the intervals across the meridian.
Then, we combine the resulting intervals.
After combining the intervals, there should be either one or two intervals.
- If there is one interval, that polygon does not cross the meridian and
we return the actual (min, max) longitude.
- If there are two intervals, the polygon crosses the meridian and there
is a (X, 360) interval and a (0, Y) interval. As a result, we instead
return the inverse longitude interval of (Y, X) and specify the inverse
= True return value.
"""
# Step 1) Split intervals.
intervals = []
for i in range(lon.shape[0] - 1):
s1 = lon[i]
s2 = lon[i + 1]
# If the longitudes are separated by more than 180 degrees, then
# this is a meridian crossing segment where s1 is <180 and s2 is
# >180 degrees. So use (0, s1) and (s2, 360).
if s2 - s1 > 180:
intervals.append((0, s1))
intervals.append((s2, 360))
# Similarly, a separation of less than -180 degrees suggests that s1 is
# >180 and s2 is <180. So use (s1, 360) and (0, s2).
elif s2 - s1 < -180:
intervals.append((s1, 360))
intervals.append((0, s2))
else:
intervals.append((s1, s2))
intervals = np.array([(s1, s2) if s1 < s2 else (s2, s1) for s1, s2 in intervals])
# Step 2) Combine intervals
# Fun classic intro algorithms problem: how to combine intervals in O(n log(n))?
# Sort them by the first value, and then combine adjacent intervals.
sorted_intervals = intervals[intervals[:, 0].argsort()]
combined_intervals = []
cur_interval = sorted_intervals[0]
for next_interval in sorted_intervals[1:]:
if next_interval[0] > cur_interval[1]:
combined_intervals.append(cur_interval)
cur_interval = next_interval
else:
cur_interval[1] = max(cur_interval[1], next_interval[1])
combined_intervals.append(cur_interval)
# Step 3) Determine if we want the specified interval or the inverse of the
# meridian-split interval.
if len(combined_intervals) == 1:
final_interval = combined_intervals[0]
inverse = False
elif len(combined_intervals) == 2:
if combined_intervals[0][0] == 0:
final_interval = [combined_intervals[0][1], combined_intervals[1][0]]
else:
final_interval = [combined_intervals[1][0], combined_intervals[0][1]]
inverse = True
else:
raise Exception(
"More than two longitude intervals identified in "
"find_longitude_intervals. This is an unexpected and "
"surprising error that suggests a malformed polygon."
)
return final_interval, inverse
@dataclass()
class Polygon:
# The half edges on the boundary, in rightwards order.
edge_idxs: np.ndarray
# The vertex indices on the boundary, in rightwards order.
vertex_idxs: np.ndarray
# The actual vertices
vertices: np.ndarray
# The actual vertices in unit sphere x,y,z coordinates
vertices_xyz: np.ndarray = None
# an arbitrary point that is interior to the polygon
interior_xyz: np.ndarray = None
# A bounding box defining the minimum and maximum lon/lat used for
# a fast shortcircuiting of the in-polygon test.
bounds: BoundingBox = None
# The spherical_geometry has some useful tools so we store a reference to
# a spherical_geometry.polygon.SingleSphericalPolygon in case we need
# those methods.
_sg_polygon: SingleSphericalPolygon = None
# Solid angle in steradians. A full sphere is 4*pi, a half sphere is 2*pi.
area_steradians: float = None
def __init__(self, edge_idxs, vertex_idxs, vs):
self.edge_idxs = edge_idxs
self.vertex_idxs = vertex_idxs
self.vertices = vs
x, y, z = sph2cart(vs[:, 0], vs[:, 1], 1.0)
xyz = np.hstack((x[:, None], y[:, None], z[:, None]))
for i in range(vs.shape[0] - 1):
dx = vs[i + 1, 0] - vs[i, 0]
# Make sure we skip any meridian crossing edges.
if -180 < dx < 180:
midpt = (vs[i + 1, :] + vs[i, :]) / 2
edge_vector = vs[i + 1, :] - vs[i, :]
edge_right_normal = np.array([edge_vector[1], -edge_vector[0]])
# Offset only a small amount into the interior to avoid stepping
# back across a different edge into the exterior.
interior_pt = midpt + edge_right_normal * 0.01
# Stop after we've found an acceptable interior point.
break
x, y, z = sph2cart(interior_pt[0], interior_pt[1], 1.0)
self.vertices = vs
self.vertices_xyz = xyz
self.interior_xyz = np.array([x, y, z])
self.bounds = BoundingBox.from_polygon(self.vertices)
self._sg_polygon = SingleSphericalPolygon(self.vertices_xyz, self.interior_xyz)
self.area_steradians = self._sg_polygon.area()
def contains_point(self, lon, lat):
"""
Returns whether each point specified by (lon, lat) is within the
spherical polygon defined by polygon_idx.
The intermediate calculation uses a great circle intersection test.
An explanation of this calculation is copied from the source linked
below. The source code is modified from the same source. The primary
modification is to vectorize
over a list of points rather than a single test point.
https://github.com/spacetelescope/spherical_geometry/blob/e00f4ef619eb2871b305eded2a537a95c858b001/spherical_geometry/great_circle_arc.py#L91
A, B : (*x*, *y*, *z*) Nx3 arrays of triples
Endpoints of the first great circle arcs.
C, D : (*x*, *y*, *z*) Nx3 arrays of triples
Endpoints of the second great circle arcs.
Notes
-----
The basic intersection is computed using linear algebra as follows
[1]_:
.. math::
T = \\lVert(A × B) × (C × D)\\rVert
To determine the correct sign (i.e. hemisphere) of the
intersection, the following four values are computed:
.. math::
s_1 = ((A × B) × A) \\cdot | |
Hub operations. It contains a list of operations and a URL link to get the next set of results.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of IoT Hub operations supported by the Microsoft.Devices resource provider.
:vartype value: list[~azure.mgmt.iothub.v2019_11_04.models.Operation]
:ivar next_link: URL to get the next set of operation list results if there are any.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class RegistryStatistics(msrest.serialization.Model):
"""Identity registry statistics.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar total_device_count: The total count of devices in the identity registry.
:vartype total_device_count: long
:ivar enabled_device_count: The count of enabled devices in the identity registry.
:vartype enabled_device_count: long
:ivar disabled_device_count: The count of disabled devices in the identity registry.
:vartype disabled_device_count: long
"""
_validation = {
'total_device_count': {'readonly': True},
'enabled_device_count': {'readonly': True},
'disabled_device_count': {'readonly': True},
}
_attribute_map = {
'total_device_count': {'key': 'totalDeviceCount', 'type': 'long'},
'enabled_device_count': {'key': 'enabledDeviceCount', 'type': 'long'},
'disabled_device_count': {'key': 'disabledDeviceCount', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
super(RegistryStatistics, self).__init__(**kwargs)
self.total_device_count = None
self.enabled_device_count = None
self.disabled_device_count = None
class RouteCompilationError(msrest.serialization.Model):
"""Compilation error when evaluating route.
:param message: Route error message.
:type message: str
:param severity: Severity of the route error. Possible values include: "error", "warning".
:type severity: str or ~azure.mgmt.iothub.v2019_11_04.models.RouteErrorSeverity
:param location: Location where the route error happened.
:type location: ~azure.mgmt.iothub.v2019_11_04.models.RouteErrorRange
"""
_attribute_map = {
'message': {'key': 'message', 'type': 'str'},
'severity': {'key': 'severity', 'type': 'str'},
'location': {'key': 'location', 'type': 'RouteErrorRange'},
}
def __init__(
self,
**kwargs
):
super(RouteCompilationError, self).__init__(**kwargs)
self.message = kwargs.get('message', None)
self.severity = kwargs.get('severity', None)
self.location = kwargs.get('location', None)
class RouteErrorPosition(msrest.serialization.Model):
"""Position where the route error happened.
:param line: Line where the route error happened.
:type line: int
:param column: Column where the route error happened.
:type column: int
"""
_attribute_map = {
'line': {'key': 'line', 'type': 'int'},
'column': {'key': 'column', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(RouteErrorPosition, self).__init__(**kwargs)
self.line = kwargs.get('line', None)
self.column = kwargs.get('column', None)
class RouteErrorRange(msrest.serialization.Model):
"""Range of route errors.
:param start: Start where the route error happened.
:type start: ~azure.mgmt.iothub.v2019_11_04.models.RouteErrorPosition
:param end: End where the route error happened.
:type end: ~azure.mgmt.iothub.v2019_11_04.models.RouteErrorPosition
"""
_attribute_map = {
'start': {'key': 'start', 'type': 'RouteErrorPosition'},
'end': {'key': 'end', 'type': 'RouteErrorPosition'},
}
def __init__(
self,
**kwargs
):
super(RouteErrorRange, self).__init__(**kwargs)
self.start = kwargs.get('start', None)
self.end = kwargs.get('end', None)
class RouteProperties(msrest.serialization.Model):
"""The properties of a routing rule that your IoT hub uses to route messages to endpoints.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the route. The name can only include alphanumeric
characters, periods, underscores, hyphens, has a maximum length of 64 characters, and must be
unique.
:type name: str
:param source: Required. The source that the routing rule is to be applied to, such as
DeviceMessages. Possible values include: "Invalid", "DeviceMessages", "TwinChangeEvents",
"DeviceLifecycleEvents", "DeviceJobLifecycleEvents".
:type source: str or ~azure.mgmt.iothub.v2019_11_04.models.RoutingSource
:param condition: The condition that is evaluated to apply the routing rule. If no condition is
provided, it evaluates to true by default. For grammar, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
:type condition: str
:param endpoint_names: Required. The list of endpoints to which messages that satisfy the
condition are routed. Currently only one endpoint is allowed.
:type endpoint_names: list[str]
:param is_enabled: Required. Used to specify whether a route is enabled.
:type is_enabled: bool
"""
_validation = {
'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'},
'source': {'required': True},
'endpoint_names': {'required': True, 'max_items': 1, 'min_items': 1},
'is_enabled': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'source': {'key': 'source', 'type': 'str'},
'condition': {'key': 'condition', 'type': 'str'},
'endpoint_names': {'key': 'endpointNames', 'type': '[str]'},
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(RouteProperties, self).__init__(**kwargs)
self.name = kwargs['name']
self.source = kwargs['source']
self.condition = kwargs.get('condition', None)
self.endpoint_names = kwargs['endpoint_names']
self.is_enabled = kwargs['is_enabled']
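# Illustrative sketch (not part of the generated client): a routing rule that
# sends device messages to a single custom endpoint. The endpoint name and
# condition are examples only; exactly one endpoint name is allowed.
def _example_route_properties():
    return RouteProperties(
        name='DeviceMessagesToQueue',
        source='DeviceMessages',
        condition='true',
        endpoint_names=['my-servicebus-queue'],
        is_enabled=True,
    )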
class RoutingEndpoints(msrest.serialization.Model):
"""The properties related to the custom endpoints to which your IoT hub routes messages based on the routing rules. A maximum of 10 custom endpoints are allowed across all endpoint types for paid hubs and only 1 custom endpoint is allowed across all endpoint types for free hubs.
:param service_bus_queues: The list of Service Bus queue endpoints that IoT hub routes the
messages to, based on the routing rules.
:type service_bus_queues:
list[~azure.mgmt.iothub.v2019_11_04.models.RoutingServiceBusQueueEndpointProperties]
:param service_bus_topics: The list of Service Bus topic endpoints that the IoT hub routes the
messages to, based on the routing rules.
:type service_bus_topics:
list[~azure.mgmt.iothub.v2019_11_04.models.RoutingServiceBusTopicEndpointProperties]
:param event_hubs: The list of Event Hubs endpoints that IoT hub routes messages to, based on
the routing rules. This list does not include the built-in Event Hubs endpoint.
:type event_hubs: list[~azure.mgmt.iothub.v2019_11_04.models.RoutingEventHubProperties]
:param storage_containers: The list of storage container endpoints that IoT hub routes messages
to, based on the routing rules.
:type storage_containers:
list[~azure.mgmt.iothub.v2019_11_04.models.RoutingStorageContainerProperties]
"""
_attribute_map = {
'service_bus_queues': {'key': 'serviceBusQueues', 'type': '[RoutingServiceBusQueueEndpointProperties]'},
'service_bus_topics': {'key': 'serviceBusTopics', 'type': '[RoutingServiceBusTopicEndpointProperties]'},
'event_hubs': {'key': 'eventHubs', 'type': '[RoutingEventHubProperties]'},
'storage_containers': {'key': 'storageContainers', 'type': '[RoutingStorageContainerProperties]'},
}
def __init__(
self,
**kwargs
):
super(RoutingEndpoints, self).__init__(**kwargs)
self.service_bus_queues = kwargs.get('service_bus_queues', None)
self.service_bus_topics = kwargs.get('service_bus_topics', None)
self.event_hubs = kwargs.get('event_hubs', None)
self.storage_containers = kwargs.get('storage_containers', None)
class RoutingEventHubProperties(msrest.serialization.Model):
"""The properties related to an event hub endpoint.
All required parameters must be populated in order to send to Azure.
:param connection_string: Required. The connection string of the event hub endpoint.
:type connection_string: str
:param name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types.
:type name: str
:param subscription_id: The subscription identifier of the event hub endpoint.
:type subscription_id: str
:param resource_group: The name of the resource group of the event hub endpoint.
:type resource_group: str
"""
_validation = {
'connection_string': {'required': True},
'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'},
}
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'resource_group': {'key': 'resourceGroup', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RoutingEventHubProperties, self).__init__(**kwargs)
self.connection_string = kwargs['connection_string']
self.name = kwargs['name']
self.subscription_id = kwargs.get('subscription_id', None)
self.resource_group = kwargs.get('resource_group', None)
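# Illustrative sketch (not part of the generated client): an Event Hubs custom
# endpoint wrapped in a RoutingEndpoints container. The connection string and
# endpoint name are placeholders.
def _example_routing_endpoints():
    hub_endpoint = RoutingEventHubProperties(
        connection_string='Endpoint=sb://example.servicebus.windows.net/;...',
        name='telemetry-eventhub',
    )
    return RoutingEndpoints(event_hubs=[hub_endpoint])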
class RoutingMessage(msrest.serialization.Model):
"""Routing message.
:param body: Body of routing message.
:type body: str
:param app_properties: App properties.
:type app_properties: dict[str, str]
:param system_properties: System properties.
:type system_properties: dict[str, str]
"""
_attribute_map = {
'body': {'key': 'body', 'type': 'str'},
'app_properties': {'key': 'appProperties', 'type': '{str}'},
'system_properties': {'key': 'systemProperties', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(RoutingMessage, self).__init__(**kwargs)
self.body = kwargs.get('body', None)
self.app_properties = kwargs.get('app_properties', None)
self.system_properties = kwargs.get('system_properties', None)
class RoutingProperties(msrest.serialization.Model):
"""The routing related properties of the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging.
:param endpoints: The properties related to the custom endpoints to which your IoT hub routes
messages based on the routing rules. A maximum of 10 custom endpoints are allowed across all
endpoint types for paid hubs and only 1 custom endpoint is allowed across all endpoint types
for free hubs.
:type endpoints: ~azure.mgmt.iothub.v2019_11_04.models.RoutingEndpoints
:param routes: The list of user-provided routing rules that the IoT hub uses to route messages
to built-in and custom endpoints. A maximum of 100 routing rules are allowed for paid hubs and
a maximum of 5 routing rules are allowed for free hubs.
:type routes: list[~azure.mgmt.iothub.v2019_11_04.models.RouteProperties]
:param fallback_route: The properties of the route that is used as a fall-back route when none
of the conditions specified in the 'routes' section are met. This is an optional parameter.
When this property is not set, the messages which do not meet any of the conditions specified
in the 'routes' section get routed to the built-in eventhub endpoint.
:type fallback_route: ~azure.mgmt.iothub.v2019_11_04.models.FallbackRouteProperties
:param enrichments: The list of user-provided enrichments that the IoT hub applies to messages
    to be delivered to built-in and custom endpoints. See:
% ",".join(memeber_set))
class DescribeHttpStatusInfoListResponse(AbstractModel):
"""DescribeHttpStatusInfoList返回参数结构体
"""
def __init__(self):
r"""
        :param DataInfoList: List of playback status codes.
        :type DataInfoList: list of HttpStatusData
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
"""
self.DataInfoList = None
self.RequestId = None
def _deserialize(self, params):
if params.get("DataInfoList") is not None:
self.DataInfoList = []
for item in params.get("DataInfoList"):
obj = HttpStatusData()
obj._deserialize(item)
self.DataInfoList.append(obj)
self.RequestId = params.get("RequestId")
class DescribeLiveCallbackRulesRequest(AbstractModel):
"""DescribeLiveCallbackRules请求参数结构体
"""
class DescribeLiveCallbackRulesResponse(AbstractModel):
"""DescribeLiveCallbackRules返回参数结构体
"""
def __init__(self):
r"""
        :param Rules: List of rule information.
        :type Rules: list of CallBackRuleInfo
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
"""
self.Rules = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Rules") is not None:
self.Rules = []
for item in params.get("Rules"):
obj = CallBackRuleInfo()
obj._deserialize(item)
self.Rules.append(obj)
self.RequestId = params.get("RequestId")
class DescribeLiveCallbackTemplateRequest(AbstractModel):
"""DescribeLiveCallbackTemplate请求参数结构体
"""
def __init__(self):
r"""
        :param TemplateId: Template ID.
        1. Obtain the template ID from the return value of the template creation API [CreateLiveCallbackTemplate](/document/product/267/32637).
        2. You can query the list of templates that have already been created via the [DescribeLiveCallbackTemplates](/document/product/267/32632) API.
        :type TemplateId: int
"""
self.TemplateId = None
def _deserialize(self, params):
self.TemplateId = params.get("TemplateId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeLiveCallbackTemplateResponse(AbstractModel):
"""DescribeLiveCallbackTemplate返回参数结构体
"""
def __init__(self):
r"""
        :param Template: Callback template information.
        :type Template: :class:`tencentcloud.live.v20180801.models.CallBackTemplateInfo`
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
"""
self.Template = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Template") is not None:
self.Template = CallBackTemplateInfo()
self.Template._deserialize(params.get("Template"))
self.RequestId = params.get("RequestId")
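# Illustrative sketch (not part of the generated SDK): builds a template query
# request and deserializes a response. The template ID and the response
# payload below are made up; a real payload comes from the Live API.
def _example_describe_live_callback_template():
    req = DescribeLiveCallbackTemplateRequest()
    req.TemplateId = 1000
    resp = DescribeLiveCallbackTemplateResponse()
    resp._deserialize({
        "Template": {},  # placeholder for a CallBackTemplateInfo payload
        "RequestId": "00000000-0000-0000-0000-000000000000",
    })
    return req, resp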
class DescribeLiveCallbackTemplatesRequest(AbstractModel):
"""DescribeLiveCallbackTemplates请求参数结构体
"""
class DescribeLiveCallbackTemplatesResponse(AbstractModel):
"""DescribeLiveCallbackTemplates返回参数结构体
"""
def __init__(self):
r"""
        :param Templates: List of template information.
        :type Templates: list of CallBackTemplateInfo
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
"""
self.Templates = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Templates") is not None:
self.Templates = []
for item in params.get("Templates"):
obj = CallBackTemplateInfo()
obj._deserialize(item)
self.Templates.append(obj)
self.RequestId = params.get("RequestId")
class DescribeLiveCertRequest(AbstractModel):
"""DescribeLiveCert请求参数结构体
"""
def __init__(self):
r"""
        :param CertId: Certificate ID obtained through the DescribeLiveCerts API.
        :type CertId: int
"""
self.CertId = None
def _deserialize(self, params):
self.CertId = params.get("CertId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeLiveCertResponse(AbstractModel):
"""DescribeLiveCert返回参数结构体
"""
def __init__(self):
r"""
        :param CertInfo: Certificate information.
        :type CertInfo: :class:`tencentcloud.live.v20180801.models.CertInfo`
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
"""
self.CertInfo = None
self.RequestId = None
def _deserialize(self, params):
if params.get("CertInfo") is not None:
self.CertInfo = CertInfo()
self.CertInfo._deserialize(params.get("CertInfo"))
self.RequestId = params.get("RequestId")
class DescribeLiveCertsRequest(AbstractModel):
"""DescribeLiveCerts请求参数结构体
"""
class DescribeLiveCertsResponse(AbstractModel):
"""DescribeLiveCerts返回参数结构体
"""
def __init__(self):
r"""
        :param CertInfoSet: List of certificate information.
        :type CertInfoSet: list of CertInfo
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
"""
self.CertInfoSet = None
self.RequestId = None
def _deserialize(self, params):
if params.get("CertInfoSet") is not None:
self.CertInfoSet = []
for item in params.get("CertInfoSet"):
obj = CertInfo()
obj._deserialize(item)
self.CertInfoSet.append(obj)
self.RequestId = params.get("RequestId")
class DescribeLiveDelayInfoListRequest(AbstractModel):
"""DescribeLiveDelayInfoList请求参数结构体
"""
class DescribeLiveDelayInfoListResponse(AbstractModel):
"""DescribeLiveDelayInfoList返回参数结构体
"""
def __init__(self):
r"""
        :param DelayInfoList: List of delayed playback information.
        :type DelayInfoList: list of DelayInfo
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
"""
self.DelayInfoList = None
self.RequestId = None
def _deserialize(self, params):
if params.get("DelayInfoList") is not None:
self.DelayInfoList = []
for item in params.get("DelayInfoList"):
obj = DelayInfo()
obj._deserialize(item)
self.DelayInfoList.append(obj)
self.RequestId = params.get("RequestId")
class DescribeLiveDomainCertRequest(AbstractModel):
"""DescribeLiveDomainCert请求参数结构体
"""
def __init__(self):
r"""
        :param DomainName: Playback domain name.
        :type DomainName: str
"""
self.DomainName = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeLiveDomainCertResponse(AbstractModel):
"""DescribeLiveDomainCert返回参数结构体
"""
def __init__(self):
r"""
        :param DomainCertInfo: Certificate information.
        :type DomainCertInfo: :class:`tencentcloud.live.v20180801.models.DomainCertInfo`
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
"""
self.DomainCertInfo = None
self.RequestId = None
def _deserialize(self, params):
if params.get("DomainCertInfo") is not None:
self.DomainCertInfo = DomainCertInfo()
self.DomainCertInfo._deserialize(params.get("DomainCertInfo"))
self.RequestId = params.get("RequestId")
class DescribeLiveDomainPlayInfoListRequest(AbstractModel):
"""DescribeLiveDomainPlayInfoList请求参数结构体
"""
def __init__(self):
r"""
        :param PlayDomains: List of playback domain names.
        :type PlayDomains: list of str
"""
self.PlayDomains = None
def _deserialize(self, params):
self.PlayDomains = params.get("PlayDomains")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeLiveDomainPlayInfoListResponse(AbstractModel):
"""DescribeLiveDomainPlayInfoList返回参数结构体
"""
def __init__(self):
r"""
        :param Time: Data time in the format of yyyy-mm-dd HH:MM:SS.
        :type Time: str
        :param TotalBandwidth: Real-time total bandwidth.
        :type TotalBandwidth: float
        :param TotalFlux: Real-time total traffic.
        :type TotalFlux: float
        :param TotalRequest: Total number of requests.
        :type TotalRequest: int
        :param TotalOnline: Real-time total number of connections.
        :type TotalOnline: int
        :param DomainInfoList: Data broken down by domain name.
        :type DomainInfoList: list of DomainInfoList
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
"""
self.Time = None
self.TotalBandwidth = None
self.TotalFlux = None
self.TotalRequest = None
self.TotalOnline = None
self.DomainInfoList = None
self.RequestId = None
def _deserialize(self, params):
self.Time = params.get("Time")
self.TotalBandwidth = params.get("TotalBandwidth")
self.TotalFlux = params.get("TotalFlux")
self.TotalRequest = params.get("TotalRequest")
self.TotalOnline = params.get("TotalOnline")
if params.get("DomainInfoList") is not None:
self.DomainInfoList = []
for item in params.get("DomainInfoList"):
obj = DomainInfoList()
obj._deserialize(item)
self.DomainInfoList.append(obj)
self.RequestId = params.get("RequestId")
class DescribeLiveDomainRefererRequest(AbstractModel):
"""DescribeLiveDomainReferer请求参数结构体
"""
def __init__(self):
r"""
        :param DomainName: Playback domain name.
        :type DomainName: str
"""
self.DomainName = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeLiveDomainRefererResponse(AbstractModel):
"""DescribeLiveDomainReferer返回参数结构体
"""
def __init__(self):
r"""
        :param RefererAuthConfig: Referer allowlist/blocklist configuration of the domain name.
        :type RefererAuthConfig: :class:`tencentcloud.live.v20180801.models.RefererAuthConfig`
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
"""
self.RefererAuthConfig = None
self.RequestId = None
def _deserialize(self, params):
if params.get("RefererAuthConfig") is not None:
self.RefererAuthConfig = RefererAuthConfig()
self.RefererAuthConfig._deserialize(params.get("RefererAuthConfig"))
self.RequestId = params.get("RequestId")
class DescribeLiveDomainRequest(AbstractModel):
"""DescribeLiveDomain请求参数结构体
"""
def __init__(self):
r"""
        :param DomainName: Domain name.
        :type DomainName: str
"""
self.DomainName = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeLiveDomainResponse(AbstractModel):
"""DescribeLiveDomain返回参数结构体
"""
def __init__(self):
r"""
        :param DomainInfo: Domain name information.
        Note: this field may return null, indicating that no valid values can be obtained.
        :type DomainInfo: :class:`tencentcloud.live.v20180801.models.DomainInfo`
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
"""
self.DomainInfo = None
self.RequestId = None
def _deserialize(self, params):
if params.get("DomainInfo") is not None:
self.DomainInfo = DomainInfo()
self.DomainInfo._deserialize(params.get("DomainInfo"))
self.RequestId = params.get("RequestId")
class DescribeLiveDomainsRequest(AbstractModel):
"""DescribeLiveDomains请求参数结构体
"""
def __init__(self):
r"""
        :param DomainStatus: Filter by domain name status. 0: disabled, 1: enabled.
        :type DomainStatus: int
        :param DomainType: Filter by domain name type. 0: push (ingest), 1: playback.
        :type DomainType: int
        :param PageSize: Number of entries per page. Range: 10-100. Default: 10.
        :type PageSize: int
        :param PageNum: Page number to fetch. Range: 1-100000. Default: 1.
        :type PageNum: int
        :param IsDelayLive: 0: standard live streaming, 1: slow live streaming (LCB). Default: 0.
        :type IsDelayLive: int
        :param DomainPrefix: Domain name prefix.
        :type DomainPrefix: str
"""
self.DomainStatus = None
self.DomainType = None
self.PageSize = None
self.PageNum = None
self.IsDelayLive = None
self.DomainPrefix = None
def _deserialize(self, params):
self.DomainStatus = params.get("DomainStatus")
self.DomainType = params.get("DomainType")
self.PageSize = params.get("PageSize")
self.PageNum = params.get("PageNum")
self.IsDelayLive = params.get("IsDelayLive")
self.DomainPrefix = params.get("DomainPrefix")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeLiveDomainsResponse(AbstractModel):
"""DescribeLiveDomains返回参数结构体
"""
def __init__(self):
r"""
        :param AllCount: Total number of records.
        :type AllCount: int
        :param DomainList: List of detailed domain name information.
        :type DomainList: list of DomainInfo
        :param CreateLimitCount: Number of domain names that can still be added.
        Note: this field may return null, indicating that no valid values can be obtained.
        :type CreateLimitCount: int
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
"""
self.AllCount = None
self.DomainList = None
self.CreateLimitCount = None
self.RequestId = None
def _deserialize(self, params):
self.AllCount = params.get("AllCount")
if params.get("DomainList") is not None:
self.DomainList = []
for item in params.get("DomainList"):
obj = DomainInfo()
obj._deserialize(item)
self.DomainList.append(obj)
self.CreateLimitCount = params.get("CreateLimitCount")
self.RequestId = params.get("RequestId")
class DescribeLiveForbidStreamListRequest(AbstractModel):
"""DescribeLiveForbidStreamList请求参数结构体
"""
def __init__(self):
r"""
        :param PageNum: Page number to fetch. Default: 1.
        :type PageNum: int
        :param PageSize: Number of entries per page. Maximum: 100.
        Value: any integer between 1 and 100.
        Default: 10.
        :type PageSize: int
        :param StreamName: Name of the stream ID to search for.
        :type StreamName: str
"""
self.PageNum = None
self.PageSize = None
self.StreamName = None
def _deserialize(self, params):
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
self.StreamName = params.get("StreamName")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeLiveForbidStreamListResponse(AbstractModel):
"""DescribeLiveForbidStreamList返回参数结构体
"""
def __init__(self):
r"""
        :param TotalNum: Total number of eligible entries.
        :type TotalNum: int
        :param TotalPage: Total number of pages.
        :type TotalPage: int
        :param PageNum: Page number.
        :type PageNum: int
        :param PageSize: Number of entries displayed per page.
        :type PageSize: int
        :param ForbidStreamList: List of forbidden streams.
        :type ForbidStreamList: list of ForbidStreamInfo
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
"""
self.TotalNum = None
self.TotalPage = None
self.PageNum = None
self.PageSize = None
self.ForbidStreamList = None
self.RequestId = None
def _deserialize(self, params):
self.TotalNum = params.get("TotalNum")
self.TotalPage = params.get("TotalPage")
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
if params.get("ForbidStreamList") is not None:
self.ForbidStreamList = []
for item in params.get("ForbidStreamList"):
obj = ForbidStreamInfo()
obj._deserialize(item)
self.ForbidStreamList.append(obj)
self.RequestId = params.get("RequestId")
class DescribeLivePackageInfoRequest(AbstractModel):
"""DescribeLivePackageInfo请求参数结构体
"""
def __init__(self):
r"""
        :param PackageType: Package type. Valid values:
        0: traffic package;
        1: transcoding package;
        2: co-anchoring package.
        :type PackageType: int
        :param OrderBy: Sorting rule:
        1. BuyTimeDesc: the most recently purchased packages come first
        2. BuyTimeAsc: the earliest purchased packages come first
        3. ExpireTimeDesc: the latest expiring packages come first
        4. ExpireTimeAsc: the earliest expiring packages come first
        Note:
        1. When PackageType is 2 (co-anchoring package), sorting by 3 or 4 is not supported
        :type OrderBy: str
        :param PageNum:
class driver session, it may return
interchangeability warnings generated by the IVI class driver as well as
interchangeability warnings generated by the IVI specific driver. The IVI
class driver determines the relative order in which the IVI class driver
warnings are returned in relation to the IVI specific driver warnings.
The function returns an empty string in the InterchangeWarning parameter
if no interchangeability warnings remain for the session.
Refer to the Interchange Check attribute for more information on
interchangeability checking.
""")
add_method(self, 'driver_operation.invalidate_all_attributes',
self._driver_operation_invalidate_all_attributes,
"""
This function invalidates the cached values of all attributes for the
session.
""")
add_method(self, 'driver_operation.reset_interchange_check',
self._driver_operation_reset_interchange_check,
"""
This function resets the interchangeability checking algorithms of the IVI
specific driver so that specific driver functions that execute prior to
calling this function have no effect on whether future calls to the
specific driver generate interchangeability warnings.
When developing a complex test system that consists of multiple test
modules, it is generally a good idea to design the test modules so that
they can run in any order. To do so requires ensuring that each test
module completely configures the state of each instrument it uses. If a
particular test module does not completely configure the state of an
instrument, the state of the instrument depends on the configuration from
a previously executed test module. If the test modules execute in a
different order, the behavior of the instrument and therefore the entire
test module is likely to change. This change in behavior is generally
instrument specific and represents an interchangeability problem.
Users can use this function to test for such cases. By calling this
function at the beginning of a test module, users can determine whether
the test module has dependencies on the operation of previously executed
test modules. Any interchangeability warnings that occur after the user
calls this function indicate that the section of the test program that
executes after this function and prior to the generation of the warning
does not completely configure the instrument and that the user is likely
to experience different behavior if the user changes the execution order
of the test modules or if the user changes instruments.
Note: This function does not clear interchangeability warnings from the
list of interchangeability warnings. To guarantee that the Get Next
Interchange Warning function returns interchangeability warnings that
occur only after the program calls function, the user must clear the list
of interchangeability warnings by calling the Clear Interchange Warnings
function.
Refer to the Interchange Check attribute for more information on
interchangeability checking.
""")
def _get_driver_operation_cache(self):
return self._driver_operation_cache
def _set_driver_operation_cache(self, value):
self._driver_operation_cache = bool(value)
def _get_driver_operation_driver_setup(self):
return self._driver_operation_driver_setup
def _get_driver_operation_interchange_check(self):
return self._driver_operation_interchange_check
def _set_driver_operation_interchange_check(self, value):
self._driver_operation_interchange_check = bool(value)
def _get_driver_operation_logical_name(self):
return self._driver_operation_logical_name
def _get_driver_operation_query_instrument_status(self):
return self._driver_operation_query_instrument_status
def _set_driver_operation_query_instrument_status(self, value):
self._driver_operation_query_instrument_status = bool(value)
def _get_driver_operation_range_check(self):
return self._driver_operation_range_check
def _set_driver_operation_range_check(self, value):
self._driver_operation_range_check = bool(value)
def _get_driver_operation_record_coercions(self):
return self._driver_operation_record_coercions
def _set_driver_operation_record_coercions(self, value):
self._driver_operation_record_coercions = bool(value)
def _get_driver_operation_io_resource_descriptor(self):
return self._driver_operation_io_resource_descriptor
def _get_driver_operation_simulate(self):
return self._driver_operation_simulate
def _set_driver_operation_simulate(self, value):
value = bool(value)
if self._driver_operation_simulate and not value:
raise SimulationStateException()
self._driver_operation_simulate = value
def _driver_operation_clear_interchange_warnings(self):
self._driver_operation_interchange_warnings = list()
def _driver_operation_get_next_coercion_record(self):
if len(self._driver_operation_coercion_records) > 0:
return self._driver_operation_coercion_records.pop()
return ""
def _driver_operation_get_next_interchange_warning(self):
if len(self._driver_operation_interchange_warnings) > 0:
return self._driver_operation_interchange_warnings.pop()
return ""
def _driver_operation_invalidate_all_attributes(self):
pass
def _driver_operation_reset_interchange_check(self):
pass
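# Illustrative usage sketch (assumes "instr" is an instance of a concrete IVI
# driver built on these mixins; the attribute and method names follow the
# add_property/add_method calls above and may differ in a specific driver):
#
#   instr.driver_operation.cache = True
#   instr.driver_operation.clear_interchange_warnings()
#   warning = instr.driver_operation.get_next_interchange_warning()
#
# Note that once simulation is enabled, the simulate setter above refuses to
# turn it off again and raises SimulationStateException.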
class DriverIdentity(object):
"Inherent IVI methods for identification"
def __init__(self, *args, **kwargs):
super(DriverIdentity, self).__init__(*args, **kwargs)
self._identity_description = "Base IVI Driver"
self._identity_identifier = ""
self._identity_revision = ""
self._identity_vendor = ""
self._identity_instrument_manufacturer = ""
self._identity_instrument_model = ""
self._identity_instrument_firmware_revision = ""
self._identity_specification_major_version = 0
self._identity_specification_minor_version = 0
self._identity_supported_instrument_models = list()
self.__dict__.setdefault('_identity_group_capabilities', list())
add_property(self, 'identity.description',
self._get_identity_description,
None,
None,
"""
Returns a brief description of the IVI software component.
The string that this attribute returns has no maximum size.
""")
add_property(self, 'identity.identifier',
self._get_identity_identifier,
None,
None,
"""
Returns the case-sensitive unique identifier of the IVI software
component. The string that this attribute returns contains a maximum of 32
characters including the NULL character.
""")
add_property(self, 'identity.revision',
self._get_identity_revision,
None,
None,
"""
Returns version information about the IVI software component. Refer to
Section 3.1.2.2, Additional Compliance Rules for Revision String
Attributes, for additional rules regarding this attribute.
The string that this attribute returns has no maximum size.
""")
add_property(self, 'identity.vendor',
self._get_identity_vendor,
None,
None,
"""
Returns the name of the vendor that supplies the IVI software component.
The string that this attribute returns has no maximum size.
""")
add_property(self, 'identity.instrument_manufacturer',
self._get_identity_instrument_manufacturer,
None,
None,
"""
Returns the name of the manufacturer of the instrument. The IVI specific
driver returns the value it queries from the instrument as the value of
this attribute or a string indicating that it cannot query the instrument
identity.
                     In some cases, it is not possible for the specific driver to query the
                     manufacturer of the instrument. This can occur when the Simulate
                     attribute is set to True or if the instrument is not capable of returning
                     the manufacturer. For these cases, the specific driver returns
                     defined strings for this attribute. If the Simulate attribute is set to
                     True, the specific driver returns "Not available while simulating" as the
                     value of this attribute. If the instrument is not capable of returning the
                     manufacturer and the Simulate attribute is set to False, the specific
                     driver returns "Cannot query from instrument" as the value of this
                     attribute.
The string that this attribute returns does not have a predefined maximum
length.
""")
add_property(self, 'identity.instrument_model',
self._get_identity_instrument_model,
None,
None,
"""
Returns the model number or name of the physical instrument. The IVI
specific driver returns the value it queries from the instrument or a
string indicating that it cannot query the instrument identity.
                     In some cases, it is not possible for the specific driver to query the
                     model of the instrument. This can occur when the Simulate
                     attribute is set to True or if the instrument is not capable of returning
                     the model. For these cases, the specific driver returns
                     defined strings for this attribute. If the Simulate attribute is set to
                     True, the specific driver returns "Not available while simulating" as the
                     value of this attribute. If the instrument is not capable of returning the
                     model and the Simulate attribute is set to False, the specific
                     driver returns "Cannot query from instrument" as the value of this
                     attribute.
The string that this attribute returns does not have a predefined maximum
length.
""")
add_property(self, 'identity.instrument_firmware_revision',
self._get_identity_instrument_firmware_revision,
None,
None,
"""
Returns an instrument specific string that contains the firmware
revision information of the physical instrument. The IVI specific driver
returns the value it queries from the instrument as the value of this
attribute or a string indicating that it cannot query the instrument
identity.
In some cases, it is not possible for the specific driver to query the
firmware revision of the instrument. This can occur when the Simulate
attribute is set to True or if the instrument is not capable of returning
the firmware revision. For these cases, the specific driver returns
defined strings for this attribute. If the Simulate attribute is set to
True, the specific driver returns "Not available while simulating" as the
value of this attribute. If the instrument is not capable of returning the
firmware version and the Simulate attribute is set to False, the specific
driver returns "Cannot query from instrument" as the value of this
attribute.
The string that this attribute returns does not have a predefined maximum
length.
""")
add_property(self, 'identity.specification_major_version',
self._get_identity_specification_major_version,
None,
None,
"""
Returns the major version number of the class specification in accordance
with which the IVI software component was developed. The | |
on the client system.
"""
if self.tf is None:
logging.debug('Unable to execute "tf help": skipping TFS')
return None
workfold = self._run_tf(['workfold', os.getcwd()])
m = re.search('^Collection: (.*)$', workfold, re.MULTILINE)
if m:
return unquote(m.group(1))
logging.debug('Could not find the collection from "tf workfold"')
return None
def get_repository_info(self):
"""Return repository information for the current working tree.
Returns:
rbtools.clients.RepositoryInfo:
The repository info structure.
"""
path = self.get_local_path()
if path:
# Now that we know it's TFS, make sure we have GNU diff installed,
# and error out if we don't.
check_gnu_diff()
return RepositoryInfo(path=path, local_path=path)
return None
def parse_revision_spec(self, revisions):
"""Parse the given revision spec.
Args:
revisions (list of unicode):
A list of revisions as specified by the user. Items in the list
do not necessarily represent a single revision, since the user
can use the TFS-native syntax of ``r1~r2``. Versions passed in
can be any versionspec, such as a changeset number,
``L``-prefixed label name, ``W`` (latest workspace version), or
``T`` (latest upstream version).
Returns:
dict:
A dictionary with the following keys:
``base`` (:py:class:`unicode`):
A revision to use as the base of the resulting diff.
``tip`` (:py:class:`unicode`):
A revision to use as the tip of the resulting diff.
``parent_base`` (:py:class:`unicode`, optional):
The revision to use as the base of a parent diff.
These will be used to generate the diffs to upload to Review Board
(or print). The diff for review will include the changes in (base,
tip], and the parent diff (if necessary) will include (parent,
base].
If a single revision is passed in, this will return the parent of
that revision for "base" and the passed-in revision for "tip".
If zero revisions are passed in, this will return revisions
relevant for the "current change" (changes in the work folder which
have not yet been checked in).
Raises:
rbtools.clients.errors.TooManyRevisionsError:
Too many revisions were specified.
rbtools.clients.errors.InvalidRevisionSpecError:
The given revision spec could not be parsed.
"""
n_revisions = len(revisions)
if n_revisions == 1 and '~' in revisions[0]:
revisions = revisions[0].split('~')
n_revisions = len(revisions)
if n_revisions == 0:
# Most recent checked-out revision -- working copy
return {
'base': self._convert_symbolic_revision('W'),
'tip': self.REVISION_WORKING_COPY,
}
elif n_revisions == 1:
# Either a numeric revision (n-1:n) or a changelist
revision = self._convert_symbolic_revision(revisions[0])
return {
'base': revision - 1,
'tip': revision,
}
elif n_revisions == 2:
# Diff between two numeric revisions
return {
'base': self._convert_symbolic_revision(revisions[0]),
'tip': self._convert_symbolic_revision(revisions[1]),
}
else:
raise TooManyRevisionsError
return {
'base': None,
'tip': None,
}
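    # Illustrative mapping of the revision handling above (changeset numbers
    # are made up; REVISION_WORKING_COPY is the sentinel defined on the class):
    #
    #   parse_revision_spec([])          -> {'base': <latest workspace cs>,
    #                                        'tip': REVISION_WORKING_COPY}
    #   parse_revision_spec(['123'])     -> {'base': 122, 'tip': 123}
    #   parse_revision_spec(['100~123']) -> {'base': 100, 'tip': 123}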
def _convert_symbolic_revision(self, revision, path=None):
"""Convert a symbolic revision into a numeric changeset.
Args:
revision (unicode):
The TFS versionspec to convert.
path (unicode, optional):
The itemspec that the revision applies to.
Returns:
int:
The changeset number corresponding to the versionspec.
"""
args = ['history', '-stopafter:1', '-recursive', '-format:xml']
        # 'tf history -version:W' doesn't seem to work (even though it's
# supposed to). Luckily, W is the default when -version isn't passed,
# so just elide it.
if revision != 'W':
args.append('-version:%s' % revision)
args.append(path or os.getcwd())
# We pass results_unicode=False because that uses the filesystem
# encoding to decode the output, but the XML results we get should
# always be UTF-8, and are well-formed with the encoding specified. We
# can therefore let ElementTree determine how to decode it.
data = self._run_tf(args, results_unicode=False)
try:
root = ET.fromstring(data)
item = root.find('./changeset')
if item is not None:
return int(item.attrib['id'])
else:
raise Exception('No changesets found')
except Exception as e:
logging.debug('Failed to parse output from "tf history": %s\n%s',
e, data, exc_info=True)
raise InvalidRevisionSpecError(
'"%s" does not appear to be a valid versionspec' % revision)
def diff(self, revisions, include_files, exclude_patterns):
"""Return the generated diff.
Args:
revisions (dict):
A dictionary containing ``base`` and ``tip`` keys.
include_files (list):
A list of file paths to include in the diff.
exclude_patterns (list):
A list of file paths to exclude from the diff.
Returns:
dict:
A dictionary containing the following keys:
``diff`` (:py:class:`bytes`):
The contents of the diff to upload.
            ``base_commit_id`` (:py:class:`unicode`, optional):
The ID of the commit that the change is based on, if available.
This is necessary for some hosting services that don't provide
individual file access.
"""
base = str(revisions['base'])
tip = str(revisions['tip'])
if tip == self.REVISION_WORKING_COPY:
return self._diff_working_copy(base, include_files,
exclude_patterns)
else:
raise SCMError('Posting committed changes is not yet supported '
'for TFS when using the Team Explorer Everywhere '
'wrapper.')
def _diff_working_copy(self, base, include_files, exclude_patterns):
"""Return a diff of the working copy.
Args:
base (unicode):
The base revision to diff against.
include_files (list):
A list of file paths to include in the diff.
exclude_patterns (list):
A list of file paths to exclude from the diff.
Returns:
dict:
A dictionary containing ``diff``, ``parent_diff``, and
``base_commit_id`` keys. In the case of TFS, the parent diff key
will always be ``None``.
"""
# We pass results_unicode=False because that uses the filesystem
# encoding, but the XML results we get should always be UTF-8, and are
# well-formed with the encoding specified. We can therefore let
# ElementTree determine how to decode it.
status = self._run_tf(['status', '-format:xml'], results_unicode=False)
root = ET.fromstring(status)
diff = []
for pending_change in root.findall('./pending-changes/pending-change'):
action = pending_change.attrib['change-type'].split(', ')
new_filename = pending_change.attrib['server-item'].encode('utf-8')
local_filename = pending_change.attrib['local-item']
old_version = pending_change.attrib['version'].encode('utf-8')
file_type = pending_change.attrib.get('file-type')
new_version = b'(pending)'
old_data = b''
new_data = b''
copied = 'branch' in action
if (not file_type or (not os.path.isfile(local_filename) and
'delete' not in action)):
continue
if (exclude_patterns and
filename_match_any_patterns(local_filename,
exclude_patterns,
base_dir=None)):
continue
if 'rename' in action:
old_filename = \
pending_change.attrib['source-item'].encode('utf-8')
else:
old_filename = new_filename
if copied:
old_filename = \
pending_change.attrib['source-item'].encode('utf-8')
old_version = (
'%d' % self._convert_symbolic_revision(
'W', old_filename.decode('utf-8')))
if 'add' in action:
old_filename = b'/dev/null'
if file_type != 'binary':
with open(local_filename) as f:
new_data = f.read()
old_data = b''
elif 'delete' in action:
old_data = self._run_tf(
['print', '-version:%s' % old_version.decode('utf-8'),
old_filename.decode('utf-8')],
results_unicode=False)
new_data = b''
new_version = b'(deleted)'
elif 'edit' in action:
old_data = self._run_tf(
['print', '-version:%s' % old_version.decode('utf-8'),
old_filename.decode('utf-8')],
results_unicode=False)
with open(local_filename) as f:
new_data = f.read()
old_label = b'%s\t%s' % (old_filename, old_version)
new_label = b'%s\t%s' % (new_filename, new_version)
if copied:
diff.append(b'Copied from: %s\n' % old_filename)
if file_type == 'binary':
if 'add' in action:
old_filename = new_filename
diff.append(b'--- %s\n' % old_label)
diff.append(b'+++ %s\n' % new_label)
diff.append(b'Binary files %s and %s differ\n'
% (old_filename, new_filename))
elif old_filename != new_filename and old_data == new_data:
# Renamed file with no changes
diff.append(b'--- %s\n' % old_label)
diff.append(b'+++ %s\n' % new_label)
else:
old_tmp = tempfile.NamedTemporaryFile(delete=False)
old_tmp.write(old_data)
old_tmp.close()
new_tmp = tempfile.NamedTemporaryFile(delete=False)
new_tmp.write(new_data)
new_tmp.close()
unified_diff = execute(
['diff', '-u',
'--label', old_label.decode('utf-8'),
'--label', new_label.decode('utf-8'),
old_tmp.name, new_tmp.name],
extra_ignore_errors=(1,),
log_output_on_error=False,
results_unicode=False)
diff.append(unified_diff)
os.unlink(old_tmp.name)
os.unlink(new_tmp.name)
if len(root.findall('./candidate-pending-changes/pending-change')) > 0:
logging.warning('There are added or deleted files which have not '
'been added to TFS. These will not be included '
'in your review request.')
return {
'diff': b''.join(diff),
'parent_diff': None,
'base_commit_id': base,
}
def _run_tf(self, args, **kwargs):
"""Run the "tf" command.
Args:
args (list):
A list of arguments to pass to rb-tfs.
**kwargs (dict):
Additional keyword arguments for the :py:meth:`execute` call.
Returns:
unicode:
The output of the command.
"""
cmdline = [self.tf, '-noprompt']
if getattr(self.options, 'tfs_login', None):
cmdline.append('-login:%s' % self.options.tfs_login)
cmdline += args
# Use / style arguments when running on windows.
if sys.platform.startswith('win'):
for i, arg in enumerate(cmdline):
if arg.startswith('-'):
cmdline[i] = '/' + arg[1:]
return execute(cmdline, ignore_errors=True, **kwargs)
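# Illustrative note on _run_tf above: on Windows, arguments that start with a
# dash are rewritten to slash style, so a call such as
#   self._run_tf(['history', '-stopafter:1', '-format:xml'])
# ends up invoking roughly
#   tf /noprompt history /stopafter:1 /format:xml
# ('-noprompt', which is added unconditionally, is rewritten as well).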
class TFHelperWrapper(object):
"""Implementation wrapper using our own helper."""
def __init__(self, helper_path, config=None, options=None):
"""Initialize the wrapper.
Args:
helper_path (unicode):
The path to the helper binary.
config (dict, optional):
The loaded configuration.
options (argparse.Namespace, optional):
The command line options.
"""
self.helper_path = helper_path
self.config = config
self.options = options
def get_local_path(self):
"""Return the local path to the working tree.
Returns:
unicode:
The filesystem path of the repository on the client system.
"""
rc, path, errors = self._run_helper(['get-collection'],
ignore_errors=True)
if rc == 0:
return path.strip()
return None
def get_repository_info(self):
"""Return repository information for the current working tree.
Returns:
rbtools.clients.RepositoryInfo:
The repository info structure.
"""
path = self.get_local_path()
if path:
return RepositoryInfo(path=path, local_path=path)
        return None
separated values."""
return "\t".join(self.getHeaders())
def __str__(self):
"""return string representation of data."""
if self._mode == "int":
format_vals = "%i"
format_median = "%.1f"
else:
format_vals = self._format
format_median = self._format
return "\t".join(("%i" % self.counts,
format_vals % self.min,
format_vals % self.max,
self._format % self.mean,
format_median % self.median,
self._format % self.samplestd,
format_vals % self.sum,
format_vals % self.q1,
format_vals % self.q3,
))
def doFDRPython(pvalues,
vlambda=None,
pi0_method="smoother",
fdr_level=None,
robust=False,
smooth_df=3,
smooth_log_pi0=False,
pi0=None,
plot=False):
"""modeled after code taken from
http://genomics.princeton.edu/storeylab/qvalue/linux.html.
    I did not like the error handling so I translated most of it to Python.
    Compute FDR after the method of Storey et al. (2002).
"""
if min(pvalues) < 0 or max(pvalues) > 1:
raise ValueError("p-values out of range")
# set to default of qvalue method
if vlambda is None:
vlambda = numpy.arange(0, 0.95, 0.05)
m = len(pvalues)
pvalues = numpy.array(pvalues, dtype=numpy.float)
if pi0 is None:
if type(vlambda) == float:
vlambda = (vlambda,)
if len(vlambda) > 1 and len(vlambda) < 4:
raise ValueError(
"if length of vlambda greater than 1, you need at least 4 values.")
if len(vlambda) > 1 and (min(vlambda) < 0 or max(vlambda) >= 1):
raise ValueError("vlambda must be within [0, 1).")
# estimate pi0
if len(vlambda) == 1:
vlambda = vlambda[0]
if vlambda < 0 or vlambda >= 1:
raise ValueError("vlambda must be within [0, 1).")
pi0 = numpy.mean([x >= vlambda for x in pvalues]) / (1.0 - vlambda)
pi0 = min(pi0, 1.0)
else:
pi0 = numpy.zeros(len(vlambda), numpy.float)
for i in range(len(vlambda)):
pi0[i] = numpy.mean([x >= vlambda[i]
for x in pvalues]) / (1.0 - vlambda[i])
if pi0_method == "smoother":
if smooth_log_pi0:
pi0 = math.log(pi0)
tck = scipy.interpolate.splrep(vlambda,
pi0,
k=smooth_df,
s=10000)
if plot:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(vlambda, pi0)
x2 = numpy.arange(0, 1, 0.001)
y2 = scipy.interpolate.splev(x2, tck)
plt.plot(x2, y2)
plt.show()
pi0 = scipy.interpolate.splev(max(vlambda), tck)
if smooth_log_pi0:
pi0 = math.exp(pi0)
elif pi0_method == "bootstrap":
minpi0 = min(pi0)
mse = numpy.zeros(len(vlambda), numpy.float)
pi0_boot = numpy.zeros(len(vlambda), numpy.float)
for i in range(100):
# sample pvalues
idx_boot = numpy.random.random_integers(0, m - 1, m)
pvalues_boot = pvalues[idx_boot]
for x in range(len(vlambda)):
# compute number of pvalues larger than lambda[x]
pi0_boot[x] = numpy.mean(
pvalues_boot > vlambda[x]) / (1.0 - vlambda[x])
mse += (pi0_boot - minpi0) ** 2
pi0 = min(pi0[mse == min(mse)])
else:
raise ValueError(
"'pi0_method' must be one of 'smoother' or 'bootstrap'.")
pi0 = min(pi0, 1.0)
if pi0 <= 0:
raise ValueError(
"The estimated pi0 <= 0. Check that you have valid p-values "
"or use another vlambda method.")
if fdr_level is not None and (fdr_level <= 0 or fdr_level > 1):
raise ValueError("'fdr_level' must be within (0, 1].")
# compute qvalues
idx = numpy.argsort(pvalues)
# monotonically decreasing bins, so that bins[i-1] > x >= bins[i]
bins = numpy.unique(pvalues)[::-1]
# v[i] = number of observations less than or equal to pvalue[i]
# could this be done more elegantly?
val2bin = len(bins) - numpy.digitize(pvalues, bins)
v = numpy.zeros(m, dtype=numpy.int)
lastbin = None
for x in range(m - 1, -1, -1):
bin = val2bin[idx[x]]
if bin != lastbin:
c = x
v[idx[x]] = c + 1
lastbin = bin
qvalues = pvalues * pi0 * m / v
if robust:
qvalues /= (1.0 - (1.0 - pvalues) ** m)
# bound qvalues by 1 and make them monotonic
qvalues[idx[m - 1]] = min(qvalues[idx[m - 1]], 1.0)
for i in range(m - 2, -1, -1):
qvalues[idx[i]] = min(min(qvalues[idx[i]], qvalues[idx[i + 1]]), 1.0)
result = FDRResult()
result.mQValues = qvalues
if fdr_level is not None:
result.mPassed = [x <= fdr_level for x in result.mQValues]
else:
result.mPassed = [False for x in result.mQValues]
result.mPValues = pvalues
result.mPi0 = pi0
result.mLambda = vlambda
result.xvalues = qvalues
return result
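# Illustrative usage sketch (made-up p-values; pi0 is fixed at 1.0 so this toy
# example does not depend on the smoother/bootstrap estimation of pi0):
def _example_fdr():
    pvalues = [0.0001, 0.001, 0.02, 0.2, 0.5, 0.8]
    fdr = doFDRPython(pvalues, pi0=1.0, fdr_level=0.05)
    # fdr.mQValues holds one q-value per input p-value;
    # fdr.mPassed marks the tests with q-value <= fdr_level.
    return fdr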
class CorrelationTest:
'''coefficient is r, not r squared'''
def __init__(self,
s_result=None,
method=None):
self.mPValue = None
self.mMethod = None
if s_result:
self.mCoefficient = s_result[0]
self.mPValue = s_result[1]
self.mNObservations = 0
self.mAlternative = "two-sided"
else:
self.mCoefficient = 0
self.mPValue = 1
self.mSignificance = "na"
self.mNObservations = 0
self.mAlternative = "na"
self.mMethod = "na"
if method:
self.mMethod = method
if self.mPValue is not None:
self.mSignificance = getSignificance(self.mPValue)
def __str__(self):
return "\t".join((
"%6.4f" % self.mCoefficient,
"%e" % self.mPValue,
self.mSignificance,
"%i" % self.mNObservations,
self.mMethod,
self.mAlternative))
@classmethod
def getHeaders(cls):
return ("coeff", "pvalue", "significance", "observations",
"method", "alternative")
def filterMasked(xvals, yvals, missing=("na", "Nan", None, ""),
dtype=numpy.float):
"""convert xvals and yvals to numpy array skipping pairs with
one or more missing values."""
xmask = [i in missing for i in xvals]
ymask = [i in missing for i in yvals]
    return (numpy.array([xvals[i] for i in range(len(xvals))
                         if not xmask[i] and not ymask[i]], dtype=dtype),
            numpy.array([yvals[i] for i in range(len(yvals))
                         if not xmask[i] and not ymask[i]], dtype=dtype))
def doCorrelationTest(xvals, yvals):
"""compute correlation between x and y.
Raises a value-error if there are not enough observations.
"""
if len(xvals) <= 1 or len(yvals) <= 1:
raise ValueError("can not compute correlation with no data")
if len(xvals) != len(yvals):
raise ValueError("data vectors have unequal length")
x, y = filterMasked(xvals, yvals)
result = CorrelationTest(s_result=scipy.stats.pearsonr(x, y),
method="pearson")
result.mNObservations = len(x)
return result
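# Illustrative usage sketch (toy data; the pair containing "na" is dropped by
# filterMasked, leaving five observations):
def _example_correlation():
    xvals = [1, 2, 3, 4, "na", 6]
    yvals = [2.0, 4.1, 5.9, 8.2, 10.0, 12.1]
    result = doCorrelationTest(xvals, yvals)
    # result.mCoefficient is Pearson's r, result.mPValue the two-sided p-value,
    # result.mNObservations the number of pairs actually used.
    return result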
def getPooledVariance(data):
"""return pooled variance from a
list of tuples (sample_size, variance)."""
t, var = 0, 0
for n, s in data:
t += n
var += (n - 1) * s
assert t > len(data), "sample size smaller than samples combined"
return var / float(t - len(data))
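# Illustrative worked example (made-up numbers): two samples with
# (sample_size, variance) of (10, 4.0) and (20, 9.0) give
#   ((10 - 1) * 4.0 + (20 - 1) * 9.0) / (10 + 20 - 2) = 207.0 / 28 ~= 7.39
def _example_pooled_variance():
    return getPooledVariance([(10, 4.0), (20, 9.0)])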
def computeROC(values):
'''return a roc curve for *values*. Values
is a sorted list of (value, bool) pairs.
Deprecated - use getPerformance instead
returns a list of (FPR,TPR) tuples.
'''
roc = []
npositives = len([x for x in values if x[1]])
if npositives == 0:
raise ValueError("no positives among values")
ntotal = len(values)
last_value, last_fpr = None, None
tp, fp = 0, 0
tn, fn = ntotal - npositives, npositives
for value, is_positive in values:
if is_positive:
tp += 1
fn -= 1
else:
fp += 1
tn -= 1
if last_value != value:
try:
tpr = float(tp) / (tp + fn)
except ZeroDivisionError:
tpr = 0
try:
fpr = float(fp) / (fp + tn)
except ZeroDivisionError:
fpr = 0
if last_fpr != fpr:
roc.append((fpr, tpr))
last_fpr = fpr
        last_value = value
return roc
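# Illustrative usage sketch: the input must be sorted by score, each entry
# being (score, is_positive). For the toy input below the result is
# [(0.0, 0.5), (0.5, 1.0), (1.0, 1.0)] as (FPR, TPR) pairs.
def _example_roc():
    values = [(0.9, True), (0.8, True), (0.7, False), (0.1, False)]
    return computeROC(values)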
class TTest:
def __init__(self):
pass
class WelchTTest:
def __init__(self):
pass
PairedTTest = collections.namedtuple("PairedTTest", "statistic pvalue")
def doPairedTTest(vals1, vals2):
'''perform paired t-test.
vals1 and vals2 need to contain the same number of elements.
'''
return PairedTTest._make(scipy.stats.ttest_rel(vals1, vals2))
def doWelchsTTest(n1, mean1, std1,
n2, mean2, std2,
alpha=0.05):
    '''Welch's approximate t-test for the difference of two means of
    heteroscedastic populations.
This functions does a two-tailed test.
see PMID: 12016052
:Parameters:
n1 : int
number of variates in sample 1
n2 : int
number of variates in sample 2
mean1 : float
mean of sample 1
mean2 : float
mean of sample 2
std1 : float
standard deviation of sample 1
std2 : float
standard deviation of sample 2
returns a WelchTTest
'''
if std1 == 0 and std2 == 0:
raise ValueError('standard deviations are 0.')
# convert standard deviation to sample variance
svar1 = std1 ** 2 * n1 / float(n1 - 1)
svar2 = std2 ** 2 * n2 / float(n2 - 1)
# compute df and test statistic
df = ((svar1 / n1 + svar2 / n2) ** 2) / \
(((svar1 / n1) ** 2) / (n1 - 1) + ((svar2 / n2) ** 2) / (n2 - 1))
denom = numpy.sqrt(svar1 / n1 + svar2 / n2)
z = abs(mean1 - mean2) / denom
# do the test
pvalue = 2 * scipy.stats.t.sf(z, df)
result = WelchTTest()
result.mPValue = pvalue
result.mDegreesFreedom = df
result.mZ = z
result.mMean1 = mean1
result.mMean2 = mean2
result.mSampleVariance1 = svar1
result.mSampleVariance2 = svar2
result.mDifference = mean1 - mean2
result.mZLower = scipy.stats.t.ppf(alpha, df)
result.mZUpper = scipy.stats.t.ppf(1.0 - alpha, df)
result.mDifferenceLower = result.mZLower * denom
result.mDifferenceUpper = result.mZUpper * denom
return result
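# Illustrative usage sketch (made-up summary statistics):
def _example_welch():
    result = doWelchsTTest(n1=25, mean1=10.2, std1=1.5,
                           n2=30, mean2=9.1, std2=2.3)
    # result.mPValue, result.mDegreesFreedom and the confidence bounds
    # (mDifferenceLower/mDifferenceUpper) describe the difference of means.
    return result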
def getAreaUnderCurve(xvalues, yvalues):
'''compute area under curve from a set of discrete x,y coordinates
using trapezoids.
This is only as accurate as the density of points.
'''
assert len(xvalues) == len(yvalues)
last_x, last_y = xvalues[0], yvalues[0]
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import types so that we can reference ListType in sphinx param declarations.
# We can't just use list, because sphinx gets confused by
# openstack.resource.Resource.list and openstack.resource2.Resource.list
import time
import threading
import types # noqa
from openstack.cloud import exc
from openstack.cloud import _normalize
from openstack.cloud import _utils
from openstack import exceptions
from openstack import proxy
class NetworkCloudMixin(_normalize.Normalizer):
def __init__(self):
self._ports = None
self._ports_time = 0
self._ports_lock = threading.Lock()
@_utils.cache_on_arguments()
def _neutron_extensions(self):
extensions = set()
resp = self.network.get('/extensions')
data = proxy._json_response(
resp,
error_message="Error fetching extension list for neutron")
for extension in self._get_and_munchify('extensions', data):
extensions.add(extension['alias'])
return extensions
def _has_neutron_extension(self, extension_alias):
return extension_alias in self._neutron_extensions()
def search_networks(self, name_or_id=None, filters=None):
"""Search networks
:param name_or_id: Name or ID of the desired network.
:param filters: a dict containing additional filters to use. e.g.
{'router:external': True}
:returns: a list of ``munch.Munch`` containing the network description.
:raises: ``OpenStackCloudException`` if something goes wrong during the
OpenStack API call.
"""
networks = self.list_networks(
filters if isinstance(filters, dict) else None)
return _utils._filter_list(networks, name_or_id, filters)
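    # Illustrative usage sketch (assumes an authenticated connection object
    # exposing this mixin; the network name and filter are examples only):
    #
    #   nets = cloud.search_networks('private',
    #                                filters={'router:external': False})
    #   for net in nets:
    #       print(net['id'], net['name'])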
def search_routers(self, name_or_id=None, filters=None):
"""Search routers
:param name_or_id: Name or ID of the desired router.
:param filters: a dict containing additional filters to use. e.g.
{'admin_state_up': True}
:returns: a list of ``munch.Munch`` containing the router description.
:raises: ``OpenStackCloudException`` if something goes wrong during the
OpenStack API call.
"""
routers = self.list_routers(
filters if isinstance(filters, dict) else None)
return _utils._filter_list(routers, name_or_id, filters)
def search_subnets(self, name_or_id=None, filters=None):
"""Search subnets
:param name_or_id: Name or ID of the desired subnet.
:param filters: a dict containing additional filters to use. e.g.
{'enable_dhcp': True}
:returns: a list of ``munch.Munch`` containing the subnet description.
:raises: ``OpenStackCloudException`` if something goes wrong during the
OpenStack API call.
"""
subnets = self.list_subnets(
filters if isinstance(filters, dict) else None)
return _utils._filter_list(subnets, name_or_id, filters)
def search_ports(self, name_or_id=None, filters=None):
"""Search ports
:param name_or_id: Name or ID of the desired port.
:param filters: a dict containing additional filters to use. e.g.
{'device_id': '2711c67a-b4a7-43dd-ace7-6187b791c3f0'}
:returns: a list of ``munch.Munch`` containing the port description.
:raises: ``OpenStackCloudException`` if something goes wrong during the
OpenStack API call.
"""
# If port caching is enabled, do not push the filter down to
# neutron; get all the ports (potentially from the cache) and
# filter locally.
if self._PORT_AGE or isinstance(filters, str):
pushdown_filters = None
else:
pushdown_filters = filters
ports = self.list_ports(pushdown_filters)
return _utils._filter_list(ports, name_or_id, filters)
def list_networks(self, filters=None):
"""List all available networks.
:param filters: (optional) dict of filter conditions to push down
:returns: A list of ``munch.Munch`` containing network info.
"""
# If the cloud is running nova-network, just return an empty list.
if not self.has_service('network'):
return []
# Translate None from search interface to empty {} for kwargs below
if not filters:
filters = {}
data = self.network.get("/networks", params=filters)
return self._get_and_munchify('networks', data)
def list_routers(self, filters=None):
"""List all available routers.
:param filters: (optional) dict of filter conditions to push down
:returns: A list of router ``munch.Munch``.
"""
# If the cloud is running nova-network, just return an empty list.
if not self.has_service('network'):
return []
# Translate None from search interface to empty {} for kwargs below
if not filters:
filters = {}
resp = self.network.get("/routers", params=filters)
data = proxy._json_response(
resp,
error_message="Error fetching router list")
return self._get_and_munchify('routers', data)
def list_subnets(self, filters=None):
"""List all available subnets.
:param filters: (optional) dict of filter conditions to push down
:returns: A list of subnet ``munch.Munch``.
"""
# If the cloud is running nova-network, just return an empty list.
if not self.has_service('network'):
return []
# Translate None from search interface to empty {} for kwargs below
if not filters:
filters = {}
data = self.network.get("/subnets", params=filters)
return self._get_and_munchify('subnets', data)
def list_ports(self, filters=None):
"""List all available ports.
:param filters: (optional) dict of filter conditions to push down
:returns: A list of port ``munch.Munch``.
"""
# If pushdown filters are specified and we do not have batched caching
# enabled, bypass local caching and push down the filters.
if filters and self._PORT_AGE == 0:
return self._list_ports(filters)
if (time.time() - self._ports_time) >= self._PORT_AGE:
# Since we're using cached data anyway, we don't need to
# have more than one thread actually submit the list
# ports task. Let the first one submit it while holding
# a lock, and the non-blocking acquire method will cause
# subsequent threads to just skip this and use the old
# data until it succeeds.
# Initially when we never got data, block to retrieve some data.
first_run = self._ports is None
if self._ports_lock.acquire(first_run):
try:
if not (first_run and self._ports is not None):
self._ports = self._list_ports({})
self._ports_time = time.time()
finally:
self._ports_lock.release()
# Wrap the return with filter_list so that if filters were passed
# but we were batching/caching and thus always fetching the whole
# list from the cloud, we still return a filtered list.
return _utils._filter_list(self._ports, None, filters or {})
def _list_ports(self, filters):
# If the cloud is running nova-network, just return an empty list.
if not self.has_service('network'):
return []
resp = self.network.get("/ports", params=filters)
data = proxy._json_response(
resp,
error_message="Error fetching port list")
return self._get_and_munchify('ports', data)
def get_qos_policy(self, name_or_id, filters=None):
"""Get a QoS policy by name or ID.
:param name_or_id: Name or ID of the policy.
:param filters:
A dictionary of meta data to use for further filtering. Elements
of this dictionary may, themselves, be dictionaries. Example::
{
'last_name': 'Smith',
'other': {
'gender': 'Female'
}
}
OR
A string containing a jmespath expression for further filtering.
Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]"
        :returns: A policy ``munch.Munch`` or None if no matching policy is
            found.
"""
return _utils._get_entity(
self, 'qos_policie', name_or_id, filters)
def search_qos_policies(self, name_or_id=None, filters=None):
"""Search QoS policies
:param name_or_id: Name or ID of the desired policy.
:param filters: a dict containing additional filters to use. e.g.
{'shared': True}
        :returns: a list of ``munch.Munch`` containing the policy description.
:raises: ``OpenStackCloudException`` if something goes wrong during the
OpenStack API call.
"""
policies = self.list_qos_policies(filters)
return _utils._filter_list(policies, name_or_id, filters)
def list_qos_rule_types(self, filters=None):
"""List all available QoS rule types.
:param filters: (optional) dict of filter conditions to push down
:returns: A list of rule types ``munch.Munch``.
"""
if not self._has_neutron_extension('qos'):
raise exc.OpenStackCloudUnavailableExtension(
'QoS extension is not available on target cloud')
# Translate None from search interface to empty {} for kwargs below
if not filters:
filters = {}
resp = self.network.get("/qos/rule-types", params=filters)
data = proxy._json_response(
resp,
error_message="Error fetching QoS rule types list")
return self._get_and_munchify('rule_types', data)
def get_qos_rule_type_details(self, rule_type, filters=None):
"""Get a QoS rule type details by rule type name.
:param string rule_type: Name of the QoS rule type.
:returns: A rule type details ``munch.Munch`` or None if
no matching rule type is found.
"""
if not self._has_neutron_extension('qos'):
raise exc.OpenStackCloudUnavailableExtension(
'QoS extension is not available on target cloud')
if not self._has_neutron_extension('qos-rule-type-details'):
raise exc.OpenStackCloudUnavailableExtension(
'qos-rule-type-details extension is not available '
'on target cloud')
resp = self.network.get(
"/qos/rule-types/{rule_type}".format(rule_type=rule_type))
data = proxy._json_response(
resp,
error_message="Error fetching QoS details of {rule_type} "
"rule type".format(rule_type=rule_type))
return self._get_and_munchify('rule_type', data)
def list_qos_policies(self, filters=None):
"""List all available QoS policies.
:param filters: (optional) dict of filter conditions to push down
:returns: A list of policies ``munch.Munch``.
"""
if not self._has_neutron_extension('qos'):
raise exc.OpenStackCloudUnavailableExtension(
'QoS extension is not available on target cloud')
# Translate None from search interface to empty {} for kwargs below
if not filters:
filters = {}
resp = self.network.get("/qos/policies", params=filters)
data = proxy._json_response(
resp,
error_message="Error fetching QoS policies list")
return self._get_and_munchify('policies', data)
def get_network(self, name_or_id, filters=None):
"""Get a network by name or ID.
:param name_or_id: Name or ID of the network.
:param filters:
A dictionary | |
# File for plotting utilities
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import os
from utils.sim import *
from utils.data import *
# Included functions with descriptions:
# plot_flight_time: Plots flight time vs rollout
# plot_trained_points: plots flight time vs log trained points
# plot_sensor_quality: plots std dev of sensor measurements vs flight chronologically
# plot_waterfall: Plots model predictions and best action for a traj
# plot_traj_model: plots model predictions from beginning of a trajectory
# plot_battery_thrust: plots thrust vs battery for a trajectory
# plot_euler_preds: plots the one step predictions for the dynamics model on euler angles
# plot_rollout_compare: Plots euler angles over time for first 4 rollouts
# plot_flight_segment: plots a segment of a flight at given location
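# Example usage (hypothetical paths, for illustration only):
#   plot_flight_time('_results/flights/')
#   plot_sensor_quality('sep12_flights/')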
def plot_flight_time(csv_dir):
'''
    Tool that takes in a directory of flight summaries and plots the flight
    time vs. rollouts. The file structure should be as follows:
flights/
..freq1
..rand
..roll1
..roll2
..freq2
..rand
..roll1
..roll2
....
'''
print_flag = False
if print_flag:
print('~~~~~~~~~~~~')
# gather dirs (each will be one line on the plot)
dirs = [dI for dI in os.listdir(
csv_dir) if os.path.isdir(os.path.join(csv_dir, dI))]
# print(dirs)
# font = {'size': 23}
font = {'size': 12}
matplotlib.rc('font', **font)
matplotlib.rc('lines', linewidth=3)
matplotlib.rc('text', usetex=True)
fig = plt.figure()
with sns.axes_style("whitegrid"):
plt.rcParams["font.family"] = "Times New Roman"
plt.rcParams["axes.edgecolor"] = "0.15"
plt.rcParams["axes.linewidth"] = 1.5
        plt.subplots_adjust(top=.96, bottom=0.15, left=.09, right=1-.03)  # , wspace=.25, hspace=.3)
ax1 = plt.subplot(111)
colors = ['#208080', '#F83030', '#808000']
markers = ['*', '.', 'x']
i = 0
for dir, c in zip(reversed(sorted(dirs)), colors):
if print_flag:
print('---' + dir + '---')
            # Load each frequency of rollouts individually
label = dir
files = os.listdir(csv_dir+dir)
# create list for dataframes
data = []
df_main = pd.DataFrame(columns=[
"Flight Idx", "Flight Time (ms)", "Mean Objective", "RMS Pitch Roll", "roll"])
means = []
stds = []
labels = []
for f in sorted(files):
if f != '.DS_Store':
if print_flag:
print("-> Rollout: " + f[-10:-3])
with open(csv_dir+dir+'/'+f, "rb") as csvfile:
                        # load data
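                        # Rollout tag parsed from the filename: the text
                        # between the final '_' and the file extension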
roll = f[-f[::-1].find('_'):-f[::-1].find('.')-1]
df = pd.read_csv(csvfile, sep=",")
df['roll'] = roll
df_main = df_main.append(df)
mean_sub = df["Flight Time (ms)"].mean()
std_sub = df["Flight Time (ms)"].std()
means.append(mean_sub)
stds.append(std_sub)
if roll == 'rand':
roll = '00'
else:
roll = roll[-2:]
labels.append(roll)
# mean = np.mean(new_data[:,1])
# std = np.std(new_data[:,1])
if print_flag:
new_data = np.loadtxt(
csvfile, delimiter=",", skiprows=1)
print(" Mean flight length is: ",
np.mean(new_data[:, 1]))
print(" Std flight length is: ",
np.std(new_data[:, 1]))
means = np.array(means)
stds = np.array(stds)
x = np.arange(0, len(labels))
ax1.plot(x, means/1000, label=label, color=c,
marker=markers[i], markersize='14')
ax1.set_xlim([0,len(labels)-1])
ax1.axhline(np.max(means)/1000, linestyle=':',
label=str("Max" + dir), alpha=.8, color=c)
ax1.set_ylabel("Flight Time (s)")
ax1.set_xlabel("Rollout (10 Flights Per)")
ax1.grid(b=True, which='major', color='k',
linestyle='-', linewidth=0, alpha=.5)
ax1.grid(b=True, which='minor', color='r', linestyle='--', linewidth=0)
# ax1.set_title("Flight Time vs Rollout")
ax1.set_ylim([0, 2.500])
ax1.set_xticks(x)
ax1.set_xticklabels(labels, rotation=75, fontsize=12)
ax1.legend()
# , edgecolor='#CC4F1B', facecolor='#FF9848')
plt.fill_between(x, (means-stds)/1000, (means+stds) /
1000, alpha=0.3, color=c)
i += 1
###############
fig.set_size_inches(7, 3.5)
# plt.savefig('psoter', edgecolor='black', dpi=100, transparent=True)
plt.savefig('destination_path.pdf', format='pdf', dpi=300)
# plt.show()
print('\n')
def plot_trained_points(csv_dir):
"""
    Tool that takes in a directory of flight summaries and plots the flight
    time vs. cumulative trained datapoints. The file structure should be as follows:
flights/
..freq1
..rand
..roll1
..roll2
..freq2
..rand
..roll1
..roll2
....
"""
print_flag = False
if print_flag:
print('~~~~~~~~~~~~')
# gather dirs (each will be one line on the plot)
dirs = [dI for dI in os.listdir(
csv_dir) if os.path.isdir(os.path.join(csv_dir, dI))]
# print(dirs)
font = {'size': 22}
matplotlib.rc('font', **font)
matplotlib.rc('lines', linewidth=2.5)
with sns.axes_style("whitegrid"):
plt.rcParams["axes.edgecolor"] = "0.15"
plt.rcParams["axes.linewidth"] = 1.5
ax1 = plt.subplot(111)
plt.subplots_adjust(wspace=.15, left=.1, right=1-.07) # , hspace=.15)
colors = ['#208080', '#F83030', '#808000']
markers = ['*', 'x', '.']
for i, (dir, c) in enumerate(zip(reversed(sorted(dirs)), colors)):
if print_flag:
print('---' + dir + '---')
            # Load each frequency of rollouts individually
label = dir + " Rollouts"
files = os.listdir(csv_dir+dir)
# create list for dataframes
data = []
df_main = pd.DataFrame(columns=["Flight Idx", "Flight Time (ms)",
"Trained Points", "Mean Objective", "RMS Pitch Roll", "roll"])
means = []
stds = []
labels = []
cum_points = 0
for f in sorted(files):
print(f)
if print_flag:
print("-> Rollout: " + f[-10:-3])
with open(csv_dir+dir+'/'+f, "rb") as csvfile:
                    # load data
roll = f[-f[::-1].find('_'):-f[::-1].find('.')-1]
df = pd.read_csv(csvfile, sep=",")
df['roll'] = roll
df_main = df_main.append(df)
mean_sub = df["Flight Time (ms)"].mean()
std_sub = df["Flight Time (ms)"].std()
data_points = np.sum(df["Trained Points"])
means.append(mean_sub)
stds.append(std_sub)
labels.append(data_points)
# mean = np.mean(new_data[:,1])
# std = np.std(new_data[:,1])
if print_flag:
new_data = np.loadtxt(csvfile, delimiter=",", skiprows=1)
print(" Mean flight length is: ",
np.mean(new_data[:, 1]))
print(" Std flight length is: ", np.std(new_data[:, 1]))
means = np.array(means)
stds = np.array(stds)
labels = np.array(labels)
labels = np.cumsum(labels)
print(labels)
x = np.arange(0, len(labels))
ax1.scatter(labels, means/1000, label=label,
marker=markers[i], color=c, linewidth='16')
# ax1.axhline(np.max(means),linestyle='--', label=str("Max" + dir),alpha=.5, color=c)
ax1.set_xscale("log", nonposx='clip')
ax1.set_xlim([50, 20000])
ax1.set_ylabel("Flight Time (s)")
ax1.set_xlabel("Trained Datapoints")
ax1.grid(b=True, which='major', color='k',
linestyle='-', linewidth=1.2, alpha=.75)
ax1.grid(b=True, which='minor', color='b',
linestyle='--', linewidth=1.2, alpha=.5)
# ax1.set_title("Flight Time vs Datapoints")
# Customize the major grid
# ax1.grid(which='major', linestyle='-', linewidth='0.5', color='red')
# Customize the minor grid
# plt.grid(True, which='majorminor', linestyle=':', linewidth='0.75', color='black')
# ax1.set_ylim([0,5000])
# ax1.set_xticks(x)
# ax1.set_xticklabels(labels, rotation = 75, fontsize = 14)
ax1.legend()
###############
plt.show()
print('\n')
def plot_sensor_quality(dir):
"""
    Goes through subfolders of a given directory to see if there are any
    noticeable changes in how the data is logged that may indicate why some
    flights are so much better.
    Takes the mean and standard deviation of the state data through each takeoff.
    Collects noise statistics of dimension (n, dx), where
    - n is the number of rollouts
    - dx is the dimension of the state
"""
print('------------')
    print('RUNNING TEST OF LOGGED STATE DATA NOISE')
dirs = os.listdir("_logged_data_autonomous/"+dir)
# init arrays for the means of each rollout for large scale analysis
means = np.zeros([len(dirs), 9])
noises = np.zeros([len(dirs), 9])
# init array for a long list of all flights
total_list = np.zeros([len(dirs)*10, 9])
# dim = 1
l1 = []
l2 = []
l3 = []
l7 = []
l8 = []
l9 = []
flight_times = []
for d in sorted(dirs)[:-1]:
if d != '.DS_Store':
print('~~~ dir:', d, ' ~~~')
files = os.listdir("_logged_data_autonomous/"+dir+'/'+d+'/')
i = 0
for f in sorted(files):
if len(f) > 5 and f[-4:] == '.csv':
# print("File num: ", i)
new_data = np.loadtxt(
"_logged_data_autonomous/"+dir+'/'+d+'/'+f, delimiter=",")
Objv = new_data[:, 14]
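                    # First index where an objective value is logged (!= -1),
                    # i.e. roughly where the controller takes over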
move_idx = np.argmax(Objv != -1)
Time = new_data[:, 13]
flight_len = Time[-1]-Time[move_idx-5]
flight_times.append(flight_len)
# GRABS DATA DURING AND BEFORE TAKEOFF
state_data = new_data[:move_idx-5, :9]
means = np.mean(state_data, axis=0)
noise = np.std(state_data, axis=0)
# print("Means: ", means)
# print("Noises: ", noise)
l1.append(noise[0])
l2.append(noise[1])
l3.append(noise[2])
l7.append(noise[6])
l8.append(noise[7])
l9.append(noise[8])
                    i += 1  # not using enumerate because of the .DS_Store files
plotting_keys = np.loadtxt(
"_logged_data_autonomous/"+dir+'/'+"data.csv", delimiter='\t')
print(np.shape(plotting_keys))
print(np.shape(l1))
font = {'size': 18}
matplotlib.rc('font', **font)
matplotlib.rc('lines', linewidth=2.5)
with sns.axes_style("whitegrid"):
plt.rcParams["axes.edgecolor"] = "0.15"
plt.rcParams["axes.linewidth"] = 1.5
ax1 = plt.subplot(111)
plt.subplots_adjust(wspace=.15, left=.1, right=1-.07) # , hspace=.15)
for i, row in enumerate(plotting_keys):
# drift fails
if row[0] == 1:
lab1 = plt.axvline(i, linestyle='--', color='r',
alpha=.3, label='Drift Failures')
# sensor fails
elif row[1] == 1:
lab2 = plt.axvline(i, linestyle='--', color='b',
alpha=.3, label='Sensor Failures')
# replace parts
elif row[2] == 1:
lab3 = plt.axvline(i, linestyle='--', color='k',
alpha=.3, label='Replace Parts')
# Lines for frequency cutoffs
lab50 = plt.axvline(160, linestyle=':', color='k',
alpha=.8, label='50Hz Begins')
lab75 = plt.axvline(290, linestyle='-.', color='k',
alpha=.8, label='75Hz Begins')
# ax1.set_title("Noise on certain variables across flights")
ax1.set_xlabel("Chronological Flight")
ax1.set_ylabel("Std. Dev. Data (deg/s^2) - Noise")
p1 = plt.plot(l1, label='angularx')
p2 = plt.plot(l2, label='angulary')
p3 = plt.plot(l3, label='angularz')
ax1.set_ylim([0, 6])
# p4 = plt.plot(l7, label='linearx')
# p5 = plt.plot(l8, label='lineary')
# p6 = plt.plot(l9, label='linearz')
lns = p1+p2+p3+[lab1]+[lab2]+[lab3]+[lab50]+[lab75]
labs = [l.get_label() for l in lns]
ax1.legend(lns, labs, fancybox=True, framealpha=1,
shadow=True, borderpad=1, ncol=3)
# ax2 = ax1.twinx()
# ax2.plot(flight_times, linestyle='-', color = 'k', label='Flight Time')
# ax2.set_ylabel("Flight Time (ms)")
plt.show()
def plot_voltage_context(model, df, action=[37000, 37000, 30000, 45000], act_range=25000, normalize=False, ground_truth=False, model_nobat=[]):
'''
Takes in a dynamics model and plots the distributions of points in the dataset
and plots various lines verses different voltage levels
'''
################ Figure out what to do with | |
# -*- coding: utf-8 -*-
from pysignfe.corr_unicode import *
from pysignfe.xml_sped import *
from pysignfe.nfe.manual_401 import ESQUEMA_ATUAL
from pysignfe.nfe.manual_300 import nfe_110
import os
from lxml.etree import tounicode
from pysignfe.nfe.webservices_3 import ESTADO_SVC_CONTINGENCIA
from pysignfe.nfe.webservices_flags import UF_CODIGO
from pysignfe import __version__
DIRNAME = os.path.dirname(__file__)
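# Each class below renders and parses one XML tag group of the NF-e layout:
# get_xml() builds the group as a string, set_xml() reads it back from XML.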
class Deduc(XMLNFe):
def __init__(self):
super(Deduc, self).__init__()
self.xDed = TagCaracter(nome=u'xDed', codigo=u'ZC11', tamanho=[1, 60] , raiz=u'//deduc')
self.vDed = TagDecimal(nome=u'vDed' , codigo=u'ZC12', tamanho=[1, 15, 1], decimais=[1, 2, 2], raiz=u'//deduc')
def get_xml(self):
if not (self.xDed.valor or self.vDed.valor):
return u''
xml = XMLNFe.get_xml(self)
xml += u'<deduc>'
xml += self.xDed.xml
xml += self.vDed.xml
xml += u'</deduc>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.xDed.xml = arquivo
self.vDed.xml = arquivo
xml = property(get_xml, set_xml)
class ForDia(XMLNFe):
def __init__(self):
super(ForDia, self).__init__()
self.dia = TagInteiro(nome=u'dia' , codigo=u'ZC05', tamanho=[1, 2, 1] , raiz=u'//forDia')
self.qtde = TagDecimal(nome=u'qtde', codigo=u'ZC06', tamanho=[1, 11, 1], decimais=[1, 10, 10], raiz=u'//forDia')
def get_xml(self):
if not (self.dia.valor or self.qtde.valor):
return u''
xml = XMLNFe.get_xml(self)
xml += u'<forDia>'
xml += self.dia.xml
xml += self.qtde.xml
xml += u'</forDia>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.dia.xml = arquivo
self.qtde.xml = arquivo
xml = property(get_xml, set_xml)
class Cana(XMLNFe):
def __init__(self):
super(Cana, self).__init__()
self.safra = TagCaracter(nome=u'safra' , codigo=u'ZC02', tamanho=[4, 9] , raiz=u'//NFe/infNFe/cana')
self.ref = TagCaracter(nome=u'ref' , codigo=u'ZC03', tamanho=[7, 7] , raiz=u'//NFe/infNFe/cana')
self.forDia = []
self.qTotMes = TagDecimal(nome=u'qTotMes', codigo=u'ZC07', tamanho=[1, 11, 1], decimais=[1, 10, 10], raiz=u'//NFe/infNFe/cana')
self.qTotAnt = TagDecimal(nome=u'qTotAnt', codigo=u'ZC08', tamanho=[1, 11, 1], decimais=[1, 10, 10], raiz=u'//NFe/infNFe/cana')
self.qTotGer = TagDecimal(nome=u'qTotGer', codigo=u'ZC09', tamanho=[1, 11, 1], decimais=[1, 10, 10], raiz=u'//NFe/infNFe/cana')
self.deduc = []
self.vFor = TagDecimal(nome=u'vFor' , codigo=u'ZC13', tamanho=[1, 15, 1], decimais=[1, 2, 2], raiz=u'//NFe/infNFe/cana')
self.vTotDed = TagDecimal(nome=u'vTotDed', codigo=u'ZC14', tamanho=[1, 15, 1], decimais=[1, 2, 2], raiz=u'//NFe/infNFe/cana')
self.vLiqFor = TagDecimal(nome=u'vLiqFor', codigo=u'ZC15', tamanho=[1, 15, 1], decimais=[1, 2, 2], raiz=u'//NFe/infNFe/cana')
def get_xml(self):
if not (self.safra.valor):
return u''
xml = XMLNFe.get_xml(self)
xml += u'<cana>'
xml += self.safra.xml
xml += self.ref.xml
for fd in self.forDia:
xml += fd.xml
xml += self.qTotMes.xml
xml += self.qTotAnt.xml
xml += self.qTotGer.xml
for d in self.deduc:
xml += d.xml
xml += self.vFor.xml
xml += self.vTotDed.xml
xml += self.vLiqFor.xml
xml += u'</cana>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.safra.xml = arquivo
self.ref.xml = arquivo
self.forDia = self.le_grupo(u'//NFe/infNFe/cana/forDia', ForDia)
self.qTotMes.xml = arquivo
self.qTotAnt.xml = arquivo
self.qTotGer.xml = arquivo
self.deduc = self.le_grupo(u'//NFe/infNFe/cana/deduc', Deduc)
self.vFor.xml = arquivo
self.vTotDed.xml = arquivo
self.vLiqFor.xml = arquivo
xml = property(get_xml, set_xml)
class ISSQN(nfe_110.ISSQN):
def __init__(self):
super(ISSQN, self).__init__()
self.cSitTrib = TagCaracter(nome=u'cSitTrib', codigo=u'U07', tamanho=[1, 1], raiz=u'//det/imposto/ISSQN')
def get_xml(self):
if not (self.cSitTrib.valor):
return u''
xml = XMLNFe.get_xml(self)
xml += u'<ISSQN>'
xml += self.vBC.xml
xml += self.vAliq.xml
xml += self.vISSQN.xml
xml += self.cMunFG.xml
xml += self.cListServ.xml
xml += self.cSitTrib.xml
xml += u'</ISSQN>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.vBC.xml = arquivo
self.vAliq.xml = arquivo
self.vISSQN.xml = arquivo
self.cMunFG.xml = arquivo
self.cListServ.xml = arquivo
self.cSitTrib.xml = arquivo
xml = property(get_xml, set_xml)
class COFINSST(nfe_110.COFINSST):
def __init__(self):
super(COFINSST, self).__init__()
class TagCSTCOFINS(nfe_110.TagCSTCOFINS):
def __init__(self, *args, **kwargs):
super(TagCSTCOFINS, self).__init__(*args, **kwargs)
class COFINS(nfe_110.COFINS):
def __init__(self):
super(COFINS, self).__init__()
class PISST(nfe_110.PISST):
def __init__(self):
super(PISST, self).__init__()
class TagCSTPIS(nfe_110.TagCSTPIS):
def __init__(self, *args, **kwargs):
super(TagCSTPIS, self).__init__(*args, **kwargs)
class PIS(nfe_110.PIS):
def __init__(self):
super(PIS, self).__init__()
class II(nfe_110.II):
def __init__(self):
super(II, self).__init__()
class TagCSTIPI(nfe_110.TagCSTIPI):
def __init__(self, *args, **kwargs):
super(TagCSTIPI, self).__init__(*args, **kwargs)
class IPI(nfe_110.IPI):
def __init__(self):
super(IPI, self).__init__()
class TagCSOSN(TagCaracter):
def __init__(self, *args, **kwargs):
super(TagCSOSN, self).__init__(*args, **kwargs)
self.nome = u'CSOSN'
self.codigo = u'N12a'
self.tamanho = [3, 3]
self.raiz = u''
self.grupo_icms = None
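        # Set by the owning ICMS group; assigning a CSOSN value then
        # reconfigures the sibling tags of that group (see set_valor below)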
def set_valor(self, novo_valor):
super(TagCSOSN, self).set_valor(novo_valor)
if not self.grupo_icms:
return None
#
        # We mark all tags as not required
#
# self.grupo_icms.modBC.obrigatorio = False
# self.grupo_icms.vBC.obrigatorio = False
# self.grupo_icms.pRedBC.obrigatorio = False
# self.grupo_icms.pICMS.obrigatorio = False
# self.grupo_icms.vICMS.obrigatorio = False
# self.grupo_icms.modBCST.obrigatorio = False
# self.grupo_icms.pMVAST.obrigatorio = False
# self.grupo_icms.pRedBCST.obrigatorio = False
# self.grupo_icms.vBCST.obrigatorio = False
# self.grupo_icms.pICMSST.obrigatorio = False
# self.grupo_icms.vICMSST.obrigatorio = False
# self.grupo_icms.vBCSTRet.obrigatorio = False
# self.grupo_icms.vICMSSTRet.obrigatorio = False
# self.grupo_icms.pCredSN.obrigatorio = False
# self.grupo_icms.vCredICMSSN.obrigatorio = False
#
# #
        # # For safety, we zero out the values of the ICMS group tags
        # # when redefining the tax situation code
# #
# self.grupo_icms.modBC.valor = 3
# self.grupo_icms.vBC.valor = u'0.00'
# self.grupo_icms.pRedBC.valor = u'0.00'
# self.grupo_icms.pICMS.valor = u'0.00'
# self.grupo_icms.vICMS.valor = u'0.00'
# self.grupo_icms.modBCST.valor = 4
# self.grupo_icms.pMVAST.valor = u'0.00'
# self.grupo_icms.pRedBCST.valor = u'0.00'
# self.grupo_icms.vBCST.valor = u'0.00'
# self.grupo_icms.pICMSST.valor = u'0.00'
# self.grupo_icms.vICMSST.valor = u'0.00'
# self.grupo_icms.vBCSTRet.valor = u'0.00'
# self.grupo_icms.vICMSSTRet.valor = u'0.00'
# self.grupo_icms.pCredSN.valor = u'0.00'
# self.grupo_icms.vCredICMSSN.valor = u'0.00'
#
        # For each tax situation code (CSOSN), we redefine the root
        # and the required flags of the ICMS group tags
        #
        # We also set the value of the traditional ICMS CST tag to map
        # the new values when printing the DANFE
        #
        # Mapping according to Nota Técnica 2009.004
#
#
        # We use the private attribute to avoid triggering
        # the set_valor processing of the TagCSTICMS class
#
if self.valor == u'101':
self.grupo_icms.nome_tag = u'ICMSSN101'
self.grupo_icms.raiz_tag = u'//det/imposto/ICMS/ICMSSN101'
self.grupo_icms.pCredSN.obrigatorio = True
self.grupo_icms.vCredICMSSN.obrigatorio = True
self.grupo_icms.CST._valor_string = self.valor
elif self.valor in (u'102', u'103', u'300', u'400'):
self.grupo_icms.nome_tag = u'ICMSSN102'
self.grupo_icms.raiz_tag = u'//det/imposto/ICMS/ICMSSN102'
self.grupo_icms.CST._valor_string = self.valor
elif self.valor == u'201':
self.grupo_icms.nome_tag = u'ICMSSN201'
self.grupo_icms.raiz_tag = u'//det/imposto/ICMS/ICMSSN201'
self.grupo_icms.modBCST.obrigatorio = True
self.grupo_icms.vBCST.obrigatorio = True
self.grupo_icms.pICMSST.obrigatorio = True
self.grupo_icms.vICMSST.obrigatorio = True
self.grupo_icms.pCredSN.obrigatorio = True
self.grupo_icms.vCredICMSSN.obrigatorio = True
self.grupo_icms.CST._valor_string = self.valor
elif self.valor in (u'202', u'203'):
self.grupo_icms.nome_tag = u'ICMSSN202'
self.grupo_icms.raiz_tag = u'//det/imposto/ICMS/ICMSSN202'
self.grupo_icms.modBCST.obrigatorio = True
self.grupo_icms.vBCST.obrigatorio = True
self.grupo_icms.pICMSST.obrigatorio = True
self.grupo_icms.vICMSST.obrigatorio = True
self.grupo_icms.CST._valor_string = self.valor
elif self.valor == u'500':
self.grupo_icms.nome_tag = u'ICMSSN500'
self.grupo_icms.raiz_tag = u'//det/imposto/ICMS/ICMSSN500'
self.grupo_icms.vBCSTRet.obrigatorio = True
self.grupo_icms.vICMSSTRet.obrigatorio = True
self.grupo_icms.CST._valor_string = self.valor
elif self.valor == u'900':
self.grupo_icms.nome_tag = u'ICMSSN900'
self.grupo_icms.raiz_tag = u'//det/imposto/ICMS/ICMSSN900'
self.grupo_icms.modBC.obrigatorio = True
self.grupo_icms.vBC.obrigatorio = True
self.grupo_icms.pICMS.obrigatorio = True
self.grupo_icms.vICMS.obrigatorio = True
self.grupo_icms.modBCST.obrigatorio = True
self.grupo_icms.vBCST.obrigatorio = True
self.grupo_icms.pICMSST.obrigatorio = True
self.grupo_icms.vICMSST.obrigatorio = True
self.grupo_icms.pCredSN.obrigatorio = True
self.grupo_icms.vCredICMSSN.obrigatorio = True
self.grupo_icms.CST._valor_string = self.valor
#
        # Redefine the root for all tags of the ICMS group
#
self.grupo_icms.orig.raiz = self.grupo_icms.raiz_tag
self.grupo_icms.CSOSN.raiz = self.grupo_icms.raiz_tag
self.grupo_icms.modBC.raiz = self.grupo_icms.raiz_tag
self.grupo_icms.vBC.raiz = self.grupo_icms.raiz_tag
self.grupo_icms.pRedBC.raiz = self.grupo_icms.raiz_tag
self.grupo_icms.pICMS.raiz = self.grupo_icms.raiz_tag
self.grupo_icms.vICMS.raiz = self.grupo_icms.raiz_tag
self.grupo_icms.modBCST.raiz = self.grupo_icms.raiz_tag
self.grupo_icms.pMVAST.raiz = self.grupo_icms.raiz_tag
self.grupo_icms.pRedBCST.raiz = self.grupo_icms.raiz_tag
self.grupo_icms.vBCST.raiz = self.grupo_icms.raiz_tag
self.grupo_icms.pICMSST.raiz = self.grupo_icms.raiz_tag
self.grupo_icms.vICMSST.raiz = self.grupo_icms.raiz_tag
self.grupo_icms.vBCSTRet.raiz = self.grupo_icms.raiz_tag
self.grupo_icms.vICMSSTRet.raiz = self.grupo_icms.raiz_tag
self.grupo_icms.pCredSN.raiz = self.grupo_icms.raiz_tag
self.grupo_icms.vCredICMSSN.raiz = self.grupo_icms.raiz_tag
def get_valor(self):
return self._valor_string
valor = property(get_valor, set_valor)
class TagCSTICMS(nfe_110.TagCSTICMS):
def __init__(self, *args, **kwargs):
super(TagCSTICMS, self).__init__(*args, **kwargs)
self.nome = u'CST'
self.codigo = u'N12'
self.tamanho = [2, 2]
self.raiz = u''
self.grupo_icms = None
def set_valor(self, novo_valor):
super(TagCSTICMS, self).set_valor(novo_valor)
if not self.grupo_icms:
return None
#
        # We mark all tags as not required
#
self.grupo_icms.modBC.obrigatorio = False
self.grupo_icms.vBC.obrigatorio = False
self.grupo_icms.pRedBC.obrigatorio = False
self.grupo_icms.pICMS.obrigatorio = False
self.grupo_icms.vICMS.obrigatorio = False
self.grupo_icms.modBCST.obrigatorio = False
self.grupo_icms.pMVAST.obrigatorio = False
self.grupo_icms.pRedBCST.obrigatorio = False
self.grupo_icms.vBCST.obrigatorio = False
self.grupo_icms.pICMSST.obrigatorio = False
self.grupo_icms.vICMSST.obrigatorio = False
self.grupo_icms.motDesICMS.obrigatorio = False
self.grupo_icms.vBCSTRet.obrigatorio = False
self.grupo_icms.vICMSSTRet.obrigatorio = False
self.grupo_icms.vBCSTDest.obrigatorio = False
self.grupo_icms.vICMSSTDest.obrigatorio = False
self.grupo_icms.UFST.obrigatorio = False
self.grupo_icms.pBCOp.obrigatorio = False
#
        # For safety, we zero out the values of the ICMS group tags
        # when redefining the tax situation code
#
self.grupo_icms.modBC.valor = 3
self.grupo_icms.vBC.valor = u'0.00'
self.grupo_icms.pRedBC.valor = u'0.00'
self.grupo_icms.pICMS.valor = u'0.00'
self.grupo_icms.vICMS.valor = u'0.00'
self.grupo_icms.modBCST.valor = 4
self.grupo_icms.pMVAST.valor = u'0.00'
self.grupo_icms.pRedBCST.valor = u'0.00'
self.grupo_icms.vBCST.valor = u'0.00'
self.grupo_icms.pICMSST.valor = u'0.00'
self.grupo_icms.vICMSST.valor = u'0.00'
self.grupo_icms.motDesICMS.valor = 0
self.grupo_icms.vBCSTRet.valor = u'0.00'
self.grupo_icms.vICMSSTRet.valor = u'0.00'
self.grupo_icms.vBCSTDest.valor = u'0.00'
self.grupo_icms.vICMSSTDest.valor = u'0.00'
self.grupo_icms.UFST.valor = u''
self.grupo_icms.pBCOp.valor = u'0.00'
#
        # For each tax situation code (CST), we redefine the root
        # and the required flags of the ICMS group tags
#
if self.valor == u'00':
self.grupo_icms.nome_tag = u'ICMS00'
self.grupo_icms.raiz_tag = u'//det/imposto/ICMS/ICMS00'
self.grupo_icms.modBC.obrigatorio = True
self.grupo_icms.vBC.obrigatorio = True
self.grupo_icms.pICMS.obrigatorio = True
self.grupo_icms.vICMS.obrigatorio = True
elif self.valor == u'10':
self.grupo_icms.modBC.obrigatorio = True
self.grupo_icms.vBC.obrigatorio = True
self.grupo_icms.pICMS.obrigatorio = True
self.grupo_icms.vICMS.obrigatorio = True
self.grupo_icms.modBCST.obrigatorio = True
self.grupo_icms.vBCST.obrigatorio = True
self.grupo_icms.pICMSST.obrigatorio = True
self.grupo_icms.vICMSST.obrigatorio = True
if not self.grupo_icms.partilha:
self.grupo_icms.nome_tag = u'ICMS10'
self.grupo_icms.raiz_tag = u'//det/imposto/ICMS/ICMS10'
else:
self.grupo_icms.nome_tag = u'ICMSPart'
self.grupo_icms.raiz_tag = u'//det/imposto/ICMS/ICMSPart'
self.grupo_icms.pBCOp.obrigatorio = True
self.grupo_icms.UFST.obrigatorio = True
elif self.valor == u'20':
self.grupo_icms.nome_tag = u'ICMS20'
self.grupo_icms.raiz_tag = u'//det/imposto/ICMS/ICMS20'
self.grupo_icms.modBC.obrigatorio = True
self.grupo_icms.vBC.obrigatorio = True
self.grupo_icms.pRedBC.obrigatorio = True
self.grupo_icms.pICMS.obrigatorio = True
self.grupo_icms.vICMS.obrigatorio = True
elif self.valor == u'30':
self.grupo_icms.nome_tag = u'ICMS30'
self.grupo_icms.raiz_tag | |
# nuitka/codegen/CodeGeneration.py
# Copyright 2021, <NAME>, mailto:<EMAIL>
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" The code generation.
No language specifics at all are supposed to be present here. Instead it is
using primitives from the given generator to build code sequences (list of
strings).
As such this is the place that knows how to take a condition and two code
branches and make a code block out of it. But it doesn't contain any target
language syntax.
"""
from nuitka.build.DataComposerInterface import deriveModuleConstantsBlobName
from . import Contexts
from .AsyncgenCodes import (
generateMakeAsyncgenObjectCode,
getAsyncgenObjectCode,
getAsyncgenObjectDeclCode,
)
from .AttributeCodes import (
generateAssignmentAttributeCode,
generateAttributeCheckCode,
generateAttributeLookupCode,
generateAttributeLookupSpecialCode,
generateBuiltinGetattrCode,
generateBuiltinHasattrCode,
generateBuiltinSetattrCode,
generateDelAttributeCode,
)
from .BranchCodes import generateBranchCode
from .BuiltinCodes import (
generateBuiltinAbsCode,
generateBuiltinAnonymousRefCode,
generateBuiltinBinCode,
generateBuiltinBoolCode,
generateBuiltinBytearray1Code,
generateBuiltinBytearray3Code,
generateBuiltinClassmethodCode,
generateBuiltinComplex1Code,
generateBuiltinComplex2Code,
generateBuiltinFloatCode,
generateBuiltinHexCode,
generateBuiltinOctCode,
generateBuiltinOpenCode,
generateBuiltinRange1Code,
generateBuiltinRange2Code,
generateBuiltinRange3Code,
generateBuiltinRefCode,
generateBuiltinStaticmethodCode,
generateBuiltinSum1Code,
generateBuiltinSum2Code,
generateBuiltinType1Code,
generateBuiltinType3Code,
generateBuiltinXrange1Code,
generateBuiltinXrange2Code,
generateBuiltinXrange3Code,
)
from .CallCodes import generateCallCode, getCallsCode, getCallsDecls
from .ClassCodes import generateBuiltinSuperCode, generateSelectMetaclassCode
from .CodeHelpers import setExpressionDispatchDict, setStatementDispatchDict
from .ComparisonCodes import (
generateBuiltinIsinstanceCode,
generateBuiltinIssubclassCode,
generateComparisonExpressionCode,
)
from .ConditionalCodes import (
generateConditionalAndOrCode,
generateConditionalCode,
)
from .ConstantCodes import (
generateConstantEllipsisReferenceCode,
generateConstantFalseReferenceCode,
generateConstantNoneReferenceCode,
generateConstantReferenceCode,
generateConstantTrueReferenceCode,
getConstantsDefinitionCode,
)
from .CoroutineCodes import (
generateAsyncIterCode,
generateAsyncNextCode,
generateAsyncWaitCode,
generateMakeCoroutineObjectCode,
getCoroutineObjectCode,
getCoroutineObjectDeclCode,
)
from .DictCodes import (
generateBuiltinDictCode,
generateDictionaryCreationCode,
generateDictOperationGetCode,
generateDictOperationInCode,
generateDictOperationRemoveCode,
generateDictOperationSetCode,
generateDictOperationSetCodeKeyValue,
generateDictOperationUpdateCode,
)
from .EvalCodes import (
generateBuiltinCompileCode,
generateEvalCode,
generateExecCode,
generateExecfileCode,
generateLocalsDictSyncCode,
)
from .ExceptionCodes import (
generateBuiltinMakeExceptionCode,
generateExceptionCaughtTracebackCode,
generateExceptionCaughtTypeCode,
generateExceptionCaughtValueCode,
generateExceptionPublishCode,
generateExceptionRefCode,
)
from .ExpressionCodes import (
generateExpressionOnlyCode,
generateSideEffectsCode,
)
from .FrameCodes import (
generateFramePreserveExceptionCode,
generateFrameRestoreExceptionCode,
)
from .FunctionCodes import (
generateFunctionCallCode,
generateFunctionCreationCode,
generateFunctionErrorStrCode,
generateFunctionOutlineCode,
getExportScopeCode,
getFunctionCode,
getFunctionDirectDecl,
)
from .GeneratorCodes import (
generateMakeGeneratorObjectCode,
getGeneratorObjectCode,
getGeneratorObjectDeclCode,
)
from .GlobalsLocalsCodes import (
generateBuiltinDir1Code,
generateBuiltinGlobalsCode,
generateBuiltinLocalsCode,
generateBuiltinLocalsRefCode,
generateBuiltinVarsCode,
)
from .IdCodes import generateBuiltinHashCode, generateBuiltinIdCode
from .ImportCodes import (
generateBuiltinImportCode,
generateImportModuleHardCode,
generateImportModuleNameHardCode,
generateImportNameCode,
generateImportStarCode,
)
from .IntegerCodes import (
generateBuiltinInt1Code,
generateBuiltinInt2Code,
generateBuiltinLong1Code,
generateBuiltinLong2Code,
)
from .IteratorCodes import (
generateBuiltinAllCode,
generateBuiltinAnyCode,
generateBuiltinIter1Code,
generateBuiltinIter2Code,
generateBuiltinIterForUnpackCode,
generateBuiltinLenCode,
generateBuiltinNext1Code,
generateBuiltinNext2Code,
generateSpecialUnpackCode,
generateUnpackCheckCode,
)
from .ListCodes import (
generateBuiltinListCode,
generateListCreationCode,
generateListOperationAppendCode,
generateListOperationExtendCode,
generateListOperationPopCode,
)
from .LocalsDictCodes import (
generateLocalsDictDelCode,
generateLocalsDictSetCode,
generateLocalsDictVariableCheckCode,
generateLocalsDictVariableRefCode,
generateLocalsDictVariableRefOrFallbackCode,
generateReleaseLocalsDictCode,
generateSetLocalsDictCode,
)
from .LoopCodes import (
generateLoopBreakCode,
generateLoopCode,
generateLoopContinueCode,
)
from .ModuleCodes import (
generateModuleAttributeCode,
generateModuleAttributeFileCode,
generateNuitkaLoaderCreationCode,
getModuleCode,
)
from .OperationCodes import (
generateOperationBinaryCode,
generateOperationNotCode,
generateOperationUnaryCode,
)
from .PrintCodes import generatePrintNewlineCode, generatePrintValueCode
from .RaisingCodes import (
generateRaiseCode,
generateRaiseExpressionCode,
generateReraiseCode,
)
from .ReturnCodes import (
generateGeneratorReturnNoneCode,
generateGeneratorReturnValueCode,
generateReturnCode,
generateReturnConstantCode,
generateReturnedValueCode,
)
from .SetCodes import (
generateBuiltinFrozensetCode,
generateBuiltinSetCode,
generateSetCreationCode,
generateSetLiteralCreationCode,
generateSetOperationAddCode,
generateSetOperationUpdateCode,
)
from .SliceCodes import (
generateAssignmentSliceCode,
generateBuiltinSlice1Code,
generateBuiltinSlice2Code,
generateBuiltinSlice3Code,
generateDelSliceCode,
generateSliceLookupCode,
)
from .StringCodes import (
generateBuiltinAsciiCode,
generateBuiltinBytes1Code,
generateBuiltinBytes3Code,
generateBuiltinChrCode,
generateBuiltinFormatCode,
generateBuiltinOrdCode,
generateBuiltinStrCode,
generateBuiltinUnicodeCode,
generateStringContenationCode,
)
from .SubscriptCodes import (
generateAssignmentSubscriptCode,
generateDelSubscriptCode,
generateSubscriptLookupCode,
)
from .TryCodes import generateTryCode
from .TupleCodes import generateBuiltinTupleCode, generateTupleCreationCode
from .VariableCodes import (
generateAssignmentVariableCode,
generateDelVariableCode,
generateVariableReferenceCode,
generateVariableReleaseCode,
)
from .YieldCodes import (
generateYieldCode,
generateYieldFromCode,
generateYieldFromWaitableCode,
)
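# Function code already generated in this process, keyed by the function's
# code name; checked in generateFunctionBodyCode to avoid regenerating.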
_generated_functions = {}
def generateFunctionBodyCode(function_body, context):
# TODO: Generate both codes, and base direct/etc. decisions on context.
# pylint: disable=too-many-branches
function_identifier = function_body.getCodeName()
if function_identifier in _generated_functions:
return _generated_functions[function_identifier]
if function_body.isExpressionGeneratorObjectBody():
function_context = Contexts.PythonGeneratorObjectContext(
parent=context, function=function_body
)
elif function_body.isExpressionClassBody():
function_context = Contexts.PythonFunctionDirectContext(
parent=context, function=function_body
)
elif function_body.isExpressionCoroutineObjectBody():
function_context = Contexts.PythonCoroutineObjectContext(
parent=context, function=function_body
)
elif function_body.isExpressionAsyncgenObjectBody():
function_context = Contexts.PythonAsyncgenObjectContext(
parent=context, function=function_body
)
elif function_body.needsCreation():
function_context = Contexts.PythonFunctionCreatedContext(
parent=context, function=function_body
)
else:
function_context = Contexts.PythonFunctionDirectContext(
parent=context, function=function_body
)
needs_exception_exit = function_body.mayRaiseException(BaseException)
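    # Only generate an exception exit for the function if its body can raise.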
if function_body.isExpressionGeneratorObjectBody():
function_code = getGeneratorObjectCode(
context=function_context,
function_identifier=function_identifier,
closure_variables=function_body.getClosureVariables(),
user_variables=function_body.getUserLocalVariables(),
outline_variables=function_body.getOutlineLocalVariables(),
temp_variables=function_body.getTempVariables(),
needs_exception_exit=needs_exception_exit,
needs_generator_return=function_body.needsGeneratorReturnExit(),
)
function_decl = getGeneratorObjectDeclCode(
function_identifier=function_identifier,
closure_variables=function_body.getClosureVariables(),
)
elif function_body.isExpressionCoroutineObjectBody():
function_code = getCoroutineObjectCode(
context=function_context,
function_identifier=function_identifier,
closure_variables=function_body.getClosureVariables(),
user_variables=function_body.getUserLocalVariables(),
outline_variables=function_body.getOutlineLocalVariables(),
temp_variables=function_body.getTempVariables(),
needs_exception_exit=needs_exception_exit,
needs_generator_return=function_body.needsGeneratorReturnExit(),
)
function_decl = getCoroutineObjectDeclCode(
function_identifier=function_body.getCodeName(),
closure_variables=function_body.getClosureVariables(),
)
elif function_body.isExpressionAsyncgenObjectBody():
function_code = getAsyncgenObjectCode(
context=function_context,
function_identifier=function_identifier,
closure_variables=function_body.getClosureVariables(),
user_variables=function_body.getUserLocalVariables(),
outline_variables=function_body.getOutlineLocalVariables(),
temp_variables=function_body.getTempVariables(),
needs_exception_exit=needs_exception_exit,
needs_generator_return=function_body.needsGeneratorReturnExit(),
)
function_decl = getAsyncgenObjectDeclCode(
function_identifier=function_body.getCodeName(),
closure_variables=function_body.getClosureVariables(),
)
elif function_body.isExpressionClassBody():
function_code = getFunctionCode(
context=function_context,
function_identifier=function_identifier,
parameters=None,
closure_variables=function_body.getClosureVariables(),
user_variables=function_body.getUserLocalVariables()
+ function_body.getOutlineLocalVariables(),
temp_variables=function_body.getTempVariables(),
function_doc=function_body.getDoc(),
needs_exception_exit=needs_exception_exit,
file_scope=getExportScopeCode(cross_module=False),
)
function_decl = getFunctionDirectDecl(
function_identifier=function_identifier,
closure_variables=function_body.getClosureVariables(),
file_scope=getExportScopeCode(cross_module=False),
context=function_context,
)
else:
function_code = getFunctionCode(
context=function_context,
function_identifier=function_identifier,
parameters=function_body.getParameters(),
closure_variables=function_body.getClosureVariables(),
user_variables=function_body.getUserLocalVariables()
+ function_body.getOutlineLocalVariables(),
temp_variables=function_body.getTempVariables(),
function_doc=function_body.getDoc(),
needs_exception_exit=needs_exception_exit,
file_scope=getExportScopeCode(
cross_module=function_body.isCrossModuleUsed()
),
)
if function_body.needsDirectCall():
function_decl = getFunctionDirectDecl(
function_identifier=function_identifier,
closure_variables=function_body.getClosureVariables(),
file_scope=getExportScopeCode(
cross_module=function_body.isCrossModuleUsed()
),
context=function_context,
)
else:
function_decl = None
return function_code, function_decl
def generateModuleCode(module, data_filename):
# As this not only creates all modules, but also functions, it deals
# also with its functions.
assert module.isCompiledPythonModule(), module
context = Contexts.PythonModuleContext(
module=module,
data_filename=data_filename,
)
context.setExceptionEscape("module_exception_exit")
function_decl_codes = []
function_body_codes = []
for function_body in module.getUsedFunctions():
# Constant function returners get no code.
(
is_constant_returning,
_constant_return_value,
) = function_body.getConstantReturnValue()
if is_constant_returning:
continue
function_code, function_decl = generateFunctionBodyCode(
function_body=function_body, context=context
)
function_body_codes.append(function_code)
if function_decl is not None:
function_decl_codes.append(function_decl)
# These are for functions used from other modules. Due to cyclic
# dependencies, we cannot rely on those to be already created.
for function_body in module.getCrossUsedFunctions():
assert function_body.isCrossModuleUsed()
function_decl = getFunctionDirectDecl(
function_identifier=function_body.getCodeName(),
closure_variables=function_body.getClosureVariables(),
file_scope=getExportScopeCode(
cross_module=function_body.isCrossModuleUsed()
),
context=Contexts.PythonFunctionDirectContext(
parent=context, function=function_body
),
)
function_decl_codes.append(function_decl)
return getModuleCode(
module=module,
function_decl_codes=function_decl_codes,
function_body_codes=function_body_codes,
module_const_blob_name=deriveModuleConstantsBlobName(data_filename),
context=context,
)
def generateHelpersCode():
calls_decl_code = getCallsDecls()
calls_body_code = getCallsCode()
constants_header_code, constants_body_code = getConstantsDefinitionCode()
return (
calls_decl_code,
calls_body_code,
constants_header_code,
constants_body_code,
)
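# Dispatch tables: map each expression / statement node kind to the helper
# that generates its code.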
setExpressionDispatchDict(
{
"EXPRESSION_ATTRIBUTE_CHECK": generateAttributeCheckCode,
"EXPRESSION_ATTRIBUTE_LOOKUP": generateAttributeLookupCode,
"EXPRESSION_ATTRIBUTE_LOOKUP_SPECIAL": generateAttributeLookupSpecialCode,
"EXPRESSION_BUILTIN_SLICE3": generateBuiltinSlice3Code,
"EXPRESSION_BUILTIN_SLICE2": generateBuiltinSlice2Code,
"EXPRESSION_BUILTIN_SLICE1": generateBuiltinSlice1Code,
"EXPRESSION_BUILTIN_HASH": generateBuiltinHashCode,
"EXPRESSION_BUILTIN_ID": generateBuiltinIdCode,
"EXPRESSION_BUILTIN_COMPILE": generateBuiltinCompileCode,
"EXPRESSION_BUILTIN_EXECFILE": generateExecfileCode,
"EXPRESSION_BUILTIN_EVAL": generateEvalCode,
"EXPRESSION_BUILTIN_EXEC": generateEvalCode,
"EXPRESSION_BUILTIN_ITER_FOR_UNPACK": generateBuiltinIterForUnpackCode,
"EXPRESSION_BUILTIN_ITER1": generateBuiltinIter1Code,
"EXPRESSION_BUILTIN_ITER2": generateBuiltinIter2Code,
"EXPRESSION_BUILTIN_NEXT1": generateBuiltinNext1Code,
"EXPRESSION_BUILTIN_NEXT2": generateBuiltinNext2Code,
"EXPRESSION_BUILTIN_SUM1": generateBuiltinSum1Code,
"EXPRESSION_BUILTIN_SUM2": generateBuiltinSum2Code,
"EXPRESSION_BUILTIN_TYPE1": generateBuiltinType1Code,
"EXPRESSION_BUILTIN_TYPE3": generateBuiltinType3Code,
"EXPRESSION_BUILTIN_IMPORT": generateBuiltinImportCode,
"EXPRESSION_BUILTIN_BOOL": generateBuiltinBoolCode,
"EXPRESSION_BUILTIN_BYTEARRAY1": generateBuiltinBytearray1Code,
"EXPRESSION_BUILTIN_BYTEARRAY3": generateBuiltinBytearray3Code,
"EXPRESSION_BUILTIN_INT1": generateBuiltinInt1Code,
"EXPRESSION_BUILTIN_INT2": generateBuiltinInt2Code,
"EXPRESSION_BUILTIN_LONG1": generateBuiltinLong1Code,
"EXPRESSION_BUILTIN_LONG2": generateBuiltinLong2Code,
"EXPRESSION_BUILTIN_FLOAT": generateBuiltinFloatCode,
"EXPRESSION_BUILTIN_COMPLEX1": generateBuiltinComplex1Code,
"EXPRESSION_BUILTIN_COMPLEX2": generateBuiltinComplex2Code,
"EXPRESSION_BUILTIN_LEN": generateBuiltinLenCode,
"EXPRESSION_BUILTIN_STR_P2": generateBuiltinStrCode,
"EXPRESSION_BUILTIN_STR_P3": generateBuiltinStrCode,
"EXPRESSION_BUILTIN_BYTES1": generateBuiltinBytes1Code,
"EXPRESSION_BUILTIN_BYTES3": generateBuiltinBytes3Code,
"EXPRESSION_BUILTIN_UNICODE_P2": generateBuiltinUnicodeCode,
"EXPRESSION_BUILTIN_CHR": generateBuiltinChrCode,
"EXPRESSION_BUILTIN_ORD": generateBuiltinOrdCode,
"EXPRESSION_BUILTIN_BIN": generateBuiltinBinCode,
"EXPRESSION_BUILTIN_OCT": generateBuiltinOctCode,
"EXPRESSION_BUILTIN_HEX": generateBuiltinHexCode,
"EXPRESSION_BUILTIN_TUPLE": generateBuiltinTupleCode,
"EXPRESSION_BUILTIN_LIST": generateBuiltinListCode,
"EXPRESSION_BUILTIN_SET": generateBuiltinSetCode,
"EXPRESSION_BUILTIN_ANY": generateBuiltinAnyCode,
"EXPRESSION_BUILTIN_FROZENSET": generateBuiltinFrozensetCode,
"EXPRESSION_BUILTIN_ALL": generateBuiltinAllCode,
"EXPRESSION_BUILTIN_DICT": generateBuiltinDictCode,
"EXPRESSION_BUILTIN_LOCALS_COPY": generateBuiltinLocalsCode,
"EXPRESSION_BUILTIN_LOCALS_UPDATED": generateBuiltinLocalsCode,
"EXPRESSION_BUILTIN_LOCALS_REF": generateBuiltinLocalsRefCode,
"EXPRESSION_BUILTIN_GLOBALS": generateBuiltinGlobalsCode,
"EXPRESSION_BUILTIN_SUPER0": generateBuiltinSuperCode,
"EXPRESSION_BUILTIN_SUPER2": generateBuiltinSuperCode,
"EXPRESSION_BUILTIN_ISINSTANCE": generateBuiltinIsinstanceCode,
"EXPRESSION_BUILTIN_ISSUBCLASS": generateBuiltinIssubclassCode,
"EXPRESSION_BUILTIN_DIR1": generateBuiltinDir1Code,
"EXPRESSION_BUILTIN_VARS": generateBuiltinVarsCode,
"EXPRESSION_BUILTIN_HASATTR": generateBuiltinHasattrCode,
"EXPRESSION_BUILTIN_GETATTR": generateBuiltinGetattrCode,
"EXPRESSION_BUILTIN_SETATTR": generateBuiltinSetattrCode,
"EXPRESSION_BUILTIN_OPEN": generateBuiltinOpenCode,
"EXPRESSION_BUILTIN_STATICMETHOD": generateBuiltinStaticmethodCode,
"EXPRESSION_BUILTIN_CLASSMETHOD": generateBuiltinClassmethodCode,
"EXPRESSION_BUILTIN_RANGE1": generateBuiltinRange1Code,
"EXPRESSION_BUILTIN_RANGE2": generateBuiltinRange2Code,
"EXPRESSION_BUILTIN_RANGE3": generateBuiltinRange3Code,
"EXPRESSION_BUILTIN_XRANGE1": generateBuiltinXrange1Code,
"EXPRESSION_BUILTIN_XRANGE2": generateBuiltinXrange2Code,
"EXPRESSION_BUILTIN_XRANGE3": generateBuiltinXrange3Code,
"EXPRESSION_BUILTIN_MAKE_EXCEPTION": generateBuiltinMakeExceptionCode,
"EXPRESSION_BUILTIN_MAKE_EXCEPTION_IMPORT_ERROR": generateBuiltinMakeExceptionCode,
"EXPRESSION_BUILTIN_REF": generateBuiltinRefCode,
"EXPRESSION_BUILTIN_WITH_CONTEXT_REF": generateBuiltinRefCode,
"EXPRESSION_BUILTIN_EXCEPTION_REF": generateExceptionRefCode,
"EXPRESSION_BUILTIN_ANONYMOUS_REF": generateBuiltinAnonymousRefCode,
"EXPRESSION_CAUGHT_EXCEPTION_TYPE_REF": generateExceptionCaughtTypeCode,
"EXPRESSION_CAUGHT_EXCEPTION_VALUE_REF": generateExceptionCaughtValueCode,
"EXPRESSION_CAUGHT_EXCEPTION_TRACEBACK_REF": generateExceptionCaughtTracebackCode,
"EXPRESSION_CALL_EMPTY": generateCallCode,
"EXPRESSION_CALL_KEYWORDS_ONLY": generateCallCode,
"EXPRESSION_CALL_NO_KEYWORDS": generateCallCode,
"EXPRESSION_CALL": generateCallCode,
"EXPRESSION_CONSTANT_NONE_REF": generateConstantNoneReferenceCode,
"EXPRESSION_CONSTANT_TRUE_REF": generateConstantTrueReferenceCode,
"EXPRESSION_CONSTANT_FALSE_REF": generateConstantFalseReferenceCode,
"EXPRESSION_CONSTANT_STR_REF": generateConstantReferenceCode,
"EXPRESSION_CONSTANT_STR_EMPTY_REF": generateConstantReferenceCode,
"EXPRESSION_CONSTANT_UNICODE_REF": generateConstantReferenceCode,
"EXPRESSION_CONSTANT_UNICODE_EMPTY_REF": generateConstantReferenceCode,
"EXPRESSION_CONSTANT_BYTES_REF": generateConstantReferenceCode,
"EXPRESSION_CONSTANT_BYTES_EMPTY_REF": generateConstantReferenceCode,
"EXPRESSION_CONSTANT_INT_REF": generateConstantReferenceCode,
"EXPRESSION_CONSTANT_LONG_REF": generateConstantReferenceCode,
"EXPRESSION_CONSTANT_FLOAT_REF": generateConstantReferenceCode,
"EXPRESSION_CONSTANT_COMPLEX_REF": generateConstantReferenceCode,
"EXPRESSION_CONSTANT_ELLIPSIS_REF": generateConstantEllipsisReferenceCode,
"EXPRESSION_CONSTANT_DICT_REF": generateConstantReferenceCode,
"EXPRESSION_CONSTANT_DICT_EMPTY_REF": generateConstantReferenceCode,
"EXPRESSION_CONSTANT_TUPLE_REF": generateConstantReferenceCode,
"EXPRESSION_CONSTANT_TUPLE_EMPTY_REF": generateConstantReferenceCode,
"EXPRESSION_CONSTANT_TUPLE_MUTABLE_REF": generateConstantReferenceCode,
"EXPRESSION_CONSTANT_LIST_REF": generateConstantReferenceCode,
"EXPRESSION_CONSTANT_LIST_EMPTY_REF": generateConstantReferenceCode,
"EXPRESSION_CONSTANT_SET_REF": generateConstantReferenceCode,
"EXPRESSION_CONSTANT_SET_EMPTY_REF": generateConstantReferenceCode,
"EXPRESSION_CONSTANT_FROZENSET_REF": generateConstantReferenceCode,
"EXPRESSION_CONSTANT_FROZENSET_EMPTY_REF": generateConstantReferenceCode,
"EXPRESSION_CONSTANT_SLICE_REF": generateConstantReferenceCode,
"EXPRESSION_CONSTANT_XRANGE_REF": generateConstantReferenceCode,
"EXPRESSION_CONSTANT_TYPE_REF": generateConstantReferenceCode,
"EXPRESSION_CONSTANT_BYTEARRAY_REF": generateConstantReferenceCode,
"EXPRESSION_CONDITIONAL": generateConditionalCode,
"EXPRESSION_CONDITIONAL_OR": generateConditionalAndOrCode,
"EXPRESSION_CONDITIONAL_AND": generateConditionalAndOrCode,
"EXPRESSION_COMPARISON": generateComparisonExpressionCode,
"EXPRESSION_COMPARISON_IS": generateComparisonExpressionCode,
"EXPRESSION_COMPARISON_IS_NOT": generateComparisonExpressionCode,
"EXPRESSION_COMPARISON_IN": generateComparisonExpressionCode,
"EXPRESSION_COMPARISON_NOT_IN": generateComparisonExpressionCode,
"EXPRESSION_COMPARISON_EXCEPTION_MATCH": generateComparisonExpressionCode,
"EXPRESSION_COMPARISON_EXCEPTION_MISMATCH": generateComparisonExpressionCode,
"EXPRESSION_COMPARISON_LT": generateComparisonExpressionCode,
"EXPRESSION_COMPARISON_LTE": generateComparisonExpressionCode,
"EXPRESSION_COMPARISON_GT": generateComparisonExpressionCode,
"EXPRESSION_COMPARISON_GTE": generateComparisonExpressionCode,
"EXPRESSION_COMPARISON_EQ": generateComparisonExpressionCode,
"EXPRESSION_COMPARISON_NEQ": generateComparisonExpressionCode,
"EXPRESSION_DICT_OPERATION_GET": generateDictOperationGetCode,
"EXPRESSION_DICT_OPERATION_IN": generateDictOperationInCode,
"EXPRESSION_DICT_OPERATION_NOT_IN": generateDictOperationInCode,
"EXPRESSION_FUNCTION_CREATION": generateFunctionCreationCode,
"EXPRESSION_FUNCTION_CALL": generateFunctionCallCode,
"EXPRESSION_FUNCTION_ERROR_STR": generateFunctionErrorStrCode,
"EXPRESSION_IMPORT_MODULE_HARD": generateImportModuleHardCode,
"EXPRESSION_IMPORT_MODULE_NAME_HARD": generateImportModuleNameHardCode,
"EXPRESSION_IMPORT_NAME": generateImportNameCode,
"EXPRESSION_LIST_OPERATION_EXTEND": generateListOperationExtendCode,
"EXPRESSION_LIST_OPERATION_EXTEND_FOR_UNPACK": generateListOperationExtendCode,
"EXPRESSION_LIST_OPERATION_POP": generateListOperationPopCode,
"EXPRESSION_MODULE_ATTRIBUTE_FILE_REF": generateModuleAttributeFileCode,
"EXPRESSION_MODULE_ATTRIBUTE_NAME_REF": generateModuleAttributeCode,
"EXPRESSION_MODULE_ATTRIBUTE_PACKAGE_REF": generateModuleAttributeCode,
"EXPRESSION_MODULE_ATTRIBUTE_LOADER_REF": generateModuleAttributeCode,
"EXPRESSION_MODULE_ATTRIBUTE_SPEC_REF": generateModuleAttributeCode,
"EXPRESSION_MAKE_GENERATOR_OBJECT": generateMakeGeneratorObjectCode,
"EXPRESSION_MAKE_COROUTINE_OBJECT": generateMakeCoroutineObjectCode,
"EXPRESSION_MAKE_ASYNCGEN_OBJECT": generateMakeAsyncgenObjectCode,
"EXPRESSION_MAKE_SET": generateSetCreationCode,
"EXPRESSION_MAKE_SET_LITERAL": generateSetLiteralCreationCode,
"EXPRESSION_MAKE_TUPLE": generateTupleCreationCode,
"EXPRESSION_MAKE_LIST": generateListCreationCode,
"EXPRESSION_MAKE_DICT": generateDictionaryCreationCode,
"EXPRESSION_OPERATION_BINARY_ADD": generateOperationBinaryCode,
"EXPRESSION_OPERATION_BINARY_SUB": generateOperationBinaryCode,
"EXPRESSION_OPERATION_BINARY_MULT": generateOperationBinaryCode,
"EXPRESSION_OPERATION_BINARY_FLOOR_DIV": generateOperationBinaryCode,
"EXPRESSION_OPERATION_BINARY_OLD_DIV": generateOperationBinaryCode,
"EXPRESSION_OPERATION_BINARY_TRUE_DIV": generateOperationBinaryCode,
"EXPRESSION_OPERATION_BINARY_DIVMOD": generateOperationBinaryCode,
"EXPRESSION_OPERATION_BINARY_MOD": generateOperationBinaryCode,
"EXPRESSION_OPERATION_BINARY_POW": generateOperationBinaryCode,
"EXPRESSION_OPERATION_BINARY_LSHIFT": generateOperationBinaryCode,
"EXPRESSION_OPERATION_BINARY_RSHIFT": generateOperationBinaryCode,
"EXPRESSION_OPERATION_BINARY_BIT_OR": generateOperationBinaryCode,
"EXPRESSION_OPERATION_BINARY_BIT_AND": generateOperationBinaryCode,
"EXPRESSION_OPERATION_BINARY_BIT_XOR": generateOperationBinaryCode,
"EXPRESSION_OPERATION_BINARY_MAT_MULT": generateOperationBinaryCode,
"EXPRESSION_OPERATION_INPLACE_ADD": generateOperationBinaryCode,
"EXPRESSION_OPERATION_INPLACE_SUB": generateOperationBinaryCode,
"EXPRESSION_OPERATION_INPLACE_MULT": generateOperationBinaryCode,
"EXPRESSION_OPERATION_INPLACE_FLOOR_DIV": generateOperationBinaryCode,
"EXPRESSION_OPERATION_INPLACE_OLD_DIV": generateOperationBinaryCode,
"EXPRESSION_OPERATION_INPLACE_TRUE_DIV": generateOperationBinaryCode,
"EXPRESSION_OPERATION_INPLACE_MOD": generateOperationBinaryCode,
"EXPRESSION_OPERATION_INPLACE_POW": generateOperationBinaryCode,
"EXPRESSION_OPERATION_INPLACE_LSHIFT": generateOperationBinaryCode,
"EXPRESSION_OPERATION_INPLACE_RSHIFT": generateOperationBinaryCode,
"EXPRESSION_OPERATION_INPLACE_BIT_OR": generateOperationBinaryCode,
"EXPRESSION_OPERATION_INPLACE_BIT_AND": generateOperationBinaryCode,
"EXPRESSION_OPERATION_INPLACE_BIT_XOR": generateOperationBinaryCode,
"EXPRESSION_OPERATION_INPLACE_MAT_MULT": generateOperationBinaryCode,
"EXPRESSION_OPERATION_UNARY_REPR": generateOperationUnaryCode,
"EXPRESSION_OPERATION_UNARY_SUB": generateOperationUnaryCode,
"EXPRESSION_OPERATION_UNARY_ADD": generateOperationUnaryCode,
"EXPRESSION_OPERATION_UNARY_INVERT": generateOperationUnaryCode,
"EXPRESSION_OPERATION_UNARY_ABS": generateBuiltinAbsCode,
"EXPRESSION_OPERATION_NOT": generateOperationNotCode,
"EXPRESSION_OUTLINE_BODY": generateFunctionOutlineCode,
"EXPRESSION_OUTLINE_FUNCTION": generateFunctionOutlineCode,
# TODO: Rename to make more clear it is an outline
"EXPRESSION_CLASS_BODY": generateFunctionOutlineCode,
"EXPRESSION_SUBSCRIPT_LOOKUP": generateSubscriptLookupCode,
"EXPRESSION_SLICE_LOOKUP": generateSliceLookupCode,
"EXPRESSION_SET_OPERATION_UPDATE": generateSetOperationUpdateCode,
"EXPRESSION_SIDE_EFFECTS": generateSideEffectsCode,
"EXPRESSION_SPECIAL_UNPACK": generateSpecialUnpackCode,
"EXPRESSION_TEMP_VARIABLE_REF": generateVariableReferenceCode,
"EXPRESSION_VARIABLE_REF": generateVariableReferenceCode,
"EXPRESSION_VARIABLE_OR_BUILTIN_REF": generateVariableReferenceCode,
"EXPRESSION_YIELD": generateYieldCode,
"EXPRESSION_YIELD_FROM": generateYieldFromCode,
"EXPRESSION_YIELD_FROM_WAITABLE": generateYieldFromWaitableCode,
"EXPRESSION_ASYNC_WAIT": generateAsyncWaitCode,
"EXPRESSION_ASYNC_WAIT_ENTER": generateAsyncWaitCode,
"EXPRESSION_ASYNC_WAIT_EXIT": generateAsyncWaitCode,
"EXPRESSION_ASYNC_ITER": generateAsyncIterCode,
"EXPRESSION_ASYNC_NEXT": generateAsyncNextCode,
"EXPRESSION_SELECT_METACLASS": generateSelectMetaclassCode,
"EXPRESSION_STRING_CONCATENATION": generateStringContenationCode,
"EXPRESSION_BUILTIN_FORMAT": generateBuiltinFormatCode,
"EXPRESSION_BUILTIN_ASCII": generateBuiltinAsciiCode,
"EXPRESSION_LOCALS_VARIABLE_CHECK": generateLocalsDictVariableCheckCode,
"EXPRESSION_LOCALS_VARIABLE_REF_OR_FALLBACK": generateLocalsDictVariableRefOrFallbackCode,
"EXPRESSION_LOCALS_VARIABLE_REF": generateLocalsDictVariableRefCode,
"EXPRESSION_RAISE_EXCEPTION": generateRaiseExpressionCode,
"EXPRESSION_NUITKA_LOADER_CREATION": generateNuitkaLoaderCreationCode,
}
)
setStatementDispatchDict(
{
"STATEMENT_ASSIGNMENT_VARIABLE": generateAssignmentVariableCode,
"STATEMENT_ASSIGNMENT_ATTRIBUTE": generateAssignmentAttributeCode,
"STATEMENT_ASSIGNMENT_SUBSCRIPT": generateAssignmentSubscriptCode,
"STATEMENT_ASSIGNMENT_SLICE": generateAssignmentSliceCode,
"STATEMENT_DEL_VARIABLE": generateDelVariableCode,
"STATEMENT_DEL_ATTRIBUTE": generateDelAttributeCode,
"STATEMENT_DEL_SUBSCRIPT": generateDelSubscriptCode,
"STATEMENT_DEL_SLICE": generateDelSliceCode,
"STATEMENT_DICT_OPERATION_REMOVE": generateDictOperationRemoveCode,
"STATEMENT_DICT_OPERATION_UPDATE": generateDictOperationUpdateCode,
"STATEMENT_RELEASE_VARIABLE": generateVariableReleaseCode,
"STATEMENT_EXPRESSION_ONLY": generateExpressionOnlyCode,
"STATEMENT_RETURN": generateReturnCode,
"STATEMENT_RETURN_TRUE": generateReturnConstantCode,
"STATEMENT_RETURN_FALSE": generateReturnConstantCode,
"STATEMENT_RETURN_NONE": | |
        (unless the underlying
        feature importances are categorical, in which case a list of
        DataFrames will be returned).
        If mean=True, then a pandas Series (or, in the case of
        underlying categorical feature importances, a list of Series)
        will be returned, holding the mean value across folds
        with all features whose value is 0 excluded.
        Note: To get the mean values without zeros excluded,
        just call .mean() on the result of this method
        with mean=False.
'''
fis = self._get_base_fis_list()
base = fis_to_df(fis)
# Proc. abs arg
if abs:
if isinstance(base, list):
base = [b.abs() for b in base]
else:
base = base.abs()
# If not mean, return as is
if not mean:
return base
# Categorical mean case
if isinstance(base, list):
return [mean_no_zeros(b) for b in base]
# Base mean case
return mean_no_zeros(base)
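    # Hedged usage sketch (not from the original source) of how the mean and
    # abs arguments documented above might be combined. `evaluator` stands for
    # an instance of this class returned by evaluate(); both the variable name
    # and the assumption that this getter is exposed as get_fis are
    # illustrative only.
    #
    #   per_fold = evaluator.get_fis(mean=False)   # DataFrame, one row per fold
    #                                              # (list of DataFrames if categorical)
    #   mean_all = per_fold.mean()                 # mean including zero values
    #   mean_nz = evaluator.get_fis(mean=True)     # zero-valued features excluded
    #   mean_abs = evaluator.get_fis(mean=True, abs=True)  # magnitudes only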
def get_inverse_fis(self, fis=None):
'''Try to inverse transform stored
feature importances (either beta weights or
automatically calculated feature importances)
to their original space.
.. warning::
If there are any underlying non-recoverable
transformations in the pipeline, this method
will fail! For example, if a PCA was applied,
then a reverse transformation cannot be computed.
This method can be especially helpful when using :class:`Loader`.
Returns
-------
inverse_fis : list of pandas Series
| The inverse feature importances will be returned
as a list, where each index of the list refers to
a fold of the cross-validation, and each element
of the list is either a pandas Series or a list
of pandas Series (in the case of a categorical
problem type where separate feature importances
were calculated for each class).
| If a :class:`Loader` was used, the returned Series
may contain multi-dimensional arrays instead of scalar
values, representing feature importances as transformed
back into the original loaded space / shape.
'''
# As list of series or list of list of series
if fis is None:
fis = self._get_base_fis_list()
# If passed in df format, convert first
# Drop any NaN also ~
# @ TODO handle categorical case ...
elif isinstance(fis, pd.DataFrame):
fis = [fis.loc[i].dropna() for i in range(len(fis))]
# Otherwise, assumes passed
inv_trans_fis = []
for i, fi in enumerate(fis):
# The estimator for this fold
estimator = self.estimators[i]
# Non-categorical case
if isinstance(fi, pd.Series):
inv_trans_fis.append(
estimator.inverse_transform_fis(fi))
# Categorical case
else:
cat_inv_fis =\
[estimator.inverse_transform_fis(f) for f in fi]
inv_trans_fis.append(cat_inv_fis)
return inv_trans_fis
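    # Hedged usage sketch: inverse-transforming stored feature importances back
    # to the input space. `evaluator` is assumed to be an instance of this
    # class created by evaluate(); the name is illustrative.
    #
    #   inv = evaluator.get_inverse_fis()
    #   fold0 = inv[0]        # pandas Series (or list of Series if categorical)
    #   fold0.sort_values()   # inspect which original features drove fold 0
    #
    # As warned in the docstring above, this will fail if the pipeline contains
    # a non-recoverable step such as PCA.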
def _get_val_fold_Xy(self, estimator, X_df,
y_df, fold, just_model=True,
nested_model=True):
# Get the X and y df's - assume self.val_subjects stores
# only subjects with non nan target variables
X_val_df = X_df.loc[self.val_subjects[fold]]
y_val_df = y_df.loc[self.val_subjects[fold]]
# Base as array, and all feat names
X_trans, feat_names = np.array(X_val_df), list(X_val_df)
# Transform the X df, casts to array if just_model.
if just_model:
# Calculate corresponding feat names
# with or without nested_model
feat_names =\
estimator.transform_feat_names(feat_names,
encoders=self.encoders_,
nested_model=nested_model)
# Also calculate X_trans, with and without nested model
X_trans = estimator.transform(X_trans,
transform_index=X_val_df.index,
nested_model=nested_model)
# If nested model, then we need to make sure to
# grab the nested final estimator
if nested_model:
estimator = estimator._nested_final_estimator
# Otherwise use the one deep final estimator
else:
estimator = estimator._final_estimator
return estimator, X_trans, np.array(y_val_df), feat_names
@doc(dataset=_base_docs['dataset'])
def permutation_importance(self, dataset=None,
n_repeats=10, scorer='default',
just_model=True, nested_model=True,
return_as='dfs', n_jobs=1, random_state='default'):
'''This function computes the permutation feature importances
from the base scikit-learn function
:func:`sklearn.inspection.permutation_importance`
Parameters
-----------
{dataset}
| If left as default=None, then will try to use
a shallow copy of the dataset passed to the original
evaluate call (assuming evaluate was run with store_data_ref=True).
::
default = None
n_repeats : int, optional
The number of times to randomly permute each feature.
::
default = 10
scorer : sklearn-style scoring, optional
Scorer to use. It can be a single sklearn style str,
or a callable.
If left as 'default' will use the first scorer defined when
evaluating the underlying estimator.
::
default = 'default'
just_model : bool, optional
            When set to True, the permutation feature importances
            will be computed using the final set of transformed features
            as passed when fitting the base model. This is recommended
            behavior because it means that the features do not need to
            be re-transformed through the full pipeline to evaluate each
            feature. If set to False, the features will be permuted in the
            original feature space (which may be useful in some contexts).
::
default = True
nested_model : bool, optional
In the case that `just_model` is set to True, there exists
in some cases the further option to use an even more transformed
set of features. For example, in the case where in the main pipeline
the final estimator is another pipeline, there could be more static
transformations applied in this second pipeline. If `nested_model` is
set to True, then it will attempt to apply these further nested
transformations in the same way as with just_model, feeding in
eventually an even further transformed set of features and even more
specific final estimator when calculating the permutation feature
importances.
By default, this value is True, so the calculated
feature importances here will correspond to the
saved `self.feat_names` in this object.
::
default = True
return_as : ['dfs', 'raw'], optional
This parameter controls if calculated permutation
feature importances should be returned as a DataFrame
with column names as the corresponding feature names,
            or if they should be returned as a list with the raw
            output from each fold, e.g., sklearn Bunch objects with
            attributes 'importances_mean', 'importances_std'
            and 'importances'.
            If returning as DataFrames is requested, then
            'importances_mean' and 'importances_std'
            will be returned, but not the raw 'importances'.
::
default = 'dfs'
n_jobs : int, optional
The number of jobs to use for this function. Note
that if the underlying estimator supports multiple jobs
during inference (predicting), and the original
problem_spec was set with multiple n_jobs then that original
behavior will still hold, and you may wish to keep this
            parameter as 1. On the other hand, if the base estimator
does not use multiple jobs, passing a higher value here
could greatly speed up computation.
::
default = 1
random_state : int, 'default' or None, optional
Pseudo-random number generator to control the permutations
of each feature.
If left as 'default' then use the random state defined
during the initial evaluation of the model. Otherwise, you may
pass an int for a different fixed random state or None
for explicitly no
random state.
::
default = 'default'
'''
# @TODO in case of just_model = False, won't pass along
# transform_index correctly to scorer.
from sklearn.inspection import permutation_importance
# Check dataset
dataset = self._dataset_check(dataset)
# Check estimators
self._estimators_check()
# If default scorer, take the first one
if scorer == 'default':
first = list(self.ps.scorer)[0]
scorer = self.ps.scorer[first]
self._print('Using scorer:', first, level=1)
# If default random_state use the one saved in
# original problem spec.
if random_state == 'default':
random_state = self.ps.random_state
# Get X and y from saved problem spec
X, y = dataset.get_Xy(self.ps)
# For each estimator
all_fis, all_feat_names = [], []
for fold, estimator in enumerate(self.estimators):
# Get correct estimator, X_val, y_val and feat_names
estimator, X_val, y_val, feat_names =\
self._get_val_fold_Xy(estimator, X_df=X, y_df=y,
fold=fold, just_model=just_model,
nested_model=nested_model)
all_feat_names.append(feat_names)
# Run the sklearn feature importances.
fis = permutation_importance(estimator, X_val, y_val,
scoring=scorer, n_repeats=n_repeats,
n_jobs=n_jobs,
random_state=random_state)
# Add to all fis
all_fis.append(fis)
# If raw, return as raw
if return_as == 'raw':
return all_fis
# Otherwise, use return df
mean_series, std_series = [], []
for fis, feat_names in zip(all_fis, all_feat_names):
mean_series.append(
fi_to_series(fis['importances_mean'], feat_names))
std_series.append(
fi_to_series(fis['importances_std'], feat_names))
# Return as sklearn bunch of DataFrames
return Bunch(importances_mean=fis_to_df(mean_series),
importances_std=fis_to_df(std_series))
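    # Hedged usage sketch for the method above. `evaluator` is assumed to be
    # the object returned by evaluate() (run with store_data_ref=True); the
    # name is illustrative, not part of the original source.
    #
    #   perm = evaluator.permutation_importance(n_repeats=25, n_jobs=4)
    #   perm['importances_mean']   # DataFrame: one row per fold, one column per feature
    #   perm['importances_std']    # matching standard deviations
    #
    #   raw = evaluator.permutation_importance(return_as='raw')
    #   raw[0].importances.shape   # (n_features, n_repeats) for fold 0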
@doc(dataset=_base_docs['dataset'])
def get_X_transform_df(self, dataset=None, fold=0, subjects='tr',
nested_model=True, trans_y=False):
'''This method is used as a helper for getting the transformed
input data for one of the saved models run during evaluate.
Parameters
-----------
{dataset}
| If left as default=None, then will try to use
a shallow copy of the dataset passed to the original
            evaluate call (assuming evaluate was run with store_data_ref=True).
import itertools
import numpy as np
from PartSegCore.segmentation.border_smoothing import IterativeVoteSmoothing, OpeningSmoothing, VoteSmoothing
from PartSegCore.segmentation.watershed import NeighType
class TestVoteSmoothing:
def test_cube_sides(self):
data = np.zeros((50, 50, 50), dtype=np.uint8)
data[2:-2, 2:-2, 2:-2] = 1
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.sides, "support_level": 1})
assert np.all(res == data)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.sides, "support_level": 3})
assert np.all(res == data)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.sides, "support_level": 4})
res2 = np.copy(data)
for pos in itertools.product([2, -3], repeat=3):
res2[pos] = 0
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.sides, "support_level": 5})
res2 = np.copy(data)
for pos in itertools.permutations([2, 2, -3, -3, slice(2, -2)], 3):
res2[pos] = 0
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.sides, "support_level": 6})
res2 = np.zeros(data.shape, dtype=data.dtype)
res2[3:-3, 3:-3, 3:-3] = 1
assert np.all(res2 == res)
def test_cube_edges(self):
data = np.zeros((50, 50, 50), dtype=np.uint8)
data[2:-2, 2:-2, 2:-2] = 1
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.edges, "support_level": 1})
assert np.all(res == data)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.edges, "support_level": 6})
assert np.all(res == data)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.edges, "support_level": 7})
res2 = np.copy(data)
for pos in itertools.product([2, -3], repeat=3):
res2[pos] = 0
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.edges, "support_level": 9})
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.edges, "support_level": 10})
res2 = np.copy(data)
for pos in itertools.permutations([2, 2, -3, -3, slice(2, -2)], 3):
res2[pos] = 0
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.edges, "support_level": 13})
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.edges, "support_level": 14})
res2 = np.zeros(data.shape, dtype=data.dtype)
res2[3:-3, 3:-3, 3:-3] = 1
assert np.all(res2 == res)
def test_cube_vertex(self):
data = np.zeros((50, 50, 50), dtype=np.uint8)
data[2:-2, 2:-2, 2:-2] = 1
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.vertex, "support_level": 1})
assert np.all(res == data)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.vertex, "support_level": 7})
assert np.all(res == data)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.vertex, "support_level": 8})
res2 = np.copy(data)
for pos in itertools.product([2, -3], repeat=3):
res2[pos] = 0
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.vertex, "support_level": 11})
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.vertex, "support_level": 12})
res2 = np.copy(data)
for pos in itertools.permutations([2, 2, -3, -3, slice(2, -2)], 3):
res2[pos] = 0
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.vertex, "support_level": 17})
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.vertex, "support_level": 18})
res2 = np.zeros(data.shape, dtype=data.dtype)
res2[3:-3, 3:-3, 3:-3] = 1
assert np.all(res2 == res)
def test_square_sides(self):
data = np.zeros((1, 50, 50), dtype=np.uint8)
data[:, 2:-2, 2:-2] = 1
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.sides, "support_level": 1})
assert np.all(res == data)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.sides, "support_level": 2})
assert np.all(res == data)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.sides, "support_level": 3})
res2 = np.copy(data)
for pos in itertools.product([2, -3], repeat=2):
res2[(0,) + pos] = 0
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.sides, "support_level": 4})
res2 = np.zeros(data.shape, dtype=data.dtype)
res2[:, 3:-3, 3:-3] = 1
assert np.all(res == res2)
def test_square_edges(self):
data = np.zeros((1, 50, 50), dtype=np.uint8)
data[:, 2:-2, 2:-2] = 1
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.edges, "support_level": 1})
assert np.all(res == data)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.edges, "support_level": 3})
assert np.all(res == data)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.edges, "support_level": 4})
res2 = np.copy(data)
for pos in itertools.product([2, -3], repeat=2):
res2[(0,) + pos] = 0
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.edges, "support_level": 5})
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.edges, "support_level": 6})
res2 = np.zeros(data.shape, dtype=data.dtype)
res2[:, 3:-3, 3:-3] = 1
assert np.all(res == res2)
def test_square_vertex(self):
data = np.zeros((1, 50, 50), dtype=np.uint8)
data[:, 2:-2, 2:-2] = 1
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.vertex, "support_level": 1})
assert np.all(res == data)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.vertex, "support_level": 3})
assert np.all(res == data)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.vertex, "support_level": 4})
res2 = np.copy(data)
for pos in itertools.product([2, -3], repeat=2):
res2[(0,) + pos] = 0
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.vertex, "support_level": 5})
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.vertex, "support_level": 6})
res2 = np.zeros(data.shape, dtype=data.dtype)
res2[:, 3:-3, 3:-3] = 1
assert np.all(res == res2)
def calc_cord(pos, sign, shift):
return tuple(np.array(pos) + np.array(sign) * np.array(shift))
def generate_neighbour_sides(dist, ndim):
return filter(lambda x: np.sum(x) <= dist, itertools.product(range(dist + 1), repeat=ndim))
def generate_neighbour_edge(dist, ndim):
def sub_filter(arr):
arr = np.sort(arr)
val = 0
mul = 1
for el in reversed(arr):
val += el * mul
mul *= 2
return val <= dist
return filter(sub_filter, itertools.product(range(dist + 1), repeat=ndim))
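# Illustrative helper (not part of the original tests) showing what the two
# generators above yield. generate_neighbour_sides enumerates non-negative
# offsets whose Manhattan distance is at most `dist`; in the iterative tests
# below it is combined with calc_cord and a sign vector to enumerate the
# corner voxels expected to be eroded. The values returned here come only from
# the existing functions; no new behaviour is assumed.
def _demo_neighbour_generators():
    # offsets within Manhattan distance 1 of a corner in 3D
    sides = sorted(generate_neighbour_sides(1, 3))
    # offsets accepted by the edge-distance filter for dist=1 in 3D
    edges = sorted(generate_neighbour_edge(1, 3))
    corner = (2, 2, 2)
    sign = np.sign(corner)
    moved = [calc_cord(corner, sign, shift) for shift in sides]
    return sides, edges, moved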
class TestIterativeVoteSmoothing:
def test_cube_sides_base(self):
data = np.zeros((50, 50, 50), dtype=np.uint8)
data[2:-2, 2:-2, 2:-2] = 1
res = IterativeVoteSmoothing.smooth(
data, {"neighbourhood_type": NeighType.sides, "support_level": 1, "max_steps": 1}
)
assert np.all(res == data)
res = IterativeVoteSmoothing.smooth(
data, {"neighbourhood_type": NeighType.sides, "support_level": 3, "max_steps": 1}
)
assert np.all(res == data)
res = IterativeVoteSmoothing.smooth(
data, {"neighbourhood_type": NeighType.sides, "support_level": 4, "max_steps": 1}
)
res2 = np.copy(data)
for pos in itertools.product([2, -3], repeat=3):
res2[pos] = 0
assert np.all(res2 == res)
res = IterativeVoteSmoothing.smooth(
data, {"neighbourhood_type": NeighType.sides, "support_level": 5, "max_steps": 1}
)
res2 = np.copy(data)
for pos in itertools.permutations([2, 2, -3, -3, slice(2, -2)], 3):
res2[pos] = 0
assert np.all(res2 == res)
res = IterativeVoteSmoothing.smooth(
data, {"neighbourhood_type": NeighType.sides, "support_level": 6, "max_steps": 1}
)
res2 = np.zeros(data.shape, dtype=data.dtype)
res2[3:-3, 3:-3, 3:-3] = 1
assert np.all(res2 == res)
def test_cube_sides_iter(self):
data = np.zeros((50, 50, 50), dtype=np.uint8)
data[2:-2, 2:-2, 2:-2] = 1
for i in range(2, 8):
res = IterativeVoteSmoothing.smooth(
data, {"neighbourhood_type": NeighType.sides, "support_level": 4, "max_steps": i}
)
res2 = np.copy(data)
for pos in itertools.product([2, -3], repeat=3):
sign = np.sign(pos)
for shift in generate_neighbour_sides(i - 1, 3):
res2[calc_cord(pos, sign, shift)] = 0
assert np.all(res2 == res), f"Fail on step {i}"
for i in range(2, 8):
res = IterativeVoteSmoothing.smooth(
data, {"neighbourhood_type": NeighType.sides, "support_level": 5, "max_steps": i}
)
res2 = np.copy(data)
for pos in itertools.permutations([2, 2, -3, -3, slice(2, -2)], 3):
res2[pos] = 0
for ind in [0, 1, 2]:
for pos in itertools.product([2, -3], repeat=2):
sign = np.sign(pos)
for shift in generate_neighbour_sides(i - 1, 2):
pos2 = list(calc_cord(pos, sign, shift))
pos2.insert(ind, slice(2, -2))
res2[tuple(pos2)] = 0
assert np.all(res2 == res), f"Fail on step {i}"
for i in range(2, 8):
res = IterativeVoteSmoothing.smooth(
data, {"neighbourhood_type": NeighType.sides, "support_level": 6, "max_steps": i}
)
res2 = np.zeros(data.shape, dtype=data.dtype)
shift = 2 + i
p = slice(shift, -shift)
res2[p, p, p] = 1
assert np.all(res2 == res), f"Fail on step {i}"
def test_cube_edges_base(self):
data = np.zeros((50, 50, 50), dtype=np.uint8)
data[2:-2, 2:-2, 2:-2] = 1
res = IterativeVoteSmoothing.smooth(
data, {"neighbourhood_type": NeighType.edges, "support_level": 1, "max_steps": 1}
)
assert np.all(res == data)
res = IterativeVoteSmoothing.smooth(
data, {"neighbourhood_type": NeighType.edges, "support_level": 6, "max_steps": 1}
)
assert np.all(res == data)
res = IterativeVoteSmoothing.smooth(
data, {"neighbourhood_type": NeighType.edges, "support_level": 7, "max_steps": 1}
)
res2 = np.copy(data)
for pos in itertools.product([2, -3], repeat=3):
res2[pos] = 0
assert np.all(res2 == res)
res = IterativeVoteSmoothing.smooth(
data, {"neighbourhood_type": NeighType.edges, "support_level": 9, "max_steps": 1}
)
assert np.all(res2 == res)
res = IterativeVoteSmoothing.smooth(
data, {"neighbourhood_type": NeighType.edges, "support_level": 10, "max_steps": 1}
)
res2 = np.copy(data)
for pos in itertools.permutations([2, 2, -3, -3, slice(2, -2)], 3):
res2[pos] = 0
assert np.all(res2 == res)
res = IterativeVoteSmoothing.smooth(
data, {"neighbourhood_type": NeighType.edges, "support_level": 13, "max_steps": 1}
)
assert np.all(res2 == res)
res = IterativeVoteSmoothing.smooth(
data, {"neighbourhood_type": NeighType.edges, "support_level": 14, "max_steps": 1}
)
res2 = np.zeros(data.shape, dtype=data.dtype)
res2[3:-3, 3:-3, 3:-3] = 1
assert np.all(res2 == res)
def test_cube_edge_iter(self):
        # This test can fail if IterativeVoteSmoothing does not work correctly.
data = np.zeros((50, 50, 50), dtype=np.uint8)
data[2:-2, 2:-2, 2:-2] = 1
res2 = np.copy(data)
for pos in itertools.product([2, -3], repeat=3):
res2[pos] = 0
for i in range(2, 4):
res = IterativeVoteSmoothing.smooth(
data, {"neighbourhood_type": NeighType.edges, "support_level": 7, "max_steps": i}
)
assert np.all(res2 == res), f"Fail on step {i}"
for support_level in [8, 9, 10, 11, 12]:
res2 = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.edges, "support_level": support_level})
for i in range(2, 8):
res = IterativeVoteSmoothing.smooth(
data, {"neighbourhood_type": NeighType.edges, "support_level": support_level, "max_steps": i}
)
res2 = VoteSmoothing.smooth(
res2, {"neighbourhood_type": NeighType.edges, "support_level": support_level}
)
assert np.all(res2 == res), f"Fail on step {i} for support level {support_level}"
def test_cube_vertex_base(self):
data = np.zeros((50, 50, 50), dtype=np.uint8)
data[2:-2, 2:-2, 2:-2] = 1
res = IterativeVoteSmoothing.smooth(
data, {"neighbourhood_type": NeighType.vertex, "support_level": | |
"emigrate",
"knighthood",
"kathie",
"desai",
"migrated",
"dominus",
"octavian",
"matsuda",
"namesake",
"weepy",
"shiner",
"grifter",
"basking",
"opa",
"adolfo",
"sagittarius",
"'look",
"smurfs",
"censored",
"wares",
"memorizing",
"seabed",
"weasels",
"councilwoman",
"nationalism",
"hsu",
"technodrome",
"one-of-a-kind",
"handball",
"mayumi",
"pacifier",
"magnificence",
"atheists",
"zbz",
"paine",
"embankment",
"upgrades",
"outed",
"siphon",
"shoals",
"one-legged",
"three-time",
"kilometre",
"neutrality",
"rhade",
"radishes",
"bas",
"coined",
"sequins",
"baudelaire",
"impersonator",
"concoction",
"hi-tech",
"incessant",
"whirling",
"chevrolet",
"menstrual",
"nesbitt",
"ven",
"light-headed",
"ogling",
"trig",
"mauricio",
"kaylee",
"tylenol",
"hectares",
"money-",
"spiro",
"lynched",
"glorify",
"pl",
"soraya",
"triumphed",
"masking",
"prolific",
"see.",
"mutts",
"magoo",
"lectured",
"thirteenth",
"protege",
"lobbyist",
"directs",
"impressing",
"plantagenet",
"jakes",
"furies",
"flatmate",
"taki",
"wind-up",
"meta",
"recuperate",
"crossover",
"lyrical",
"self-serving",
"shunt",
"forsythe",
"youll",
"jesuit",
"arrgh",
"concord",
"silvana",
"yeogu",
"durand",
"i`ve",
"tivo",
"shopped",
"blacklisted",
"unsatisfied",
"infantile",
"niang",
"decadence",
"motherless",
"unbridled",
"momoko",
"backlash",
"leftist",
"myspace",
"acronym",
"clots",
"lorries",
"barometer",
"raucous",
"mournful",
"you-know-what",
"redheaded",
"swerved",
"peeked",
"lundy",
"i.c.u.",
"ovulating",
"obelix",
"energon",
"ferg",
"dermatologist",
"murtaugh",
"daniele",
"band-aids",
"mahdi",
"antwon",
"naga",
"kaze",
"duvall",
"stilts",
"'d-",
"assimilate",
"grieved",
"fermented",
"siddhu",
"occupant",
"ala",
"derivatives",
"spectacularly",
"narrate",
"reenactment",
"gwi",
"palma",
"goren",
"wh-where",
"krakow",
"quiero",
"machiavelli",
"laney",
"chitti",
"pistons",
"bumming",
"rawlins",
"swordsmanship",
"pennant",
"dillinger",
"marooned",
"storehouse",
"crouching",
"pernell",
"storybook",
"hickok",
"mailer",
"dl",
"sandstone",
"gault",
"mouch",
"emanating",
"godard",
"sidewalks",
"breadth",
"baywatch",
"synergy",
"60th",
"labelled",
"coexist",
"rasta",
"ashleigh",
"coldness",
"crais",
"orthopedic",
"close-ups",
"quimby",
"workhouse",
"ak-47",
"panics",
"brainwash",
"perplexed",
"hustled",
"untill",
"muldoon",
"anni",
"dinesh",
"tish",
"tosses",
"specializing",
"tiller",
"newfoundland",
"medellin",
"causeway",
"midlands",
"vouched",
"extraordinaire",
"natsumi",
"scarab",
"illumination",
"barns",
"argo",
"chunghae",
"oddball",
"larsson",
"emo",
"reprehensible",
"pips",
"'ok",
"convoys",
"gunderson",
"mutter",
"dijon",
"muthu",
"diocese",
"bartenders",
"licensing",
"restful",
"workup",
"getcha",
"scotsman",
"sycamore",
"reappeared",
"malay",
"jami",
"poppers",
"beat-up",
"backhand",
"lawnmower",
"cleric",
"kickoff",
"bethesda",
"numerical",
"cordy",
"watermelons",
"carnivorous",
"guadalcanal",
"governmental",
"endures",
"wellness",
"boeing",
"sher",
"tamer",
"certification",
"reminiscing",
"omo",
"sul",
"sunder",
"shire",
"bludgeoned",
"futility",
"halliday",
"alli",
"keita",
"cla",
"impervious",
"one-horse",
"yukawa",
"weiner",
"paltry",
"jemma",
"skeet",
"unreachable",
"analytical",
"fruitless",
"corsage",
"bangin",
"rife",
"masquerading",
"kt",
"bullfighter",
"graced",
"bookshelf",
"toxicity",
"taxation",
"subsidies",
"barron",
"camcorder",
"two-day",
"tamra",
"kneecap",
"perforated",
"wrinkly",
"armenians",
"ten-hut",
"gaudy",
"lesion",
"nibbling",
"macao",
"nss",
"perfecting",
"amicable",
"veera",
"claustrophobia",
"'keefe",
"just--just",
"periodically",
"pop-pop",
"pennington",
"prado",
"dark-haired",
"zuko",
"carats",
"notoriety",
"fungi",
"nev",
"landau",
"nzt",
"aggravating",
"doused",
"course.",
"toothbrushes",
"immersion",
"afis",
"skeletal",
"beatty",
"bhagat",
"scents",
"mitya",
"endowment",
"bookworm",
"vertically",
"pelant",
"heh-heh-heh",
"prioritize",
"luciana",
"loman",
"bancroft",
"intermediary",
"myeong",
"firestorm",
"centerpiece",
"proprietary",
"jayden",
"hindrance",
"biologists",
"obscenity",
"mothra",
"dake",
"retires",
"meridian",
"oboe",
"blurt",
"tallahassee",
"inflammatory",
"coochie",
"treatable",
"hubbub",
"fractions",
"entail",
"narrowing",
"tetsuo",
"lida",
"festering",
"wetter",
"stumbles",
"battleground",
"pantomime",
"voracious",
"fleshy",
"hamada",
"traverse",
"repose",
"stimulates",
"madhuri",
"decontamination",
"clapton",
"peering",
"foust",
"fscx140",
"first-year",
"remiss",
"omi",
"nauseating",
"underfoot",
"discredited",
"two-and-a-half",
"acquisitions",
"magnanimous",
"fscy140",
"slovak",
"ishikawa",
"tenement",
"karine",
"derive",
"reiner",
"schoolgirls",
"hilltop",
"soared",
"palle",
"cotillion",
"poplar",
"radford",
"depressive",
"underlined",
"bib",
"stroked",
"separatist",
"mothefrucker",
"lynx",
"nudes",
"tectonic",
"danforth",
"crixus",
"empirical",
"eunuchs",
"oregano",
"inhospitable",
"ryoko",
"discerning",
"kidder",
"chantelle",
"gobbling",
"kodai",
"acoustics",
"pecs",
"schuyler",
"sunflowers",
"galvin",
"tomi",
"ziegfeld",
"toyou",
"dilly",
"sellin",
"pittman",
"incas",
"lasagne",
"prototypes",
"elective",
"vulgarity",
"drinkers",
"moos",
"dress-up",
"mortician",
"youve",
"dirtiest",
"voluptuous",
"aprons",
"sibley",
"gillis",
"tortoises",
"leighton",
"masochist",
"trilogy",
"evergreen",
"transcribed",
"porta",
"proletarian",
"pygmy",
"endora",
"bogs",
"shaka",
"shetty",
"fixin",
"walmart",
"deities",
"hovel",
"lazlo",
"niklaus",
"forester",
"bracken",
"dickson",
"yuma",
"indefinite",
"matrimonial",
"fullness",
"abbi",
"frey",
"airwaves",
"bhabhi",
"cruises",
"damper",
"damnedest",
"bushy",
"shizuko",
"remix",
"romney",
"look-see",
"emission",
"revolutionize",
"plummet",
"mojito",
"rickie",
"exemption",
"listen.",
"subways",
"leticia",
"carruthers",
"poser",
"moomin",
"impasse",
"angling",
"gibby",
"kronor",
"s.o.s.",
"mousetrap",
"cassini",
"echelon",
"inconspicuous",
"skepticism",
"smoldering",
"birgitte",
"undisputed",
"c-can",
"strikers",
"penchant",
"long-haired",
"dependence",
"livery",
"clucks",
"adela",
"roark",
"blackstone",
"huffing",
"infront",
"iive",
"'right",
"viciously",
"browne",
"schnitzel",
"hms",
"racine",
"scribble",
"nobodies",
"morrissey",
"'these",
"pheebs",
"odile",
"spic",
"offed",
"eiko",
"atkinson",
"battlestar",
"fatherhood",
"silks",
"trembled",
"rudd",
"balding",
"trifles",
"blinky",
"pres",
"worshiped",
"fabrice",
"isak",
"knuckleheads",
"catty",
"burgled",
"giddap",
"yearned",
"faii",
"mulcahy",
"reprimanded",
"saintly",
"hedgehogs",
"alters",
"natsu",
"pei",
"breeland",
"ack",
"able-bodied",
"truer",
"chesapeake",
"traitorous",
"ayn",
"industrious",
"agnew",
"cottages",
"goings",
"shari",
"renegotiate",
"epicenter",
"cantaloupe",
"dapper",
"sais",
"upton",
"doctor-patient",
"sant",
"perseus",
"overtaken",
"nother",
"millionth",
"one-year",
"compile",
"well-done",
"touche",
"dosed",
"lightbulb",
"mourners",
"pick-me-up",
"desktop",
"krissi",
"carcasses",
"togetherness",
"listenin",
"kink",
"disobedient",
"packaged",
"selby",
"gables",
"enterprising",
"leaped",
"sang-woo",
"gentlemanly",
"wry",
"hippopotamus",
"discrepancies",
"hashimoto",
"rowland",
"kawasaki",
"margarine",
"parameter",
"mahmut",
"router",
"convinces",
"applesauce",
"ills",
"agra",
"confidentially",
"rydell",
"nunnery",
"regaining",
"aground",
"pah",
"mcdeere",
"sommers",
"abducting",
"insulated",
"unwritten",
"respectability",
"rooming",
"mirrored",
"fishin",
"suman",
"quay",
"recoil",
"'hi",
"brash",
"dweeb",
"iearn",
"peih-gee",
"palpable",
"shanty",
"gatekeeper",
"recreated",
"insemination",
"mojave",
"schoolhouse",
"figurines",
"fucking-",
"elongated",
"lifestyles",
"boos",
"adoration",
"quoi",
"virginie",
"strictest",
"oiled",
"mics",
"huh-uh",
"continual",
"teacup",
"fertilized",
"hertz",
"manet",
"matte",
"scrapped",
"spaulding",
"inventors",
"mendel",
"baiting",
"mobilization",
"jeannette",
"beady",
"screamer",
"faker",
"weighted",
"la-la-la-la",
"adjective",
"thirty-eight",
"stupor",
"intensified",
"sleaze",
"'up",
"randal",
"stimulant",
"nis",
"cl",
"grigory",
"mitigating",
"tosca",
"pillage",
"financier",
"headquarter",
"over-",
"naidu",
"talons",
"reruns",
"whither",
"babysitters",
"saori",
"maison",
"noting",
"pio",
"geared",
"af",
"sunup",
"cheshire",
"parr",
"pulley",
"pled",
"fringes",
"grasshoppers",
"stupider",
"unending",
"morpheus",
"hondo",
"comprised",
"cohesive",
"sigrid",
"belgians",
"jilly",
"juli",
"infer",
"trifling",
"nursemaid",
"mens",
"magenta",
"justly",
"stuttgart",
"diya",
"roadie",
"disabilities",
"armoury",
"ogami",
"lyme",
"yeow",
"norwood",
"onlookers",
"sizeable",
"martel",
"taxing",
"arseholes",
"foxxy",
"somersault",
"clings",
"backups",
"mocks",
"snitched",
"clamped",
"michi",
"scamming",
"tucking",
"sumptuous",
"doodles",
"unlicensed",
"grandest",
"high-rise",
"flagg",
"jerseys",
"steers",
"autobot",
"councils",
"sebastien",
"volley",
"advocating",
"bestest",
"varma",
"infiltrating",
"grounding",
"reclaimed",
"macedonia",
"ajar",
"ramone",
"drawbridge",
"brahmin",
"lucca",
"writhing",
"momento",
"pisces",
"refueling",
"nonviolent",
"ua",
"shil",
"dum-dum",
"manoeuvres",
"queuing",
"jovi",
"saratoga",
"hieroglyphics",
"magi",
"tentative",
"ml",
"reprisals",
"sethu",
"dividends",
"nietzschean",
"scammed",
"taub",
"graeme",
"he-man",
"quinlan",
"cilla",
"mi-ho",
"subliminal",
"disapproval",
"grug",
"oahu",
"fenwick",
"manju",
"ebb",
"sidebar",
"eclair",
"purdy",
"iffy",
"manifested",
"promotes",
"reminiscent",
"unveiled",
"choral",
"hernando",
"pattering",
"grendel",
"gallivanting",
"bosh",
"matchbox",
"probate",
"zipped",
"low-down",
"dissected",
"gae",
"mammoths",
"today-",
"magnify",
"marlboro",
"scaled",
"gallbladder",
"madea",
"asami",
"waring",
"vapour",
"floorboard",
"dismantling",
"tippi",
"bak",
"zords",
"fuckhead",
"snide",
"squabbling",
"rescues",
"cropped",
"roams",
"het",
"boosting",
"sikh",
"yadda",
"roadkill",
"modesto",
"kant",
"restlessness",
"conti",
"khurana",
"telecom",
"ellingham",
"tierney",
"noreen",
"mohini",
"flaherty",
"conch",
"inescapable",
"on-screen",
"rc",
"steeped",
"hilde",
"diluted",
"orangutan",
"single-handed",
"progeny",
"busybody",
"clawing",
"dreyfuss",
"vacancies",
"conquerors",
"tra",
"beheading",
"annapolis",
"cogs",
"chatterbox",
"grossman",
"annually",
"farewells",
"heart-to-heart",
"jarrett",
"glossy",
"tama",
"traumas",
"lieut",
"jeweller",
"braden",
"hennessey",
"medley",
"slugged",
"paraphernalia",
"departmental",
"squealed",
"jabba",
"counterparts",
"snapshots",
"kneecaps",
"belligerent",
"fissure",
"puppeteer",
"costanza",
"orestes",
"rosanna",
"allotment",
"'aime",
"sincerest",
"transcends",
"gagarin",
"djinn",
"cancun",
"fuselage",
"wembley",
"abuser",
"capers",
"propel",
"good-night",
"murderface",
"shortstop",
"mathew",
"mima",
"showman",
"popper",
"kumbaya",
"transgressions",
"buyin",
"pensioners",
"brimming",
"soya",
"spot-on",
"windowsill",
"methamphetamine",
"unquote",
"marky",
"noun",
"rouen",
"gerber",
"baguette",
"patter",
"mediate",
"patties",
"kita",
"eugenio",
"ponderosa",
"hows",
"bedridden",
"imperialist",
"behest",
"ourself",
"originate",
"squalor",
"canaries",
"arraigned",
"nighty-night",
"endangerment",
"pay-per-view",
"messaging",
"god-",
"tethered",
"j-just",
"thrifty",
"vash",
"adnan",
"buongiorno",
"third-rate",
"hyperdrive",
"backwoods",
"catered",
"formulate",
"lovell",
"young-hee",
"croaks",
"uncooperative",
"crazy-ass",
"craved",
"thankless",
"antennae",
"carabinieri",
"snazzy",
"refrigeration",
"inert",
"gamekeeper",
"textures",
"argentinean",
"skirmish",
"specialties",
"deep-fried",
"lópez",
"ela",
"billion-dollar",
"holiest",
"ganesha",
"manservant",
"verdun",
"cheech",
"mariel",
"intermediate",
"parkman",
"nips",
"palpitations",
"jacinto",
"spans",
"emanuel",
"vivaldi",
"polarity",
"fine-looking",
"smokescreen",
"smudged",
"adriano",
"collaborated",
"skimmed",
"first-born",
"unrecognizable",
"overzealous",
"rea",
"ariane",
"freer",
"mannequins",
"mclane",
"nk",
"kraken",
"shekels",
"gauls",
"biotech",
"sterilized",
"pressurized",
"second-degree",
"scattering",
"ziegler",
"assange",
"naz",
"jeopardized",
"consoling",
"appointing",
"wiper",
"harping",
"carmelo",
"cinemas",
"midland",
"petticoat",
"broyles",
"flippin",
"harrowing",
"tearful",
"clavin",
"truest",
"kwun",
"cleft",
"wray",
"malware",
"kanzaki",
"riverdale",
"bream",
"justifiable",
"fretting",
"frowning",
"potomac",
"sten",
"barked",
"pre-med",
"cancels",
"homeowners",
"fiendish",
"shinobu",
"notwithstanding",
"redskins",
"voyeur",
"havers",
"chronos",
"abilene",
"auctions",
"mincemeat",
"out-of-town",
"prosthetics",
"second-class",
"hershey",
"procured",
"loathed",
"decrepit",
"bly",
"fast-food",
"eventful",
"shimada",
"skinheads",
"petya",
"oomph",
"19th-century",
"polluting",
"hochstetter",
"multiplication",
"incentives",
"remarked",
"phlox",
"thirty-seven",
"horned",
"'lf",
"pontius",
"matchmaking",
"soma",
"basements",
"barmy",
"stockpile",
"weavers",
"one.",
"meme",
"50-year-old",
"hazelnut",
"mailroom",
"saturation",
"ley",
"sinkhole",
"greenwood",
"inertia",
"impossibly",
"westward",
"pff",
"philistine",
"gnaw",
"mother-daughter",
"crepes",
"paki",
"geriatric",
"honorably",
"lope",
"toasts",
"hypertension",
"puritan",
"entropy",
"palo",
"mili",
"baring",
"come-",
"imprinted",
"sylar",
"petter",
"winks",
"conditional",
"marriott",
"foreseeable",
"evens",
"vandalized",
"garters",
"flatulence",
"samira",
"rusk",
"best-looking",
"designation",
"raus",
"shamans",
"garnett",
"gunners",
"captivating",
"impresses",
"extravagance",
"vve",
"chutes",
"sphincter",
"oshin",
"two-headed",
"leprechauns",
"eben",
"gsr",
"pastoral",
"beaufort",
"proverbs",
"bade",
"shoebox",
"responsibly",
"sigurd",
"rekha",
"heston",
"deducted",
"oncoming",
"trafalgar",
"persists",
"initiatives",
"paşa",
"listings",
"wameru",
"yοur",
"proficient",
"klondike",
"kenna",
"boog",
"sty",
"gallantry",
"dutton",
"assailants",
"noboru",
"duels",
"forfeited",
"absolved",
"advantageous",
"amps",
"biochemical",
"contraception",
"lago",
"winkle",
"crayfish",
"hotties",
"obsessions",
"accumulation",
"gwendolyn",
"converter",
"deuces",
"percentages",
"assimilated",
"swingers",
"ivanov",
"cobain",
"institutional",
"changeling",
"vertebra",
"hoofbeats",
"sd",
"machu",
"his-",
"romulus",
"malayalam",
"peacemaker",
"plaintiffs",
"universally",
"corman",
"instructing",
"alissa",
"ladylike",
"loveable",
"humps",
"rien",
"ea",
"turnbull",
"raza",
"forster",
"pentagram",
"familial",
"salutes",
"separatists",
"'till",
"transistor",
"mirai",
"therapies",
"sensibly",
"spandex",
"sinker",
"all-stars",
"incur",
"decomposed",
"informally",
"trollop",
"metz",
# netforce_service/netforce_service/models/job.py
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields, get_model
from netforce.access import get_active_user, set_active_user
from datetime import *
from dateutil.relativedelta import relativedelta
import time
from netforce.database import get_connection
from netforce.access import get_active_company, check_permission_other
from netforce.utils import get_data_path
class Job(Model):
_name = "job"
_string = "Service Order"
_name_field = "number"
_audit_log = True
_multi_company = True
_fields = {
"project_id": fields.Many2One("project", "Project", search=True),
"contact_id": fields.Many2One("contact", "Customer", required=True, search=True),
"template_id": fields.Many2One("job.template", "Template"),
"service_type_id": fields.Many2One("service.type", "Service Type", search=True),
"product_id": fields.Many2One("product", "Product"), # XXX: deprecated
"name": fields.Char("Order Name", search=True),
"number": fields.Char("Order Number", required=True, search=True),
"description": fields.Text("Description"),
"due_date": fields.Date("Due Date", search=True),
"close_date": fields.Date("Close Date", search=True),
"priority": fields.Selection([["low", "Low"], ["medium", "Medium"], ["high", "High"]], "Priority", search=True),
"state": fields.Selection([["planned", "Planned"], ["allocated", "Allocated"], ["in_progress", "In Progress"], ["done", "Completed"], ["canceled", "Canceled"]], "Status", required=True),
"overdue": fields.Boolean("Overdue", function="get_overdue", function_search="search_overdue"),
"comments": fields.One2Many("message", "related_id", "Comments"),
"documents": fields.One2Many("document", "related_id", "Documents"),
"tasks": fields.One2Many("task", "job_id", "Tasks"),
"days_late": fields.Integer("Days Late", function="get_days_late"),
"user_id": fields.Many2One("base.user", "Assigned To"), # XXX: deprecated
"resource_id": fields.Many2One("service.resource", "Assigned Resource", search=True), # XXX: deprecated
"skill_level_id": fields.Many2One("skill.level", "Required Skill Level", search=True),
"request_by_id": fields.Many2One("base.user", "Requested By", search=True),
"user_board_id": fields.Boolean("User", store=False, function_search="search_user_board_id"),
"sharing": fields.One2Many("share.record", "related_id", "Sharing"),
"invoice_no": fields.Char("Invoice No."), # XXX: not used any more...
"shared_board": fields.Boolean("Shared", store=False, function_search="search_shared_board"),
"quotation_id": fields.Many2One("sale.quot", "Quotation"),
"cancel_reason": fields.Text("Cancel Reason"),
"cancel_periodic": fields.Boolean("Cancel Periodic"),
"next_job_id": fields.Many2One("job", "Next Order"),
"emails": fields.One2Many("email.message", "related_id", "Emails"),
"company_id": fields.Many2One("company", "Company"),
"invoices": fields.One2Many("account.invoice", "related_id", "Invoices"),
"bill_amount": fields.Decimal("Billable Amount"),
"invoice_id": fields.Many2One("account.invoice", "Invoice"),
"is_duplicate": fields.Boolean("Duplicate"),
"work_time": fields.One2Many("work.time", "job_id", "Work Time"),
"pickings": fields.One2Many("stock.picking", "related_id", "Pickings"),
"stock_moves": fields.One2Many("stock.move", "related_id", "Stock Movements"),
"parts": fields.One2Many("job.part", "job_id", "Parts"),
"other_costs": fields.One2Many("job.cost", "job_id", "Other Costs"),
"items": fields.One2Many("job.item", "job_id", "Service Items"),
"allocs": fields.One2Many("service.resource.alloc", "job_id", "Resource Allocations"),
"time_start": fields.DateTime("Planned Start Time"),
"time_stop": fields.DateTime("Planned Stop Time"),
"location_id": fields.Many2One("stock.location", "Job Location"),
"related_id": fields.Reference([["sale.order", "Sales Order"], ["rental.order","Rental Order"], ["issue", "Issue"]], "Related To"),
"lines": fields.One2Many("job.line", "job_id", "Worksheet"),
"complaints": fields.Text("Complaints"),
"cause": fields.Text("Cause"),
"correction": fields.Text("Correction"),
"amount_total": fields.Decimal("Total Selling", function="get_total", function_multi=True),
"amount_contract": fields.Decimal("Included In Contract", function="get_total", function_multi=True),
"amount_job": fields.Decimal("Not Included In Contract", function="get_total", function_multi=True),
"overdue": fields.Boolean("Overdue", function="get_overdue", function_search="search_overdue"),
"date_open": fields.DateTime("Actual Start"),
"date_close": fields.DateTime("Actual Stop"),
"labor_cost": fields.Decimal("Labor Cost", function="get_cost", function_multi=True),
"part_cost": fields.Decimal("Parts Cost", function="get_cost", function_multi=True),
"other_cost": fields.Decimal("Other Cost", function="get_cost", function_multi=True),
"total_cost": fields.Decimal("Total Cost", function="get_cost", function_multi=True),
"labor_sell": fields.Decimal("Labor Selling", function="get_sell", function_multi=True),
"part_sell": fields.Decimal("Parts Selling", function="get_sell", function_multi=True),
"other_sell": fields.Decimal("Other Selling", function="get_sell", function_multi=True),
"done_approved_by_id": fields.Many2One("base.user", "Approved By", readonly=True),
"multi_visit_code_id": fields.Many2One("reason.code", "Multi Visit Reason Code", condition=[["type", "=", "service_multi_visit"]]),
"late_response_code_id": fields.Many2One("reason.code", "Late Response Reason Code", condition=[["type", "=", "service_late_response"]]),
"year": fields.Char("Year", sql_function=["year", "due_date"]),
"quarter": fields.Char("Quarter", sql_function=["quarter", "due_date"]),
"month": fields.Char("Month", sql_function=["month", "due_date"]),
"week": fields.Char("Week", sql_function=["week", "due_date"]),
"activities": fields.One2Many("activity","related_id","Activities"),
"track_id": fields.Many2One("account.track.categ","Tracking Code"),
"track_entries": fields.One2Many("account.track.entry",None,"Tracking Entries",function="get_track_entries",function_write="write_track_entries"),
"track_balance": fields.Decimal("Tracking Balance",function="_get_related",function_context={"path":"track_id.balance"}),
}
_order = "number"
_sql_constraints = [
("number_uniq", "unique (number)", "The job number must be unique!"),
]
def _get_number(self, context={}):
while 1:
num = get_model("sequence").get_number(type="job")
if not num:
return None
user_id = get_active_user()
set_active_user(1)
res = self.search([["number", "=", num]])
set_active_user(user_id)
if not res:
return num
get_model("sequence").increment(type="job")
def name_get(self, ids, context={}):
vals = []
for obj in self.browse(ids):
name = obj.number
if obj.name:
name += " - " + obj.name
vals.append((obj.id, name))
return vals
_defaults = {
"state": "planned",
"number": _get_number,
"request_by_id": lambda *a: get_active_user(),
#"company_id": lambda *a: get_active_company(), # XXX: don't use this yet
"date_open": lambda *a: time.strftime("%Y-%m-%d"),
}
def write(self, ids, vals, **kw):
if vals.get("state") == "done":
vals["date_close"] = time.strftime("%Y-%m-%d")
for obj in self.browse(ids):
if not obj.done_approved_by_id:
raise Exception("Service order has to be approved first")
super().write(ids, vals, **kw)
def get_total(self, ids, context={}):
vals = {}
for obj in self.browse(ids):
amt_total = 0
amt_contract = 0
amt_job = 0
for line in obj.lines:
amt_total += line.amount
if line.payment_type == "contract":
amt_contract += line.amount
elif line.payment_type == "job":
amt_job += line.amount
vals[obj.id] = {
"amount_total": amt_total,
"amount_contract": amt_contract,
"amount_job": amt_job,
}
return vals
def onchange_template(self, context={}):
data = context["data"]
template_id = data["template_id"]
tmpl = get_model("job.template").browse(template_id)
data["service_type_id"] = tmpl.service_type_id.id
data["description"] = tmpl.description
data["skill_level_id"] = tmpl.skill_level_id.id
data["lines"] = []
for line in tmpl.lines:
line_vals = {
"type": line.type,
"product_id": line.product_id.id,
"description": line.description,
"qty": line.qty,
"uom_id": line.uom_id.id,
"unit_price": line.unit_price,
}
data["lines"].append(line_vals)
return data
def get_overdue(self, ids, context={}):
vals = {}
for obj in self.browse(ids):
if obj.due_date:
vals[obj.id] = obj.due_date < time.strftime(
"%Y-%m-%d") and obj.state in ("planned", "allocated", "in_progress")
else:
vals[obj.id] = False
return vals
def search_overdue(self, clause, context={}):
return [["due_date", "<", time.strftime("%Y-%m-%d")], ["state", "in", ["planned", "allocated", "in_progress"]]]
def copy_to_pick_out(self, ids, context={}):
obj = self.browse(ids)[0]
vals = {
"type": "out",
"contact_id": obj.contact_id.id,
"related_id": "job,%d" % obj.id,
"lines": [],
}
res = get_model("stock.location").search([["type", "=", "customer"]])
if not res:
raise Exception("Customer location not found")
cust_loc_id = res[0]
res = get_model("stock.location").search([["type", "=", "internal"]])
if not res:
raise Exception("Warehouse location not found")
wh_loc_id = res[0]
for line in obj.lines:
prod = line.product_id
if prod.type not in ("stock", "consumable"):
continue
line_vals = {
"product_id": prod.id,
"qty": line.qty,
"uom_id": line.uom_id.id,
"location_from_id": prod.location_id.id or wh_loc_id,
"location_to_id": obj.location_id.id or cust_loc_id,
}
vals["lines"].append(("create", line_vals))
if not vals["lines"]:
raise Exception("Nothing to issue")
new_id = get_model("stock.picking").create(vals, context={"pick_type": "out"})
pick = get_model("stock.picking").browse(new_id)
return {
"flash": "Goods issue %s copied from service order %s" % (pick.number, obj.number),
"next": {
"name": "pick_out",
"mode": "form",
"active_id": new_id,
}
}
def copy_to_invoice(self, ids, context={}):
obj = self.browse(ids)[0]
inv_vals = {
"type": "out",
"inv_type": "invoice",
"ref": obj.number,
"related_id": "job,%s" % obj.id,
"contact_id": obj.contact_id.id,
"lines": [],
}
for line in obj.lines:
if line.payment_type != "job":
continue
prod = line.product_id
line_vals = {
"product_id": prod.id,
"description": line.description,
"qty": line.qty,
"uom_id": line.uom_id.id,
"unit_price": line.unit_price,
"account_id": prod.sale_account_id.id if prod else None,
"tax_id": prod.sale_tax_id.id if prod else None,
"amount": line.amount,
}
inv_vals["lines"].append(("create", line_vals))
if not inv_vals["lines"]:
raise Exception("Nothing to invoice")
inv_id = get_model("account.invoice").create(inv_vals, {"type": "out", "inv_type": "invoice"})
inv = get_model("account.invoice").browse(inv_id)
return {
"next": {
"name": "view_invoice",
"active_id": inv_id,
},
"flash": "Invoice %s created from job %s" % (inv.number, obj.number),
}
def onchange_product(self, context={}):
data = context["data"]
path = context["path"]
line = get_data_path(data, path, parent=True)
prod_id = line["product_id"]
prod = get_model("product").browse(prod_id)
line["uom_id"] = prod.uom_id.id
line["unit_price"] = prod.sale_price
line["description"] = prod.description
return data
def onchange_due_date(self, context={}):
print("onchange_due_date")
data = context["data"]
data['time_start'] = data['due_date']
return data
def onchange_close_date(self, context={}):
print("onchange_close_date")
data = context["data"]
crr_date = time.strftime("%Y-%m-%d")
close_date = data['close_date']
due_date = data['due_date']
if crr_date >= close_date:
data['state'] = 'done'
elif crr_date >= due_date and crr_date <= close_date:
data['state'] = 'in_progress'
return data
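    # Worked example (comments only, with hypothetical dates, not part of the
    # original source) of the state logic above:
    #   due_date = "2024-01-10", close_date = "2024-01-20"
    #   - today "2024-01-25": today >= close_date          -> state "done"
    #   - today "2024-01-15": due_date <= today <= close   -> state "in_progress"
    #   - today "2024-01-05": neither branch matches       -> state left unchanged
    # Note the comparisons are done on "YYYY-MM-DD" strings, which sort the
    # same way as the dates they represent.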
def get_cost(self, ids, context={}):
vals = {}
for obj in self.browse(ids):
labor_cost = 0
for time in obj.work_time:
labor_cost += time.amount or 0
other_cost = 0
for line in obj.lines:
if line.type != "other":
continue
prod = line.product_id
other_cost += prod.cost_price or 0
job_loc_id = obj.location_id.id
if not job_loc_id:
res = get_model("stock.location").search([["type", "=", "customer"]])
if res:
job_loc_id = res[0]
part_cost = 0
for pick in obj.pickings:
for move in pick.lines:
amt = move.qty * (move.unit_price or 0)
if move.location_to_id.id == job_loc_id and move.location_from_id.id != job_loc_id:
part_cost += amt
elif move.location_from_id.id == job_loc_id and move.location_to_id.id != job_loc_id:
part_cost -= amt
vals[obj.id] = {
"labor_cost": labor_cost,
"part_cost": part_cost,
"other_cost": other_cost,
"total_cost": labor_cost + part_cost + other_cost,
}
return vals
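    # Worked example (comments only, assuming one picking with two moves) of
    # how part_cost above nets stock movements against the job location:
    #   move 1: warehouse -> job location, qty 2 @ 100  -> +200
    #   move 2: job location -> warehouse, qty 1 @ 100  -> -100
    #   part_cost = 100, i.e. only parts that remained at the job are costed.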
def get_sell(self, ids, context={}):
vals = {}
for obj in self.browse(ids):
labor_sell = 0
other_sell = 0
part_sell = 0
            for
for k, v in mapping.iteritems()))
@staticmethod
def gds_encode(instring):
if sys.version_info.major == 2:
if ExternalEncoding:
encoding = ExternalEncoding
else:
encoding = 'utf-8'
return instring.encode(encoding)
else:
return instring
@staticmethod
def convert_unicode(instring):
if isinstance(instring, str):
result = quote_xml(instring)
elif sys.version_info.major == 2 and isinstance(instring, unicode):
result = quote_xml(instring).encode('utf8')
else:
result = GeneratedsSuper.gds_encode(str(instring))
return result
def __eq__(self, other):
if type(self) != type(other):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def getSubclassFromModule_(module, class_):
'''Get the subclass of a class from a specific module.'''
name = class_.__name__ + 'Sub'
if hasattr(module, name):
return getattr(module, name)
else:
return None
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'utf-8'
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
CDATA_pattern_ = re_.compile(r"<!\[CDATA\[.*?\]\]>", re_.DOTALL)
# Change this to redirect the generated superclass module to use a
# specific subclass module.
CurrentSubclassModule_ = None
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
if pretty_print:
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
"Escape markup chars, but do not modify CDATA sections."
if not inStr:
return ''
s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
s2 = ''
pos = 0
matchobjects = CDATA_pattern_.finditer(s1)
for mo in matchobjects:
s3 = s1[pos:mo.start()]
s2 += quote_xml_aux(s3)
s2 += s1[mo.start():mo.end()]
pos = mo.end()
s3 = s1[pos:]
s2 += quote_xml_aux(s3)
return s2
def quote_xml_aux(inStr):
    s1 = inStr.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    return s1
def quote_attrib(inStr):
    s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    if '"' in s1:
        if "'" in s1:
            s1 = '"%s"' % s1.replace('"', "&quot;")
        else:
            s1 = "'%s'" % s1
    else:
        s1 = '"%s"' % s1
    return s1
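# Small self-check (not part of the generated code) illustrating the two
# quoting helpers above: quote_xml escapes markup characters but leaves CDATA
# sections untouched, while quote_attrib additionally picks a quote style for
# attribute values. It relies only on definitions from this module.
def _demo_quoting():
    assert quote_xml('a < b & c') == 'a &lt; b &amp; c'
    assert quote_xml('<![CDATA[x < y]]> & z') == '<![CDATA[x < y]]> &amp; z'
    assert quote_attrib('plain') == '"plain"'
    assert quote_attrib('say "hi"') == "'say \"hi\"'"
    assert quote_attrib('both "quotes" and \'apostrophes\'') == (
        '"both &quot;quotes&quot; and \'apostrophes\'"')
    return True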
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
TypeBase64 = 8
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace,
pretty_print=True):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(
outfile, level, namespace, name,
pretty_print=pretty_print)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeBase64:
outfile.write('<%s>%s</%s>' % (
self.name,
base64.b64encode(self.value),
self.name))
def to_etree(self, element):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
if len(element) > 0:
if element[-1].tail is None:
element[-1].tail = self.value
else:
element[-1].tail += self.value
else:
if element.text is None:
element.text = self.value
else:
element.text += self.value
elif self.category == MixedContainer.CategorySimple:
subelement = etree_.SubElement(
element, '%s' % self.name)
subelement.text = self.to_etree_simple()
else: # category == MixedContainer.CategoryComplex
self.value.to_etree(element)
def to_etree_simple(self):
if self.content_type == MixedContainer.TypeString:
text = self.value
elif (self.content_type == MixedContainer.TypeInteger or
self.content_type == MixedContainer.TypeBoolean):
text = '%d' % self.value
elif (self.content_type == MixedContainer.TypeFloat or
self.content_type == MixedContainer.TypeDecimal):
text = '%f' % self.value
elif self.content_type == MixedContainer.TypeDouble:
text = '%g' % self.value
elif self.content_type == MixedContainer.TypeBase64:
text = '%s' % base64.b64encode(self.value)
return text
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type,
self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type,
self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s",\n' % (
self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0,
optional=0, child_attrs=None, choice=None):
self.name = name
self.data_type = data_type
self.container = container
self.child_attrs = child_attrs
self.choice = choice
self.optional = optional
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def set_child_attrs(self, child_attrs): self.child_attrs = child_attrs
def get_child_attrs(self): return self.child_attrs
def set_choice(self, choice): self.choice = choice
def get_choice(self): return self.choice
def set_optional(self, optional): self.optional = optional
def get_optional(self): return self.optional
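# Helper used by the generated constructors: applies the given type
# constructor to an attribute value when both the type and the value are
# provided; otherwise the value is passed through unchanged.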
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
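# The classes below are simple enumerations: each attribute maps a Python
# name to the string value allowed by the corresponding schema simpleType.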
class AlertIntervalType(object):
NONE='NONE'
INTERVAL__5='INTERVAL_5'
INTERVAL__15='INTERVAL_15'
INTERVAL__30='INTERVAL_30'
INTERVAL__60='INTERVAL_60'
INTERVAL__300='INTERVAL_300'
INTERVAL__900='INTERVAL_900'
INTERVAL__3600='INTERVAL_3600'
class AlertType(object):
BEEP='BEEP'
SILENT='SILENT'
RING__5='RING_5'
RING__15='RING_15'
RING__30='RING_30'
RING__60='RING_60'
class FormButton(object):
POSITIVE='positive'
NEGATIVE='negative'
class KeyboardType(object):
DEFAULT='DEFAULT'
AUTO_CAPITALIZED='AUTO_CAPITALIZED'
EMAIL='EMAIL'
URL='URL'
PHONE='PHONE'
NUMBER='NUMBER'
DECIMAL='DECIMAL'
PASSWORD='PASSWORD'
    NUMBER_PASSWORD='NUMBER_PASSWORD'
class MemberStatus(object):
SUBMITTED='SUBMITTED'
INITIATED='INITIATED'
RUNNING='RUNNING'
FINISHED='FINISHED'
class ProgrammingLanguage(object):
JYTHON='JYTHON'
JRUBY='JRUBY'
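# Attachment models an attribute-only element (name, url, contentType, size)
# and follows the usual generateDS layout: getters/setters, export* methods
# for serialization, and build* methods for parsing from an etree node.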
class Attachment(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, name=None, url=None, contentType=None, size=None):
self.original_tagname_ = None
self.name = _cast(None, name)
self.url = _cast(None, url)
self.contentType = _cast(None, contentType)
self.size = _cast(int, size)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, Attachment)
if subclass is not None:
return subclass(*args_, **kwargs_)
if Attachment.subclass:
return Attachment.subclass(*args_, **kwargs_)
else:
return Attachment(*args_, **kwargs_)
factory = staticmethod(factory)
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_url(self): return self.url
def set_url(self, url): self.url = url
def get_contentType(self): return self.contentType
def set_contentType(self, contentType): self.contentType = contentType
def get_size(self): return self.size
def set_size(self, size): self.size = size
    def hasContent_(self):
        # Attachment carries only attributes, so it never has element content.
        return False
def export(self, outfile, level, namespaceprefix_='', name_='Attachment', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('Attachment')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='Attachment')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='Attachment', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
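    # Illustrative sketch (not part of the generated source): assuming the
    # generateDS runtime helpers (GeneratedsSuper, showIndent, quote_attrib)
    # are defined earlier in this module, an Attachment can be serialized with:
    #
    #     import sys
    #     att = Attachment(name='report.pdf',
    #                      url='http://example.com/report.pdf',
    #                      contentType='application/pdf', size=1024)
    #     att.export(sys.stdout, 0)
    #
    # which writes a single self-closing element along the lines of:
    #     <Attachment name="report.pdf" url="..." contentType="application/pdf" size="1024"/>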
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='Attachment'):
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), ))
if self.url is not None and 'url' not in already_processed:
already_processed.add('url')
outfile.write(' url=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.url), input_name='url')), ))
if self.contentType is not None and 'contentType' not in already_processed:
already_processed.add('contentType')
outfile.write(' contentType=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.contentType), input_name='contentType')), ))
if self.size is not None and 'size' not in already_processed:
already_processed.add('size')
outfile.write(' size="%s"' % self.gds_format_integer(self.size, input_name='size'))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='Attachment', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            # Assumed completion of the truncated call, using the standard
            # generateDS child-dispatch signature.
            self.buildChildren(child, node, nodeName_)
        return self